Added ToLilBlock class from Roberto
Christianpl - r1789:2739006ee497 isr_v2
@@ -1,677 +1,705
1 1 # Copyright (c) 2012-2020 Jicamarca Radio Observatory
2 2 # All rights reserved.
3 3 #
4 4 # Distributed under the terms of the BSD 3-clause license.
5 5 """API to create signal chain projects
6 6
7 7 The API is provided through the class: Project
8 8 """
9 9
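A minimal usage sketch of the Project API described in the docstring above (not part of this commit); the data path, dates and operation choices are placeholders, and the import path is assumed to be schainpy.controller:

    from schainpy.controller import Project  # assumed import path

    prj = Project()
    prj.setup(id='1', name='example', description='test signal chain')

    # reading unit: placeholder path and date/time window
    read_unit = prj.addReadUnit(datatype='Voltage',
                                path='/data/raw',
                                startDate='2020/01/01', endDate='2020/01/01',
                                startTime='00:00:00', endTime='23:59:59')

    # processing unit fed by the reader, plus one operation
    proc_unit = prj.addProcUnit(datatype='Voltage', inputId=read_unit.getId())
    op = proc_unit.addOperation(name='selectChannels', optype='other')
    op.addParameter(name='channelList', value='0,1')

    prj.start()  # Project subclasses multiprocessing.Process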
10 10 import re
11 11 import sys
12 12 import ast
13 13 import datetime
14 14 import traceback
15 15 import time
16 16 import multiprocessing
17 17 from multiprocessing import Process, Queue
18 18 from threading import Thread
19 19 from xml.etree.ElementTree import ElementTree, Element, SubElement
20 20
21 21 from schainpy.admin import Alarm, SchainWarning
22 22 from schainpy.model import *
23 23 from schainpy.utils import log
24 24
25 25 if 'darwin' in sys.platform and sys.version_info[0] == 3 and sys.version_info[1] > 7:
26 26 multiprocessing.set_start_method('fork')
27 27
28 28 class ConfBase():
29 29
30 30 def __init__(self):
31 31
32 32 self.id = '0'
33 33 self.name = None
34 34 self.priority = None
35 35 self.parameters = {}
36 36 self.object = None
37 37 self.operations = []
38 38
39 39 def getId(self):
40 40
41 41 return self.id
42 42
43 43 def getNewId(self):
44 44
45 45 return int(self.id) * 10 + len(self.operations) + 1
46 46
47 47 def updateId(self, new_id):
48 48
49 49 self.id = str(new_id)
50 50
51 51 n = 1
52 52 for conf in self.operations:
53 53 conf_id = str(int(new_id) * 10 + n)
54 54 conf.updateId(conf_id)
55 55 n += 1
56 56
57 57 def getKwargs(self):
58 58
59 59 params = {}
60 60
61 61 for key, value in self.parameters.items():
62 62 if value not in (None, '', ' '):
63 63 params[key] = value
64 64
65 65 return params
66 66
67 67 def update(self, **kwargs):
68 68
69 69 for key, value in kwargs.items():
70 70 self.addParameter(name=key, value=value)
71 71
72 72 def addParameter(self, name, value, format=None):
73 73 '''
74 74 '''
75 75
76 76 if format is not None:
77 77 self.parameters[name] = eval(format)(value)
78 78 elif isinstance(value, str) and re.search(r'(\d+/\d+/\d+)', value):
79 79 self.parameters[name] = datetime.date(*[int(x) for x in value.split('/')])
80 80 elif isinstance(value, str) and re.search(r'(\d+:\d+:\d+)', value):
81 81 self.parameters[name] = datetime.time(*[int(x) for x in value.split(':')])
82 82 else:
83 83 try:
84 84 self.parameters[name] = ast.literal_eval(value)
85 85 except:
86 86 if isinstance(value, str) and ',' in value:
87 87 self.parameters[name] = value.split(',')
88 88 else:
89 89 self.parameters[name] = value
90 90
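A quick sketch of how the coercion branches in addParameter behave (illustrative values, not taken from this commit):

    conf = ConfBase()
    conf.addParameter('startDate', '2020/01/01')  # date pattern   -> datetime.date(2020, 1, 1)
    conf.addParameter('startTime', '06:30:00')    # time pattern   -> datetime.time(6, 30)
    conf.addParameter('nCode', '128')             # literal_eval   -> 128
    conf.addParameter('files', 'a.txt,b.txt')     # literal_eval fails, has ',' -> ['a.txt', 'b.txt']
    conf.addParameter('path', '/data/raw')        # falls through  -> kept as the string '/data/raw'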
91 91 def getParameters(self):
92 92
93 93 params = {}
94 94 for key, value in self.parameters.items():
95 95 s = type(value).__name__
96 96 if s == 'date':
97 97 params[key] = value.strftime('%Y/%m/%d')
98 98 elif s == 'time':
99 99 params[key] = value.strftime('%H:%M:%S')
100 100 else:
101 101 params[key] = str(value)
102 102
103 103 return params
104 104
105 105 def makeXml(self, element):
106 106
107 107 xml = SubElement(element, self.ELEMENTNAME)
108 108 for label in self.xml_labels:
109 109 xml.set(label, str(getattr(self, label)))
110 110
111 111 for key, value in self.getParameters().items():
112 112 xml_param = SubElement(xml, 'Parameter')
113 113 xml_param.set('name', key)
114 114 xml_param.set('value', value)
115 115
116 116 for conf in self.operations:
117 117 conf.makeXml(xml)
118 118
119 119 def __str__(self):
120 120
121 121 if self.ELEMENTNAME == 'Operation':
122 122 s = ' {}[id={}]\n'.format(self.name, self.id)
123 123 else:
124 124 s = '{}[id={}, inputId={}]\n'.format(self.name, self.id, self.inputId)
125 125
126 126 for key, value in self.parameters.items():
127 127 if self.ELEMENTNAME == 'Operation':
128 128 s += ' {}: {}\n'.format(key, value)
129 129 else:
130 130 s += ' {}: {}\n'.format(key, value)
131 131
132 132 for conf in self.operations:
133 133 s += str(conf)
134 134
135 135 return s
136 136
137 137 class OperationConf(ConfBase):
138 138
139 139 ELEMENTNAME = 'Operation'
140 140 xml_labels = ['id', 'name']
141 141
142 142 def setup(self, id, name, priority, project_id, err_queue):
143 143
144 144 self.id = str(id)
145 145 self.project_id = project_id
146 146 self.name = name
147 147 self.type = 'other'
148 148 self.err_queue = err_queue
149 149
150 150 def readXml(self, element, project_id, err_queue):
151 151
152 152 self.id = element.get('id')
153 153 self.name = element.get('name')
154 154 self.type = 'other'
155 155 self.project_id = str(project_id)
156 156 self.err_queue = err_queue
157 157
158 158 for elm in element.iter('Parameter'):
159 159 self.addParameter(elm.get('name'), elm.get('value'))
160 160
161 161 def createObject(self):
162 162
163 163 className = eval(self.name)
164 164
165 165 if 'Plot' in self.name or 'Writer' in self.name or 'Send' in self.name or 'print' in self.name:
166 166 kwargs = self.getKwargs()
167 167 opObj = className(self.id, self.id, self.project_id, self.err_queue, **kwargs)
168 168 opObj.start()
169 169 self.type = 'external'
170 170 else:
171 171 opObj = className()
172 172
173 173 self.object = opObj
174 174 return opObj
175 175
176 176 class ProcUnitConf(ConfBase):
177 177
178 178 ELEMENTNAME = 'ProcUnit'
179 179 xml_labels = ['id', 'inputId', 'name']
180 180
181 181 def setup(self, project_id, id, name, datatype, inputId, err_queue):
182 182 '''
183 183 '''
184 184
185 185 if datatype == None and name == None:
186 186 raise ValueError('datatype or name should be defined')
187 187
188 188 if name == None:
189 189 if 'Proc' in datatype:
190 190 name = datatype
191 191 else:
192 192 name = '%sProc' % (datatype)
193 193
194 194 if datatype == None:
195 195 datatype = name.replace('Proc', '')
196 196
197 197 self.id = str(id)
198 198 self.project_id = project_id
199 199 self.name = name
200 200 self.datatype = datatype
201 201 self.inputId = inputId
202 202 self.err_queue = err_queue
203 203 self.operations = []
204 204 self.parameters = {}
205 205
206 206 def removeOperation(self, id):
207 207
208 208 i = [1 if x.id == id else 0 for x in self.operations]
209 209 self.operations.pop(i.index(1))
210 210
211 211 def getOperation(self, id):
212 212
213 213 for conf in self.operations:
214 214 if conf.id == id:
215 215 return conf
216 216
217 217 def addOperation(self, name, optype='self'):
218 218 '''
219 219 '''
220 220
221 221 id = self.getNewId()
222 222 conf = OperationConf()
223 223 conf.setup(id, name=name, priority='0', project_id=self.project_id, err_queue=self.err_queue)
224 224 self.operations.append(conf)
225 225
226 226 return conf
227 227
228 228 def readXml(self, element, project_id, err_queue):
229 229
230 230 self.id = element.get('id')
231 231 self.name = element.get('name')
232 232 self.inputId = None if element.get('inputId') == 'None' else element.get('inputId')
233 233 self.datatype = element.get('datatype', self.name.replace(self.ELEMENTNAME.replace('Unit', ''), ''))
234 234 self.project_id = str(project_id)
235 235 self.err_queue = err_queue
236 236 self.operations = []
237 237 self.parameters = {}
238 238
239 239 for elm in element:
240 240 if elm.tag == 'Parameter':
241 241 self.addParameter(elm.get('name'), elm.get('value'))
242 242 elif elm.tag == 'Operation':
243 243 conf = OperationConf()
244 244 conf.readXml(elm, project_id, err_queue)
245 245 self.operations.append(conf)
246 246
247 247 def createObjects(self):
248 248 '''
249 249 Instantiates the processing unit and its operations.
250 250 '''
251 251
252 252 className = eval(self.name)
253 253 kwargs = self.getKwargs()
254 254 procUnitObj = className()
255 255 procUnitObj.name = self.name
256 256 log.success('creating process...', self.name)
257 257
258 258 for conf in self.operations:
259 259
260 260 opObj = conf.createObject()
261 261
262 262 log.success('adding operation: {}, type:{}'.format(
263 263 conf.name,
264 264 conf.type), self.name)
265 265
266 266 procUnitObj.addOperation(conf, opObj)
267 267
268 268 self.object = procUnitObj
269 269
270 270 def run(self):
271 271 '''
272 272 '''
273 273 #self.object.call(**self.getKwargs())
274 274
275 275 return self.object.call(**self.getKwargs())
276 276
277 277
278 278 class ReadUnitConf(ProcUnitConf):
279 279
280 280 ELEMENTNAME = 'ReadUnit'
281 281
282 282 def __init__(self):
283 283
284 284 self.id = None
285 285 self.datatype = None
286 286 self.name = None
287 287 self.inputId = None
288 288 self.operations = []
289 289 self.parameters = {}
290 290
291 291 def setup(self, project_id, id, name, datatype, err_queue, path='', startDate='', endDate='',
292 292 startTime='', endTime='', server=None, topic='', **kwargs):
293 293
294 294 if datatype == None and name == None:
295 295 raise ValueError('datatype or name should be defined')
296 296 if name == None:
297 297 if 'Reader' in datatype:
298 298 name = datatype
299 299 datatype = name.replace('Reader', '')
300 300 else:
301 301 name = '{}Reader'.format(datatype)
302 302 if datatype == None:
303 303 if 'Reader' in name:
304 304 datatype = name.replace('Reader', '')
305 305 else:
306 306 datatype = name
307 307 name = '{}Reader'.format(name)
308 308
309 309 self.id = id
310 310 self.project_id = project_id
311 311 self.name = name
312 312 self.datatype = datatype
313 313 self.err_queue = err_queue
314 314
315 315 self.addParameter(name='path', value=path)
316 316 self.addParameter(name='startDate', value=startDate)
317 317 self.addParameter(name='endDate', value=endDate)
318 318 self.addParameter(name='startTime', value=startTime)
319 319 self.addParameter(name='endTime', value=endTime)
320 320 self.addParameter(name='server', value=server)
321 321 self.addParameter(name='topic', value=topic)
322 322
323 323 for key, value in kwargs.items():
324 324 self.addParameter(name=key, value=value)
325 325
326 326
327 327 class Project(Process):
328 328 """API to create signal chain projects"""
329 329
330 330 ELEMENTNAME = 'Project'
331 331
332 332 def __init__(self, name=''):
333 333
334 334 Process.__init__(self)
335 335 self.id = '1'
336 336 if name:
337 337 self.name = '{} ({})'.format(Process.__name__, name)
338 338 self.filename = None
339 339 self.description = None
340 340 self.email = None
341 341 self.alarm = []
342 342 self.configurations = {}
343 343 # self.err_queue = Queue()
344 344 self.err_queue = None
345 345 self.started = False
346 346
347 347 def getNewId(self):
348 348
349 349 idList = list(self.configurations.keys())
350 350 id = int(self.id) * 10
351 351
352 352 while True:
353 353 id += 1
354 354
355 355 if str(id) in idList:
356 356 continue
357 357
358 358 break
359 359
360 360 return str(id)
361 361
362 362 def updateId(self, new_id):
363 363
364 364 self.id = str(new_id)
365 365
366 366 keyList = list(self.configurations.keys())
367 367 keyList.sort()
368 368
369 369 n = 1
370 370 new_confs = {}
371 371
372 372 for procKey in keyList:
373 373
374 374 conf = self.configurations[procKey]
375 375 idProcUnit = str(int(self.id) * 10 + n)
376 376 conf.updateId(idProcUnit)
377 377 new_confs[idProcUnit] = conf
378 378 n += 1
379 379
380 380 self.configurations = new_confs
381 381
382 382 def setup(self, id=1, name='', description='', email=None, alarm=[]):
383 383
384 384 self.id = str(id)
385 385 self.description = description
386 386 self.email = email
387 387 self.alarm = alarm
388 388 if name:
389 389 self.name = '{} ({})'.format(Process.__name__, name)
390 390
391 391 def update(self, **kwargs):
392 392
393 393 for key, value in kwargs.items():
394 394 setattr(self, key, value)
395 395
396 396 def clone(self):
397 397
398 398 p = Project()
399 399 p.id = self.id
400 400 p.name = self.name
401 401 p.description = self.description
402 402 p.configurations = self.configurations.copy()
403 403
404 404 return p
405 405
406 406 def addReadUnit(self, id=None, datatype=None, name=None, **kwargs):
407 407
408 408 '''
409 409 '''
410 410
411 411 if id is None:
412 412 idReadUnit = self.getNewId()
413 413 else:
414 414 idReadUnit = str(id)
415 415
416 416 conf = ReadUnitConf()
417 417 conf.setup(self.id, idReadUnit, name, datatype, self.err_queue, **kwargs)
418 418 self.configurations[conf.id] = conf
419 419
420 420 return conf
421 421
422 422 def addProcUnit(self, id=None, inputId='0', datatype=None, name=None):
423 423
424 424 '''
425 425 '''
426 426
427 427 if id is None:
428 428 idProcUnit = self.getNewId()
429 429 else:
430 430 idProcUnit = id
431 431
432 432 conf = ProcUnitConf()
433 433 conf.setup(self.id, idProcUnit, name, datatype, inputId, self.err_queue)
434 434 self.configurations[conf.id] = conf
435 435
436 436 return conf
437 437
438 438 def removeProcUnit(self, id):
439 439
440 440 if id in self.configurations:
441 441 self.configurations.pop(id)
442 442
443 443 def getReadUnit(self):
444 444
445 445 for obj in list(self.configurations.values()):
446 446 if obj.ELEMENTNAME == 'ReadUnit':
447 447 return obj
448 448
449 449 return None
450 450
451 451 def getProcUnit(self, id):
452 452
453 453 return self.configurations[id]
454 454
455 455 def getUnits(self):
456 456
457 457 keys = list(self.configurations)
458 458 keys.sort()
459 459
460 460 for key in keys:
461 461 yield self.configurations[key]
462 462
463 463 def updateUnit(self, id, **kwargs):
464 464
465 465 conf = self.configurations[id].update(**kwargs)
466 466
467 467 def makeXml(self):
468 468
469 469 xml = Element('Project')
470 470 xml.set('id', str(self.id))
471 471 xml.set('name', self.name)
472 472 xml.set('description', self.description)
473 473
474 474 for conf in self.configurations.values():
475 475 conf.makeXml(xml)
476 476
477 477 self.xml = xml
478 478
479 479 def writeXml(self, filename=None):
480 480
481 481 if filename == None:
482 482 if self.filename:
483 483 filename = self.filename
484 484 else:
485 485 filename = 'schain.xml'
486 486
487 487 if not filename:
488 488 print('filename has not been defined. Use setFilename(filename) to set it.')
489 489 return 0
490 490
491 491 abs_file = os.path.abspath(filename)
492 492
493 493 if not os.access(os.path.dirname(abs_file), os.W_OK):
494 494 print('No write permission on %s' % os.path.dirname(abs_file))
495 495 return 0
496 496
497 497 if os.path.isfile(abs_file) and not(os.access(abs_file, os.W_OK)):
498 498 print('File %s already exists and cannot be overwritten' % abs_file)
499 499 return 0
500 500
501 501 self.makeXml()
502 502
503 503 ElementTree(self.xml).write(abs_file, method='xml')
504 504
505 505 self.filename = abs_file
506 506
507 507 return 1
508 508
509 509 def readXml(self, filename):
510 510
511 511 abs_file = os.path.abspath(filename)
512 512
513 513 self.configurations = {}
514 514
515 515 try:
516 516 self.xml = ElementTree().parse(abs_file)
517 517 except:
518 518 log.error('Error reading %s, verify file format' % filename)
519 519 return 0
520 520
521 521 self.id = self.xml.get('id')
522 522 self.name = self.xml.get('name')
523 523 self.description = self.xml.get('description')
524 524
525 525 for element in self.xml:
526 526 if element.tag == 'ReadUnit':
527 527 conf = ReadUnitConf()
528 528 conf.readXml(element, self.id, self.err_queue)
529 529 self.configurations[conf.id] = conf
530 530 elif element.tag == 'ProcUnit':
531 531 conf = ProcUnitConf()
532 532 input_proc = self.configurations[element.get('inputId')]
533 533 conf.readXml(element, self.id, self.err_queue)
534 534 self.configurations[conf.id] = conf
535 535
536 536 self.filename = abs_file
537 537
538 538 return 1
539 539
540 540 def __str__(self):
541 541
542 542 text = '\nProject[id=%s, name=%s, description=%s]\n\n' % (
543 543 self.id,
544 544 self.name,
545 545 self.description,
546 546 )
547 547
548 548 for conf in self.configurations.values():
549 549 text += '{}'.format(conf)
550 550
551 551 return text
552 552
553 553 def createObjects(self):
554 554
555 555 keys = list(self.configurations.keys())
556 556 keys.sort()
557 557 for key in keys:
558 558 conf = self.configurations[key]
559 559 conf.createObjects()
560 560 if 'Reader' in str(conf):
561 561 reader = conf.object
562 562 else:
563 563 conf.object.reader = reader
564 564 if conf.inputId is not None:
565 565 if isinstance(conf.inputId, list):
566 566 conf.object.setInput([self.configurations[x].object for x in conf.inputId])
567 567 else:
568 568 conf.object.setInput([self.configurations[conf.inputId].object])
569 569
570 570 def monitor(self):
571 571
572 572 t = Thread(target=self._monitor, args=(self.err_queue, self.ctx))
573 573 t.start()
574 574
575 575 def _monitor(self, queue, ctx):
576 576
577 577 import socket
578 578
579 579 procs = 0
580 580 err_msg = ''
581 581
582 582 while True:
583 583 msg = queue.get()
584 584 if '#_start_#' in msg:
585 585 procs += 1
586 586 elif '#_end_#' in msg:
587 587 procs -= 1
588 588 else:
589 589 err_msg = msg
590 590
591 591 if procs == 0 or 'Traceback' in err_msg:
592 592 break
593 593 time.sleep(0.1)
594 594
595 595 if '|' in err_msg:
596 596 name, err = err_msg.split('|')
597 597 if 'SchainWarning' in err:
598 598 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), name)
599 599 elif 'SchainError' in err:
600 600 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), name)
601 601 else:
602 602 log.error(err, name)
603 603 else:
604 604 name, err = self.name, err_msg
605 605
606 606 time.sleep(1)
607 607
608 608 ctx.term()
609 609
610 610 message = ''.join(err)
611 611
612 612 if err_msg:
613 613 subject = 'SChain v%s: Error running %s\n' % (
614 614 schainpy.__version__, self.name)
615 615
616 616 subtitle = 'Hostname: %s\n' % socket.gethostbyname(
617 617 socket.gethostname())
618 618 subtitle += 'Working directory: %s\n' % os.path.abspath('./')
619 619 subtitle += 'Configuration file: %s\n' % self.filename
620 620 subtitle += 'Time: %s\n' % str(datetime.datetime.now())
621 621
622 622 readUnitConfObj = self.getReadUnit()
623 623 if readUnitConfObj:
624 624 subtitle += '\nInput parameters:\n'
625 625 subtitle += '[Data path = %s]\n' % readUnitConfObj.parameters['path']
626 626 subtitle += '[Start date = %s]\n' % readUnitConfObj.parameters['startDate']
627 627 subtitle += '[End date = %s]\n' % readUnitConfObj.parameters['endDate']
628 628 subtitle += '[Start time = %s]\n' % readUnitConfObj.parameters['startTime']
629 629 subtitle += '[End time = %s]\n' % readUnitConfObj.parameters['endTime']
630 630
631 631 a = Alarm(
632 632 modes=self.alarm,
633 633 email=self.email,
634 634 message=message,
635 635 subject=subject,
636 636 subtitle=subtitle,
637 637 filename=self.filename
638 638 )
639 639
640 640 a.start()
641 641
642 642 def setFilename(self, filename):
643 643
644 644 self.filename = filename
645 645
646 646 def runProcs(self):
647 647
648 648 err = False
649 649 n = len(self.configurations)
650 #print(n)
651
650 flag_no_read = False
651 nProc_noRead = 0
652
653 #while not err:
654 # for conf in self.getUnits():
655 # ok = conf.run()
656 # if ok == 'Error':
657 # n -= 1
658 # continue
659 # elif not ok:
660 # break
661 # if n == 0:
662 # err = True
663
652 664 while not err:
653 #print(self.getUnits())
665 n_proc = 0
654 666 for conf in self.getUnits():
655 #print(conf)
656 ok = conf.run()
657 #print("ok", ok)
667 if flag_no_read:
668 if n_proc >= nProc_noRead:
669 ok = conf.run()
670 else:
671 n_proc += 1
672 continue
673 else:
674 ok = conf.run()
675
676 n_proc += 1
677
658 678 if ok == 'Error':
659 679 n -= 1
660 680 continue
681
682 elif ok == 'no_Read' and (not flag_no_read):
683 nProc_noRead = n_proc - 1
684 flag_no_read = True
685 continue
686 elif ok == 'new_Read':
687 nProc_noRead = 0
688 flag_no_read = False
689 continue
661 690 elif not ok:
662 691 break
663 #print("****************************************************end")
664 #exit(1)
692
665 693 if n == 0:
666 694 err = True
667 695
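To summarize the revised control flow, here is a sketch (not project code) of how runProcs above reacts to the value each unit's call() returns; the helper name interpret is hypothetical:

    def interpret(ok):
        # mirrors the branches in runProcs
        if ok == 'Error':     # unit failed: drop it from the pending count
            return 'drop unit'
        if ok == 'no_Read':   # Voltage buffer still holds data: skip the earlier (read) units
            return 'skip units before this one'
        if ok == 'new_Read':  # buffer drained: include the read unit again
            return 'resume from the reader'
        return 'keep looping' if ok else 'stop this pass over the units'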
668 696 def run(self):
669 697
670 698 log.success('\nStarting Project {} [id={}]'.format(self.name, self.id), tag='')
671 699 self.started = True
672 700 self.start_time = time.time()
673 701 self.createObjects()
674 702 self.runProcs()
675 703 log.success('{} Done (Time: {:4.2f}s)'.format(
676 704 self.name,
677 705 time.time() - self.start_time), '')
@@ -1,252 +1,246
1 1 '''
2 2 Base classes to create processing units and operations. The MPDecorator
3 3 must be used in plotting and writing operations to allow them to run as an
4 4 external process.
5 5 '''
6 6
7 7 import os
8 8 import inspect
9 9 import zmq
10 10 import time
11 11 import pickle
12 12 import traceback
13 13 from threading import Thread
14 14 from multiprocessing import Process, Queue
15 15 from schainpy.utils import log
16 16
17 17 QUEUE_SIZE = int(os.environ.get('QUEUE_MAX_SIZE', '100'))
18 18
19 19 class ProcessingUnit(object):
20 20 '''
21 21 Base class to create Signal Chain Units
22 22 '''
23 23
24 24 proc_type = 'processing'
25 25 bypass = False
26 26
27 27 def __init__(self):
28 28
29 29 self.dataIn = None
30 30 self.dataOut = None
31 31 self.isConfig = False
32 32 self.operations = []
33 33 self.name = 'Test'
34 34 self.inputs = []
35 35
36 36 def setInput(self, unit):
37 37
38 38 attr = 'dataIn'
39 39 for i, u in enumerate(unit):
40 40 if i==0:
41 41 #print(u.dataOut.flagNoData)
42 42 #exit(1)
43 43 self.dataIn = u.dataOut#.copy()
44 44 self.inputs.append('dataIn')
45 45 else:
46 46 setattr(self, 'dataIn{}'.format(i), u.dataOut)#.copy())
47 47 self.inputs.append('dataIn{}'.format(i))
48 48
49 49
50 50 def getAllowedArgs(self):
51 51 if hasattr(self, '__attrs__'):
52 52 return self.__attrs__
53 53 else:
54 54 return inspect.getargspec(self.run).args
55 55
56 56 def addOperation(self, conf, operation):
57 57 '''
58 58 '''
59 59
60 60 self.operations.append((operation, conf.type, conf.getKwargs()))
61 61
62 62 def getOperationObj(self, objId):
63 63
64 64 if objId not in list(self.operations.keys()):
65 65 return None
66 66
67 67 return self.operations[objId]
68 68
69 69 def call(self, **kwargs):
70 '''
71 '''
72 70
71 mybool = (self.dataOut.type == 'Voltage') and self.dataOut.useInputBuffer and (not self.dataOut.buffer_empty) # release data from the buffer
72
73 73 try:
74 if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error:
75 #if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error and not self.dataIn.runNextUnit:
76 if self.dataIn.runNextUnit:
77 #print("SUCCESSSSSSS")
78 #exit(1)
79 return not self.dataIn.isReady()
80 else:
81 return self.dataIn.isReady()
82 elif self.dataIn is None or not self.dataIn.error:
83 if 'Reader' in self.name and self.bypass:
84 print('Skipping...reader')
85 return self.dataOut.isReady()
74 if mybool:
75 #print("run jeje")
86 76 self.run(**kwargs)
87 elif self.dataIn.error:
88 #print("Elif 2")
89 self.dataOut.error = self.dataIn.error
90 self.dataOut.flagNoData = True
77 else:
78 if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error:
79 return self.dataIn.isReady()
80 elif self.dataIn is None or not self.dataIn.error: # read unit or regular processing unit
81 self.run(**kwargs)
82 elif self.dataIn.error:
83 self.dataOut.error = self.dataIn.error
84 self.dataOut.flagNoData = True
85 print("exec proc error")
86
91 87 except:
92 #print("Except")
88
93 89 err = traceback.format_exc()
94 90 if 'SchainWarning' in err:
95 91 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), self.name)
96 92 elif 'SchainError' in err:
97 93 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), self.name)
98 94 else:
99 95 log.error(err, self.name)
100 96 self.dataOut.error = True
101 #print("before op")
97
98
102 99 for op, optype, opkwargs in self.operations:
103 aux = self.dataOut.copy()
104 #aux = copy.deepcopy(self.dataOut)
105 #print("**********************Before",op)
106 if optype == 'other' and not self.dataOut.flagNoData:
107 #print("**********************Other",op)
108 #print(self.dataOut.flagNoData)
109 self.dataOut = op.run(self.dataOut, **opkwargs)
110 elif optype == 'external' and not self.dataOut.flagNoData:
111 op.queue.put(aux)
100
101 if (optype == 'other' and self.dataOut.isReady()) or mybool:
102 try:
103 self.dataOut = op.run(self.dataOut, **opkwargs)
104 except Exception as e:
105 print(e)
106 self.dataOut.error = True
107 return 'Error'
108 elif optype == 'external' and self.dataOut.isReady() :
109 op.queue.put(copy.deepcopy(self.dataOut))
112 110 elif optype == 'external' and self.dataOut.error:
113 op.queue.put(aux)
114 #elif optype == 'external' and self.dataOut.isReady():
115 #op.queue.put(copy.deepcopy(self.dataOut))
116 #print(not self.dataOut.isReady())
111 op.queue.put(copy.deepcopy(self.dataOut))
117 112
118 try:
119 if self.dataOut.runNextUnit:
120 runNextUnit = self.dataOut.runNextUnit
121 #print(self.operations)
122 #print("Tru")
123 113
114 if not self.dataOut.error:
115 if self.dataOut.type == 'Voltage':
116 if not self.dataOut.buffer_empty : #continue
117 return 'no_Read'
118 elif self.dataOut.useInputBuffer and (self.dataOut.buffer_empty) and self.dataOut.isReady() :
119 return 'new_Read'
120 else:
121 return True
124 122 else:
125 runNextUnit = self.dataOut.isReady()
126 except:
127 runNextUnit = self.dataOut.isReady()
128 #exit(1)
129 #if not self.dataOut.isReady():
130 #return 'Error' if self.dataOut.error else input()
131 #print("NexT",runNextUnit)
132 #print("error: ",self.dataOut.error)
133 return 'Error' if self.dataOut.error else runNextUnit# self.dataOut.isReady()
123 #print("ret True")
124 return True
125 else:
126 return 'Error'
127 #return 'Error' if self.dataOut.error else True #self.dataOut.isReady()
134 128
135 129 def setup(self):
136 130
137 131 raise NotImplementedError
138 132
139 133 def run(self):
140 134
141 135 raise NotImplementedError
142 136
143 137 def close(self):
144 138
145 139 return
146 140
147 141
148 142 class Operation(object):
149 143
150 144 '''
151 145 '''
152 146
153 147 proc_type = 'operation'
154 148
155 149 def __init__(self):
156 150
157 151 self.id = None
158 152 self.isConfig = False
159 153
160 154 if not hasattr(self, 'name'):
161 155 self.name = self.__class__.__name__
162 156
163 157 def getAllowedArgs(self):
164 158 if hasattr(self, '__attrs__'):
165 159 return self.__attrs__
166 160 else:
167 161 return inspect.getargspec(self.run).args
168 162
169 163 def setup(self):
170 164
171 165 self.isConfig = True
172 166
173 167 raise NotImplementedError
174 168
175 169 def run(self, dataIn, **kwargs):
176 170 """
177 171 Performs the required operations on dataIn.data and updates the
178 172 attributes of the dataIn object.
179 173
180 174 Input:
181 175
182 176 dataIn : object of type JROData
183 177
184 178 Return:
185 179
186 180 None
187 181
188 182 Affected:
189 183 __buffer : data reception buffer.
190 184
191 185 """
192 186 if not self.isConfig:
193 187 self.setup(**kwargs)
194 188
195 189 raise NotImplementedError
196 190
197 191 def close(self):
198 192
199 193 return
200 194
201 195
202 196 def MPDecorator(BaseClass):
203 197 """
204 198 Multiprocessing class decorator
205 199
206 200 This function adds multiprocessing features to a BaseClass.
207 201 """
208 202
209 203 class MPClass(BaseClass, Process):
210 204
211 205 def __init__(self, *args, **kwargs):
212 206 super(MPClass, self).__init__()
213 207 Process.__init__(self)
214 208
215 209 self.args = args
216 210 self.kwargs = kwargs
217 211 self.t = time.time()
218 212 self.op_type = 'external'
219 213 self.name = BaseClass.__name__
220 214 self.__doc__ = BaseClass.__doc__
221 215
222 216 if 'plot' in self.name.lower() and not self.name.endswith('_'):
223 217 self.name = '{}{}'.format(self.CODE.upper(), 'Plot')
224 218
225 219 self.start_time = time.time()
226 220 self.err_queue = args[3]
227 221 self.queue = Queue(maxsize=QUEUE_SIZE)
228 222 self.myrun = BaseClass.run
229 223
230 224 def run(self):
231 225
232 226 while True:
233 227
234 228 dataOut = self.queue.get()
235 229
236 230 if not dataOut.error:
237 231 try:
238 232 BaseClass.run(self, dataOut, **self.kwargs)
239 233 except:
240 234 err = traceback.format_exc()
241 235 log.error(err, self.name)
242 236 else:
243 237 break
244 238
245 239 self.close()
246 240
247 241 def close(self):
248 242
249 243 BaseClass.close(self)
250 244 log.success('Done...(Time:{:4.2f} secs)'.format(time.time() - self.start_time), self.name)
251 245
252 246 return MPClass
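A short sketch of how MPDecorator can be applied to make an operation run as an external process, mirroring the printAttribute operation decorated later in this changeset; MyWriter is a hypothetical class name:

    from schainpy.model.proc.jroproc_base import Operation, MPDecorator
    from schainpy.utils import log

    @MPDecorator
    class MyWriter(Operation):

        def run(self, dataOut):
            # executed in its own process; dataOut objects arrive through self.queue
            log.log(dataOut.type, self.name)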
@@ -1,7485 +1,7579
1 1
2 2 import os, json
3 3 import sys
4 4 import numpy, math
5 5
6 6 from scipy import interpolate
7 7 from scipy.optimize import nnls
8 8 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
9 9 from schainpy.model.data.jrodata import Voltage,hildebrand_sekhon
10 10 from schainpy.utils import log
11 11 from time import time, mktime, strptime, gmtime, ctime
12 12 from scipy.optimize import least_squares
13 13 import datetime
14 14 import collections.abc
15 15 import csv
16 16 import ast #new added
17 17
18 18 try:
19 19 from schainpy.model.proc import fitacf_guess
20 20 from schainpy.model.proc import fitacf_fit_short
21 21 from schainpy.model.proc import fitacf_acf2
22 22 from schainpy.model.proc import full_profile_profile
23 23 except:
24 24 log.warning('Missing Faraday fortran libs')
25 25
26 26 class VoltageProc(ProcessingUnit):
27 27
28 28 def __init__(self):
29 29
30 30 ProcessingUnit.__init__(self)
31 31
32 32 self.dataOut = Voltage()
33 33 self.flip = 1
34 34 self.setupReq = False
35 35
36 36 def run(self, runNextUnit = 0):
37 37
38 38 if self.dataIn.type == 'AMISR':
39 39 self.__updateObjFromAmisrInput()
40 40
41 41 if self.dataIn.type == 'Voltage':
42 42 self.dataOut.copy(self.dataIn)
43 43 self.dataOut.runNextUnit = runNextUnit
44 44 #print("data shape: ", self.dataOut.data.shape)
45 45 #exit(1)
46 46
47 47 def __updateObjFromAmisrInput(self):
48 48
49 49 self.dataOut.timeZone = self.dataIn.timeZone
50 50 self.dataOut.dstFlag = self.dataIn.dstFlag
51 51 self.dataOut.errorCount = self.dataIn.errorCount
52 52 self.dataOut.useLocalTime = self.dataIn.useLocalTime
53 53
54 54 self.dataOut.flagNoData = self.dataIn.flagNoData
55 55 self.dataOut.data = self.dataIn.data
56 56 self.dataOut.utctime = self.dataIn.utctime
57 57 self.dataOut.channelList = self.dataIn.channelList
58 58 #self.dataOut.timeInterval = self.dataIn.timeInterval
59 59 self.dataOut.heightList = self.dataIn.heightList
60 60 self.dataOut.nProfiles = self.dataIn.nProfiles
61 61
62 62 self.dataOut.nCohInt = self.dataIn.nCohInt
63 63 self.dataOut.ippSeconds = self.dataIn.ippSeconds
64 64 self.dataOut.frequency = self.dataIn.frequency
65 65
66 66 self.dataOut.azimuth = self.dataIn.azimuth
67 67 self.dataOut.zenith = self.dataIn.zenith
68 68
69 69 self.dataOut.beam.codeList = self.dataIn.beam.codeList
70 70 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
71 71 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
72 72
73 73 class selectChannels(Operation):
74 74
75 75 def run(self, dataOut, channelList):
76 76
77 77 channelIndexList = []
78 78 self.dataOut = dataOut
79 79 for channel in channelList:
80 80 if channel not in self.dataOut.channelList:
81 81 raise ValueError("Channel %d is not in %s" %(channel, str(self.dataOut.channelList)))
82 82
83 83 index = self.dataOut.channelList.index(channel)
84 84 channelIndexList.append(index)
85 85 self.selectChannelsByIndex(channelIndexList)
86 86
87 87 return self.dataOut
88 88
89 89
90 90 def selectChannelsByIndex(self, channelIndexList):
91 91 """
92 92 Selects a block of data by channel according to channelIndexList
93 93
94 94 Input:
95 95 channelIndexList : plain list of channel indexes to select, e.g. [2,3,7]
96 96
97 97 Affected:
98 98 self.dataOut.data
99 99 self.dataOut.channelIndexList
100 100 self.dataOut.nChannels
101 101 self.dataOut.m_ProcessingHeader.totalSpectra
102 102 self.dataOut.systemHeaderObj.numChannels
103 103 self.dataOut.m_ProcessingHeader.blockSize
104 104
105 105 Return:
106 106 None
107 107 """
108 108
109 109 for channelIndex in channelIndexList:
110 110 if channelIndex not in self.dataOut.channelIndexList:
111 111 raise ValueError("The value %d in channelIndexList is not valid" %channelIndex)
112 112
113 113 if self.dataOut.type == 'Voltage':
114 114 if self.dataOut.flagDataAsBlock:
115 115 """
116 116 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
117 117 """
118 118 data = self.dataOut.data[channelIndexList,:,:]
119 119 else:
120 120 data = self.dataOut.data[channelIndexList,:]
121 121
122 122 self.dataOut.data = data
123 123 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
124 124 self.dataOut.channelList = range(len(channelIndexList))
125 125
126 126 elif self.dataOut.type == 'Spectra':
127 127 data_spc = self.dataOut.data_spc[channelIndexList, :]
128 128 data_dc = self.dataOut.data_dc[channelIndexList, :]
129 129
130 130 self.dataOut.data_spc = data_spc
131 131 self.dataOut.data_dc = data_dc
132 132
133 133 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
134 134 self.dataOut.channelList = range(len(channelIndexList))
135 135 self.__selectPairsByChannel(channelIndexList)
136 136
137 137 return 1
138 138
139 139 def __selectPairsByChannel(self, channelList=None):
140 140
141 141 if channelList == None:
142 142 return
143 143
144 144 pairsIndexListSelected = []
145 145 for pairIndex in self.dataOut.pairsIndexList:
146 146 # First pair
147 147 if self.dataOut.pairsList[pairIndex][0] not in channelList:
148 148 continue
149 149 # Second pair
150 150 if self.dataOut.pairsList[pairIndex][1] not in channelList:
151 151 continue
152 152
153 153 pairsIndexListSelected.append(pairIndex)
154 154
155 155 if not pairsIndexListSelected:
156 156 self.dataOut.data_cspc = None
157 157 self.dataOut.pairsList = []
158 158 return
159 159
160 160 self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected]
161 161 self.dataOut.pairsList = [self.dataOut.pairsList[i]
162 162 for i in pairsIndexListSelected]
163 163
164 164 return
165 165
166 166 class CombineChannels(Operation):
167 167 '''Digital hybrid implementation'''
168 168
169 169 def run(self, dataout, sum_list=[], sub_list=[]):
170 170 '''
171 171 Input:
172 172 sum_list : list of pairs [[0,2],[1,3]]
173 173 sub_list : list of pairs [[2,4],[6,8]]
174 174 '''
175 175 #print(dataout.data[0, :, :])
176 176 tmp = []
177 177
178 178 if sub_list:
179 179 for i, j in sub_list:
180 180 if dataout.flagDataAsBlock:
181 181 tmp.append( dataout.data[i, :, :] - dataout.data[j, :, :])
182 182 else:
183 183 tmp.append(dataout.data[i,:] - dataout.data[j,:])
184 184
185 185 if sum_list:
186 186 for i, j in sum_list:
187 187 if dataout.flagDataAsBlock:
188 188 tmp.append(dataout.data[i, :, :] + dataout.data[j, :, :])
189 189 #tmp.append(numpy.sum(dataout.data[i, :, :],dataout.data[j, :, :]))
190 190 else:
191 191 tmp.append(dataout.data[i,:] + dataout.data[j,:])
192 192
193 193
194 194
195 195
196 196 dataout.data = numpy.array(tmp)
197 197 dataout.channelList = range(len(tmp))
198 198
199 199 return dataout
200 200
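A possible wiring sketch for CombineChannels (the pair lists are illustrative, and proc_unit refers to a previously created processing unit configuration):

    op = proc_unit.addOperation(name='CombineChannels', optype='other')
    op.addParameter(name='sum_list', value='[[0,2],[1,3]]')
    op.addParameter(name='sub_list', value='[[1,3]]')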
201 201 class selectHeights(Operation):
202 202
203 203 def run(self, dataOut, minHei=None, maxHei=None, minIndex=None, maxIndex=None):
204 204 """
205 205 Selects a block of data using a group of height values within the range
206 206 minHei <= height <= maxHei
207 207
208 208 Input:
209 209 minHei : minimum height to consider
210 210 maxHei : maximum height to consider
211 211
212 212 Affected:
213 213 Several values are changed indirectly through the selectHeightsByIndex method
214 214
215 215 Return:
216 216 1 if the method runs successfully, otherwise 0
217 217 """
218 218
219 219 self.dataOut = dataOut
220 220
221 221 if minHei and maxHei:
222 222 #if 1:
223 223 if minHei == None:
224 224 minHei = self.dataOut.heightList[0]
225 225
226 226 if maxHei == None:
227 227 maxHei = self.dataOut.heightList[-1]
228 228
229 229 if (minHei < self.dataOut.heightList[0]):
230 230 minHei = self.dataOut.heightList[0]
231 231
232 232 if (maxHei > self.dataOut.heightList[-1]):
233 233 maxHei = self.dataOut.heightList[-1]
234 234
235 235 minIndex = 0
236 236 maxIndex = 0
237 237 heights = self.dataOut.heightList
238 238
239 239 inda = numpy.where(heights >= minHei)
240 240 indb = numpy.where(heights <= maxHei)
241 241
242 242 try:
243 243 minIndex = inda[0][0]
244 244 except:
245 245 minIndex = 0
246 246
247 247 try:
248 248 maxIndex = indb[0][-1]
249 249 except:
250 250 maxIndex = len(heights)
251 251
252 252 self.selectHeightsByIndex(minIndex, maxIndex)
253 253 #print(self.dataOut.nHeights)
254 254
255 255
256 256 return self.dataOut
257 257
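A possible wiring sketch for the height selection (values illustrative; proc_unit is assumed to exist):

    op = proc_unit.addOperation(name='selectHeights', optype='other')
    op.addParameter(name='minHei', value='90.0', format='float')
    op.addParameter(name='maxHei', value='180.0', format='float')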
258 258 def selectHeightsByIndex(self, minIndex, maxIndex):
259 259 """
260 260 Selects a block of data using a group of height indexes within the range
261 261 minIndex <= index <= maxIndex
262 262
263 263 Input:
264 264 minIndex : minimum height index to consider
265 265 maxIndex : maximum height index to consider
266 266
267 267 Affected:
268 268 self.dataOut.data
269 269 self.dataOut.heightList
270 270
271 271 Return:
272 272 1 if the method runs successfully, otherwise 0
273 273 """
274 274
275 275 if self.dataOut.type == 'Voltage':
276 276 if (minIndex < 0) or (minIndex > maxIndex):
277 277 raise ValueError("Height index range (%d,%d) is not valid" % (minIndex, maxIndex))
278 278
279 279 if (maxIndex >= self.dataOut.nHeights):
280 280 maxIndex = self.dataOut.nHeights
281 281
282 282 #voltage
283 283 if self.dataOut.flagDataAsBlock:
284 284 """
285 285 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
286 286 """
287 287 data = self.dataOut.data[:,:, minIndex:maxIndex]
288 288 else:
289 289 data = self.dataOut.data[:, minIndex:maxIndex]
290 290
291 291 # firstHeight = self.dataOut.heightList[minIndex]
292 292
293 293 self.dataOut.data = data
294 294 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex]
295 295
296 296 if self.dataOut.nHeights <= 1:
297 297 raise ValueError("selectHeights: Too few heights. Current number of heights is %d" %(self.dataOut.nHeights))
298 298 elif self.dataOut.type == 'Spectra':
299 299 if (minIndex < 0) or (minIndex > maxIndex):
300 300 raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (
301 301 minIndex, maxIndex))
302 302
303 303 if (maxIndex >= self.dataOut.nHeights):
304 304 maxIndex = self.dataOut.nHeights - 1
305 305
306 306 # Spectra
307 307 data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]
308 308 if hasattr(self.dataOut, "ByLags"):
309 309 if self.dataOut.ByLags:
310 310 self.dataOut.dataLag_spc = self.dataOut.dataLag_spc[:, :, minIndex:maxIndex + 1]
311 311
312 312 data_cspc = None
313 313 if self.dataOut.data_cspc is not None:
314 314 data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]
315 315 if hasattr(self.dataOut, "ByLags"):
316 316 if self.dataOut.ByLags:
317 317 self.dataOut.dataLag_cspc = self.dataOut.dataLag_cspc[:, :, minIndex:maxIndex + 1]
318 318
319 319 data_dc = None
320 320 if self.dataOut.data_dc is not None:
321 321 data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]
322 322 if hasattr(self.dataOut, "ByLags"):
323 323 if self.dataOut.ByLags:
324 324 self.dataOut.dataLag_dc = self.dataOut.dataLag_dc[:, minIndex:maxIndex + 1]
325 325
326 326 self.dataOut.data_spc = data_spc
327 327 self.dataOut.data_cspc = data_cspc
328 328 self.dataOut.data_dc = data_dc
329 329
330 330 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex + 1]
331 331
332 332 return 1
333 333
334 334
335 335 class filterByHeights(Operation):
336 336
337 337 def run(self, dataOut, window):
338 338 #print("nHeights \n", dataOut.nHeights)
339 339 #print("heighList \n", dataOut.heightList)
340 340 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
341 341 #print("dataOut \n", dataOut, dataOut.len)
342 342 #print("dataOut.data \n", dataOut.data)
343 343 if window == None:
344 344 window = (dataOut.radarControllerHeaderObj.txA/dataOut.radarControllerHeaderObj.nBaud) / deltaHeight
345 345
346 346 newdelta = deltaHeight * window
347 347 r = dataOut.nHeights % window
348 348 newheights = (dataOut.nHeights-r)/window
349 349 if newheights <= 1:
350 350 raise ValueError("filterByHeights: Too few heights. Current number of heights is %d and window is %d" %(dataOut.nHeights, window))
351 351
352 352 if dataOut.flagDataAsBlock:
353 353 """
354 354 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
355 355 """
356 356 buffer = dataOut.data[:, :, 0:int(dataOut.nHeights-r)]
357 357 buffer = buffer.reshape(dataOut.nChannels, dataOut.nProfiles, int(dataOut.nHeights/window), window)
358 358 buffer = numpy.sum(buffer,3)
359 359
360 360 else:
361 361 buffer = dataOut.data[:,0:int(dataOut.nHeights-r)]
362 362 buffer = buffer.reshape(dataOut.nChannels,int(dataOut.nHeights/window),int(window))
363 363 buffer = numpy.sum(buffer,2)
364 364
365 365 dataOut.data = buffer#/window
366 366 dataOut.heightList = dataOut.heightList[0] + numpy.arange( newheights )*newdelta
367 367 dataOut.windowOfFilter = window
368 368
369 369 return dataOut
370 370
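A numeric sketch of the window summing done by filterByHeights for the non-block case (shapes and window are illustrative):

    import numpy
    nChannels, nHeights, window = 2, 1000, 4
    data = numpy.ones((nChannels, nHeights), dtype='complex64')
    r = nHeights % window                        # 0 in this example
    buffer = data[:, 0:nHeights - r]
    buffer = buffer.reshape(nChannels, (nHeights - r) // window, window)
    buffer = numpy.sum(buffer, 2)                # shape (2, 250), every sample equals 4+0j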
371 371 class setOffset(Operation):
372 372
373 373 def run(self, dataOut, offset=None):
374 374
375 375 if not offset:
376 376 offset = 0.0
377 377
378 378 newHeiRange = dataOut.heightList - offset
379 379
380 380 dataOut.heightList = newHeiRange
381 381
382 382 return dataOut
383 383
384 384 class setH0(Operation):
385 385
386 386 def run(self, dataOut, h0, deltaHeight = None):
387 387
388 388 if not deltaHeight:
389 389 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
390 390
391 391 nHeights = dataOut.nHeights
392 392
393 393 newHeiRange = h0 + numpy.arange(nHeights)*deltaHeight
394 394
395 395 dataOut.heightList = newHeiRange
396 396
397 397 return dataOut
398 398
399 399
400 400 class deFlip(Operation):
401 401 def __init__(self):
402 402
403 403 self.flip = 1
404 404
405 405 def run(self, dataOut, channelList = []):
406 406
407 407 data = dataOut.data.copy()
408 408
409 409 if channelList==1: # PATCH: a single-channel list raises an error
410 410 channelList=[1]
411 411
412 412 dataOut.FlipChannels=channelList
413 413 if dataOut.flagDataAsBlock:
414 414 flip = self.flip
415 415 profileList = list(range(dataOut.nProfiles))
416 416
417 417 if not channelList:
418 418 for thisProfile in profileList:
419 419 data[:,thisProfile,:] = data[:,thisProfile,:]*flip
420 420 flip *= -1.0
421 421 else:
422 422 for thisChannel in channelList:
423 423 if thisChannel not in dataOut.channelList:
424 424 continue
425 425
426 426 for thisProfile in profileList:
427 427 data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
428 428 flip *= -1.0
429 429
430 430 self.flip = flip
431 431
432 432 else:
433 433 if not channelList:
434 434 data[:,:] = data[:,:]*self.flip
435 435 else:
436 436 #channelList=[1]
437 437 #print(channelList)
438 438 #exit(1)
439 439 for thisChannel in channelList:
440 440 if thisChannel not in dataOut.channelList:
441 441 continue
442 442
443 443 data[thisChannel,:] = data[thisChannel,:]*self.flip
444 444
445 445 self.flip *= -1.
446 446
447 447 dataOut.data = data
448 448
449 449 return dataOut
450 450
451 451 class deFlipHP(Operation):
452 452 '''
453 453 Written by R. Flores
454 454 '''
455 455 def __init__(self):
456 456
457 457 self.flip = 1
458 458
459 459 def run(self, dataOut, byHeights = False, channelList = [], HeiRangeList = None):
460 460
461 461 data = dataOut.data.copy()
462 462
463 463 firstHeight = HeiRangeList[0]
464 464 lastHeight = HeiRangeList[1]+1
465 465
466 466 #if channelList==1: # PATCH: a single-channel list raises an error
467 467 #channelList=[1]
468 468
469 469 dataOut.FlipChannels=channelList
470 470 if dataOut.flagDataAsBlock:
471 471 flip = self.flip
472 472 profileList = list(range(dataOut.nProfiles))
473 473
474 474 if not channelList:
475 475 for thisProfile in profileList:
476 476 data[:,thisProfile,:] = data[:,thisProfile,:]*flip
477 477 flip *= -1.0
478 478 else:
479 479 for thisChannel in channelList:
480 480 if thisChannel not in dataOut.channelList:
481 481 continue
482 482 if not byHeights:
483 483 for thisProfile in profileList:
484 484 data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
485 485 flip *= -1.0
486 486
487 487 else:
488 488 firstHeight = HeiRangeList[0]
489 489 lastHeight = HeiRangeList[1]+1
490 490 flip = -1.0
491 491 data[thisChannel,:,firstHeight:lastHeight] = data[thisChannel,:,firstHeight:lastHeight]*flip
492 492
493 493
494 494 self.flip = flip
495 495
496 496 else:
497 497 if not channelList:
498 498 data[:,:] = data[:,:]*self.flip
499 499 else:
500 500 #channelList=[1]
501 501
502 502 for thisChannel in channelList:
503 503 if thisChannel not in dataOut.channelList:
504 504 continue
505 505
506 506 if not byHeights:
507 507 data[thisChannel,:] = data[thisChannel,:]*self.flip
508 508
509 509 else:
510 510 firstHeight = HeiRangeList[0]
511 511 lastHeight = HeiRangeList[1]+1
512 512 flip = -1.0
513 513 data[thisChannel,firstHeight:lastHeight] = data[thisChannel,firstHeight:lastHeight]*flip
514 514
515 515 #data[thisChannel,:] = data[thisChannel,:]*self.flip
516 516
517 517 self.flip *= -1.
518 518
519 519 #print(dataOut.data[0,:12,1066+2])
520 520 #print(dataOut.data[1,:12,1066+2])
521 521 dataOut.data =data
522 522 #print(dataOut.data[0,:12,1066+2])
523 523 #print(dataOut.data[1,:12,1066+2])
524 524 #exit(1)
525 525
526 526 return dataOut
527 527
528 528 class setAttribute(Operation):
529 529 '''
530 530 Set an arbitrary attribute(s) to dataOut
531 531 '''
532 532
533 533 def __init__(self):
534 534
535 535 Operation.__init__(self)
536 536 self._ready = False
537 537
538 538 def run(self, dataOut, **kwargs):
539 539
540 540 for key, value in kwargs.items():
541 541
542 542 setattr(dataOut, key, value)
543 543
544 544 return dataOut
545 545
546 546
547 547 @MPDecorator
548 548 class printAttribute(Operation):
549 549 '''
550 550 Print an arbitrary attribute of dataOut
551 551 '''
552 552
553 553 def __init__(self):
554 554
555 555 Operation.__init__(self)
556 556
557 557 def run(self, dataOut, attributes):
558 558
559 559 if isinstance(attributes, str):
560 560 attributes = [attributes]
561 561 for attr in attributes:
562 562 if hasattr(dataOut, attr):
563 563 log.log(getattr(dataOut, attr), attr)
564 564
565 565
566 566 class interpolateHeights(Operation):
567 567
568 568 def run(self, dataOut, topLim, botLim):
569 569 #69 to 72 for julia
570 570 #82-84 for meteors
571 571 if len(numpy.shape(dataOut.data))==2:
572 572 sampInterp = (dataOut.data[:,botLim-1] + dataOut.data[:,topLim+1])/2
573 573 sampInterp = numpy.transpose(numpy.tile(sampInterp,(topLim-botLim + 1,1)))
574 574 #dataOut.data[:,botLim:limSup+1] = sampInterp
575 575 dataOut.data[:,botLim:topLim+1] = sampInterp
576 576 else:
577 577 nHeights = dataOut.data.shape[2]
578 578 x = numpy.hstack((numpy.arange(botLim),numpy.arange(topLim+1,nHeights)))
579 579 y = dataOut.data[:,:,list(range(botLim))+list(range(topLim+1,nHeights))]
580 580 f = interpolate.interp1d(x, y, axis = 2)
581 581 xnew = numpy.arange(botLim,topLim+1)
582 582 ynew = f(xnew)
583 583 dataOut.data[:,:,botLim:topLim+1] = ynew
584 584
585 585 return dataOut
586 586
587 587
588 588 class LagsReshape(Operation):
589 589 '''
590 590 Written by R. Flores
591 591 '''
592 592 """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction.
593 593
594 594 Parameters:
595 595 -----------
596 596
597 597
598 598 Example
599 599 --------
600 600
601 601 op = proc_unit.addOperation(name='LagsReshape')
602 602
603 603
604 604 """
605 605
606 606 def __init__(self, **kwargs):
607 607
608 608 Operation.__init__(self, **kwargs)
609 609
610 610 self.buffer=None
611 611 self.buffer_HR=None
612 612 self.buffer_HRonelag=None
613 613
614 614 def LagDistribution(self,dataOut):
615 615
616 616 dataOut.datapure=numpy.copy(dataOut.data[:,0:dataOut.NSCAN,:])
617 617 self.buffer = numpy.zeros((dataOut.nChannels,
618 618 int(dataOut.NSCAN/dataOut.DPL),
619 619 dataOut.nHeights,dataOut.DPL),
620 620 dtype='complex')
621 621
622 622 for j in range(int(self.buffer.shape[1]/2)):
623 623 for i in range(dataOut.DPL):
624 624 if j+1==int(self.buffer.shape[1]/2) and i+1==dataOut.DPL:
625 625 self.buffer[:,2*j:,:,i]=dataOut.datapure[:,2*i+int(2*j*dataOut.DPL):,:]
626 626 else:
627 627 self.buffer[:,2*j:2*(j+1),:,i]=dataOut.datapure[:,2*i+int(2*j*dataOut.DPL):2*(i+1)+int(2*j*dataOut.DPL),:]
628 628
629 629 return self.buffer
630 630
631 631 def HeightReconstruction(self,dataOut):
632 632
633 633 self.buffer_HR = numpy.zeros((int(dataOut.NSCAN/dataOut.DPL),
634 634 dataOut.nHeights,dataOut.DPL),
635 635 dtype='complex')
636 636
637 637 for i in range(int(dataOut.DPL)): #Only channel B
638 638 if i==0:
639 639 self.buffer_HR[:,:,i]=dataOut.datalags[1,:,:,i]
640 640 else:
641 641 self.buffer_HR[:,:,i]=self.HRonelag(dataOut,i)
642 642
643 643 return self.buffer_HR
644 644
645 645
646 646 def HRonelag(self,dataOut,whichlag):
647 647 self.buffer_HRonelag = numpy.zeros((int(dataOut.NSCAN/dataOut.DPL),
648 648 dataOut.nHeights),
649 649 dtype='complex')
650 650 TxLagRate = dataOut.TxLagRate
651 651 for i in range(self.buffer_HRonelag.shape[0]): # profile
652 652 for j in range(dataOut.nHeights): # height
653 653 if j+int(TxLagRate*whichlag)<dataOut.nHeights:
654 654 self.buffer_HRonelag[i,j]=dataOut.datalags[1,i,j+TxLagRate*whichlag,whichlag]
655 655 else:
656 656 if whichlag!=10:
657 657 self.buffer_HRonelag[i,j]=dataOut.datalags[1,i,(j+TxLagRate*whichlag)%dataOut.nHeights,whichlag+1]
658 658 else:
659 659 if i+2<self.buffer_HRonelag.shape[0]:
660 660 self.buffer_HRonelag[i,j]=dataOut.datalags[1,i+2,(j+TxLagRate*whichlag)%dataOut.nHeights,0]
661 661 else: #i+1==self.buffer_HRonelag.shape[0]:
662 662 self.buffer_HRonelag[i,j]=dataOut.datalags[1,i,(j+TxLagRate*whichlag)%dataOut.nHeights,whichlag] #1, 198,64 = 1,198, 0, 10
663 663
664 664 return self.buffer_HRonelag
665 665
666 666
667 667
668 668 def run(self,dataOut,DPL=11,NSCAN=132, TxLagRate=2):
669 669 dataOut.TxLagRate = TxLagRate
670 670 '''PA = dataOut.data[0,:,:]
671 671 PB = dataOut.data[1,:,:]
672 672 import matplotlib.pyplot as plt
673 673 fig, axes = plt.subplots(2, 11, figsize=(18, 6), sharex=True, sharey=True)
674 674
675 675 for i in range(11):
676 676 axes[0,i].plot(PA[i, :], dataOut.heightList, label=f'PA {i+1}')
677 677 axes[0, i].set_title(f'Lag {i+1}')
678 678 #axes[0, i].set_xscale("log") # Log scale for y-axis
679 679 #axes[0, i].set_xlim([0,1e+7])
680 680 axes[1,i].plot(PB[i, :], dataOut.heightList, label=f'PB {i+1}')
681 681 #axes[1, i].set_xscale("log") # Log scale for y-axis
682 682 #axes[1, i].set_xlim([0,1e+7])
683 683
684 684
685 685 plt.tight_layout()
686 686 plt.show()'''
687 687
688 688
689 689 dataOut.DPL=DPL
690 690 dataOut.NSCAN=NSCAN
691 691 dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 )
692 692 dataOut.lat=-11.95
693 693 dataOut.lon=-76.87
694 694 dataOut.datalags=None
695 695
696 696 dataOut.datalags=numpy.copy(self.LagDistribution(dataOut))
697 697 dataOut.datalags[1,:,:,:]=self.HeightReconstruction(dataOut)
698 698
699 699 '''
700 700 PA = dataOut.datalags[0,5,:,:]
701 701 PB = dataOut.datalags[1,5,:,:]
702 702 import matplotlib.pyplot as plt
703 703 fig, axes = plt.subplots(2, 11, figsize=(18, 6), sharex=True, sharey=True)
704 704
705 705 for i in range(11):
706 706 axes[0,i].plot(PA[:, i], dataOut.heightList, label=f'PA {i+1}')
707 707 axes[0, i].set_title(f'Lag {i+1}')
708 708 #axes[0, i].set_xscale("log") # Log scale for y-axis
709 709 #axes[0, i].set_xlim([0,1e+7])
710 710 axes[1,i].plot(PB[:, i], dataOut.heightList, label=f'PB {i+1}')
711 711 #axes[1, i].set_xscale("log") # Log scale for y-axis
712 712 #axes[1, i].set_xlim([0,1e+7])
713 713
714 714
715 715 plt.tight_layout()
716 716 plt.show()#'''
717 717
718 718 return dataOut
719 719
720 720
721 721 class CrossProdDP(Operation):
722 722 '''
723 723 Written by R. Flores
724 724 '''
725 725 """Operation to calculate cross products of the Double Pulse Experiment.
726 726
727 727 Parameters:
728 728 -----------
729 729 NLAG : int
730 730 Number of lags Long Pulse.
731 731 NRANGE : int
732 732 Number of samples for Long Pulse.
733 733 NCAL : int
734 734 .*
735 735 DPL : int
736 736 Number of lags Double Pulse.
737 737 NDN : int
738 738 .*
739 739 NDT : int
740 740 Number of heights for Double Pulse.*
741 741 NDP : int
742 742 Number of heights for Double Pulse.*
743 743 NSCAN : int
744 744 Number of profiles when the transmitter is on.
745 745 flags_array : intlist
746 746 .*
747 747 NAVG : int
748 748 Number of blocks to be "averaged".
749 749 nkill : int
750 750 Number of blocks not to be considered when averaging.
751 751
752 752 Example
753 753 --------
754 754
755 755 op = proc_unit.addOperation(name='CrossProdDP', optype='other')
756 756 op.addParameter(name='NLAG', value='16', format='int')
757 757 op.addParameter(name='NRANGE', value='0', format='int')
758 758 op.addParameter(name='NCAL', value='0', format='int')
759 759 op.addParameter(name='DPL', value='11', format='int')
760 760 op.addParameter(name='NDN', value='0', format='int')
761 761 op.addParameter(name='NDT', value='66', format='int')
762 762 op.addParameter(name='NDP', value='66', format='int')
763 763 op.addParameter(name='NSCAN', value='132', format='int')
764 764 op.addParameter(name='flags_array', value='(0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300)', format='intlist')
765 765 op.addParameter(name='NAVG', value='16', format='int')
766 766 op.addParameter(name='nkill', value='6', format='int')
767 767
768 768 """
769 769
770 770 def __init__(self, **kwargs):
771 771
772 772 Operation.__init__(self, **kwargs)
773 773 self.bcounter=0
774 774 self.aux=1
775 775 self.lag_products_LP_median_estimates_aux=0
776 776
777 777 def set_header_output(self,dataOut):
778 778
779 779 dataOut.read_samples=len(dataOut.heightList)#int(dataOut.systemHeaderObj.nSamples/dataOut.windowOfFilter)
780 780 padding=numpy.zeros(1,'int32')
781 781 hsize=numpy.zeros(1,'int32')
782 782 bufsize=numpy.zeros(1,'int32')
783 783 nr=numpy.zeros(1,'int32')
784 784 ngates=numpy.zeros(1,'int32') ### ### ### 2
785 785 time1=numpy.zeros(1,'uint64') # pos 3
786 786 time2=numpy.zeros(1,'uint64') # pos 4
787 787 lcounter=numpy.zeros(1,'int32')
788 788 groups=numpy.zeros(1,'int32')
789 789 system=numpy.zeros(4,'int8') # pos 7
790 790 h0=numpy.zeros(1,'float32')
791 791 dh=numpy.zeros(1,'float32')
792 792 ipp=numpy.zeros(1,'float32')
793 793 process=numpy.zeros(1,'int32')
794 794 tx=numpy.zeros(1,'int32')
795 795 ngates1=numpy.zeros(1,'int32') ### ### ### 13
796 796 time0=numpy.zeros(1,'uint64') # pos 14
797 797 nlags=numpy.zeros(1,'int32')
798 798 nlags1=numpy.zeros(1,'int32')
799 799 txb=numpy.zeros(1,'float32') ### ### ### 17
800 800 time3=numpy.zeros(1,'uint64') # pos 18
801 801 time4=numpy.zeros(1,'uint64') # pos 19
802 802 h0_=numpy.zeros(1,'float32')
803 803 dh_=numpy.zeros(1,'float32')
804 804 ipp_=numpy.zeros(1,'float32')
805 805 txa_=numpy.zeros(1,'float32')
806 806 pad=numpy.zeros(100,'int32')
807 807 nbytes=numpy.zeros(1,'int32')
808 808 limits=numpy.zeros(1,'int32')
809 809 ngroups=numpy.zeros(1,'int32') ### ### ### 27
810 810
811 811 dataOut.header=[hsize,bufsize,nr,ngates,time1,time2,
812 812 lcounter,groups,system,h0,dh,ipp,
813 813 process,tx,ngates1,padding,time0,nlags,
814 814 nlags1,padding,txb,time3,time4,h0_,dh_,
815 815 ipp_,txa_,pad,nbytes,limits,padding,ngroups]
816 816
817 817
818 818 #dataOut.header[1][0]=81864
819 819 dataOut.FirstHeight=int(dataOut.heightList[0])
820 820 dataOut.MAXNRANGENDT=max(dataOut.NRANGE,dataOut.NDT)
821 821 dataOut.header[3][0]=max(dataOut.NRANGE,dataOut.NDT)
822 822 dataOut.header[7][0]=dataOut.NAVG
823 823 dataOut.header[9][0]=int(dataOut.heightList[0])
824 824 dataOut.header[10][0]=dataOut.DH
825 825 dataOut.header[17][0]=dataOut.DPL
826 826 dataOut.header[18][0]=dataOut.NLAG
827 827 #self.header[5][0]=0
828 828 dataOut.header[15][0]=dataOut.NDP
829 829 dataOut.header[2][0]=dataOut.NR
830 830
831 831
832 832 def get_products_cabxys(self,dataOut):
833 833
834 834 if self.aux==1:
835 835 self.set_header_output(dataOut)
836 836 self.aux=0
837 837
838 838 dataOut.lags_array=[x / dataOut.DH for x in dataOut.flags_array]
839 839 self.cax=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
840 840 self.cay=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
841 841 self.cbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
842 842 self.cby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
843 843 self.cax2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
844 844 self.cay2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
845 845 self.cbx2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
846 846 self.cby2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
847 847 self.caxbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
848 848 self.caxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
849 849 self.caybx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
850 850 self.cayby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
851 851 self.caxay=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
852 852 self.cbxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
853 853
854 854 for i in range(2):
855 855 for j in range(dataOut.NDP):
856 856 for k in range(int(dataOut.NSCAN/2)):
857 857 n=k%dataOut.DPL
858 858 ax=dataOut.data[0,2*k+i,j].real
859 859 ay=dataOut.data[0,2*k+i,j].imag
860 860 if j+dataOut.lags_array[n]<dataOut.NDP:
861 861 bx=dataOut.data[1,2*k+i,j+int(dataOut.lags_array[n])].real
862 862 by=dataOut.data[1,2*k+i,j+int(dataOut.lags_array[n])].imag
863 863 else:
864 864 if k+1<int(dataOut.NSCAN/2):
865 865 bx=dataOut.data[1,2*(k+1)+i,(dataOut.NRANGE+dataOut.NCAL+j+int(dataOut.lags_array[n]))%dataOut.NDP].real
866 866 by=dataOut.data[1,2*(k+1)+i,(dataOut.NRANGE+dataOut.NCAL+j+int(dataOut.lags_array[n]))%dataOut.NDP].imag
867 867
868 868 if k+1==int(dataOut.NSCAN/2):
869 869 bx=dataOut.data[1,2*k+i,(dataOut.NRANGE+dataOut.NCAL+j+int(dataOut.lags_array[n]))%dataOut.NDP].real
870 870 by=dataOut.data[1,2*k+i,(dataOut.NRANGE+dataOut.NCAL+j+int(dataOut.lags_array[n]))%dataOut.NDP].imag
871 871
872 872 if(k<dataOut.DPL):
873 873 self.cax[j][n][i]=ax
874 874 self.cay[j][n][i]=ay
875 875 self.cbx[j][n][i]=bx
876 876 self.cby[j][n][i]=by
877 877 self.cax2[j][n][i]=ax*ax
878 878 self.cay2[j][n][i]=ay*ay
879 879 self.cbx2[j][n][i]=bx*bx
880 880 self.cby2[j][n][i]=by*by
881 881 self.caxbx[j][n][i]=ax*bx
882 882 self.caxby[j][n][i]=ax*by
883 883 self.caybx[j][n][i]=ay*bx
884 884 self.cayby[j][n][i]=ay*by
885 885 self.caxay[j][n][i]=ax*ay
886 886 self.cbxby[j][n][i]=bx*by
887 887 else:
888 888 self.cax[j][n][i]+=ax
889 889 self.cay[j][n][i]+=ay
890 890 self.cbx[j][n][i]+=bx
891 891 self.cby[j][n][i]+=by
892 892 self.cax2[j][n][i]+=ax*ax
893 893 self.cay2[j][n][i]+=ay*ay
894 894 self.cbx2[j][n][i]+=bx*bx
895 895 self.cby2[j][n][i]+=by*by
896 896 self.caxbx[j][n][i]+=ax*bx
897 897 self.caxby[j][n][i]+=ax*by
898 898 self.caybx[j][n][i]+=ay*bx
899 899 self.cayby[j][n][i]+=ay*by
900 900 self.caxay[j][n][i]+=ax*ay
901 901 self.cbxby[j][n][i]+=bx*by
902 902
903 903
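    # medi: outlier-resistant accumulation of one (height, lag, flip) element over NAVG blocks.
    # The NAVG values are sorted, the nkill/2 smallest and nkill/2 largest are dropped, and the
    # remaining sum is rescaled by NAVG/(NAVG - nkill) so it still estimates the full-NAVG total.
    # e.g. medi([5., 1., 2., 100.], NAVG=4, nkill=2) keeps the two central values -> (2+5)*4/2 = 14.0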
904 904 def medi(self,data_navg,NAVG,nkill):
905 905 sorts=sorted(data_navg)
906 906 rsorts=numpy.arange(NAVG)
907 907 result=0.0
908 908 for k in range(NAVG):
909 909 if k>=nkill/2 and k<NAVG-nkill/2:
910 910 result+=sorts[k]*float(NAVG)/(float(NAVG-nkill))
911 911 return result
912 912
913 913
914 914 def get_dc(self,dataOut):
915 915 if self.bcounter==0:
916 916 dataOut.dc=numpy.zeros(dataOut.NR,dtype='complex64')
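
    # cabxys_navg: stores one incoming block. It stamps the block time into the header, appends
    # the 14 product arrays computed by get_products_cabxys to the *_navg lists, accumulates the
    # per-block noise vector, and increments bcounter (blocks gathered toward NAVG).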
917 917 def cabxys_navg(self,dataOut):
918 918
919 919
920 920 dataOut.header[5][0]=dataOut.TimeBlockSeconds
921 921
922 922 dataOut.LastAVGDate=dataOut.TimeBlockSeconds
923 923
924 924 if self.bcounter==0:
925 925 dataOut.FirstAVGDate=dataOut.TimeBlockSeconds
926 926 dataOut.header[4][0]=dataOut.header[5][0]#firsttimeofNAVG
927 927 if dataOut.CurrentBlock==1:
928 928 dataOut.FirstBlockDate=dataOut.TimeBlockSeconds
929 929 dataOut.header[16][0]=dataOut.header[5][0]#FirsTimeOfTotalBlocks
930 930
931 931 self.cax_navg=[]
932 932 self.cay_navg=[]
933 933 self.cbx_navg=[]
934 934 self.cby_navg=[]
935 935 self.cax2_navg=[]
936 936 self.cay2_navg=[]
937 937 self.cbx2_navg=[]
938 938 self.cby2_navg=[]
939 939 self.caxbx_navg=[]
940 940 self.caxby_navg=[]
941 941 self.caybx_navg=[]
942 942 self.cayby_navg=[]
943 943 self.caxay_navg=[]
944 944 self.cbxby_navg=[]
945 945
946 946 dataOut.noisevector=numpy.zeros((dataOut.MAXNRANGENDT,dataOut.NR,dataOut.NAVG),'float32')
947 947
948 948 dataOut.noisevector_=numpy.zeros((dataOut.read_samples,dataOut.NR,dataOut.NAVG),'float32')
949 949
950 950 self.noisevectorizer(dataOut.NSCAN,dataOut.nProfiles,dataOut.NR,dataOut.MAXNRANGENDT,dataOut.noisevector,dataOut.data,dataOut.dc) #30/03/2020
951 951
952 952 self.cax_navg.append(self.cax)
953 953 self.cay_navg.append(self.cay)
954 954 self.cbx_navg.append(self.cbx)
955 955 self.cby_navg.append(self.cby)
956 956 self.cax2_navg.append(self.cax2)
957 957 self.cay2_navg.append(self.cay2)
958 958 self.cbx2_navg.append(self.cbx2)
959 959 self.cby2_navg.append(self.cby2)
960 960 self.caxbx_navg.append(self.caxbx)
961 961 self.caxby_navg.append(self.caxby)
962 962 self.caybx_navg.append(self.caybx)
963 963 self.cayby_navg.append(self.cayby)
964 964 self.caxay_navg.append(self.caxay)
965 965 self.cbxby_navg.append(self.cbxby)
966 966 self.bcounter+=1
967 967
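    # noise_estimation4x_DP: once NAVG blocks have been gathered, estimates the noise power per
    # channel by applying noise_hs4x first along height for each block and then across the NAVG
    # per-block estimates.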
968 968 def noise_estimation4x_DP(self,dataOut):
969 969 if self.bcounter==dataOut.NAVG:
970 970 dataOut.noise_final=numpy.zeros(dataOut.NR,'float32')
971 971 snoise=numpy.zeros((dataOut.NR,dataOut.NAVG),'float32')
972 972 nvector1=numpy.zeros((dataOut.NR,dataOut.NAVG,dataOut.MAXNRANGENDT),'float32')
973 973 for i in range(dataOut.NR):
974 974 dataOut.noise_final[i]=0.0
975 975 for k in range(dataOut.NAVG):
976 976 snoise[i][k]=0.0
977 977 for j in range(dataOut.MAXNRANGENDT):
978 978 nvector1[i][k][j]= dataOut.noisevector[j][i][k];
979 979 snoise[i][k]=self.noise_hs4x(dataOut.MAXNRANGENDT, nvector1[i][k])
980 980 dataOut.noise_final[i]=self.noise_hs4x(dataOut.NAVG, snoise[i])
981 981
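    # kabxys: once NAVG blocks have been gathered, forms robust block averages. For every
    # (height, lag, flip) element, medi() gives a trimmed estimate across the NAVG blocks; the
    # results are exposed as dataOut.kax ... dataOut.kbxby, packed into dataOut.crossprods, and an
    # RTI profile is derived through RTI_COLUMN.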
982 982 def kabxys(self,dataOut):
983 983
984 984 if self.bcounter==dataOut.NAVG:
985 985
986 986 dataOut.flagNoData = False
987 987
988 988 self.kax=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
989 989 self.kay=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
990 990 self.kbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
991 991 self.kby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
992 992 self.kax2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
993 993 self.kay2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
994 994 self.kbx2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
995 995 self.kby2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
996 996 self.kaxbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
997 997 self.kaxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
998 998 self.kaybx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
999 999 self.kayby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
1000 1000 self.kaxay=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
1001 1001 self.kbxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
1002 1002 auxx = numpy.array(self.cax2_navg)
1003 1003 auxx2 = numpy.array(self.cay2_navg)
1004 1004 auxx3 = numpy.array(self.cbx2_navg)
1005 1005 auxx4 = numpy.array(self.cby2_navg)
1006 1006
1007 1007 pa1 = 20
1008 1008 pa2 = 10
1009 1009 #aux1 = dataOut.kax2[pa1,pa2,0]+dataOut.kax2[pa1,pa2,1]+dataOut.kay2[pa1,pa2,0]+dataOut.kay2[pa1,pa2,1]
1010 1010 #aux2 = dataOut.kbx2[pa1,pa2,0]+dataOut.kbx2[pa1,pa2,1]+dataOut.kby2[pa1,pa2,0]+dataOut.kby2[pa1,pa2,1]
1011 1011
1012 1012 #print(aux1)#*numpy.conjugate(aux1))
1013 1013 #print(aux2)#*numpy.conjugate(aux2))
1014 1014 #print(auxx.shape)
1015 1015 '''
1016 1016 print(numpy.sum(auxx2[:,pa1,pa2,0]+auxx2[:,pa1,pa2,1]))#auxx[:,pa1,pa2,0]+auxx[:,pa1,pa2,1]))#+auxx2[:,pa1,pa2,0]+auxx2[:,pa1,pa2,1]))
1017 1017 print(numpy.sum(auxx3[:,pa1,pa2,0]+auxx3[:,pa1,pa2,1]+auxx4[:,pa1,pa2,0]+auxx4[:,pa1,pa2,1]))
1018 1018 #print(self.cax_navg[:,53,0,1])
1019 1019 #exit(1)
1020 1020 '''
1021 1021
1022 1022 for i in range(self.cax_navg[0].shape[0]):
1023 1023 for j in range(self.cax_navg[0].shape[1]):
1024 1024 for k in range(self.cax_navg[0].shape[2]):
1025 1025 data_navg=[item[i,j,k] for item in self.cax_navg]
1026 1026 self.kax[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1027 1027 data_navg=[item[i,j,k] for item in self.cay_navg]
1028 1028 self.kay[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1029 1029 data_navg=[item[i,j,k] for item in self.cbx_navg]
1030 1030 self.kbx[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1031 1031 data_navg=[item[i,j,k] for item in self.cby_navg]
1032 1032 self.kby[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1033 1033 data_navg=[item[i,j,k] for item in self.cax2_navg]
1034 1034 self.kax2[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1035 1035 data_navg=[item[i,j,k] for item in self.cay2_navg]
1036 1036 self.kay2[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1037 1037 data_navg=[item[i,j,k] for item in self.cbx2_navg]
1038 1038 self.kbx2[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1039 1039 data_navg=[item[i,j,k] for item in self.cby2_navg]
1040 1040 self.kby2[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1041 1041 data_navg=[item[i,j,k] for item in self.caxbx_navg]
1042 1042 self.kaxbx[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1043 1043 data_navg=[item[i,j,k] for item in self.caxby_navg]
1044 1044 self.kaxby[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1045 1045 data_navg=[item[i,j,k] for item in self.caybx_navg]
1046 1046 self.kaybx[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1047 1047 data_navg=[item[i,j,k] for item in self.cayby_navg]
1048 1048 self.kayby[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1049 1049 data_navg=[item[i,j,k] for item in self.caxay_navg]
1050 1050 self.kaxay[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1051 1051 data_navg=[item[i,j,k] for item in self.cbxby_navg]
1052 1052 self.kbxby[i,j,k]=self.medi(data_navg,dataOut.NAVG,dataOut.nkill)
1053 1053
1054 1054
1055 1055 dataOut.kax=self.kax
1056 1056 dataOut.kay=self.kay
1057 1057 dataOut.kbx=self.kbx
1058 1058 dataOut.kby=self.kby
1059 1059 dataOut.kax2=self.kax2
1060 1060 dataOut.kay2=self.kay2
1061 1061 dataOut.kbx2=self.kbx2
1062 1062 dataOut.kby2=self.kby2
1063 1063 dataOut.kaxbx=self.kaxbx
1064 1064 dataOut.kaxby=self.kaxby
1065 1065 dataOut.kaybx=self.kaybx
1066 1066 dataOut.kayby=self.kayby
1067 1067 dataOut.kaxay=self.kaxay
1068 1068 dataOut.kbxby=self.kbxby
1069 1069
1070 1070 #FindMe
1071 1071 pa1 = 20
1072 1072 pa2 = 0
1073 1073 '''
1074 1074 aux1 = dataOut.kax2[pa1,pa2,0]+dataOut.kax2[pa1,pa2,1]#+dataOut.kay2[pa1,pa2,0]+dataOut.kay2[pa1,pa2,1]
1075 1075 aux2 = dataOut.kbx2[pa1,pa2,0]+dataOut.kbx2[pa1,pa2,1]+dataOut.kby2[pa1,pa2,0]+dataOut.kby2[pa1,pa2,1]
1076 1076 aux3 = dataOut.kay2[pa1,pa2,0]+dataOut.kay2[pa1,pa2,1]
1077 1077 aux4 = dataOut.kax2[pa1,pa2,0]+dataOut.kax2[pa1,pa2,1]+dataOut.kay2[pa1,pa2,0]+dataOut.kay2[pa1,pa2,1]
1078 1078
1079 1079 print(aux1)#*numpy.conjugate(aux1))
1080 1080 print(aux3)
1081 1081 print("sum",aux4)
1082 1082 print(aux2)#*numpy.conjugate(aux2))
1083 1083 '''
1084 1084 '''
1085 1085 aux1 = dataOut.kaxbx[pa1,pa2,0]+dataOut.kaxbx[pa1,pa2,1]+dataOut.kayby[pa1,pa2,0]+dataOut.kayby[pa1,pa2,1]
1086 1086 aux2 = dataOut.kaybx[pa1,pa2,0]+dataOut.kaybx[pa1,pa2,1]-dataOut.kaxby[pa1,pa2,0]-dataOut.kaxby[pa1,pa2,1]
1087 1087 print(aux1)
1088 1088 print(aux2)
1089 1089 exit(1)
1090 1090 '''
1091 1091
1092 1092 #print(dataOut.kax[53,0,0])
1093 1093 #exit(1)
1094 1094
1095 1095 self.bcounter=0
1096 1096
1097 1097 dataOut.crossprods=numpy.zeros((3,4,numpy.shape(dataOut.kax)[0],numpy.shape(dataOut.kax)[1],numpy.shape(dataOut.kax)[2]))
1098 1098
1099 1099 dataOut.crossprods[0]=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby]
1100 1100 dataOut.crossprods[1]=[dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2]
1101 1101 dataOut.crossprods[2]=[dataOut.kaxay,dataOut.kbxby,dataOut.kaxbx,dataOut.kaxby]
1102 1102 dataOut.data_for_RTI_DP=numpy.zeros((3,dataOut.NDP))
1103 1103 dataOut.data_for_RTI_DP[0],dataOut.data_for_RTI_DP[1],dataOut.data_for_RTI_DP[2]=self.RTI_COLUMN(dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kayby,dataOut.kaybx,dataOut.kaxby, dataOut.NDP)
1104 1104
1105 1105
1106 1106
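    # RTI_COLUMN: builds dB profiles for the RTI display from the first two lags and both flip
    # states: x00 accumulates channel-A power (kax2+kay2), x01 channel-B power (kbx2+kby2), and
    # x02 the magnitude of the A-B cross product; the power profiles are normalized by 512 before
    # conversion to dB. Returns (x02, x00, x01).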
1107 1107 def RTI_COLUMN(self,kax2,kay2,kbx2,kby2,kaxbx,kayby,kaybx,kaxby, NDP):
1108 1108 x00=numpy.zeros(NDP,dtype='float32')
1109 1109 x01=numpy.zeros(NDP,dtype='float32')
1110 1110 x02=numpy.zeros(NDP,dtype='float32')
1111 1111 for j in range(2):# first couple lags
1112 1112 for k in range(2): #flip
1113 1113 for i in range(NDP): #
1114 1114 fx=numpy.sqrt((kaxbx[i,j,k]+kayby[i,j,k])**2+(kaybx[i,j,k]-kaxby[i,j,k])**2)
1115 1115 x00[i]=x00[i]+(kax2[i,j,k]+kay2[i,j,k])
1116 1116 x01[i]=x01[i]+(kbx2[i,j,k]+kby2[i,j,k])
1117 1117 x02[i]=x02[i]+fx
1118 1118
1119 1119 x00[i]=10.0*numpy.log10(x00[i]/512.)
1120 1120 x01[i]=10.0*numpy.log10(x01[i]/512.)
1121 1121 x02[i]=10.0*numpy.log10(x02[i])
1122 1122 return x02,x00,x01
1123 1123
1124 1124
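    # noisevectorizer: accumulates, for the current block (self.bcounter), the DC-corrected power
    # |data - dc|^2 of the noise profiles (profiles NSCAN .. nProfiles-1) per height and channel,
    # normalized by the number of noise profiles.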
1125 1125 def noisevectorizer(self,NSCAN,nProfiles,NR,MAXNRANGENDT,noisevector,data,dc):
1126 1126
1127 1127 rnormalizer= 1./(float(nProfiles - NSCAN))
1128 1128 #rnormalizer= float(NSCAN)/((float(nProfiles - NSCAN))*float(MAXNRANGENDT))
1129 1129 for i in range(NR):
1130 1130 for j in range(MAXNRANGENDT):
1131 1131 for k in range(NSCAN,nProfiles):
1132 1132 #TODO:integrate just 2nd quartile gates
1133 1133 if k==NSCAN:
1134 1134 noisevector[j][i][self.bcounter]=(abs(data[i][k][j]-dc[i])**2)*rnormalizer
1135 1135 else:
1136 1136 noisevector[j][i][self.bcounter]+=(abs(data[i][k][j]-dc[i])**2)*rnormalizer
1137 1137
1138 1138
1139 1139
1140 1140
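    # noise_hs4x: Hildebrand-Sekhon style noise estimate. It keeps roughly the second quartile of
    # the sorted samples and grows a running mean until the white-noise criterion
    # sumq*j > (j/(j-1) + 1/ndata)*sump^2 is violated, then returns the mean of the samples
    # accepted up to that point.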
1141 1141 def noise_hs4x(self, ndatax, datax):
1142 1142 divider=10#divider was originally 10
1143 1143 noise=0.0
1144 1144 data=numpy.zeros(ndatax,'float32')
1145 1145 ndata1=int(ndatax/4)
1146 1146 ndata2=int(2.5*(ndatax/4.))
1147 1147 ndata=int(ndata2-ndata1)
1148 1148 sorts=sorted(datax)
1149 1149
1150 1150 for k in range(ndata2): # select just second quartile
1151 1151 data[k]=sorts[k+ndata1]
1152 1152 nums_min= int(ndata/divider)
1153 1153 if(int(ndata/divider)> 2):
1154 1154 nums_min= int(ndata/divider)
1155 1155 else:
1156 1156 nums_min=2
1157 1157 sump=0.0
1158 1158 sumq=0.0
1159 1159 j=0
1160 1160 cont=1
1161 1161 while ( (cont==1) and (j<ndata)):
1162 1162 sump+=data[j]
1163 1163 sumq+= data[j]*data[j]
1164 1164 j=j+1
1165 1165 if (j> nums_min):
1166 1166 rtest= float(j/(j-1)) +1.0/ndata
1167 1167 if( (sumq*j) > (rtest*sump*sump ) ):
1168 1168 j=j-1
1169 1169 sump-= data[j]
1170 1170 sumq-=data[j]*data[j]
1171 1171 cont= 0
1172 1172 noise= (sump/j)
1173 1173
1174 1174 return noise
1175 1175
1176 1176
1177 1177
1178 1178 def run(self, dataOut, NLAG=16, NRANGE=0, NCAL=0, DPL=11,
1179 1179 NDN=0, NDT=66, NDP=66, NSCAN=132,
1180 1180 flags_array=(0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300), NAVG=16, nkill=6, **kwargs):
1181 1181
1182 1182 dataOut.NLAG=NLAG
1183 1183 dataOut.NR=len(dataOut.channelList)
1184 1184 dataOut.NRANGE=NRANGE
1185 1185 dataOut.NCAL=NCAL
1186 1186 dataOut.DPL=DPL
1187 1187 dataOut.NDN=NDN
1188 1188 dataOut.NDT=NDT
1189 1189 dataOut.NDP=NDP
1190 1190 dataOut.NSCAN=NSCAN
1191 1191 dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0]
1192 1192 dataOut.H0=int(dataOut.heightList[0])
1193 1193 dataOut.flags_array=flags_array
1194 1194 dataOut.NAVG=NAVG
1195 1195 dataOut.nkill=nkill
1196 1196 dataOut.flagNoData = True
1197 1197
1198 1198 self.get_dc(dataOut)
1199 1199 self.get_products_cabxys(dataOut)
1200 1200 self.cabxys_navg(dataOut)
1201 1201 self.noise_estimation4x_DP(dataOut)
1202 1202 self.kabxys(dataOut)
1203 1203
1204 1204 return dataOut
1205 1205
1206 1206
1207 1207
1208 1208 class IntegrationDP(Operation):
1209 1209 '''
1210 1210 Written by R. Flores
1211 1211 '''
1212 1212 """Operation to integrate the Double Pulse data.
1213 1213
1214 1214 Parameters:
1215 1215 -----------
1216 1216 nint : int
1217 1217 Number of integrations.
1218 1218
1219 1219 Example
1220 1220 --------
1221 1221
1222 1222 op = proc_unit.addOperation(name='IntegrationDP', optype='other')
1223 1223 op.addParameter(name='nint', value='30', format='int')
1224 1224
1225 1225 """
1226 1226
1227 1227 def __init__(self, **kwargs):
1228 1228
1229 1229 Operation.__init__(self, **kwargs)
1230 1230
1231 1231 self.counter=0
1232 1232 self.aux=0
1233 1233 self.init_time=None
1234 1234
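    # integration_for_double_pulse: accumulates the 14 averaged cross products into
    # kabxys_integrated over nint consecutive outputs. The 14 accumulators initially share one
    # zero array, but the `a = a + b` update rebinds each entry, so they do not stay aliased.
    # At the start of the final accumulation the Faraday timestamp quantities (bd_time, year,
    # ut_Faraday) are captured; on the nint-th block the data is marked ready (flagNoData=False),
    # utctime is restored to the start of the integration, and the counter is reset.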
1235 1235 def integration_for_double_pulse(self,dataOut):
1236 1236
1237 1237 if self.aux==1:
1238 1238
1239 1239 dataOut.TimeBlockSeconds_for_dp_power=dataOut.utctime
1240 1240 dataOut.bd_time=gmtime(dataOut.TimeBlockSeconds_for_dp_power)
1241 1241 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
1242 1242 dataOut.ut_Faraday=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
1243 1243 self.aux=0
1244 1244
1245 1245 if self.counter==0:
1246 1246
1247 1247 tmpx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32')
1248 1248 dataOut.kabxys_integrated=[tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx]
1249 1249 self.init_time=dataOut.utctime
1250 1250
1251 1251 if self.counter < dataOut.nint:
1252 1252
1253 1253 dataOut.final_cross_products=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby,dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kaxby,dataOut.kaybx,dataOut.kayby,dataOut.kaxay,dataOut.kbxby]
1254 1254
1255 1255 for ind in range(len(dataOut.kabxys_integrated)): #final cross products
1256 1256 dataOut.kabxys_integrated[ind]=dataOut.kabxys_integrated[ind]+dataOut.final_cross_products[ind]
1257 1257
1258 1258 self.counter+=1
1259 1259
1260 1260 if self.counter==dataOut.nint-1:
1261 1261 self.aux=1
1262 1262
1263 1263 if self.counter==dataOut.nint:
1264 1264 dataOut.flagNoData=False
1265 1265 pa1 = 20
1266 1266 pa2 = 10
1267 1267 '''
1268 1268 print(32*(dataOut.kabxys_integrated[4][pa1,pa2,0]+dataOut.kabxys_integrated[5][pa1,pa2,0]+dataOut.kabxys_integrated[4][pa1,pa2,1]+dataOut.kabxys_integrated[5][pa1,pa2,1]))
1269 1269 print(32*(dataOut.kabxys_integrated[6][pa1,pa2,0]+dataOut.kabxys_integrated[7][pa1,pa2,0]+dataOut.kabxys_integrated[6][pa1,pa2,1]+dataOut.kabxys_integrated[7][pa1,pa2,1]))
1270 1270
1271 1271 exit(1)
1272 1272 '''
1273 1273 dataOut.utctime=self.init_time
1274 1274 self.counter=0
1275 1275 '''
1276 1276 print(dataOut.kabxys_integrated[8][53,6,0]+dataOut.kabxys_integrated[11][53,6,0])
1277 1277 print(dataOut.kabxys_integrated[8][53,9,0]+dataOut.kabxys_integrated[11][53,9,0])
1278 1278 exit(1)
1279 1279 '''
1280 1280
1281 1281
1282 1282 def run(self,dataOut,nint=20):
1283 1283
1284 1284 dataOut.flagNoData=True
1285 1285 dataOut.nint=nint
1286 1286 dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 )
1287 1287 dataOut.lat=-11.95
1288 1288 dataOut.lon=-76.87
1289 1289
1290 1290 self.integration_for_double_pulse(dataOut)
1291 1291
1292 1292
1293 1293 return dataOut
1294 1294
1295 1295
1296 1296 class SumFlips(Operation):
1297 1297 '''
1298 1298 Written by R. Flores
1299 1299 '''
1300 1300 """Operation to sum the flip and unflip part of certain cross products of the Double Pulse.
1301 1301
1302 1302 Parameters:
1303 1303 -----------
1304 1304 None
1305 1305
1306 1306 Example
1307 1307 --------
1308 1308
1309 1309 op = proc_unit.addOperation(name='SumFlips', optype='other')
1310 1310
1311 1311 """
1312 1312
1313 1313 def __init__(self, **kwargs):
1314 1314
1315 1315 Operation.__init__(self, **kwargs)
1316 1316
1317 1317
1318 1318 def rint2DP(self,dataOut):
1319 1319
1320 1320 dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32')
1321 1321
1322 1322 for l in range(dataOut.DPL):
1323 1323
1324 1324 dataOut.rnint2[l]=1.0/(dataOut.nint*dataOut.NAVG*12.0)
1325 1325
1326 1326
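    # SumLags: for each lag, combines the flip (index 1) and non-flip (index 0) halves: the power
    # terms (kabxys_integrated[4..7]) are summed while the cross terms (kabxys_integrated[8..11])
    # are differenced, and everything is scaled by rnint2[l] = 1/(nint*NAVG*12).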
1327 1327 def SumLags(self,dataOut):
1328 1328
1329 1329 for l in range(dataOut.DPL):
1330 1330 '''
1331 1331 if l == 10:
1332 1332 print(32*(dataOut.kabxys_integrated[4][20,10,0]+dataOut.kabxys_integrated[5][20,10,0]+dataOut.kabxys_integrated[4][20,10,1]+dataOut.kabxys_integrated[5][20,10,1]))
1333 1333 print(32*(dataOut.kabxys_integrated[6][20,10,0]+dataOut.kabxys_integrated[7][20,10,0]+dataOut.kabxys_integrated[6][20,10,1]+dataOut.kabxys_integrated[7][20,10,1]))
1334 1334 '''
1335 1335 dataOut.kabxys_integrated[4][:,l,0]=(dataOut.kabxys_integrated[4][:,l,0]+dataOut.kabxys_integrated[4][:,l,1])*dataOut.rnint2[l]
1336 1336 dataOut.kabxys_integrated[5][:,l,0]=(dataOut.kabxys_integrated[5][:,l,0]+dataOut.kabxys_integrated[5][:,l,1])*dataOut.rnint2[l]
1337 1337 dataOut.kabxys_integrated[6][:,l,0]=(dataOut.kabxys_integrated[6][:,l,0]+dataOut.kabxys_integrated[6][:,l,1])*dataOut.rnint2[l]
1338 1338 dataOut.kabxys_integrated[7][:,l,0]=(dataOut.kabxys_integrated[7][:,l,0]+dataOut.kabxys_integrated[7][:,l,1])*dataOut.rnint2[l]
1339 1339
1340 1340 dataOut.kabxys_integrated[8][:,l,0]=(dataOut.kabxys_integrated[8][:,l,0]-dataOut.kabxys_integrated[8][:,l,1])*dataOut.rnint2[l]
1341 1341 dataOut.kabxys_integrated[9][:,l,0]=(dataOut.kabxys_integrated[9][:,l,0]-dataOut.kabxys_integrated[9][:,l,1])*dataOut.rnint2[l]
1342 1342 dataOut.kabxys_integrated[10][:,l,0]=(dataOut.kabxys_integrated[10][:,l,0]-dataOut.kabxys_integrated[10][:,l,1])*dataOut.rnint2[l]
1343 1343 dataOut.kabxys_integrated[11][:,l,0]=(dataOut.kabxys_integrated[11][:,l,0]-dataOut.kabxys_integrated[11][:,l,1])*dataOut.rnint2[l]
1344 1344 '''
1345 1345 if l == 10:
1346 1346 print(32*(dataOut.kabxys_integrated[4][20,10,0]+dataOut.kabxys_integrated[5][20,10,0]))
1347 1347 print(32*(dataOut.kabxys_integrated[6][20,10,0]+dataOut.kabxys_integrated[7][20,10,0]))
1348 1348 exit(1)
1349 1349 '''
1350 1350 def run(self,dataOut):
1351 1351
1352 1352 self.rint2DP(dataOut)
1353 1353 self.SumLags(dataOut)
1354 1354
1355 1355
1356 1356 return dataOut
1357 1357
1358 1358
1359 1359 class FlagBadHeights(Operation):
1360 1360 '''
1361 1361 Written by R. Flores
1362 1362 '''
1363 1363 """Operation to flag bad heights (bad data) of the Double Pulse.
1364 1364
1365 1365 Parameters:
1366 1366 -----------
1367 1367 None
1368 1368
1369 1369 Example
1370 1370 --------
1371 1371
1372 1372 op = proc_unit.addOperation(name='FlagBadHeights', optype='other')
1373 1373
1374 1374 """
1375 1375
1376 1376 def __init__(self, **kwargs):
1377 1377
1378 1378 Operation.__init__(self, **kwargs)
1379 1379
1380 1380 def run(self,dataOut):
1381 1381
1382 1382 dataOut.ibad=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32')
1383 1383
1384 1384 for j in range(dataOut.NDP):
1385 1385 for l in range(dataOut.DPL):
1386 1386 ip1=j+dataOut.NDP*(0+2*l)
1387 1387
1388 1388 if( (dataOut.kabxys_integrated[5][j,l,0] <= 0.) or (dataOut.kabxys_integrated[4][j,l,0] <= 0.) or (dataOut.kabxys_integrated[7][j,l,0] <= 0.) or (dataOut.kabxys_integrated[6][j,l,0] <= 0.)):
1389 1389 dataOut.ibad[j][l]=1
1390 1390 else:
1391 1391 dataOut.ibad[j][l]=0
1392 1392 #print("dataOut.ibad",dataOut.ibad)
1393 1393 return dataOut
1394 1394
1395 1395 class FlagBadHeightsSpectra(Operation):
1396 1396 '''
1397 1397 Written by R. Flores
1398 1398 '''
1399 1399 """Operation to flag bad heights (bad data) of the Double Pulse.
1400 1400
1401 1401 Parameters:
1402 1402 -----------
1403 1403 None
1404 1404
1405 1405 Example
1406 1406 --------
1407 1407
1408 1408 op = proc_unit.addOperation(name='FlagBadHeightsSpectra', optype='other')
1409 1409
1410 1410 """
1411 1411
1412 1412 def __init__(self, **kwargs):
1413 1413
1414 1414 Operation.__init__(self, **kwargs)
1415 1415
1416 1416 def run(self,dataOut):
1417 1417
1418 1418 dataOut.ibad=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32')
1419 1419
1420 1420 for j in range(dataOut.NDP):
1421 1421 for l in range(dataOut.DPL):
1422 1422 ip1=j+dataOut.NDP*(0+2*l)
1423 1423
1424 1424 if( (dataOut.kabxys_integrated[4][j,l,0] <= 0.) or (dataOut.kabxys_integrated[6][j,l,0] <= 0.)):
1425 1425 dataOut.ibad[j][l]=1
1426 1426 else:
1427 1427 dataOut.ibad[j][l]=0
1428 1428
1429 1429 return dataOut
1430 1430
1431 1431 class CleanCohEchoes(Operation):
1432 1432 '''
1433 1433 Written by R. Flores
1434 1434 '''
1435 1435 """Operation to clean coherent echoes.
1436 1436
1437 1437 Parameters:
1438 1438 -----------
1439 1439 None
1440 1440
1441 1441 Example
1442 1442 --------
1443 1443
1444 1444 op = proc_unit.addOperation(name='CleanCohEchoes')
1445 1445
1446 1446 """
1447 1447
1448 1448 def __init__(self, **kwargs):
1449 1449
1450 1450 Operation.__init__(self, **kwargs)
1451 1451
1452 1452 def remove_coh(self,pow):
1453 1453 #print("pow inside: ",pow)
1454 1454 #print(pow.shape)
1455 1455 q75,q25 = numpy.percentile(pow,[75,25],axis=0)
1456 1456 #print(q75,q25)
1457 1457 intr_qr = q75-q25
1458 1458
1459 1459 max = q75+(1.5*intr_qr)
1460 1460 min = q25-(1.5*intr_qr)
1461 1461
1462 1462 pow[pow > max] = numpy.nan
1463 1463
1464 1464 #print("Max: ",max)
1465 1465 #print("Min: ",min)
1466 1466
1467 1467 return pow
1468 1468
1469 1469 def mad_based_outlier_V0(self, points, thresh=3.5):
1470 1470 #print("points: ",points)
1471 1471 if len(points.shape) == 1:
1472 1472 points = points[:,None]
1473 1473 median = numpy.nanmedian(points, axis=0)
1474 1474 diff = numpy.nansum((points - median)**2, axis=-1)
1475 1475 diff = numpy.sqrt(diff)
1476 1476 med_abs_deviation = numpy.nanmedian(diff)
1477 1477
1478 1478 modified_z_score = 0.6745 * diff / med_abs_deviation
1479 1479 #print(modified_z_score)
1480 1480 return modified_z_score > thresh
1481 1481
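    # mad_based_outlier: flags outliers through the modified z-score
    # 0.6745*|x - median| / MAD; points whose score exceeds `thresh` (default 3.5) come back True.
    # e.g. mad_based_outlier(numpy.array([1., 1.1, 0.9, 50.])) -> [False, False, False, True]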
1482 1482 def mad_based_outlier(self, points, thresh=3.5):
1483 1483
1484 1484 median = numpy.nanmedian(points)
1485 1485 diff = (points - median)**2
1486 1486 diff = numpy.sqrt(diff)
1487 1487 med_abs_deviation = numpy.nanmedian(diff)
1488 1488
1489 1489 modified_z_score = 0.6745 * diff / med_abs_deviation
1490 1490
1491 1491 return modified_z_score > thresh
1492 1492
1493 1493 def removeSpreadF_V0(self,dataOut):
1494 1494 for i in range(11):
1495 1495 print("BEFORE Chb: ",i,dataOut.kabxys_integrated[6][:,i,0])
1496 1496 #exit(1)
1497 1497
1498 1498 #Removing echoes greater than 35 dB
1499 1499 maxdB = 35 #SHOULD BE NOISE + SOMETHING!!
1500 1500 #print(dataOut.kabxys_integrated[6][:,0,0])
1501 1501 data = numpy.copy(10*numpy.log10(dataOut.kabxys_integrated[6][:,0,0])) #Lag0 ChB
1502 1502 #print(data)
1503 1503 for i in range(12,data.shape[0]):
1504 1504 #for j in range(data.shape[1]):
1505 1505 if data[i]>maxdB:
1506 1506 dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Because these echoes are intense, two samples
1507 1507 dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #before and after are also removed
1508 1508 #dataOut.kabxys_integrated[4][i-1,:,0] = numpy.nan
1509 1509 #dataOut.kabxys_integrated[6][i-1,:,0] = numpy.nan
1510 1510 #dataOut.kabxys_integrated[4][i+1,:,0] = numpy.nan
1511 1511 #dataOut.kabxys_integrated[6][i+1,:,0] = numpy.nan
1512 1512 dataOut.flagSpreadF = True
1513 1513 print("Removing Threshold",i)
1514 1514 #print("i: ",i)
1515 1515
1516 1516 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,0,0])
1517 1517 #exit(1)
1518 1518
1519 1519 #Removing outliers from the profile
1520 1520 nlag = 9
1521 1521 minHei = 180
1522 1522 #maxHei = 600
1523 1523 maxHei = 525
1524 1524 inda = numpy.where(dataOut.heightList >= minHei)
1525 1525 indb = numpy.where(dataOut.heightList <= maxHei)
1526 1526 minIndex = inda[0][0]
1527 1527 maxIndex = indb[0][-1]
1528 1528 #l0 = 0
1529 1529 #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0])
1530 1530 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0])
1531 1531 #exit(1)
1532 1532 #'''
1533 1533 l0 = 0
1534 1534 #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0])
1535 1535 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0])
1536 1536
1537 1537 import matplotlib.pyplot as plt
1538 1538 for i in range(l0,l0+11):
1539 1539 plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i))
1540 1540 #plt.xlim(1.e5,1.e8)
1541 1541 plt.legend()
1542 1542 plt.xlim(0,2000)
1543 1543 plt.show()
1544 1544 #'''
1545 1545 #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0
1546 1546 outliers_IDs = []
1547 1547 '''
1548 1548 for lag in range(11):
1549 1549 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[4][minIndex:,lag,0], thresh=3.)
1550 1550 #print("Outliers: ",outliers)
1551 1551 #indexes.append(outliers.nonzero())
1552 1552 #numpy.concatenate((outliers))
1553 1553 #dataOut.kabxys_integrated[4][minIndex:,lag,0][outliers == True] = numpy.nan
1554 1554 outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero())
1555 1555 '''
1556 1556 for lag in range(11):
1557 1557 #outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.)
1558 1558 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0])
1559 1559 outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero())
1560 1560 #print(outliers_IDs)
1561 1561 #exit(1)
1562 1562 if len(outliers_IDs) != 0:
1563 1563 outliers_IDs=numpy.array(outliers_IDs)
1564 1564 outliers_IDs=outliers_IDs.ravel()
1565 1565 outliers_IDs=outliers_IDs.astype(numpy.dtype('int64'))
1566 1566
1567 1567 (uniq, freq) = (numpy.unique(outliers_IDs, return_counts=True))
1568 1568 aux_arr = numpy.column_stack((uniq,freq))
1569 1569 #print("repetitions: ",aux_arr)
1570 1570
1571 1571 #if aux_arr != []:
1572 1572 final_index = []
1573 1573 for i in range(aux_arr.shape[0]):
1574 1574 if aux_arr[i,1] >= 10:
1575 1575 final_index.append(aux_arr[i,0])
1576 1576
1577 1577 if final_index != [] and len(final_index) > 1:
1578 1578 final_index = numpy.array(final_index) + minIndex
1579 1579 #print("final_index: ",final_index)
1580 1580 following_index = final_index[-1]+1 #Remove following index to ensure we remove remaining SpreadF
1581 1581 previous_index = final_index[0]-1 #Remove previous index to ensure we remove remaining SpreadF
1582 1582 final_index = numpy.concatenate(([previous_index],final_index,[following_index]))
1583 1583 final_index = numpy.unique(final_index) #If there was only one outlier
1584 1584 #print("final_index: ",final_index)
1585 1585 #exit(1)
1586 1586 dataOut.kabxys_integrated[4][final_index,:,0] = numpy.nan
1587 1587 dataOut.kabxys_integrated[6][final_index,:,0] = numpy.nan
1588 1588
1589 1589 dataOut.flagSpreadF = True
1590 1590
1591 1591 #print(final_index+minIndex)
1592 1592 #print(outliers_IDs)
1593 1593 #exit(1)
1594 1594 #print("flagSpreadF",dataOut.flagSpreadF)
1595 1595
1596 1596 '''
1597 1597 for lag in range(11):
1598 1598 #print("Lag: ",lag)
1599 1599 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.)
1600 1600 dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0][outliers == True] = numpy.nan
1601 1601 '''
1602 1602 #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0])
1603 1603 '''
1604 1604 import matplotlib.pyplot as plt
1605 1605 for i in range(11):
1606 1606 plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i))
1607 1607 plt.xlim(0,2000)
1608 1608 plt.legend()
1609 1609 plt.grid()
1610 1610 plt.show()
1611 1611 '''
1612 1612 '''
1613 1613 for nlag in range(11):
1614 1614 print("BEFORE",dataOut.kabxys_integrated[6][:,nlag,0])
1615 1615 #exit(1)
1616 1616 '''
1617 1617 #dataOut.kabxys_integrated[6][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[6][minIndex:,:,0])
1618 1618
1619 1619
1620 1620 '''
1621 1621 for nlag in range(11):
1622 1622 print("AFTER",dataOut.kabxys_integrated[6][:,nlag,0])
1623 1623 exit(1)
1624 1624 '''
1625 1625 #print("AFTER",dataOut.kabxys_integrated[4][33,:,0])
1626 1626 #print("AFTER",dataOut.kabxys_integrated[6][33,:,0])
1627 1627 #exit(1)
1628 1628
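    # removeSpreadF: scans 15 overlapping height windows (starting at sample 12; the window top
    # depends on local time) and flags per-lag MAD outliers. Height indices flagged in at least
    # 3*11 window/lag combinations, plus one neighbour on each side, are blanked (NaN) in the
    # channel-A and channel-B power products (kabxys_integrated[4] and [6]). Afterwards any height
    # whose channel-B lag-0 power exceeds the noise level by 10 dB is blanked together with two
    # samples before and after, and dataOut.flagSpreadF is raised.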
1629 1629 def removeSpreadF(self,dataOut):
1630 1630 #for i in range(11):
1631 1631 #print("BEFORE Chb: ",i,dataOut.kabxys_integrated[6][:,i,0])
1632 1632 #exit(1)
1633 1633
1634 1634 #for i in range(12,data.shape[0]):
1635 1635 #for j in range(data.shape[1]):
1636 1636 #if data[i]>maxdB:
1637 1637 #dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Because these echoes are intense, two samples
1638 1638 #dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #before and after are also removed
1639 1639 #dataOut.flagSpreadF = True
1640 1640 #print("Removing Threshold",i)
1641 1641 #print("i: ",i)
1642 1642
1643 1643 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,0,0])
1644 1644 #exit(1)
1645 1645
1646 1646 #Removing outliers from the profile
1647 1647 nlag = 9
1648 1648 minHei = 180
1649 1649 #maxHei = 600
1650 1650 maxHei = 525
1651 1651 inda = numpy.where(dataOut.heightList >= minHei)
1652 1652 indb = numpy.where(dataOut.heightList <= maxHei)
1653 1653 minIndex = inda[0][0]
1654 1654 maxIndex = indb[0][-1]
1655 1655 #l0 = 0
1656 1656 #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0])
1657 1657 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0])
1658 1658 #exit(1)
1659 1659 '''
1660 1660 l0 = 0
1661 1661 #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0])
1662 1662 #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0])
1663 1663
1664 1664 import matplotlib.pyplot as plt
1665 1665 for i in range(l0,l0+11):
1666 1666 plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i))
1667 1667 #plt.xlim(1.e5,1.e8)
1668 1668 plt.legend()
1669 1669 plt.xlim(0,2000)
1670 1670 plt.show()
1671 1671 '''
1672 1672 #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0
1673 1673 outliers_IDs = []
1674 1674 '''
1675 1675 for lag in range(11):
1676 1676 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[4][minIndex:,lag,0], thresh=3.)
1677 1677 #print("Outliers: ",outliers)
1678 1678 #indexes.append(outliers.nonzero())
1679 1679 #numpy.concatenate((outliers))
1680 1680 #dataOut.kabxys_integrated[4][minIndex:,lag,0][outliers == True] = numpy.nan
1681 1681 outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero())
1682 1682 '''
1683 1683 '''
1684 1684 for lag in range(11):
1685 1685 #outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.)
1686 1686 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0])
1687 1687 outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero())
1688 1688 '''
1689 1689
1690 1690 for i in range(15):
1691 1691 minIndex = 12+i#12
1692 1692 #maxIndex = 22+i#35
1693 1693 if gmtime(dataOut.utctime).tm_hour >= 23. or gmtime(dataOut.utctime).tm_hour < 3.:
1694 1694 maxIndex = 31+i#35
1695 1695 else:
1696 1696 maxIndex = 22+i#35
1697 1697 for lag in range(11):
1698 1698 #outliers = mad_based_outlier(pow_clean3[12:27], thresh=2.)
1699 1699 #print("Cuts: ",first_cut*15, last_cut*15)
1700 1700 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0])
1701 1701 aux = minIndex+numpy.array(outliers.nonzero()).ravel()
1702 1702 outliers_IDs=numpy.append(outliers_IDs,aux)
1703 1703 #print(minIndex+numpy.array(outliers.nonzero()).ravel())
1704 1704 #print(outliers_IDs)
1705 1705 #exit(1)
1706 1706 if len(outliers_IDs) != 0:
1707 1707 outliers_IDs=numpy.array(outliers_IDs)
1708 1708 #outliers_IDs=outliers_IDs.ravel()
1709 1709 outliers_IDs=outliers_IDs.astype(numpy.dtype('int64'))
1710 1710 #print(outliers_IDs)
1711 1711 #exit(1)
1712 1712
1713 1713 (uniq, freq) = (numpy.unique(outliers_IDs, return_counts=True))
1714 1714 aux_arr = numpy.column_stack((uniq,freq))
1715 1715 #print("repetitions: ",aux_arr)
1716 1716 #exit(1)
1717 1717
1718 1718 #if aux_arr != []:
1719 1719 final_index = []
1720 1720 for i in range(aux_arr.shape[0]):
1721 1721 if aux_arr[i,1] >= 3*11:
1722 1722 final_index.append(aux_arr[i,0])
1723 1723
1724 1724 if final_index != []:# and len(final_index) > 1:
1725 1725 #final_index += minIndex
1726 1726 #print("final_index: ",final_index)
1727 1727 following_index = final_index[-1]+1 #Remove following index to ensure we remove remaining SpreadF
1728 1728 previous_index = final_index[0]-1 #Remove previous index to ensure we remove remaining SpreadF
1729 1729 final_index = numpy.concatenate(([previous_index],final_index,[following_index]))
1730 1730 final_index = numpy.unique(final_index) #If there was only one outlier
1731 1731 #print("final_index: ",final_index)
1732 1732 #exit(1)
1733 1733 dataOut.kabxys_integrated[4][final_index,:,0] = numpy.nan
1734 1734 dataOut.kabxys_integrated[6][final_index,:,0] = numpy.nan
1735 1735
1736 1736 dataOut.flagSpreadF = True
1737 1737
1738 1738 #Removing echoes greater than 35 dB
1739 1739 if hasattr(dataOut.pbn, "__len__"):
1740 1740 maxdB = 10*numpy.log10(dataOut.pbn[0]) + 10 #Lag 0 Noise
1741 1741 else:
1742 1742 maxdB = 10*numpy.log10(dataOut.pbn) + 10
1743 1743
1744 1744 #print(dataOut.kabxys_integrated[6][:,0,0])
1745 1745 data = numpy.copy(10*numpy.log10(dataOut.kabxys_integrated[6][:,0,0])) #Lag0 ChB
1746 1746 #print("data: ",data)
1747 1747
1748 1748 for i in range(12,data.shape[0]):
1749 1749 #for j in range(data.shape[1]):
1750 1750 if data[i]>maxdB:
1751 1751 dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Because these echoes are intense, two samples
1752 1752 dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #before and after are also removed
1753 1753 dataOut.flagSpreadF = True
1754 1754 #print("Removing Threshold",i)
1755 1755
1756 1756 #print(final_index+minIndex)
1757 1757 #print(outliers_IDs)
1758 1758 #exit(1)
1759 1759 #print("flagSpreadF",dataOut.flagSpreadF)
1760 1760
1761 1761 '''
1762 1762 for lag in range(11):
1763 1763 #print("Lag: ",lag)
1764 1764 outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.)
1765 1765 dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0][outliers == True] = numpy.nan
1766 1766 '''
1767 1767 #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0])
1768 1768 '''
1769 1769 import matplotlib.pyplot as plt
1770 1770 for i in range(11):
1771 1771 plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i))
1772 1772 plt.xlim(0,2000)
1773 1773 plt.legend()
1774 1774 plt.grid()
1775 1775 plt.show()
1776 1776 '''
1777 1777 '''
1778 1778 for nlag in range(11):
1779 1779 print("BEFORE",dataOut.kabxys_integrated[6][:,nlag,0])
1780 1780 #exit(1)
1781 1781 '''
1782 1782 #dataOut.kabxys_integrated[6][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[6][minIndex:,:,0])
1783 1783
1784 1784
1785 1785 '''
1786 1786 for nlag in range(11):
1787 1787 print("AFTER",dataOut.kabxys_integrated[6][:,nlag,0])
1788 1788 exit(1)
1789 1789 '''
1790 1790
1791 1791 def run(self,dataOut):
1792 1792 dataOut.flagSpreadF = False
1793 1793 #print(gmtime(dataOut.utctime).tm_hour)
1794 1794 #print(dataOut.ut_Faraday)
1795 1795 #exit(1)
1796 1796 if gmtime(dataOut.utctime).tm_hour >= 23. or gmtime(dataOut.utctime).tm_hour < 11.: #18-06 LT
1797 1797 #print("Inside if we are in SpreadF Time: ",gmtime(dataOut.utctime).tm_hour)
1798 1798 #if gmtime(dataOut.utctime).tm_hour == 2 and gmtime(dataOut.utctime).tm_min == 10: #Year: 2023, DOY:310
1799 1799 #if gmtime(dataOut.utctime).tm_hour == 3 and gmtime(dataOut.utctime).tm_min == 10: #Year: 2023, DOY:312
1800 1800 #if gmtime(dataOut.utctime).tm_hour == 0 and gmtime(dataOut.utctime).tm_min == 0: #Year: 2024, DOY:082
1801 1801 #if 1: #Year: 2024, DOY:081
1802 1802 #if gmtime(dataOut.utctime).tm_hour == 0: #Year: 2024, DOY:080
1803 1803 #pass
1804 1804 #else:
1805 1805 self.removeSpreadF(dataOut)
1806 1806 #exit(1)
1807 1807
1808 1808 return dataOut
1809 1809
1810 1810
1811 1811 class NoisePower(Operation):
1812 1812 '''
1813 1813 Written by R. Flores
1814 1814 '''
1815 1815 """Operation to get noise power from the integrated data of the Double Pulse.
1816 1816
1817 1817 Parameters:
1818 1818 -----------
1819 1819 None
1820 1820
1821 1821 Example
1822 1822 --------
1823 1823
1824 1824 op = proc_unit.addOperation(name='NoisePower', optype='other')
1825 1825
1826 1826 """
1827 1827
1828 1828 def __init__(self, **kwargs):
1829 1829
1830 1830 Operation.__init__(self, **kwargs)
1831 1831
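    # hildebrand: Hildebrand-Sekhon style noise estimate over the lower half of the sorted lag
    # powers; the running mean grows until (sumq*j)/(rtest*sump*sump) exceeds 0.990, and the mean
    # of the samples accepted up to that point is returned.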
1832 1832 def hildebrand(self,dataOut,data):
1833 1833
1834 1834 divider=10 # divider was originally 10
1835 1835 noise=0.0
1836 1836 n1=0
1837 1837 n2=int(dataOut.NDP/2)
1838 1838 sorts= sorted(data)
1839 1839 nums_min= dataOut.NDP/divider
1840 1840 if((dataOut.NDP/divider)> 2):
1841 1841 nums_min= int(dataOut.NDP/divider)
1842 1842
1843 1843 else:
1844 1844 nums_min=2
1845 1845 sump=0.0
1846 1846 sumq=0.0
1847 1847 j=0
1848 1848 cont=1
1849 1849 while( (cont==1) and (j<n2)):
1850 1850 sump+=sorts[j+n1]
1851 1851 sumq+= sorts[j+n1]*sorts[j+n1]
1852 1852 t3= sump/(j+1)
1853 1853 j=j+1
1854 1854 if(j> nums_min):
1855 1855 rtest= float(j/(j-1)) +1.0/dataOut.NAVG
1856 1856 t1= (sumq*j)
1857 1857 t2=(rtest*sump*sump)
1858 1858 if( (t1/t2) > 0.990):
1859 1859 j=j-1
1860 1860 sump-= sorts[j+n1]
1861 1861 sumq-=sorts[j+n1]*sorts[j+n1]
1862 1862 cont= 0
1863 1863
1864 1864 noise= sump/j
1865 1865 stdv=numpy.sqrt((sumq- noise*noise)/(j-1))
1866 1866 return noise
1867 1867
1868 1868 def run(self,dataOut):
1869 1869
1870 1870 p=numpy.zeros((dataOut.NR,dataOut.NDP,dataOut.DPL),'float32')
1871 1871 av=numpy.zeros(dataOut.NDP,'float32')
1872 1872 dataOut.pnoise=numpy.zeros(dataOut.NR,'float32')
1873 1873
1874 1874 p[0,:,:]=dataOut.kabxys_integrated[4][:,:,0]+dataOut.kabxys_integrated[5][:,:,0] #total power for channel 0, just pulse with non-flip
1875 1875 p[1,:,:]=dataOut.kabxys_integrated[6][:,:,0]+dataOut.kabxys_integrated[7][:,:,0] #total power for channel 1
1876 1876
1877 1877 for i in range(dataOut.NR):
1878 1878 dataOut.pnoise[i]=0.0
1879 1879 for k in range(dataOut.DPL):
1880 1880 dataOut.pnoise[i]+= self.hildebrand(dataOut,p[i,:,k])
1881 1881
1882 1882 dataOut.pnoise[i]=dataOut.pnoise[i]/dataOut.DPL
1883 1883
1884 1884
1885 1885 dataOut.pan=.8*dataOut.pnoise[0] # weights could change
1886 1886 dataOut.pbn=.8*dataOut.pnoise[1] # weights could change
1887 1887 '''
1888 1888 print("pan: ",dataOut.pan)
1889 1889 print("pbn: ",dataOut.pbn)
1890 1890 print("pan dB: ",10*numpy.log10(dataOut.pan))
1891 1891 print("pbn dB: ",10*numpy.log10(dataOut.pbn))
1892 1892 exit(1)
1893 1893 '''
1894 1894 dataOut.power = dataOut.getPower()
1895 1895 return dataOut
1896 1896
1897 1897
1898 1898 class DoublePulseACFs(Operation):
1899 1899 '''
1900 1900 Written by R. Flores
1901 1901 '''
1902 1902 """Operation to get the ACFs of the Double Pulse.
1903 1903
1904 1904 Parameters:
1905 1905 -----------
1906 1906 None
1907 1907
1908 1908 Example
1909 1909 --------
1910 1910
1911 1911 op = proc_unit.addOperation(name='DoublePulseACFs', optype='other')
1912 1912
1913 1913 """
1914 1914
1915 1915 def __init__(self, **kwargs):
1916 1916
1917 1917 Operation.__init__(self, **kwargs)
1918 1918 self.aux=1
1919 1919
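    # For every height and lag this computes: the per-channel total powers pa and pb, the signal
    # power p = pa + pb - (pan + pbn), the ACF real/imaginary parts rhor/rhoi normalized by
    # sqrt((pa-pan)*(pb-pbn)), and the ACF error sd by error propagation. Points where one
    # channel's excess power is more than 2.25 times the other's are flagged as EEJ-contaminated
    # in igcej; points where either channel is at or below its noise level get sd = 1e30 and ibad = 4.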
1920 1920 def run(self,dataOut):
1921 1921
1922 1922 dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32')
1923 1923 #print("init")
1924 1924 if self.aux==1:
1925 1925 dataOut.rhor=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1926 1926 dataOut.rhoi=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1927 1927 dataOut.sdp=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1928 1928 dataOut.sd=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1929 1929 dataOut.p=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1930 1930 dataOut.alag=numpy.zeros(dataOut.NDP,'float32')
1931 1931 for l in range(dataOut.DPL):
1932 1932 dataOut.alag[l]=l*dataOut.DH*2.0/150.0
1933 1933 self.aux=0
1934 1934 sn4=dataOut.pan*dataOut.pbn
1935 1935 rhorn=0
1936 1936 rhoin=0
1937 1937 panrm=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
1938 1938
1939 1939 id = numpy.where(dataOut.heightList>700)[0]
1940 1940 #print("kabxys: ", numpy.shape(dataOut.kabxys_integrated))
1941 1941 for i in range(dataOut.NDP):
1942 1942 for j in range(dataOut.DPL):
1943 1943 ################# Total power
1944 1944 pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0])
1945 1945 #print("pa::",pa)
1946 1946 pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0])
1947 1947 st4=pa*pb
1948 1948
1949 1949 '''
1950 1950 if i > id[0]:
1951 1951 dataOut.p[i,j] = pa-dataOut.pan
1952 1952 else:
1953 1953 dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn)
1954 1954 '''
1955 1955 #print("init 2.6",pa,dataOut.pan)
1956 1956 #dataOut.pan = 23600/2
1957 1957 #dataOut.pbn = 23600/2
1958 1958 dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn)
1959 1959 #print(i,j,dataOut.p[i,j])
1960 1960 dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb))
1961 1961 ## ACF
1962 1962
1963 1963 rhorp=dataOut.kabxys_integrated[8][i,j,0]+dataOut.kabxys_integrated[11][i,j,0]
1964 1964 rhoip=dataOut.kabxys_integrated[10][i,j,0]-dataOut.kabxys_integrated[9][i,j,0]
1965 1965 '''
1966 1966 import matplotlib.pyplot as plt
1967 1967 plt.plot(numpy.abs(dataOut.kabxys_integrated[4][:,j,0]+dataOut.kabxys_integrated[5][:,j,0])+numpy.abs(dataOut.kabxys_integrated[6][:,j,0]+dataOut.kabxys_integrated[7][:,j,0]),dataOut.heightList)
1968 1968 plt.axvline((dataOut.pan+dataOut.pbn))
1969 1969 plt.xlim(20000,30000)
1970 1970 #plt.plot(numpy.abs(dataOut.kabxys_integrated[4][:,j,0]+dataOut.kabxys_integrated[5][:,j,0])+numpy.abs(dataOut.kabxys_integrated[6][:,j,0]+dataOut.kabxys_integrated[7][:,j,0])-(dataOut.pan+dataOut.pbn),dataOut.heightList)
1971 1971 #plt.xlim(1,10000)
1972 1972 plt.grid()
1973 1973 plt.show()
1974 1974 '''
1975 1975 if ((pa>dataOut.pan)&(pb>dataOut.pbn)):
1976 1976
1977 1977 ss4=numpy.abs((pa-dataOut.pan)*(pb-dataOut.pbn))
1978 1978 panrm[i,j]=math.sqrt(ss4)
1979 1979 rnorm=1/panrm[i,j]
1980 1980 ## ACF
1981 1981 dataOut.rhor[i,j]=rhorp*rnorm
1982 1982 dataOut.rhoi[i,j]=rhoip*rnorm
1983 1983 #if i==13 and j== 0 or i ==14 and j==0:
1984 1984 #print(numpy.sum(rhorp))
1985 1985 #exit(1)
1986 1986 ############# Compute standard error for ACF
1987 1987 stoss4=st4/ss4
1988 1988 snoss4=sn4/ss4
1989 1989 rp2=((rhorp*rhorp)+(rhoip*rhoip))/st4
1990 1990 rn2=((rhorn*rhorn)+(rhoin*rhoin))/sn4
1991 1991 rs2=(dataOut.rhor[i,j]*dataOut.rhor[i,j])+(dataOut.rhoi[i,j]*dataOut.rhoi[i,j])
1992 1992 st=1.0+rs2*(stoss4-(2*math.sqrt(stoss4*snoss4)))
1993 1993 stn=1.0+rs2*(snoss4-(2*math.sqrt(stoss4*snoss4)))
1994 1994 dataOut.sd[i,j]=((stoss4*((1.0+rp2)*st+(2.0*rp2*rs2*snoss4)-4.0*math.sqrt(rs2*rp2)))+(0.25*snoss4*((1.0+rn2)*stn+(2.0*rn2*rs2*stoss4)-4.0*math.sqrt(rs2*rn2))))*dataOut.rnint2[j]
1995 1995 dataOut.sd[i,j]=numpy.abs(dataOut.sd[i,j])
1996 1996 '''
1997 1997 if i == 4:
1998 1998 print(i,j,dataOut.sd[i,j])
1999 1999 '''
2000 2000 #print(i,j,dataOut.rhor[i,j])
2001 2001 #exit(1)
2002 2002 else: #default values for bad points
2003 2003 rnorm=1/math.sqrt(st4)
2004 2004 dataOut.sd[i,j]=1.e30
2005 2005 dataOut.ibad[i,j]=4
2006 2006 dataOut.rhor[i,j]=rhorp*rnorm
2007 2007 dataOut.rhoi[i,j]=rhoip*rnorm
2008 2008 '''
2009 2009 if i==47:
2010 2010 print("j",j)
2011 2011 print("pa",pa/dataOut.pan)
2012 2012 print("pb",pb/dataOut.pbn)
2013 2013 print((pa/dataOut.pan-1.0))
2014 2014 print((pb/dataOut.pbn-1.0))
2015 2015 '''
2016 2016 #'''
2017 2017 if ((pb/dataOut.pbn-1.0)>2.25*(pa/dataOut.pan-1.0)): #To flag bad points from the pulse and EEJ for lags != 0 for Channel B
2018 2018 #print(dataOut.heightList[i],"EJJ")
2019 2019 dataOut.igcej[i,j]=1
2020 2020
2021 2021 elif ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)):
2022 2022 #print(dataOut.heightList[i],"EJJ")
2023 2023 dataOut.igcej[i,j]=1
2024 2024 #'''
2025 2025 '''
2026 2026 if ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)):
2027 2027 #print("EJJ")
2028 2028 dataOut.igcej[i,j]=1
2029 2029 '''
2030 2030 '''
2031 2031 if i == 4:
2032 2032 exit(1)
2033 2033 '''
2034 2034 #print(numpy.sum(dataOut.kabxys_integrated[8][:,:,0]+dataOut.kabxys_integrated[11][:,:,0]))
2035 2035 #print(numpy.sum(dataOut.kabxys_integrated[10][:,:,0]-dataOut.kabxys_integrated[9][:,:,0]))
2036 2036 #print(numpy.sum(dataOut.rhor))
2037 2037 #print("bad points: ",dataOut.igcej[47,:])
2038 2038 #exit(1)
2039 2039 '''
2040 2040 for l in range(11):
2041 2041 print("p: ",dataOut.p[:,l])
2042 2042 exit(1)
2043 2043 '''
2044 2044 #print(pa)
2045 2045 #print("pa: ", numpy.shape(pa))
2046 2046 #print(numpy.shape(dataOut.heightList))
2047 2047 '''
2048 2048 import matplotlib.pyplot as plt
2049 2049 plt.plot(dataOut.p[:,-1],dataOut.heightList)
2050 2050 #plt.plot(pa/dataOut.pan-1.,dataOut.heightList)
2051 2051 #plt.plot(pb/dataOut.pbn-1.,dataOut.heightList)
2052 2052 plt.grid()
2053 2053 plt.xlim(0,1e5)
2054 2054 plt.show()
2055 2055 #print("p: ",dataOut.p[33,:])
2056 2056 #exit(1)
2057 2057 #'''
2058 2058 '''
2059 2059 import matplotlib.pyplot as plt
2060 2060 #plt.plot(numpy.abs(dataOut.kabxys_integrated[4][:,j,0]+dataOut.kabxys_integrated[5][:,j,0])+numpy.abs(dataOut.kabxys_integrated[6][:,j,0]+dataOut.kabxys_integrated[7][:,j,0]),dataOut.heightList)
2061 2061 #plt.axvline((dataOut.pan+dataOut.pbn))
2062 2062 print(numpy.shape(dataOut.p))
2063 2063 plt.plot(dataOut.p[:,0]*dataOut.heightList*dataOut.heightList,dataOut.heightList)
2064 2064
2065 2065 #plt.xlim(1,100000000)
2066 2066 plt.xlim(100,100000000)
2067 2067 plt.grid()
2068 2068 plt.show()
2069 2069 '''
2070 2070 #print(numpy.sum(dataOut.rhor))
2071 2071 #exit(1)
2072 2072 return dataOut
2073 2073
2074 2074 class DoublePulseACFs_PerLag(Operation):
2075 2075 '''
2076 2076 Written by R. Flores
2077 2077 '''
2078 2078 """Operation to get the ACFs of the Double Pulse.
2079 2079
2080 2080 Parameters:
2081 2081 -----------
2082 2082 None
2083 2083
2084 2084 Example
2085 2085 --------
2086 2086
2087 2087 op = proc_unit.addOperation(name='DoublePulseACFs_PerLag', optype='other')
2088 2088
2089 2089 """
2090 2090
2091 2091 def __init__(self, **kwargs):
2092 2092
2093 2093 Operation.__init__(self, **kwargs)
2094 2094 self.aux=1
2095 2095
2096 2096 def run(self,dataOut):
2097 2097
2098 2098 # flag bad data points
2099 2099 dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32')
2100 2100
2101 2101 if self.aux==1:
2102 2102 # Real part of ACF
2103 2103 dataOut.rhor = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2104 2104 # Imaginary part of ACF
2105 2105 dataOut.rhoi = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2106 2106 # Standard deviation of power
2107 2107 dataOut.sdp = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2108 2108 # Standard deviation of ACF
2109 2109 dataOut.sd = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2110 2110 # Stores the power with noise level removed
2111 2111 dataOut.p=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
2112 2112 # Stores lags for which ACFs are calculated
2113 2113 dataOut.alag=numpy.zeros(dataOut.NDP,'float32')
2114 2114 for l in range(dataOut.DPL):
2115 2115 dataOut.alag[l]=l*dataOut.DH*dataOut.TxLagRate/150.0
2116 2116 self.aux=0
2117 2117 # dataOut.pan: power noise level of channel A, defined in SpectraDataToFaraday
2118 2118 # Signal noise
2119 2119 sn4=dataOut.pan*dataOut.pbn
2120 2120 rhorn=0
2121 2121 rhoin=0
2122 2122 panrm=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
2123 2123
2124 2124 id = numpy.where(dataOut.heightList>700)[0]
2125 2125
2126 2126 PA = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2127 2127 PB = numpy.zeros((dataOut.NDP, dataOut.DPL), dtype=float)
2128 2128
2129 2129 for i in range(dataOut.NDP): #Heights
2130 2130 for j in range(dataOut.DPL): # Lags
2131 2131 ################# Total power
2132 2132 # Power channel A
2133 2133 pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0])
2134 2134 # Power channel B
2135 2135 pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0])
2136 2136 st4=pa*pb
2137 2137 '''
2138 2138 if i > id[0]:
2139 2139 dataOut.p[i,j] = pa-dataOut.pan
2140 2140 else:
2141 2141 dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn)
2142 2142 '''
2143 2143 dataOut.p[i,j]=pa+pb-(dataOut.pan[j]+dataOut.pbn[j])
2144 2144 dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb))
2145 2145 ## ACF
2146 2146 rhorp=dataOut.kabxys_integrated[8][i,j,0]+dataOut.kabxys_integrated[11][i,j,0]
2147 2147 rhoip=dataOut.kabxys_integrated[10][i,j,0]-dataOut.kabxys_integrated[9][i,j,0]
2148 2148 #PA[i,j] = pa; PB[i,j] = pb
2149 2149 if ((pa>dataOut.pan[j])&(pb>dataOut.pbn[j])):
2150 2150 # panrm is RMS of power, used to normalize ACFs
2151 2151 ss4=numpy.abs((pa-dataOut.pan[j])*(pb-dataOut.pbn[j]))
2152 2152 panrm[i,j]=math.sqrt(ss4)
2153 2153 rnorm=1/panrm[i,j]
2154 2154 ## ACF
2155 2155 dataOut.rhor[i,j]=rhorp*rnorm
2156 2156 dataOut.rhoi[i,j]=rhoip*rnorm
2157 2157 #if i==13 and j== 0 or i ==14 and j==0:
2158 2158 #print(numpy.sum(rhorp))
2159 2159 #exit(1)
2160 2160 ############# Compute standard error for ACF
2161 2161 stoss4=st4/ss4
2162 2162 snoss4=sn4[j]/ss4
2163 2163 rp2=((rhorp*rhorp)+(rhoip*rhoip))/st4
2164 2164 rn2=((rhorn*rhorn)+(rhoin*rhoin))/sn4[j]
2165 2165 rs2=(dataOut.rhor[i,j]*dataOut.rhor[i,j])+(dataOut.rhoi[i,j]*dataOut.rhoi[i,j])
2166 2166 st=1.0+rs2*(stoss4-(2*math.sqrt(stoss4*snoss4)))
2167 2167 stn=1.0+rs2*(snoss4-(2*math.sqrt(stoss4*snoss4)))
2168 2168 dataOut.sd[i,j]=((stoss4*((1.0+rp2)*st+(2.0*rp2*rs2*snoss4)-4.0*math.sqrt(rs2*rp2)))+(0.25*snoss4*((1.0+rn2)*stn+(2.0*rn2*rs2*stoss4)-4.0*math.sqrt(rs2*rn2))))*dataOut.rnint2[j]
2169 2169 dataOut.sd[i,j]=numpy.abs(dataOut.sd[i,j])
2170 2170 '''
2171 2171 if i == 4:
2172 2172 print(i,j,dataOut.sd[i,j])
2173 2173 '''
2174 2174 #print(i,j,dataOut.rhor[i,j])
2175 2175 #exit(1)
2176 2176 else: #default values for bad points
2177 2177 rnorm=1/math.sqrt(st4)
2178 2178 dataOut.sd[i,j]=1.e30
2179 2179 dataOut.ibad[i,j]=4
2180 2180 dataOut.rhor[i,j]=rhorp*rnorm
2181 2181 dataOut.rhoi[i,j]=rhoip*rnorm
2182 2182 '''
2183 2183 if i==47:
2184 2184 print("j",j)
2185 2185 print("pa",pa/dataOut.pan)
2186 2186 print("pb",pb/dataOut.pbn)
2187 2187 print((pa/dataOut.pan-1.0))
2188 2188 print((pb/dataOut.pbn-1.0))
2189 2189 '''
2190 2190 #'''
2191 2191 if ((pb/dataOut.pbn[j]-1.0)>2.25*(pa/dataOut.pan[j]-1.0)): #To flag bad points from the pulse and EEJ for lags != 0 for Channel B
2192 2192 #print(dataOut.heightList[i],j,"EJJ")
2193 2193 dataOut.igcej[i,j]=1
2194 2194
2195 2195 elif ((pa/dataOut.pan[j]-1.0)>2.25*(pb/dataOut.pbn[j]-1.0)):
2196 2196 #print(dataOut.heightList[i],j,"EJJ")
2197 2197 dataOut.igcej[i,j]=1
2198 2198 #'''
2199 2199 '''
2200 2200 if ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)):
2201 2201 #print("EJJ")
2202 2202 dataOut.igcej[i,j]=1
2203 2203 #'''
2204 2204 '''import matplotlib.pyplot as plt
2205 2205 fig, axes = plt.subplots(2, dataOut.DPL, figsize=(18, 6), sharex=True, sharey=True)
2206 2206
2207 2207 for i in range(dataOut.DPL):
2208 2208 axes[0,i].plot(PA[:, i], dataOut.heightList, label=f'PA {i+1}')
2209 2209 axes[0, i].axvline(dataOut.pan[i], color='gray', linestyle='--', linewidth=1)
2210 2210 axes[0, i].set_title(f'Lag {i+1}')
2211 2211 axes[0, i].set_xscale("log") # Log scale for y-axis
2212 2212 axes[0, i].set_xlim([0,1e+7])
2213 2213 axes[1,i].plot(PB[:, i], dataOut.heightList, label=f'PB {i+1}')
2214 2214 axes[1, i].axvline(dataOut.pbn[i], color='gray', linestyle='--', linewidth=1)
2215 2215 axes[1, i].set_xscale("log") # Log scale for y-axis
2216 2216 axes[1, i].set_xlim([0,1e+7])
2217 2217
2218 2218
2219 2219 plt.tight_layout()
2220 2220 plt.show()'''
2221 2221
2222 2222
2223 2223 #print("dataOut.p",datetime.datetime.utcfromtimestamp(dataOut.utctime), dataOut.p)
2224 2224
2225 2225 #print(numpy.sum(dataOut.kabxys_integrated[8][:,:,0]+dataOut.kabxys_integrated[11][:,:,0]))
2226 2226 #print(numpy.sum(dataOut.kabxys_integrated[10][:,:,0]-dataOut.kabxys_integrated[9][:,:,0]))
2227 2227 #print(numpy.sum(dataOut.rhor))
2228 2228 #print("bad points: ",dataOut.igcej[47,:])
2229 2229 #exit(1)
2230 2230 '''
2231 2231 for l in range(11):
2232 2232 print("p: ",dataOut.p[:,l])
2233 2233 exit(1)
2234 2234 '''
2235 2235 #print(pa)
2236 2236 '''
2237 2237 import matplotlib.pyplot as plt
2238 2238 #plt.plot(dataOut.p[:,-1],dataOut.heightList)
2239 2239 plt.plot(pa/dataOut.pan-1.,dataOut.heightList)
2240 2240 plt.plot(pb/dataOut.pbn-1.,dataOut.heightList)
2241 2241 plt.grid()
2242 2242 #plt.xlim(0,1e5)
2243 2243 plt.show()
2244 2244 #print("p: ",dataOut.p[33,:])
2245 2245 #exit(1)
2246 2246 '''
2247 2247 return dataOut
2248 2248
2249 2249 class FaradayAngleAndDPPower(Operation):
2250 2250 '''
2251 2251 Written by R. Flores
2252 2252 '''
2253 2253 """Operation to calculate Faraday angle and Double Pulse power.
2254 2254
2255 2255 Parameters:
2256 2256 -----------
2257 2257 None
2258 2258
2259 2259 Example
2260 2260 --------
2261 2261
2262 2262 op = proc_unit.addOperation(name='FaradayAngleAndDPPower', optype='other')
2263 2263
2264 2264 """
2265 2265
2266 2266 def __init__(self, **kwargs):
2267 2267
2268 2268 Operation.__init__(self, **kwargs)
2269 2269 self.aux=1
2270 2270
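    # For every height this averages the lag powers weighted by 1/sdp over lags that are neither
    # EEJ-flagged (igcej) nor bad (ibad), applies the range-squared (h^2) correction, and derives
    # the Faraday angle phi = atan2(rhoi, rhor) of the lag-0 ACF together with an error estimate sdn2.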
2271 2271 def run(self,dataOut):
2272 2272
2273 2273 if self.aux==1:
2274 2274 dataOut.h2=numpy.zeros(dataOut.MAXNRANGENDT,'float32')
2275 2275 dataOut.range1=numpy.zeros(dataOut.MAXNRANGENDT,order='F',dtype='float32')
2276 2276 dataOut.sdn2=numpy.zeros(dataOut.NDP,'float32')
2277 2277 dataOut.ph2=numpy.zeros(dataOut.NDP,'float32')
2278 2278 dataOut.sdp2=numpy.zeros(dataOut.NDP,'float32')
2279 2279 dataOut.ibd=numpy.zeros(dataOut.NDP,'float32')
2280 2280 dataOut.phi=numpy.zeros(dataOut.NDP,'float32')
2281 2281
2282 2282 self.aux=0
2283 2283
2284 2284 for i in range(dataOut.MAXNRANGENDT):
2285 2285 dataOut.range1[i]=dataOut.H0 + i*dataOut.DH
2286 2286 dataOut.h2[i]=dataOut.range1[i]**2
2287 2287 #print("shape ph2",numpy.shape(dataOut.ph2))
2288 2288 for j in range(dataOut.NDP):
2289 2289 dataOut.ph2[j]=0.
2290 2290 dataOut.sdp2[j]=0.
2291 2291 ri=dataOut.rhoi[j][0]/dataOut.sd[j][0]
2292 2292 rr=dataOut.rhor[j][0]/dataOut.sd[j][0]
2293 2293 dataOut.sdn2[j]=1./dataOut.sd[j][0]
2294 2294
2295 2295 pt=0.# // total power
2296 2296 st=0.# // total signal
2297 2297 ibt=0# // bad lags
2298 2298 ns=0# // no. good lags
2299 2299 #print(dataOut.heightList[j])
2300 2300 for l in range(dataOut.DPL):
2301 2301 #add in other lags if outside of e-jet contamination
2302 2302 if( (dataOut.igcej[j][l] == 0) and (dataOut.ibad[j][l] == 0) ):
2303 2303 #print("l", l, dataOut.range1[l])
2304 2304 dataOut.ph2[j]+=dataOut.p[j][l]/dataOut.sdp[j][l]
2305 2305 dataOut.sdp2[j]=dataOut.sdp2[j]+1./dataOut.sdp[j][l]
2306 2306 ns+=1
2307 2307 #if dataOut.igcej[j][l] != 0:
2308 2308 #print(l)
2309 2309 pt+=dataOut.p[j][l]/dataOut.sdp[j][l]
2310 2310 st+=1./dataOut.sdp[j][l]
2311 2311 ibt|=dataOut.ibad[j][l];
2312 2312 #print(dataOut.sdp2[j],st)
2313 2313 if(ns!= 0):
2314 2314 #print("Good lags: ",j,ns)
2315 2315 dataOut.ibd[j]=0
2316 2316 dataOut.ph2[j]=dataOut.ph2[j]/dataOut.sdp2[j]
2317 2317 dataOut.sdp2[j]=1./dataOut.sdp2[j]
2318 2318 #print("j", dataOut.range1[j])
2319 2319 #print(dataOut.sdp2[j])
2320 2320 else:
2321 2321 #print("Bad lags: ",j)
2322 2322 #print("Bad j", dataOut.range1[j])
2323 2323 dataOut.ibd[j]=ibt
2324 2324 dataOut.ph2[j]=pt/st
2325 2325 dataOut.sdp2[j]=1./st
2326 2326 #dataOut.sdp2[j]**=2 #Added on May 22, 2024 by R.Flores #To Validate
2327 2327 #print(dataOut.sdp2[j])
2328 2328
2329 2329 dataOut.ph2[j]=dataOut.ph2[j]*dataOut.h2[j]
2330 2330 dataOut.sdp2[j]=numpy.sqrt(dataOut.sdp2[j])*dataOut.h2[j]
2331 2331 rr=rr/dataOut.sdn2[j]
2332 2332 ri=ri/dataOut.sdn2[j]
2333 2333 #rm[j]=np.sqrt(rr*rr + ri*ri) is not used in the C program
2334 2334 dataOut.sdn2[j]=1./(dataOut.sdn2[j]*(rr*rr + ri*ri))
2335 2335 if( (ri == 0.) and (rr == 0.) ):
2336 2336 dataOut.phi[j]=0.
2337 2337 else:
2338 2338 dataOut.phi[j]=math.atan2( ri , rr )
2339 2339
2340 2340 dataOut.flagTeTiCorrection = False
2341 2341 #print("ph2: ", numpy.sum(dataOut.ph2[:16]))
2342 2342 #print("ph2: ", numpy.sum(dataOut.ph2[16:32]))
2343 2343
2344 2344 '''import matplotlib.pyplot as plt
2345 2345 #plt.plot(numpy.abs(dataOut.kabxys_integrated[4][:,j,0]+dataOut.kabxys_integrated[5][:,j,0])+numpy.abs(dataOut.kabxys_integrated[6][:,j,0]+dataOut.kabxys_integrated[7][:,j,0]),dataOut.heightList)
2346 2346 #plt.axvline((dataOut.pan+dataOut.pbn))
2347 2347 #print(numpy.shape(dataOut.p))
2348 2348 plt.plot(dataOut.ph2,dataOut.heightList)
2349 2349 plt.plot(dataOut.phi,dataOut.heightList)
2350 2350
2351 2351 plt.xlim(1000,1000000000)
2352 2352 #plt.ylim(50,400)
2353 2353 plt.grid()
2354 2354 plt.show()
2355 2355 #exit(1)'''
2356 2356
2357 2357 return dataOut
2358 2358
2359 2359 class ElectronDensityFaraday(Operation):
2360 2360 '''
2361 2361 Written by R. Flores
2362 2362 '''
2363 2363 """Operation to calculate electron density from Faraday angle.
2364 2364
2365 2365 Parameters:
2366 2366 -----------
2367 2367 NSHTS : int
2368 2368 .*
2369 2369 RATE : float
2370 2370 .*
2371 2371
2372 2372 Example
2373 2373 --------
2374 2374
2375 2375 op = proc_unit.addOperation(name='ElectronDensityFaraday', optype='other')
2376 2376 op.addParameter(name='NSHTS', value='50', format='int')
2377 2377 op.addParameter(name='RATE', value='1.8978873e-6', format='float')
2378 2378
2379 2379 """
2380 2380
2381 2381 def __init__(self, **kwargs):
2382 2382
2383 2383 Operation.__init__(self, **kwargs)
2384 2384 self.aux=1
2385 2385
2386 2386 def run(self,dataOut,NSHTS=50,RATE=1.8978873e-6):
2387 2387
2388 2388 dataOut.NSHTS=NSHTS
2389 2389 dataOut.RATE=RATE
2390 2390
2391 2391 if self.aux==1:
2392 2392 dataOut.dphi=numpy.zeros(dataOut.NDP,'float32')
2393 2393 #dataOut.dphi_uc=numpy.zeros(dataOut.NDP,'float32')
2394 2394 dataOut.sdn1=numpy.zeros(dataOut.NDP,'float32')
2395 2395 self.aux=0
2396 2396 theta=numpy.zeros(dataOut.NDP,dtype=numpy.complex_)
2397 2397 thetai=numpy.zeros(dataOut.NDP,dtype=numpy.complex_)
2398 2398 # use complex numbers for phase
2399 2399 '''
2400 2400 for i in range(dataOut.NSHTS):
2401 2401 theta[i]=math.cos(dataOut.phi[i])+math.sin(dataOut.phi[i])*1j
2402 2402 thetai[i]=-math.sin(dataOut.phi[i])+math.cos(dataOut.phi[i])*1j
2403 2403 ''' #Old Method
2404 2404
2405 2405 # differentiate and convert to number density
2406 2406 ndphi=dataOut.NSHTS-4
2407 2407 #print(dataOut.phi)
2408 2408 #exit(1)
2409 2409 #'''
2410 2410 if hasattr(dataOut, 'flagSpreadF') and dataOut.flagSpreadF:
2411 2411 #if dataOut.flagSpreadF:
2412 2412 nanindex = numpy.argwhere(numpy.isnan(dataOut.phi))
2413 2413 i1 = nanindex[-1][0]
2414 2414 #Analyze the case when the Spread F is a plume
2415 2415
2416 2416 #print(i1)
2417 2417 dataOut.phi[i1+1:]=numpy.unwrap(dataOut.phi[i1+1:]) #Better results
2418 2418 else:
2419 2419 #dataOut.phi_uwrp = dataOut.phi.copy()
2420 2420 dataOut.phi[:]=numpy.unwrap(dataOut.phi[:]) #Better results
2421 2421 #'''
2422 2422 #print(dataOut.phi)
2423 2423 #print(dataOut.ph2)
2424 2424 #exit(1)
2425 2425
2426 2426 '''
2427 2427 #if dataOut.flagDecodeData:
2428 2428 if 1:
2429 2429 import matplotlib.pyplot as plt
2430 2430 plt.plot(dataOut.phi,dataOut.heightList,'*-')
2431 2431 #plt.ylim(60,95)
2432 2432 plt.grid()
2433 2433 plt.show()
2434 2434 '''
2435 2435 #print(dataOut.bki)
2436 2436 #print(dataOut.NDP,dataOut.NSHTS)
2437 2437 #print("phi: ", dataOut.phi)
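        # The loop below differentiates the (unwrapped) Faraday phase in range with a
        # weighted central difference over i-2..i+2 and scales it by
        # fact = -0.5*bki/(RATE*DH) to obtain dphi, which is proportional to the
        # electron density; sdn1 propagates the corresponding error from sdn2.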
2438 2438 for i in range(2,dataOut.NSHTS-2):
2439 2439 fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i]
2440 2440 #print("fact: ", fact,dataOut.RATE,dataOut.DH,dataOut.bki[i])
2441 2441 #four-point derivative, no phase unwrapping necessary
2442 2442 #####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 #Original from C program
2443 2443
2444 2444 ##dataOut.dphi[i]=((((theta[i-2]-theta[i+2])+(8.0*(theta[i+1]-theta[i-1])))/thetai[i])).real/12.0
2445 2445 dataOut.dphi[i]=((dataOut.phi[i+1]-dataOut.phi[i-1])+(2.0*(dataOut.phi[i+2]-dataOut.phi[i-2])))/10.0 #Better results
2446 2446
2447 2447 #dataOut.dphi_uc[i] = abs(dataOut.phi[i]*dataOut.bki[i]*(-0.5)/dataOut.DH)
2448 2448 #dataOut.dphi[i]=abs(dataOut.dphi[i]*fact)
2449 2449 dataOut.dphi[i]=dataOut.dphi[i]*abs(fact)
2450 2450 dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1])
2451 2451 dataOut.sdn1[i]=numpy.sqrt(dataOut.sdn1[i])*fact
2452 2452
2453 2453 #print("dphi: ", dataOut.dphi)
2454 2454 '''
2455 2455 if dataOut.flagDecodeData:
2456 2456 #exit(1)
2457 2457 import matplotlib.pyplot as plt
2458 2458 plt.plot(abs(dataOut.dphi),dataOut.heightList)
2459 2459 plt.grid()
2460 2460 #plt.xlim(0,1e7)
2461 2461 plt.show()
2462 2462
2463 2463 '''
2464 2464 #print("dH: ", dataOut.heightList[1]-dataOut.heightList[0])
2465 2465
2466 2466
2467 2467
2468 2468
2469 2469 return dataOut
2470 2470
2471 2471
2472 2472 class NormalizeDPPowerRoberto_V2(Operation):
2473 2473 '''
2474 2474 Written by R. Flores
2475 2475 '''
2476 2476 """Operation to normalize the relative electron density from power with the total electron density from the Faraday angle.
2477 2477
2478 2478 Parameters:
2479 2479 -----------
2480 2480 None
2481 2481
2482 2482 Example
2483 2483 --------
2484 2484
2485 2485 op = proc_unit.addOperation(name='NormalizeDPPowerRoberto_V2', optype='other')
2486 2486
2487 2487 """
2488 2488
2489 2489 def __init__(self, **kwargs):
2490 2490
2491 2491 Operation.__init__(self, **kwargs)
2492 2492 self.aux=1
2493 2493
2494 2494 def normal(self,a,b,n,m):
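        # Least-squares scale factor between two profiles: over 2*m-1 overlapping
        # windows of length n/m, compute the scale temp[i] that best maps b onto a
        # (using only points where both are positive), evaluate a log10-based
        # chi-square for each candidate, and return the scale with the smallest
        # chi-square as cf.
        # Note: if no window yields 1e-6 < chisq < chmin, cf is never assigned and
        # the caller's try/except around normal() absorbs the resulting error.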
2495 2495 chmin=1.0e30
2496 2496 chisq=numpy.zeros(150,'float32')
2497 2497 temp=numpy.zeros(150,'float32')
2498 2498
2499 2499 for i in range(2*m-1):
2500 2500 an=al=be=chisq[i]=0.0
2501 2501 for j in range(int(n/m)):
2502 2502 k=int(j+i*n/(2*m))
2503 2503 if(a[k]>0.0 and b[k]>0.0):
2504 2504 al+=a[k]*b[k]
2505 2505 be+=b[k]*b[k]
2506 2506
2507 2507 if(be>0.0):
2508 2508 temp[i]=al/be
2509 2509 else:
2510 2510 temp[i]=1.0
2511 2511
2512 2512 for j in range(int(n/m)):
2513 2513 k=int(j+i*n/(2*m))
2514 2514 #print("a,b",a[k],b[k])
2515 2515 if(a[k]>0.0 and b[k]>0.0):
2516 2516 chisq[i]+=(numpy.log10(b[k]*temp[i]/a[k]))**2
2517 2517 an=an+1
2518 2518
2519 2519 if(chisq[i]>0.0):
2520 2520 chisq[i]/=an
2521 2521
2522 2522 for i in range(int(2*m-1)):
2523 2523 #print("xi",chisq[i])
2524 2524 if(chisq[i]<chmin and chisq[i]>1.0e-6):
2525 2525 chmin=chisq[i]
2526 2526 cf=temp[i]
2527 2527 return cf
2528 2528
2529 2529
2530 2530
2531 2531 def normalize(self,dataOut):
2532 2532 # cf: constant normalization factor
2533 2533
2534 2534 if self.aux==1:
2535 2535 dataOut.cf=numpy.zeros(1,'float32')
2536 2536 dataOut.cflast=numpy.zeros(1,'float32')
2537 2537 self.aux=0
2538 2538
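        # ut_Faraday (UTC hours) selects the height window [i1, i2] used for the
        # normalization: 200-500 km during daytime, a +/-6 gate window around the
        # ph2 peak between 200 and 700 km in the evening branch, and fixed
        # 330-470 km / 260-350 km windows for the early-morning branches below.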
2539 2539 print(dataOut.ut_Faraday)
2540 2540
2541 2541 if (dataOut.ut_Faraday>=11.5 and dataOut.ut_Faraday<23): # 6 30am to 6pm
2542 2542 i2=(500.-dataOut.range1[0])/dataOut.DH
2543 2543 i1=(200.-dataOut.range1[0])/dataOut.DH
2544 2544
2545 2545 elif(dataOut.ut_Faraday>=5 and dataOut.ut_Faraday<8): # 0 am to 3am
2546 2546 inda = numpy.where(dataOut.heightList >= 330) # 260 #200 km
2547 2547 minIndex = inda[0][0]
2548 2548 indb = numpy.where(dataOut.heightList < 470) # 350 # 700 km
2549 2549 maxIndex = indb[0][-1]
2550 2550 print(minIndex)
2551 2551 print(dataOut.heightList)
2552 2552
2553 2553 ph2max_idx = numpy.nanargmax(dataOut.ph2[minIndex:maxIndex])
2554 2554 #print("dataOut.ph2[minIndex:maxIndex]: ", dataOut.ph2[minIndex:maxIndex])
2555 2555 print("dataOut.ph2: ", dataOut.ph2)
2556 2556 print("dataOut.phi: ", dataOut.phi)
2557 2557 print("minIndex", minIndex, "maxIndex", maxIndex)
2558 2558 print(ph2max_idx)
2559 2559
2560 2560 ph2max_idx += minIndex
2561 2561
2562 2562 i2 = maxIndex #ph2max_idx + 6
2563 2563 i1 = minIndex #ph2max_idx - 6
2564 2564
2565 2565 print("ELSE^^^^^^^^^^^^^^^^^^^^^")
2566 2566 print(dataOut.heightList[i1])
2567 2567 print(dataOut.heightList[i2])
2568 2568 elif(dataOut.ut_Faraday>=8 and dataOut.ut_Faraday<11.5): # 3 am to 6 30am ADDED
2569 2569 inda = numpy.where(dataOut.heightList >= 260) # 260 #200 km
2570 2570 minIndex = inda[0][0]
2571 2571 indb = numpy.where(dataOut.heightList < 350) # 350 # 700 km
2572 2572 maxIndex = indb[0][-1]
2573 2573 print(minIndex)
2574 2574 print(dataOut.heightList)
2575 2575
2576 2576 ph2max_idx = numpy.nanargmax(dataOut.ph2[minIndex:maxIndex])
2577 2577 #print("dataOut.ph2[minIndex:maxIndex]: ", dataOut.ph2[minIndex:maxIndex])
2578 2578 #print("dataOut.ph2: ", dataOut.ph2)
2579 2579 print("minIndex", minIndex, "maxIndex", maxIndex)
2580 2580 print(ph2max_idx)
2581 2581
2582 2582 ph2max_idx += minIndex
2583 2583
2584 2584 i2 = maxIndex #ph2max_idx + 6
2585 2585 i1 = minIndex #ph2max_idx - 6
2586 2586
2587 2587 print("ELSE^^^^^^^^^^^^^^^^^^^^^")
2588 2588 print(dataOut.heightList[i1])
2589 2589 print(dataOut.heightList[i2])
2590 2590 else: #6pm to 12am
2591 2591 inda = numpy.where(dataOut.heightList >= 200) #200 km
2592 2592 minIndex = inda[0][0]
2593 2593 indb = numpy.where(dataOut.heightList < 700) # 700 km
2594 2594 maxIndex = indb[0][-1]
2595 2595 print(minIndex)
2596 2596 print(dataOut.heightList)
2597 2597
2598 2598 ph2max_idx = numpy.nanargmax(dataOut.ph2[minIndex:maxIndex])
2599 2599 #print("dataOut.ph2[minIndex:maxIndex]: ", dataOut.ph2[minIndex:maxIndex])
2600 2600 #print("dataOut.ph2: ", dataOut.ph2)
2601 2601 '''
2602 2602 #if dataOut.flagTeTiCorrection:
2603 2603 if 1:
2604 2604 import matplotlib.pyplot as plt
2605 2605 plt.figure()
2606 2606 plt.plot(dataOut.ph2[minIndex:maxIndex],dataOut.heightList[minIndex:maxIndex],'*-')
2607 2607 plt.show()
2608 2608 '''
2609 2609 ph2max_idx += minIndex
2610 2610
2611 2611 i2 = ph2max_idx + 6
2612 2612 i1 = ph2max_idx - 6
2613 2613
2614 2614 print(dataOut.heightList[i1])
2615 2615 print(dataOut.heightList[i2])
2616 2616 '''
2617 2617 elif(dataOut.ut_Faraday>=1.66 and dataOut.ut_Faraday<3.16): # 20 40 - 22 10 (1 40 - 3 10)
2618 2618 inda = numpy.where(dataOut.heightList >= 435) #200 km
2619 2619 minIndex = inda[0][0]
2620 2620 indb = numpy.where(dataOut.heightList < 480) # 700 km
2621 2621 maxIndex = indb[0][-1]
2622 2622 print(minIndex)
2623 2623 print(dataOut.heightList)
2624 2624
2625 2625 ph2max_idx = numpy.nanargmax(dataOut.ph2[minIndex:maxIndex])
2626 2626 #print("dataOut.ph2[minIndex:maxIndex]: ", dataOut.ph2[minIndex:maxIndex])
2627 2627 #print("dataOut.ph2: ", dataOut.ph2)
2628 2628 print("minIndex", minIndex, "maxIndex", maxIndex)
2629 2629 print(ph2max_idx)
2630 2630
2631 2631 ph2max_idx += minIndex
2632 2632
2633 2633 i2 = maxIndex #ph2max_idx + 6
2634 2634 i1 = minIndex #ph2max_idx - 6
2635 2635
2636 2636 print("ELSE^^^^^^^^^^^^^^^^^^^^^")
2637 2637 print(dataOut.heightList[i1])
2638 2638 print(dataOut.heightList[i2])
2639 2639 '''
2640 2640
2641 2641
2642 2642 try:
2643 2643 dataOut.heightList[i2]
2644 2644 except:
2645 2645 i2 -= 1
2646 2646
2647 2647 '''
2648 2648 if not dataOut.flagSpreadF:
2649 2649 i2=(420-dataOut.range1[0])/dataOut.DH
2650 2650 else:
2651 2651 i2=(620-dataOut.range1[0])/dataOut.DH
2652 2652 '''
2653 2653 #i1=(200 -dataOut.range1[0])/dataOut.DH
2654 2654 ##print(i1*dataOut.DH)
2655 2655 ##print(i2*dataOut.DH)
2656 2656
2657 2657 i1=int(i1)
2658 2658 i2=int(i2)
2659 2659 print("Bounds 1: ", dataOut.heightList[i1],dataOut.heightList[i2])
2660 2660 '''
2661 2661 print(dataOut.ph2)
2662 2662 import matplotlib.pyplot as plt
2663 2663 plt.plot(dataOut.ph2,dataOut.heightList)
2664 2664 plt.xlim(1.e5,1.e8)
2665 2665 plt.show()
2666 2666 '''
2667 2667 #print("Flag: ",dataOut.flagTeTiCorrection)
2668 2668 #print(dataOut.dphi[i1::])
2669 2669 #print(dataOut.ph2[:])
2670 2670
2671 2671 if dataOut.flagTeTiCorrection:
2672 2672 for i in range(dataOut.NSHTS):
2673 2673 dataOut.ph2[i]/=dataOut.cf
2674 2674 dataOut.sdp2[i]/=dataOut.cf
2675 2675
2676 2676 #'''
2677 2677 #if dataOut.flagSpreadF:
2678 2678 if hasattr(dataOut, 'flagSpreadF') and dataOut.flagSpreadF:
2679 2679 print("flagSpreadF activated!!")
2680 2680 i2=int((700-dataOut.range1[0])/dataOut.DH)
2681 2681 #print(dataOut.ph2)
2682 2682 #print(dataOut.heightList)
2683 2683 nanindex = numpy.argwhere(numpy.isnan(dataOut.ph2))
2684 2684 #print("nanindex",nanindex)
2685 2685 i1 = nanindex[-1][0] #CHECK the case when i1 > i2
2686 2686 if i1 != numpy.shape(dataOut.heightList)[0]:
2687 2687 i1 += 1+2 #Add 1 to skip the NaN itself, plus 2 more to skip NaN values of "phi" introduced by the derivative calculation
2688 2688 if i1 >= i2:
2689 2689 i1 = i2-4
2690 2690 #print("i1, i2",i1,i2)
2691 2691 #print(dataOut.heightList)
2692 2692 #print("Bounds: ", dataOut.heightList[i1],dataOut.heightList[i2])
2693 2693 #print(dataOut.dphi[33])
2694 2694 #print(dataOut.ph2[33])
2695 2695 #print(dataOut.dphi[i1::])
2696 2696 #print(dataOut.ph2[i1::])
2697 2697 #'''
2698 2698 print("Bounds 2: ", dataOut.heightList[i1],dataOut.heightList[i2])
2699 2699
2700 2700 try:
2701 2701 dataOut.cf=self.normal(dataOut.dphi[i1::], dataOut.ph2[i1::], i2-i1, 1)
2702 2702
2703 2703 except:
2704 2704 print("except: chi factor not achieved in normalization")
2705 2705 dataOut.cf = numpy.nan
2706 2706
2707 2707 #print("cf: ",dataOut.cf)
2708 2708 #print(dataOut.ph2)
2709 2709 #input()
2710 2710 # in case of spread F, normalize much higher
2711 2711 #print("dens: ", dataOut.dphi,dataOut.ph2)
2712 2712
2713 2713 night_first1= 300.0#350.0
2714 2714 night_end= 450.0
2715 2715 night_first1= 220.0#350.0
2716 2716 night_end= 400.0
2717 2717
2718 2718 if(dataOut.cf<dataOut.cflast[0]/10.0):
2719 2719 i1=(night_first1-dataOut.range1[0])/dataOut.DH
2720 2720 i2=(night_end-dataOut.range1[0])/dataOut.DH
2721 2721 i1=int(i1)
2722 2722 i2=int(i2)
2723 2723 try:
2724 2724 dataOut.cf=self.normal(dataOut.dphi[int(i1)::], dataOut.ph2[int(i1)::], int(i2-i1), 1)
2725 2725 except:
2726 2726 pass
2727 2727
2728 2728 #print(dataOut.cf,dataOut.cflast[0])
2729 2729 time_text = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2730 2730
2731 2731 print("Bounds 3: ", dataOut.heightList[i1], dataOut.heightList[i2])
2732 2732 print('time text', time_text)
2733 2733
2734 2734
2735 2735 ### Manual cf correction ###
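        # Assumed structure of normalize_factor2.json, inferred from the parsing
        # below and given only as an illustration ("cf" may be a number or a string
        # expression involving cflast, e.g. "dataOut.cflast[0]"):
        # {
        #   "conditions": [
        #     {"year": 2024, "doy": 246, "cf": 1.0e-5,
        #      "time": [[5, 20], [5, 35]]}
        #   ]
        # }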
2736 2736 flagcfcorrection = True
2737 2737 DOY = time_text.timetuple().tm_yday
2738 2738 print('time text', time_text, DOY)
2739 2739 if flagcfcorrection:
2740 2740 print("***Cleaning*** cflast: ", dataOut.cflast[0])
2741 2741 print("***Cleaning*** cf: ", dataOut.cf)
2742 2742 path = os.path.join(os.path.dirname(__file__), 'normalize_factor2.json')
2743 2743 with open(path) as f:
2744 2744 jsondata= json.load(f)
2745 2745
2746 2746 corrections = {}
2747 2747 for condition in jsondata['conditions']:
2748 2748 year = condition['year']
2749 2749 doy = condition['doy']
2750 2750 cf = condition['cf']
2751 2751
2752 2752 for time_condition in condition['time']:
2753 2753 hour = time_condition[0]
2754 2754 minute = time_condition[1]
2755 2755 corrections[(year, doy, hour, minute)] = cf
2756 2756 key = (time_text.year, DOY, time_text.hour, time_text.minute)
2757 2757
2758 2758 if key in corrections:
2759 2759 cf_value = corrections[key]
2760 2760
2761 2761 if isinstance(cf_value, str) and "cflast" in cf_value:
2762 2762 dataOut.cf = eval(cf_value) #ast.literal_eval(cf_value)
2763 2763 else:
2764 2764 dataOut.cf = float(cf_value)
2765 2765
2766 2766 print(f"Correction applied: {dataOut.cf}")
2767 2767 print("***Cleaning*** cf After: ", dataOut.cf)
2768 2768
2769 2769
2770 2770
2771 2771
2772 2772 dataOut.cflast[0]=dataOut.cf
2773 2773 #print("cf: ", dataOut.cf)
2774 2774
2775 2775 #print(dataOut.ph2)
2776 2776 #print(dataOut.sdp2)
2777 2777
2778 2778 ## normalize double pulse power and error bars to Faraday
2779 2779 for i in range(dataOut.NSHTS):
2780 2780 dataOut.ph2[i]*=dataOut.cf
2781 2781 dataOut.sdp2[i]*=dataOut.cf
2782 2782 #print(dataOut.ph2)
2783 2783 #print(dataOut.sdp2)
2784 2784 #exit(1)
2785 2785
2786 2786 for i in range(dataOut.NSHTS):
2787 2787 dataOut.ph2[i]=(max(1.0, dataOut.ph2[i]))
2788 2788 dataOut.dphi[i]=(max(1.0, dataOut.dphi[i]))
2789 2789
2790 2790 def run(self,dataOut):
2791 2791
2792 2792 self.normalize(dataOut)
2793 2793 #print(dataOut.ph2)
2794 2794 #print(dataOut.sdp2)
2795 2795 #input()
2796 2796 #print("shape before" ,numpy.shape(dataOut.ph2))
2797 2797
2798 2798 return dataOut
2799 2799
2800 2800
2801 2801 class suppress_stdout_stderr(object):
2802 2802 '''
2803 2803 A context manager for doing a "deep suppression" of stdout and stderr in
2804 2804 Python, i.e. it will suppress all printed output, even if the print originates in a
2805 2805 compiled C/Fortran sub-function.
2806 2806 This will not suppress raised exceptions, since exceptions are printed
2807 2807 to stderr just before a script exits, and after the context manager has
2808 2808 exited (at least, I think that is why it lets exceptions through).
2809 2809
2810 2810 '''
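    # Typical usage in this module: wrap calls into the compiled fitters so their
    # console output is silenced, e.g.
    #     with suppress_stdout_stderr():
    #         fitacf_guess.guess(y, x, zero, depth, t1, t2, len(y))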
2811 2811 def __init__(self):
2812 2812 # Open a pair of null files
2813 2813 self.null_fds = [os.open(os.devnull,os.O_RDWR) for x in range(2)]
2814 2814 # Save the actual stdout (1) and stderr (2) file descriptors.
2815 2815 self.save_fds = [os.dup(1), os.dup(2)]
2816 2816
2817 2817 def __enter__(self):
2818 2818 # Assign the null pointers to stdout and stderr.
2819 2819 os.dup2(self.null_fds[0],1)
2820 2820 os.dup2(self.null_fds[1],2)
2821 2821
2822 2822 def __exit__(self, *_):
2823 2823 # Re-assign the real stdout/stderr back to (1) and (2)
2824 2824 os.dup2(self.save_fds[0],1)
2825 2825 os.dup2(self.save_fds[1],2)
2826 2826 # Close all file descriptors
2827 2827 for fd in self.null_fds + self.save_fds:
2828 2828 os.close(fd)
2829 2829
2830 2830
2831 2831 class DPTemperaturesEstimation(Operation):
2832 2832 '''
2833 2833 Written by R. Flores
2834 2834 '''
2835 2835 """Operation to estimate temperatures for Double Pulse data.
2836 2836
2837 2837 Parameters:
2838 2838 -----------
2839 2839 IBITS : int
2840 2840 .*
2841 2841
2842 2842 Example
2843 2843 --------
2844 2844
2845 2845 op = proc_unit.addOperation(name='DPTemperaturesEstimation', optype='other')
2846 2846 op.addParameter(name='IBITS', value='16', format='int')
2847 2847
2848 2848 """
2849 2849 '''
2850 2850 NSHTS Number of sample heights (input), for temperature processing
2851 2851 NDP Number of Data Points (nHeights dependent)
2852 2852 NSHTS < NDP
2853 2853
2854 2854 Input/Output Data:
2855 2855
2856 2856 te2, ti2: Estimated electron and ion temperatures.
2857 2857
2858 2858 ete2, eti2: Errors in the estimated temperatures.
2859 2859
2860 2860 phy2, ephy2: Physical parameter and its error.
2861 2861
2862 2862 Fitting Process:
2863 2863
2864 2864 ifit: Flags for which parameters are being fitted.
2865 2865
2866 2866 params: Initial guesses and fitted parameters.
2867 2867
2868 2868 cov, covinv: Covariance matrix and its inverse for uncertainty estimation.
2869 2869
2870 2870 Metadata/Status:
2871 2871
2872 2872 m: Status or counter for the fitting process.
2873 2873
2874 2874 info2: Additional information about the fitting results.
2875 2875
2876 2876 '''
2877 2877
2878 2878
2879 2879 def __init__(self, **kwargs):
2880 2880
2881 2881 Operation.__init__(self, **kwargs)
2882 2882
2883 2883 self.aux=1
2884 2884
2885 2885 def Estimation(self,dataOut):
2886 2886 #with suppress_stdout_stderr():
2887 2887
2888 2888 if self.aux==1:
2889 2889 dataOut.ifit=numpy.zeros(5,order='F',dtype='int32')
2890 2890 dataOut.m=numpy.zeros(1,order='F',dtype='int32')
2891 2891 dataOut.te2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2892 2892 dataOut.ti2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2893 2893 dataOut.ete2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2894 2894 dataOut.eti2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2895 2895
2896 2896 self.aux=0
2897 2897
2898 2898 dataOut.phy2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2899 2899 dataOut.ephy2=numpy.zeros(dataOut.NSHTS,order='F',dtype='float32')
2900 2900 dataOut.info2=numpy.zeros(dataOut.NDP,order='F',dtype='float32')
2901 2901 dataOut.params=numpy.zeros(10,order='F',dtype='float32')
2902 2902 dataOut.cov=numpy.zeros(dataOut.IBITS*dataOut.IBITS,order='F',dtype='float32')
2903 2903 dataOut.covinv=numpy.zeros(dataOut.IBITS*dataOut.IBITS,order='F',dtype='float32')
2904 2904
2905 2905 #null_fd = os.open(os.devnull, os.O_RDWR)
2906 2906 #os.dup2(null_fd, 1)
2907 2907 ymin_index = numpy.abs(dataOut.heightList - 150).argmin() #no point below 150 km
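        # For every height above ~150 km the loop below: (1) rotates each measured
        # lag to the zero-lag phase and normalizes by its magnitude, keeping only
        # uncontaminated lags (igcej == 0 and ibad == 0) in x (lag time), y
        # (normalized ACF) and e (variances, placed on the diagonal of cov);
        # (2) gets initial Te/Ti guesses from fitacf_guess.guess; and (3) runs
        # fitacf_fit_short.fit (repeating the fit with Ti tied to Te when the first
        # pass returns Ti > 1.05*Te) to fill te2/ti2, their errors ete2/eti2, the
        # composition parameter phy2/ephy2 and the status flag info2.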
2908 2908 for i in range(ymin_index,dataOut.NSHTS):
2909 2909
2910 2910 #some definitions
2911 2911 iflag=0 # initialized to zero?
2912 2912 wl = 3.0
2913 2913 x=numpy.zeros(dataOut.DPL+dataOut.IBITS,order='F',dtype='float32')
2914 2914 y=numpy.zeros(dataOut.DPL+dataOut.IBITS,order='F',dtype='float32')
2915 2915 e=numpy.zeros(dataOut.DPL+dataOut.IBITS,order='F',dtype='float32')
2916 2916 eb=numpy.zeros(5,order='F',dtype='float32')
2917 2917 zero=numpy.zeros(1,order='F',dtype='float32')
2918 2918 depth=numpy.zeros(1,order='F',dtype='float32')
2919 2919 t1=numpy.zeros(1,order='F',dtype='float32')
2920 2920 t2=numpy.zeros(1,order='F',dtype='float32')
2921 2921
2922 2922 '''
2923 2923 x: lag time, y: correlation, e: their uncertainties
2924 2924 t1, t2: initial guesses, eb: errors
2925 2925 '''
2926 2926 if i>ymin_index and l1>=0:
2927 2927 if l1==0:
2928 2928 l1=1
2929 2929
2930 2930 dataOut.cov=numpy.reshape(dataOut.cov,l1*l1)
2931 2931 dataOut.cov=numpy.resize(dataOut.cov,dataOut.DPL*dataOut.DPL)
2932 2932 dataOut.covinv=numpy.reshape(dataOut.covinv,l1*l1)
2933 2933 dataOut.covinv=numpy.resize(dataOut.covinv,dataOut.DPL*dataOut.DPL)
2934 2934
2935 2935 for l in range(dataOut.DPL*dataOut.DPL):
2936 2936 dataOut.cov[l]=0.0
2937 2937 acfm= (dataOut.rhor[i][0])**2 + (dataOut.rhoi[i][0])**2
2938 2938 if acfm> 0.0:
2939 2939 cc=dataOut.rhor[i][0]/acfm
2940 2940 ss=dataOut.rhoi[i][0]/acfm
2941 2941 else:
2942 2942 cc=1.
2943 2943 ss=0.
2944 2944 # keep only uncontaminated data, don't pass zero lag to fitter
2945 2945 l1=0
2946 2946 '''
2947 2947 if i==13 or i==14:
2948 2948 print(numpy.sum(dataOut.rhor))
2949 2949 print(numpy.sum(dataOut.rhoi))
2950 2950 print(acfm)
2951 2951 #exit(1)
2952 2952 '''
2953 2953 for l in range(0+1,dataOut.DPL):
2954 2954 if dataOut.igcej[i][l]==0 and dataOut.ibad[i][l]==0:
2955 2955 y[l1]=dataOut.rhor[i][l]*cc + dataOut.rhoi[i][l]*ss
2956 2956 x[l1]=dataOut.alag[l]*1.0e-3 # *1.0e-3
2957 2957 dataOut.sd[i][l]=dataOut.sd[i][l]/((acfm)**2)# important
2958 2958 e[l1]=dataOut.sd[i][l] #this is the variance, not the st. dev.
2959 2959 l1=l1+1
2960 2960
2961 2961 for l in range(l1*(l1+1)):
2962 2962 dataOut.cov[l]=0.0
2963 2963 for l in range(l1):
2964 2964 dataOut.cov[l*(1+l1)]=e[l]
2965 2965 angle=dataOut.thb[i]*0.01745
2966 2966 bm=dataOut.bfm[i]
2967 2967 dataOut.params[0]=1.0 #norm
2968 2968 dataOut.params[1]=1000.0 #te
2969 2969 dataOut.params[2]=800.0 #ti
2970 2970 dataOut.params[3]=0.00 #ph
2971 2971 dataOut.params[4]=0.00 #phe
2972 2972
2973 2973 if l1!=0:
2974 2974 x=numpy.resize(x,l1)
2975 2975 y=numpy.resize(y,l1)
2976 2976 else:
2977 2977 x=numpy.resize(x,1)
2978 2978 y=numpy.resize(y,1)
2979 2979
2980 2980 if True: #len(y)!=0:
2981 2981 with suppress_stdout_stderr():
2982 2982 fitacf_guess.guess(y,x,zero,depth,t1,t2,len(y)) #t1 = te , t2 = tr = te/ti
2983 2983 t2=t1/t2 # ti
2984 2984
2985 2985 if (t1<5000.0 and t1> 600.0):
2986 2986 dataOut.params[1]=t1
2987 2987 dataOut.params[2]=min(t2,t1)
2988 2988 dataOut.ifit[1]=dataOut.ifit[2]=1
2989 2989 dataOut.ifit[0]=dataOut.ifit[3]=dataOut.ifit[4]=0
2990 2990
2991 2991 if dataOut.ut_Faraday<10.0 and dataOut.ut_Faraday>=0.5: # 6 30 pm to 5 am LT
2992 2992 dataOut.ifit[2]=0
2993 2993
2994 2994 den=dataOut.ph2[i]
2995 2995
2996 2996 if l1!=0:
2997 2997 dataOut.covinv=dataOut.covinv[0:l1*l1].reshape((l1,l1))
2998 2998 dataOut.cov=dataOut.cov[0:l1*l1].reshape((l1,l1))
2999 2999 e=numpy.resize(e,l1)
3000 3000 else:
3001 3001 dataOut.covinv=numpy.resize(dataOut.covinv,1)
3002 3002 dataOut.cov=numpy.resize(dataOut.cov,1)
3003 3003 e=numpy.resize(e,1)
3004 3004
3005 3005 eb=numpy.resize(eb,10)
3006 3006 dataOut.ifit=numpy.resize(dataOut.ifit,10)
3007 3007 #print("*********************FITACF_FIT*********************",dataOut.covinv,e,dataOut.params,eb,dataOut.m)
3008 3008 #exit(1)
3009 3009 with suppress_stdout_stderr():
3010 3010 dataOut.covinv,e,dataOut.params,eb,dataOut.m=fitacf_fit_short.fit(wl,x,y,dataOut.cov,dataOut.covinv,e,dataOut.params,bm,angle,den,dataOut.range1[i],dataOut.year,dataOut.ifit,dataOut.m,l1) #
3011 3011 #exit(1)
3012 3012 if dataOut.params[2]>dataOut.params[1]*1.05:
3013 3013 dataOut.ifit[2]=0
3014 3014 dataOut.params[1]=dataOut.params[2]=t1
3015 3015 with suppress_stdout_stderr():
3016 3016 dataOut.covinv,e,dataOut.params,eb,dataOut.m=fitacf_fit_short.fit(wl,x,y,dataOut.cov,dataOut.covinv,e,dataOut.params,bm,angle,den,dataOut.range1[i],dataOut.year,dataOut.ifit,dataOut.m,l1) #
3017 3017 #print("*********************FIT SUCCESS*********************",dataOut.covinv,e,dataOut.params,eb,dataOut.m)
3018 3018 #exit(1)
3019 3019 if (dataOut.ifit[2]==0):
3020 3020 dataOut.params[2]=dataOut.params[1]
3021 3021 if (dataOut.ifit[3]==0 and iflag==0):
3022 3022 dataOut.params[3]=0.0
3023 3023 if (dataOut.ifit[4]==0):
3024 3024 dataOut.params[4]=0.0
3025 3025 dataOut.te2[i]=dataOut.params[1]
3026 3026 #if i == 13 or i ==14 or i ==15:
3027 3027 #print(dataOut.te2[i])
3028 3028 dataOut.ti2[i]=dataOut.params[2]
3029 3029 dataOut.ete2[i]=eb[1]
3030 3030 dataOut.eti2[i]=eb[2]
3031 3031
3032 3032 if dataOut.eti2[i]==0:
3033 3033 dataOut.eti2[i]=dataOut.ete2[i]
3034 3034
3035 3035 dataOut.phy2[i]=dataOut.params[3]
3036 3036 dataOut.ephy2[i]=eb[3]
3037 3037 if(iflag==1):
3038 3038 dataOut.ephy2[i]=0.0
3039 3039
3040 3040 if (dataOut.m<=3 and dataOut.m!= 0 and dataOut.te2[i]>400.0):
3041 3041 dataOut.info2[i]=1
3042 3042 else:
3043 3043 dataOut.info2[i]=0
3044 3044
3045 3045 def gaussian(self, x, a, b, c, d):
3046 3046 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3047 3047 return val
3048 3048
3049 3049 def run(self,dataOut,IBITS=16):
3050 3050
3051 3051 dataOut.IBITS = IBITS
3052 3052 self.Estimation(dataOut)
3053 3053 '''
3054 3054 from scipy.optimize import least_squares
3055 3055
3056 3056 ratio = dataOut.te2/dataOut.ti2
3057 3057
3058 3058 ratio[:11] = ratio[:11] = 1.
3059 3059 ratio[20:] = ratio[20:] = 1.
3060 3060
3061 3061 #print(ratio)
3062 3062 #print(ratio-self.gaussian(dataOut.heightList[:dataOut.NSHTS],1,250,20,1))
3063 3063 def lsq_func(params):
3064 3064 return (ratio-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2],params[3]))
3065 3065
3066 3066
3067 3067 x0_value = numpy.array([1,250,20,1])
3068 3068
3069 3069 popt = least_squares(lsq_func,x0=x0_value,verbose=0)
3070 3070
3071 3071 A = popt.x[0]; B = popt.x[1]; C = popt.x[2]; D = popt.x[3]
3072 3072
3073 3073 aux = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C, D)
3074 3074
3075 3075 import matplotlib.pyplot as plt
3076 3076 #plt.plot(dataOut.te2,dataOut.heightList[:dataOut.NSHTS])
3077 3077 #plt.plot(dataOut.ti2,dataOut.heightList[:dataOut.NSHTS])
3078 3078 #plt.xlim(0,5000)
3079 3079 plt.plot(ratio,dataOut.heightList[:dataOut.NSHTS])
3080 3080 plt.plot(aux,dataOut.heightList[:dataOut.NSHTS])
3081 3081 #plt.plot(self.gaussian(dataOut.heightList[:dataOut.NSHTS],1,250,20,1),dataOut.heightList[:dataOut.NSHTS])
3082 3082
3083 3083 plt.show()
3084 3084 '''
3085 3085
3086 3086 return dataOut
3087 3087
3088 3088
3089 3089
3090 3090 class DenCorrection(NormalizeDPPowerRoberto_V2):
3091 3091 '''
3092 3092 Written by R. Flores
3093 3093 '''
3094 3094 def __init__(self, **kwargs):
3095 3095
3096 3096 Operation.__init__(self, **kwargs)
3097 3097 self.aux = 0
3098 3098 self.csv_flag = 1
3099 3099
3100 3100 def gaussian(self, x, a, b, c):
3101 3101 val = a * numpy.exp(-(x - b)**2 / (2*c**2))
3102 3102 return val
3103 3103
3104 3104 def TeTiEstimation(self,dataOut):
3105 3105
3106 3106 #dataOut.DPL = 2 #for MST
3107 3107 y=numpy.zeros(dataOut.DPL,order='F',dtype='float32')
3108 3108
3109 3109 #y_aux = numpy.zeros(1,,dtype='float32')
3110 3110 for i in range(dataOut.NSHTS):
3111 3111 y[0]=y[1]=dataOut.range1[i]
3112 3112
3113 3113 y = y.astype(dtype='float64',order='F')
3114 3114 three=int(3)
3115 3115 wl = 3.0
3116 3116 tion=numpy.zeros(three,order='F',dtype='float32')
3117 3117 fion=numpy.zeros(three,order='F',dtype='float32')
3118 3118 nui=numpy.zeros(three,order='F',dtype='float32')
3119 3119 wion=numpy.zeros(three,order='F',dtype='int32')
3120 3120 bline=0.0
3121 3121 #bline=numpy.zeros(1,order='F',dtype='float32')
3122 3122 my_aux = numpy.ones(dataOut.NSHTS,order='F',dtype='float32')
3123 3123 acf_Temps = numpy.ones(dataOut.NSHTS,order='F',dtype='float32')*numpy.nan
3124 3124 acf_no_Temps = numpy.ones(dataOut.NSHTS,order='F',dtype='float32')*numpy.nan
3125 3125
3126 3126 from scipy import signal
        from scipy.optimize import least_squares  # required by the Gaussian fit below (harmless if already imported at module level)
3127 3127
3128 3128 #def func(params):
3129 3129 # return (ratio2-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2]))
3130 3130
3131 3131 #print("Before loop")
3132 3132 dataOut.info2[0] = 1
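        # For each fitted height the loop below evaluates the theoretical ACF
        # (fitacf_acf2.acf2) twice: with the fitted Te/Ti and, during daytime
        # (ut_Faraday > 11) between 150 and 300 km, with Te forced equal to Ti.
        # The ratio of the two zero lags, clipped to [1.0, 1.2], is the Te/Ti
        # efficiency correction factor; those factors are then fitted to a Gaussian
        # in height and applied to ph2 and sdp2 so the corrected density profile
        # stays smooth.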
3133 3133 for i in range(dataOut.NSHTS):
3134 3134 if dataOut.info2[i]==1:
3135 3135 angle=dataOut.thb[i]*0.01745
3136 3136 nue=nui[0]=nui[1]=nui[2]=0.0#nui[3]=0.0
3137 3137 wion[0]=16 #O
3138 3138 wion[1]=1 #H
3139 3139 wion[2]=4 #He
3140 3140 tion[0]=tion[1]=tion[2]=dataOut.ti2[i]
3141 3141 #tion[0]=tion[1]=tion[2]=ti2_smooth[i]
3142 3142 fion[0]=1.0-dataOut.phy2[i] #1
3143 3143 fion[1]=dataOut.phy2[i] #0
3144 3144 fion[2]=0.0 #0
3145 3145 for j in range(dataOut.DPL):
3146 3146 tau=dataOut.alag[j]*1.0e-3
3147 3147 #print("***********ACF2***********")
3148 3148 with suppress_stdout_stderr():#The smoothness in range of "y" depends on the smoothness of the input parameters
3149 3149 y[j]=fitacf_acf2.acf2(wl,tau,dataOut.te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three)
3150 3150 #y[j]=fitacf_acf2.acf2(wl,tau,te2_smooth[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three)
3151 3151 #print("i: ", i, "j: ", j)
3152 3152 #if i == 0 and j == 1:
3153 3153 #print("Params: ",wl,tau,dataOut.te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three)
3154 3154 #y[j]=fitacf_acf2.acf2(wl,tau,my_te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three)
3155 3155 #exit(1)
3156 3156 #if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<400.0:
3157 3157
3158 3158 if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<300.0:
3159 3159 #if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<400.0:
3160 3160 tau=0.0
3161 3161 with suppress_stdout_stderr():
3162 3162 bline=fitacf_acf2.acf2(wl,tau,tion,tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],bline,three)
3163 3163
3164 3164 #if i == 0 and j == 1:
3165 3165 #print("bline: ",bline)
3166 3166 #y[j]=fitacf_acf2.acf2(wl,tau,my_te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three)
3167 3167 #exit(1)
3168 3168 cf=min(1.2,max(1.0,bline/y[0])) #EFFICIENCY FACTOR
3169 3169 #cf = bline/y[0]
3170 3170 #cf=min(2.,max(1.0,bline/y[0]))
3171 3171 my_aux[i] = cf
3172 3172 acf_Temps[i] = y[0]
3173 3173 acf_no_Temps[i] = bline
3174 3174 #dataOut.ph2[i]=cf*dataOut.ph2[i] #Instead we adjust the curve "cf" into a Gaussian,
3175 3175 #dataOut.sdp2[i]=cf*dataOut.sdp2[i] #in order to get smoother values of density
3176 3176 for j in range(1,dataOut.DPL):
3177 3177 #y[j]=(y[j]/y[0])*dataOut.DH+dataOut.range1[i]
3178 3178 y[j]=min(max((y[j]/y[0]),-1.0),1.0)*dataOut.DH+dataOut.range1[i]
3179 3179 y[0]=dataOut.range1[i]+dataOut.DH
3180 3180
3181 3181
3182 3182 ratio = my_aux-1
3183 3183 #ratio = dataOut.te2[:dataOut.NSHTS]/dataOut.ti2[:dataOut.NSHTS]
3184 3184 def lsq_func(params):
3185 3185 return (ratio-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2]))
3186 3186
3187 3187 x0_value = numpy.array([max(ratio),250,20])
3188 3188
3189 3189 popt = least_squares(lsq_func,x0=x0_value,verbose=0)
3190 3190
3191 3191 A = popt.x[0]; B = popt.x[1]; C = popt.x[2]
3192 3192
3193 3193 aux = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C) + 1 #ratio + 1
3194 3194
3195 3195 dataOut.ph2[:dataOut.NSHTS]*=aux
3196 3196 dataOut.sdp2[:dataOut.NSHTS]*=aux
3197 3197 #dataOut.ph2[:26]*=aux[:26]
3198 3198 #dataOut.sdp2[:26]*=aux[:26]
3199 3199 #print(aux)
3200 3200 #print("inside correction",dataOut.ph2)
3201 3201
3202 3202 def run(self,dataOut,savecf=0):
3203 3203 #print("hour",gmtime(dataOut.utctime).tm_hour)
3204 3204 if gmtime(dataOut.utctime).tm_hour < 24. and gmtime(dataOut.utctime).tm_hour >= 11.:
3205 3205 if hasattr(dataOut, 'flagSpreadF') and dataOut.flagSpreadF:
3206 3206 pass
3207 3207 else:
3208 3208 #print("inside")
3209 3209 self.TeTiEstimation(dataOut)
3210 3210 dataOut.flagTeTiCorrection = True
3211 3211 self.normalize(dataOut)
3212 3212 #'''
3213 3213 #Here save dataOut.cf
3214 3214 if savecf:
3215 3215 try:
3216 3216 import pandas as pd
3217 3217 if self.csv_flag:
3218 3218 if not os.path.exists("./cf"):
3219 3219 os.makedirs("./cf")
3220 3220 self.doy_csv = datetime.datetime.fromtimestamp(dataOut.utctime).strftime('%j')
3221 3221 self.year_csv = datetime.datetime.fromtimestamp(dataOut.utctime).strftime('%Y')
3222 3222 file = open("./cf/cf{0}{1}.csv".format(self.year_csv,self.doy_csv), "x")
3223 3223 f = csv.writer(file)
3224 3224 f.writerow(numpy.array(["timestamp",'cf']))
3225 3225 self.csv_flag = 0
3226 3226 print("Creating cf File")
3227 3227 print("Writing cf File")
3228 3228 except:
3229 3229 file = open("./cf/cf{0}{1}.csv".format(self.year_csv,self.doy_csv), "a")
3230 3230 f = csv.writer(file)
3231 3231 print("Writing cf File")
3232 3232 cf = numpy.array([dataOut.utctime,dataOut.cf])
3233 3233 f.writerow(cf)
3234 3234 file.close()
3235 3235 #'''
3236 3236
3237 3237 return dataOut
3238 3238
3239 3239 class DataPlotCleaner(Operation):
3240 3240 '''
3241 3241 Written by R. Flores
3242 3242 '''
3243 3243 def __init__(self, **kwargs):
3244 3244
3245 3245 Operation.__init__(self, **kwargs)
3246 3246
3247 3247 def run(self,dataOut):
3248 3248
3249 3249 THRESH_MIN_POW=10000
3250 3250 THRESH_MAX_POW=10000000
3251 3251 THRESH_MIN_TEMP=500
3252 3252 THRESH_MAX_TEMP=4000
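        # Plot-only clipping: densities are clamped to [THRESH_MIN_POW, THRESH_MAX_POW],
        # temperatures to [THRESH_MIN_TEMP, THRESH_MAX_TEMP] (temperatures whose error
        # exceeds 500 K are set to 500), and temperatures above NSHTS are set to NaN.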
3253 3253 dataOut.DensityClean=numpy.zeros((1,dataOut.NDP))
3254 3254 dataOut.EDensityClean=numpy.zeros((1,dataOut.NDP))
3255 3255 dataOut.ElecTempClean=numpy.zeros((1,dataOut.NDP))
3256 3256 dataOut.EElecTempClean=numpy.zeros((1,dataOut.NDP))
3257 3257 dataOut.IonTempClean=numpy.zeros((1,dataOut.NDP))
3258 3258 dataOut.EIonTempClean=numpy.zeros((1,dataOut.NDP))
3259 3259
3260 3260 dataOut.DensityClean[0]=numpy.copy(dataOut.ph2)
3261 3261 dataOut.EDensityClean[0]=numpy.copy(dataOut.sdp2)
3262 3262 dataOut.ElecTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.te2)
3263 3263 dataOut.EElecTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.ete2)
3264 3264 dataOut.IonTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.ti2)
3265 3265 dataOut.EIonTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.eti2)
3266 3266
3267 3267 for i in range(dataOut.NDP):
3268 3268 if dataOut.DensityClean[0,i]<THRESH_MIN_POW:
3269 3269 dataOut.DensityClean[0,i]=THRESH_MIN_POW
3270 3270
3271 3271 for i in range(dataOut.NDP):
3272 3272 if dataOut.DensityClean[0,i]>THRESH_MAX_POW:
3273 3273 dataOut.DensityClean[0,i]=THRESH_MAX_POW
3274 3274
3275 3275 for i in range(dataOut.NSHTS):
3276 3276 dataOut.ElecTempClean[0,i]=(max(1.0, dataOut.ElecTempClean[0,i]))
3277 3277 dataOut.IonTempClean[0,i]=(max(1.0, dataOut.IonTempClean[0,i]))
3278 3278 for i in range(dataOut.NSHTS):
3279 3279 if dataOut.ElecTempClean[0,i]<THRESH_MIN_TEMP:
3280 3280 dataOut.ElecTempClean[0,i]=THRESH_MIN_TEMP
3281 3281 if dataOut.IonTempClean[0,i]<THRESH_MIN_TEMP:
3282 3282 dataOut.IonTempClean[0,i]=THRESH_MIN_TEMP
3283 3283 for i in range(dataOut.NSHTS):
3284 3284 if dataOut.ElecTempClean[0,i]>THRESH_MAX_TEMP:
3285 3285 dataOut.ElecTempClean[0,i]=THRESH_MAX_TEMP
3286 3286 if dataOut.IonTempClean[0,i]>THRESH_MAX_TEMP:
3287 3287 dataOut.IonTempClean[0,i]=THRESH_MAX_TEMP
3288 3288 for i in range(dataOut.NSHTS):
3289 3289 if dataOut.EElecTempClean[0,i]>500:#
3290 3290 dataOut.ElecTempClean[0,i]=500
3291 3291 if dataOut.EIonTempClean[0,i]>500:#
3292 3292 dataOut.IonTempClean[0,i]=500
3293 3293
3294 3294 missing=numpy.nan
3295 3295
3296 3296 for i in range(dataOut.NSHTS,dataOut.NDP):
3297 3297
3298 3298 dataOut.ElecTempClean[0,i]=missing
3299 3299 dataOut.EElecTempClean[0,i]=missing
3300 3300 dataOut.IonTempClean[0,i]=missing
3301 3301 dataOut.EIonTempClean[0,i]=missing
3302 3302
3303 3303 return dataOut
3304 3304
3305 3305
3306 3306 class DataSaveCleaner(Operation):
3307 3307 '''
3308 3308 Written by R. Flores
3309 3309 '''
3310 3310 def __init__(self, **kwargs):
3311 3311
3312 3312 Operation.__init__(self, **kwargs)
3313 3313 self.csv_flag = 1
3314 3314
3315 3315 def run(self,dataOut,savecfclean=0):
3316 3316 #print(dataOut.heightList)
3317 3317 #exit(1)
3318 3318 dataOut.DensityFinal=numpy.zeros((1,dataOut.NDP))
3319 3319 dataOut.dphiFinal=numpy.zeros((1,dataOut.NDP))
3320 3320 dataOut.EDensityFinal=numpy.zeros((1,dataOut.NDP))
3321 3321 dataOut.ElecTempFinal=numpy.zeros((1,dataOut.NDP))
3322 3322 dataOut.EElecTempFinal=numpy.zeros((1,dataOut.NDP))
3323 3323 dataOut.IonTempFinal=numpy.zeros((1,dataOut.NDP))
3324 3324 dataOut.EIonTempFinal=numpy.zeros((1,dataOut.NDP))
3325 3325 dataOut.PhyFinal=numpy.zeros((1,dataOut.NDP))
3326 3326 dataOut.EPhyFinal=numpy.zeros((1,dataOut.NDP))
3327 3327
3328 3328 dataOut.DensityFinal[0]=numpy.copy(dataOut.ph2)
3329 3329 dataOut.dphiFinal[0]=numpy.copy(dataOut.dphi)
3330 3330 dataOut.EDensityFinal[0]=numpy.copy(dataOut.sdp2)
3331 3331 dataOut.ElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.te2)
3332 3332 dataOut.EElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ete2)
3333 3333 dataOut.IonTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ti2)
3334 3334 dataOut.EIonTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.eti2)
3335 3335 dataOut.PhyFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.phy2)
3336 3336 dataOut.EPhyFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ephy2)
3337 3337
3338 3338 missing=numpy.nan
3339 3339 #print("den1: ",dataOut.DensityFinal)
3340 3340 temp_min=100.0
3341 3341 temp_max=3000.0 #6000.0
3342 3342 #print("Density: ",dataOut.DensityFinal[0])
3343 3343 #print("Error: ",dataOut.EDensityFinal[0])
3344 3344 #print(100*dataOut.EDensityFinal[0]/dataOut.DensityFinal[0])
3345 3345 den_err_percent = 100*dataOut.EDensityFinal[0]/dataOut.DensityFinal[0]
3346 3346 max_den_err_per = 35#30 #Densities with an error above 35% are set to NaN
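        # Quality-control pass over the final profiles: densities with a relative
        # error above max_den_err_per, temperatures outside [temp_min, temp_max] or
        # coming from failed fits (info2 != 1), heights with fewer than 6 valid lags,
        # and composition values outside [0, 0.4] (or with errors outside [0, 1]) are
        # replaced with NaN; the later loops also remove isolated points surrounded
        # by NaNs and blank the whole profile if too few valid heights remain.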
3347 3347 for i in range(dataOut.NSHTS):
3348 3348
3349 3349 if den_err_percent[i] >= max_den_err_per:
3350 3350 dataOut.DensityFinal[0,i]=dataOut.EDensityFinal[0,i]=missing
3351 3351 if i > 40: #Heights above 600 km
3352 3352 dataOut.DensityFinal[0,i:]=dataOut.EDensityFinal[0,i:]=missing
3353 3353
3354 3354 if dataOut.info2[i]!=1:
3355 3355 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3356 3356
3357 3357 if dataOut.ElecTempFinal[0,i]<=temp_min or dataOut.ElecTempFinal[0,i]>temp_max or dataOut.EElecTempFinal[0,i]>temp_max:
3358 3358
3359 3359 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing
3360 3360
3361 3361 if dataOut.IonTempFinal[0,i]<=temp_min or dataOut.IonTempFinal[0,i]>temp_max or dataOut.EIonTempFinal[0,i]>temp_max:
3362 3362 dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3363 3363
3364 3364 if dataOut.lags_to_plot[i,:][~numpy.isnan(dataOut.lags_to_plot[i,:])].shape[0]<6:
3365 3365 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3366 3366
3367 3367 if dataOut.ut_Faraday>4 and dataOut.ut_Faraday<11:
3368 3368 if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10:
3369 3369 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3370 3370
3371 3371 if dataOut.EPhyFinal[0,i]<0.0 or dataOut.EPhyFinal[0,i]>1.0:
3372 3372 dataOut.PhyFinal[0,i]=dataOut.EPhyFinal[0,i]=missing
3373 3373 #'''
3374 3374 if dataOut.EDensityFinal[0,i]>0.0 and dataOut.DensityFinal[0,i]>0.0 and dataOut.DensityFinal[0,i]<9.9e6:
3375 3375 dataOut.EDensityFinal[0,i]=max(dataOut.EDensityFinal[0,i],1000.0)
3376 3376 else:
3377 3377 dataOut.DensityFinal[0,i]=dataOut.EDensityFinal[0,i]=missing
3378 3378 #'''
3379 3379
3380 3380 if dataOut.PhyFinal[0,i]==0 or dataOut.PhyFinal[0,i]>0.4:
3381 3381 dataOut.PhyFinal[0,i]=dataOut.EPhyFinal[0,i]=missing
3382 3382 if dataOut.ElecTempFinal[0,i]==dataOut.IonTempFinal[0,i]:
3383 3383 dataOut.EElecTempFinal[0,i]=dataOut.EIonTempFinal[0,i]
3384 3384 if numpy.isnan(dataOut.ElecTempFinal[0,i]):
3385 3385 dataOut.EElecTempFinal[0,i]=missing
3386 3386 if numpy.isnan(dataOut.IonTempFinal[0,i]):
3387 3387 dataOut.EIonTempFinal[0,i]=missing
3388 3388 if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]):
3389 3389 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3390 3390
3391 3391 for i in range(12,dataOut.NSHTS-1):
3392 3392
3393 3393 if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i+1]):
3394 3394 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing
3395 3395
3396 3396 if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i+1]):
3397 3397 dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3398 3398
3399 3399 if dataOut.ut_Faraday>4 and dataOut.ut_Faraday<11:
3400 3400
3401 3401 if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i-2]) and numpy.isnan(dataOut.ElecTempFinal[0,i+2]) and numpy.isnan(dataOut.ElecTempFinal[0,i+3]): #and numpy.isnan(dataOut.ElecTempFinal[0,i-5]):
3402 3402
3403 3403 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing
3404 3404 if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i-2]) and numpy.isnan(dataOut.IonTempFinal[0,i+2]) and numpy.isnan(dataOut.IonTempFinal[0,i+3]): #and numpy.isnan(dataOut.IonTempFinal[0,i-5]):
3405 3405
3406 3406 dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3407 3407
3408 3408
3409 3409
3410 3410 if i>25:
3411 3411 if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i-2]) and numpy.isnan(dataOut.ElecTempFinal[0,i-3]) and numpy.isnan(dataOut.ElecTempFinal[0,i-4]): #and numpy.isnan(dataOut.ElecTempFinal[0,i-5]):
3412 3412 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing
3413 3413 if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i-2]) and numpy.isnan(dataOut.IonTempFinal[0,i-3]) and numpy.isnan(dataOut.IonTempFinal[0,i-4]): #and numpy.isnan(dataOut.IonTempFinal[0,i-5]):
3414 3414
3415 3415 dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3416 3416
3417 3417 if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]):
3418 3418
3419 3419 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3420 3420
3421 3421 for i in range(12,dataOut.NSHTS-1):
3422 3422
3423 3423 if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i+1]):
3424 3424 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing
3425 3425
3426 3426 if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i+1]):
3427 3427 dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3428 3428
3429 3429 if numpy.isnan(dataOut.DensityFinal[0,i-1]) and numpy.isnan(dataOut.DensityFinal[0,i+1]): ##NEW
3430 3430 dataOut.DensityFinal[0,i]=dataOut.EDensityFinal[0,i]=missing ##NEW
3431 3431
3432 3432 if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]):
3433 3433
3434 3434 dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing
3435 3435
3436 3436 if numpy.count_nonzero(~numpy.isnan(dataOut.ElecTempFinal[0,12:50]))<5:
3437 3437 dataOut.ElecTempFinal[0,:]=dataOut.EElecTempFinal[0,:]=missing
3438 3438 if numpy.count_nonzero(~numpy.isnan(dataOut.IonTempFinal[0,12:50]))<5:
3439 3439 dataOut.IonTempFinal[0,:]=dataOut.EIonTempFinal[0,:]=missing
3440 3440
3441 3441
3442 3442 '''
3443 3443 nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal[0,:]))
3444 3444 #print(nanindex)
3445 3445 i1 = nanindex[-1][0]
3446 3446 #print("i1",i1)
3447 3447 dataOut.EDensityFinal[0,i1:] = dataOut.DensityFinal[0,i1:] = missing
3448 3448 '''
3449 3449
3450 3450 '''
3451 3451 #print("den2: ",dataOut.DensityFinal)
3452 3452 if gmtime(dataOut.utctime).tm_hour >= 23. or gmtime(dataOut.utctime).tm_hour < 5.: #18-00 LT
3453 3453 nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal[0,:33]))
3454 3454 #print(nanindex)
3455 3455 i1 = nanindex[-1][0]
3456 3456 #print("i1",i1)
3457 3457 dataOut.EDensityFinal[0,:i1] = dataOut.DensityFinal[0,:i1] = missing
3458 3458 #print("den3: ",dataOut.DensityFinal)
3459 3459 elif gmtime(dataOut.utctime).tm_hour >= 6. or gmtime(dataOut.utctime).tm_hour < 11.: #18-00 LT
3460 3460 nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal[0,:20]))
3461 3461 #print(nanindex)
3462 3462 i1 = nanindex[-1][0]
3463 3463 #print("i1",i1)
3464 3464 dataOut.EDensityFinal[0,:i1] = dataOut.DensityFinal[0,:i1] = missing
3465 3465 '''
3466 3466 #print("den_nans: ",dataOut.DensityFinal[0,12:50])
3467 3467 if numpy.count_nonzero(~numpy.isnan(dataOut.DensityFinal[0,12:50]))<=5:
3468 3468 dataOut.DensityFinal[0,:]=dataOut.EDensityFinal[0,:]=missing
3469 3469 #for i in range(dataOut.NSHTS,dataOut.NDP):
3470 3470 #for i in range(40,dataOut.NDP):
3471 3471 #print("den2: ",dataOut.DensityFinal)
3472 3472 dataOut.DensityFinal[0,dataOut.NSHTS:]=missing
3473 3473 dataOut.EDensityFinal[0,dataOut.NSHTS:]=missing
3474 3474 dataOut.ElecTempFinal[0,dataOut.NSHTS:]=missing
3475 3475 dataOut.EElecTempFinal[0,dataOut.NSHTS:]=missing
3476 3476 dataOut.IonTempFinal[0,dataOut.NSHTS:]=missing
3477 3477 dataOut.EIonTempFinal[0,dataOut.NSHTS:]=missing
3478 3478 dataOut.PhyFinal[0,dataOut.NSHTS:]=missing
3479 3479 dataOut.EPhyFinal[0,dataOut.NSHTS:]=missing
3480 3480
3481 3481 #To be tested
3482 3482 '''
3483 3483 nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal))
3484 3484 i1 = nanindex[-1][0]
3485 3485 print("i1",i1)
3486 3486 dataOut.DensityFinal[0,i1+1:]=missing
3487 3487 dataOut.EDensityFinal[0,i1+1:]=missing
3488 3488 dataOut.ElecTempFinal[0,i1+1:]=missing
3489 3489 dataOut.EElecTempFinal[0,i1+1:]=missing
3490 3490 dataOut.IonTempFinal[0,i1+1:]=missing
3491 3491 dataOut.EIonTempFinal[0,i1+1:]=missing
3492 3492 dataOut.PhyFinal[0,i1+1:]=missing
3493 3493 dataOut.EPhyFinal[0,i1+1:]=missing
3494 3494 '''
3495 3495 #'''
3496 3496 if gmtime(dataOut.utctime).tm_hour >= 13. and gmtime(dataOut.utctime).tm_hour < 21.: #07-16 LT
3497 3497 dataOut.DensityFinal[0,:13]=missing
3498 3498 dataOut.EDensityFinal[0,:13]=missing
3499 3499 dataOut.ElecTempFinal[0,:13]=missing
3500 3500 dataOut.EElecTempFinal[0,:13]=missing
3501 3501 dataOut.IonTempFinal[0,:13]=missing
3502 3502 dataOut.EIonTempFinal[0,:13]=missing
3503 3503 dataOut.PhyFinal[0,:13]=missing
3504 3504 dataOut.EPhyFinal[0,:13]=missing
3505 3505 #'''
3506 3506 else:
3507 3507 if gmtime(dataOut.utctime).tm_hour == 9 and gmtime(dataOut.utctime).tm_min == 20:
3508 3508 pass
3509 3509 else:
3510 3510 dataOut.DensityFinal[0,:dataOut.min_id_eej+1]=missing
3511 3511 dataOut.EDensityFinal[0,:dataOut.min_id_eej+1]=missing
3512 3512 dataOut.ElecTempFinal[0,:dataOut.min_id_eej+1]=missing
3513 3513 dataOut.EElecTempFinal[0,:dataOut.min_id_eej+1]=missing
3514 3514 dataOut.IonTempFinal[0,:dataOut.min_id_eej+1]=missing
3515 3515 dataOut.EIonTempFinal[0,:dataOut.min_id_eej+1]=missing
3516 3516 dataOut.PhyFinal[0,:dataOut.min_id_eej+1]=missing
3517 3517 dataOut.EPhyFinal[0,:dataOut.min_id_eej+1]=missing
3518 3518 '''
3519 3519 if gmtime(dataOut.utctime).tm_hour >= 11. or gmtime(dataOut.utctime).tm_hour < 23.: #06-18 LT
3520 3520 dataOut.DensityFinal[0,:13]=missing
3521 3521 dataOut.EDensityFinal[0,:13]=missing
3522 3522 dataOut.ElecTempFinal[0,:13]=missing
3523 3523 dataOut.EElecTempFinal[0,:13]=missing
3524 3524 dataOut.IonTempFinal[0,:13]=missing
3525 3525 dataOut.EIonTempFinal[0,:13]=missing
3526 3526 dataOut.PhyFinal[0,:13]=missing
3527 3527 dataOut.EPhyFinal[0,:13]=missing
3528 3528 else:
3529 3529 #for i in range(12):
3530 3530 dataOut.DensityFinal[0,:12]=missing
3531 3531 dataOut.EDensityFinal[0,:12]=missing
3532 3532 dataOut.ElecTempFinal[0,:12]=missing
3533 3533 dataOut.EElecTempFinal[0,:12]=missing
3534 3534 dataOut.IonTempFinal[0,:12]=missing
3535 3535 dataOut.EIonTempFinal[0,:12]=missing
3536 3536 dataOut.PhyFinal[0,:12]=missing
3537 3537 dataOut.EPhyFinal[0,:12]=missing
3538 3538 '''
3539 3539 #print(dataOut.EDensityFinal)
3540 3540 #exit(1)
3541 3541 '''
3542 3542 print(dataOut.ElecTempFinal)
3543 3543 print(dataOut.heightList)
3544 3544 exit(1)
3545 3545 '''
3546 3546 ### Manual Data Cleaning
3547 3547
3548 3548 time_text = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3549 3549 DOY = time_text.timetuple().tm_yday
3550 3550 '''
3551 3551 # 2 Sep 24
3552 3552 if (time_text.hour == 1 and (time_text.minute == 40)): #06-18 LT
3553 3553 #dataOut.DensityFinal[0,27:]=missing
3554 3554 #dataOut.EDensityFinal[0,27:]=missing
3555 3555 dataOut.ElecTempFinal[0,:]=missing
3556 3556 dataOut.EElecTempFinal[0,:]=missing
3557 3557 dataOut.IonTempFinal[0,:]=missing
3558 3558 dataOut.EIonTempFinal[0,:]=missing
3559 3559 dataOut.PhyFinal[0,:]=missing
3560 3560 dataOut.EPhyFinal[0,:] = missing
3561 3561 if (time_text.hour == 5 and (time_text.minute >= 20)) or (time_text.hour == 6 and (time_text.minute <= 5)): #06-18 LT
3562 3562 #dataOut.DensityFinal[0,27:]=missing
3563 3563 #dataOut.EDensityFinal[0,27:]=missing
3564 3564 dataOut.ElecTempFinal[0,27:]=missing
3565 3565 dataOut.EElecTempFinal[0,27:]=missing
3566 3566 dataOut.IonTempFinal[0,27:]=missing
3567 3567 dataOut.EIonTempFinal[0,27:]=missing
3568 3568 dataOut.PhyFinal[0,27:]=missing
3569 3569 dataOut.EPhyFinal[0, 27:] = missing
3570 3570 '''
3571 3571 '''
3572 3572 # 5 Sep 24
3573 3573
3574 3574 if (time_text.hour == 3 and (time_text.minute == 35)) or (time_text.hour == 3 and (time_text.minute == 20)): #06-18 LT
3575 3575 #dataOut.DensityFinal[0,27:]=missing
3576 3576 #dataOut.EDensityFinal[0,27:]=missing
3577 3577 dataOut.ElecTempFinal[0,:]=missing
3578 3578 dataOut.EElecTempFinal[0,:]=missing
3579 3579 dataOut.IonTempFinal[0,:]=missing
3580 3580 dataOut.EIonTempFinal[0,:]=missing
3581 3581 dataOut.PhyFinal[0,:]=missing
3582 3582 dataOut.EPhyFinal[0,:] = missing
3583 3583
3584 3584 if (time_text.hour == 5 and (time_text.minute >= 20)) or (time_text.hour == 6 and (time_text.minute <= 5)): #06-18 LT
3585 3585 #dataOut.DensityFinal[0,27:]=missing
3586 3586 #dataOut.EDensityFinal[0,27:]=missing
3587 3587 dataOut.ElecTempFinal[0,27:]=missing
3588 3588 dataOut.EElecTempFinal[0,27:]=missing
3589 3589 dataOut.IonTempFinal[0,27:]=missing
3590 3590 dataOut.EIonTempFinal[0,27:]=missing
3591 3591 dataOut.PhyFinal[0,27:]=missing
3592 3592 dataOut.EPhyFinal[0,27:] = missing
3593 3593 '''
3594 3594 '''
3595 3595 # 6 Sep 24
3596 3596 if (time_text.hour == 5 and (time_text.minute <= 25)) or (time_text.hour == 6 and (time_text.minute >= 5)): #06-18 LT
3597 3597 #dataOut.DensityFinal[0,27:]=missing
3598 3598 #dataOut.EDensityFinal[0,27:]=missing
3599 3599 dataOut.ElecTempFinal[0,25:]=missing
3600 3600 dataOut.EElecTempFinal[0,25:]=missing
3601 3601 dataOut.IonTempFinal[0,25:]=missing
3602 3602 dataOut.EIonTempFinal[0,25:]=missing
3603 3603 dataOut.PhyFinal[0,25:]=missing
3604 3604 dataOut.EPhyFinal[0, 25:] = missing
3605 3605 '''
3606 3606 '''# 8 Sep 24
3607 3607 if True: #06-18 LT
3608 3608 #dataOut.DensityFinal[0,27:]=missing
3609 3609 #dataOut.EDensityFinal[0,27:]=missing
3610 3610 dataOut.ElecTempFinal[0,36:]=missing
3611 3611 dataOut.EElecTempFinal[0,36:]=missing
3612 3612 dataOut.IonTempFinal[0,36:]=missing
3613 3613 dataOut.EIonTempFinal[0,36:]=missing
3614 3614 dataOut.PhyFinal[0,36:]=missing
3615 3615 dataOut.EPhyFinal[0, 36:] = missing'''
3616 3616 '''# 24 Jan 25
3617 3617 if (time_text.hour >= 5 ) and (time_text.hour <= 7):
3618 3618 #dataOut.DensityFinal[0,27:]=missing
3619 3619 #dataOut.EDensityFinal[0,27:]=missing
3620 3620 dataOut.ElecTempFinal[0,:]=missing
3621 3621 dataOut.EElecTempFinal[0,:]=missing
3622 3622 dataOut.IonTempFinal[0,:]=missing
3623 3623 dataOut.EIonTempFinal[0,:]=missing
3624 3624 dataOut.PhyFinal[0,:]=missing
3625 3625 dataOut.EPhyFinal[0, :] = missing'''
3626 3626 start = time()
3627 3627 flagcleandata = True
3628 3628 if flagcleandata:
3629 3629 #print("Final Cleaning Process", time_text.hour, time_text.minute)
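            # Assumed structure of clean_data.json, inferred from the parsing below
            # and given only as an illustration: each condition removes the height
            # indices aux_index[0]:aux_index[1] when the record falls on that
            # year/doy between initial_time and final_time, e.g.
            # {"conditions": [{"year": 2024, "doy": 246,
            #                  "initial_time": [5, 20], "final_time": [6, 5],
            #                  "aux_index": [27, 50]}]}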
3630 3630 path = os.path.join(os.path.dirname(__file__), 'clean_data.json')
3631 3631 with open(path) as f:
3632 3632 jsondata= json.load(f)
3633 3633
3634 3634 corrections = {}
3635 3635
3636 3636
3637 3637 for condition in jsondata['conditions']:
3638 3638 year = condition['year']
3639 3639 doy = condition['doy']
3640 3640 init = condition['initial_time']
3641 3641 final = condition['final_time']
3642 3642 aux_index = condition['aux_index']
3643 3643
3644 3644 input_time_obj = datetime.time(hour=time_text.hour, minute=time_text.minute)
3645 3645 init_time_obj = datetime.time(hour=init[0], minute=init[1])
3646 3646 final_time_obj = datetime.time(hour=final[0], minute=final[1])
3647 3647
3648 3648 is_between = init_time_obj <= input_time_obj <= final_time_obj
3649 3649
3650 3650 if (year != time_text.year) or (DOY != doy) or (is_between == False):
3651 3651 #print("NON valid condition:", condition)
3652 3652 continue
3653 3653
3654 3654 print("valid condition:", condition)
3655 3655 indexi, indexf = aux_index[0], aux_index[1]
3656 3656 #index = slice(indexi, indexf)
3657 3657 #print(index, "index")
3658 3658 dataOut.DensityFinal[0,indexi:indexf]=missing
3659 3659 dataOut.EDensityFinal[0,indexi:indexf]=missing
3660 3660 dataOut.ElecTempFinal[0,indexi:indexf]=missing
3661 3661 dataOut.EElecTempFinal[0,indexi:indexf]=missing
3662 3662 dataOut.IonTempFinal[0,indexi:indexf]=missing
3663 3663 dataOut.EIonTempFinal[0,indexi:indexf]=missing
3664 3664 dataOut.PhyFinal[0,indexi:indexf]=missing
3665 3665 dataOut.EPhyFinal[0,indexi:indexf]=missing
3666 3666
3667 3667 print(f"** Cleaning applied ** Data eliminated at {time_text} from height index {indexi} to {indexf}")
3668 3668
3669 3669 end = time() ########
3670 3670 #spend_clean_time += end - start
3671 3671 print("clean data time: ", end - start)
3672 3672
3673 3673
3674 3674 '''
3675 3675 if key in corrections:
3676 3676 #
3677 3677 input_time_obj = datetime.time(hour=time_text.hour, minute=time_text.minute)
3678 3678 init_time_obj = datetime.time(hour=init[0], minute=init[1])
3679 3679 final_time_obj = datetime.time(hour=final[0], minute=final[1])
3680 3680
3681 3681 if init_time_obj <= final_time_obj:
3682 3682 # Interval does not cross midnight
3683 3683 is_between = init_time_obj <= input_time_obj <= final_time_obj
3684 3684 else:
3685 3685 # Interval crosses midnight
3686 3686 is_between = init_time_obj <= input_time_obj or input_time_obj <= final_time_obj
3687 3687
3688 3688 if not is_between:
3689 3689 pass
3690 3690 #
3691 3691
3692 3692 clean = corrections[key]
3693 3693 indexi, indexf = clean[0], clean[1]
3694 3694 index = slice(indexi, indexf)
3695 3695
3696 3696 dataOut.DensityFinal[0,index]=missing
3697 3697 dataOut.EDensityFinal[0,index]=missing
3698 3698 dataOut.ElecTempFinal[0,index]=missing
3699 3699 dataOut.EElecTempFinal[0,index]=missing
3700 3700 dataOut.IonTempFinal[0,index]=missing
3701 3701 dataOut.EIonTempFinal[0,index]=missing
3702 3702 dataOut.PhyFinal[0,index]=missing
3703 3703 dataOut.EPhyFinal[0,index]=missing
3704 3704
3705 3705 print(f"Cleaning applied:")
3706 3706 '''
3707 3707
3708 3708
3709 3709
3710 3710 #print("den_final",dataOut.DensityFinal)
3711 3711
3712 3712 # for MAD
3713 3713 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.DensityFinal)) # If every value is NaN, processing does not continue
3714 3714 '''Save clean-data information in a temporary CSV file for density correction'''
3715 3715 if not dataOut.flagNoData:
3716 3716 if savecfclean:
3717 3717 try:
3718 3718 import pandas as pd
3719 3719 if self.csv_flag:
3720 3720 if not os.path.exists("./cfclean"):
3721 3721 os.makedirs("./cfclean")
3722 3722 self.doy_csv = datetime.datetime.fromtimestamp(dataOut.utctime).strftime('%j')
3723 3723 self.year_csv = datetime.datetime.fromtimestamp(dataOut.utctime).strftime('%Y')
3724 3724 file = open("./cfclean/cfclean{0}{1}.csv".format(self.year_csv,self.doy_csv), "x")
3725 3725 f = csv.writer(file)
3726 3726 f.writerow(numpy.array(["timestamp",'cf']))
3727 3727 self.csv_flag = 0
3728 3728 print("Creating cf clean File")
3729 3729 print("Writing cf clean File")
3730 3730 except:
3731 3731 file = open("./cfclean/cfclean{0}{1}.csv".format(self.year_csv,self.doy_csv), "a")
3732 3732 f = csv.writer(file)
3733 3733 print("Writing cf clean File")
3734 3734 cf = numpy.array([dataOut.utctime,dataOut.cf])
3735 3735 f.writerow(cf)
3736 3736 file.close()
3737 3737 # for plot
3738 3738 #dataOut.flagNoData = False # Uncomment only for plotting; keep commented for MADWriter
3739 3739
3740 3740 dataOut.DensityFinal *= 1.e6 # Convert units to m^-3
3741 3741 dataOut.EDensityFinal *= 1.e6 # Convert units to m^-3
3742 3742 print("Save Cleaner: ", dataOut.flagNoData)
3743 3743
3744 3744
3745 3745 #print("den: ", dataOut.DensityFinal[0,27])
3746 3746 return dataOut
3747 3747
3748 3748
3749 3749
3750 3750 class ACFs(Operation):
3751 3751 '''
3752 3752 Written by R. Flores
3753 3753 '''
3754 3754 def __init__(self, **kwargs):
3755 3755
3756 3756 Operation.__init__(self, **kwargs)
3757 3757
3758 3758 self.aux=1
3759 3759
3760 3760 def run(self,dataOut):
3761 3761
3762 3762 if self.aux:
3763 3763 self.taup=numpy.zeros(dataOut.DPL,'float32')
3764 3764 self.pacf=numpy.zeros(dataOut.DPL,'float32')
3765 3765 self.sacf=numpy.zeros(dataOut.DPL,'float32')
3766 3766
3767 3767 self.taup_full=numpy.zeros(dataOut.DPL,'float32')
3768 3768 self.pacf_full=numpy.zeros(dataOut.DPL,'float32')
3769 3769 self.sacf_full=numpy.zeros(dataOut.DPL,'float32')
3770 3770 self.x_igcej=numpy.zeros(dataOut.DPL,'float32')
3771 3771 self.y_igcej=numpy.zeros(dataOut.DPL,'float32')
3772 3772 self.x_ibad=numpy.zeros(dataOut.DPL,'float32')
3773 3773 self.y_ibad=numpy.zeros(dataOut.DPL,'float32')
3774 3774 self.aux=0
3775 3775
3776 3776 dataOut.acfs_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3777 3777 dataOut.acfs_to_save=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3778 3778 dataOut.acfs_error_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3779 3779 dataOut.acfs_error_to_save=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3780 3780 dataOut.lags_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3781 3781 dataOut.x_igcej_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3782 3782 dataOut.x_ibad_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3783 3783 dataOut.y_igcej_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3784 3784 dataOut.y_ibad_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32')
3785 3785
3786 3786 for i in range(dataOut.NSHTS):
3787 3787
3788 3788 acfm=dataOut.rhor[i][0]**2+dataOut.rhoi[i][0]**2
3789 3789
3790 3790 if acfm>0:
3791 3791 cc=dataOut.rhor[i][0]/acfm
3792 3792 ss=dataOut.rhoi[i][0]/acfm
3793 3793 else:
3794 3794 cc=1.
3795 3795 ss=0.
3796 3796
3797 3797 # keep only uncontaminated data
3798 3798 for l in range(dataOut.DPL):
3799 3799 fact=dataOut.DH
3800 3800 if (dataOut.igcej[i][l]==0 and dataOut.ibad[i][l]==0):
3801 3801
3802 3802 self.pacf_full[l]=min(1.0,max(-1.0,(dataOut.rhor[i][l]*cc + dataOut.rhoi[i][l]*ss)))*fact+dataOut.range1[i]
3803 3803 self.sacf_full[l]=min(1.0,numpy.sqrt(dataOut.sd[i][l]))*fact
3804 3804 self.taup_full[l]=dataOut.alag[l]
3805 3805 self.x_igcej[l]=numpy.nan
3806 3806 self.y_igcej[l]=numpy.nan
3807 3807 self.x_ibad[l]=numpy.nan
3808 3808 self.y_ibad[l]=numpy.nan
3809 3809
3810 3810 else:
3811 3811 self.pacf_full[l]=numpy.nan
3812 3812 self.sacf_full[l]=numpy.nan
3813 3813 self.taup_full[l]=numpy.nan
3814 3814
3815 3815 if dataOut.igcej[i][l]:
3816 3816 self.x_igcej[l]=dataOut.alag[l]
3817 3817 self.y_igcej[l]=dataOut.range1[i]
3818 3818 self.x_ibad[l]=numpy.nan
3819 3819 self.y_ibad[l]=numpy.nan
3820 3820
3821 3821 if dataOut.ibad[i][l]:
3822 3822 self.x_igcej[l]=numpy.nan
3823 3823 self.y_igcej[l]=numpy.nan
3824 3824 self.x_ibad[l]=dataOut.alag[l]
3825 3825 self.y_ibad[l]=dataOut.range1[i]
3826 3826
3827 3827 pacf_new=numpy.copy((self.pacf_full-dataOut.range1[i])/dataOut.DH)
3828 3828 sacf_new=numpy.copy(self.sacf_full/dataOut.DH)
3829 3829 dataOut.acfs_to_save[i,:]=numpy.copy(pacf_new)
3830 3830 dataOut.acfs_error_to_save[i,:]=numpy.copy(sacf_new)
3831 3831 dataOut.acfs_to_plot[i,:]=numpy.copy(self.pacf_full)
3832 3832 dataOut.acfs_error_to_plot[i,:]=numpy.copy(self.sacf_full)
3833 3833 dataOut.lags_to_plot[i,:]=numpy.copy(self.taup_full)
3834 3834 dataOut.x_igcej_to_plot[i,:]=numpy.copy(self.x_igcej)
3835 3835 dataOut.x_ibad_to_plot[i,:]=numpy.copy(self.x_ibad)
3836 3836 dataOut.y_igcej_to_plot[i,:]=numpy.copy(self.y_igcej)
3837 3837 dataOut.y_ibad_to_plot[i,:]=numpy.copy(self.y_ibad)
3838 3838
3839 3839 missing=numpy.nan#-32767
3840 3840 #print("dataOut.igcej",dataOut.igcej)
3841 3841 #print("dataOut.ibad",dataOut.ibad)
3842 3842 for i in range(dataOut.NSHTS,dataOut.NDP):
3843 3843 for j in range(dataOut.DPL):
3844 3844 dataOut.acfs_to_save[i,j]=missing
3845 3845 dataOut.acfs_error_to_save[i,j]=missing
3846 3846 dataOut.acfs_to_plot[i,j]=missing
3847 3847 dataOut.acfs_error_to_plot[i,j]=missing
3848 3848 dataOut.lags_to_plot[i,j]=missing
3849 3849 dataOut.x_igcej_to_plot[i,j]=missing
3850 3850 dataOut.x_ibad_to_plot[i,j]=missing
3851 3851 dataOut.y_igcej_to_plot[i,j]=missing
3852 3852 dataOut.y_ibad_to_plot[i,j]=missing
3853 3853
3854 3854 dataOut.acfs_to_save=dataOut.acfs_to_save.transpose()
3855 3855 dataOut.acfs_error_to_save=dataOut.acfs_error_to_save.transpose()
3856 3856
3857 3857 return dataOut
3858 3858
3859 3859
3860 3860 class CohInt(Operation):
3861 3861
3862 3862 isConfig = False
3863 3863 __profIndex = 0
3864 3864 __byTime = False
3865 3865 __initime = None
3866 3866 __lastdatatime = None
3867 3867 __integrationtime = None
3868 3868 __buffer = None
3869 3869 __bufferStride = []
3870 3870 __dataReady = False
3871 3871 __profIndexStride = 0
3872 3872 __dataToPutStride = False
3873 3873 n = None
3874 3874
3875 3875 def __init__(self, **kwargs):
3876 3876
3877 3877 Operation.__init__(self, **kwargs)
3878 3878
3879 3879 # self.isConfig = False
3880 3880
3881 3881 def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False):
3882 3882 """
3883 3883 Set the parameters of the integration class.
3884 3884
3885 3885 Inputs:
3886 3886
3887 3887 n : Number of coherent integrations
3888 3888 timeInterval : Time of integration. Ignored when the parameter "n" is given
3889 3889 overlapping : If True, keep a rolling buffer of the last n profiles (overlapping integration)
3890 3890 """
3891 3891
3892 3892 self.__initime = None
3893 3893 self.__lastdatatime = 0
3894 3894 self.__buffer = None
3895 3895 self.__dataReady = False
3896 3896 self.byblock = byblock
3897 3897 self.stride = stride
3898 3898
3899 3899 if n == None and timeInterval == None:
3900 3900 raise ValueError("n or timeInterval should be specified ...")
3901 3901
3902 3902 if n != None:
3903 3903 self.n = n
3904 3904 self.__byTime = False
3905 3905 else:
3906 3906 self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
3907 3907 self.n = 9999
3908 3908 self.__byTime = True
3909 3909
3910 3910 if overlapping:
3911 3911 self.__withOverlapping = True
3912 3912 self.__buffer = None
3913 3913 else:
3914 3914 self.__withOverlapping = False
3915 3915 self.__buffer = 0
3916 3916
3917 3917 self.__profIndex = 0
3918 3918
3919 3919 def putData(self, data):
3920 3920
3921 3921 """
3922 3922 Add a profile to the __buffer and increase __profileIndex by one
3923 3923
3924 3924 """
3925 3925
3926 3926 if not self.__withOverlapping:
3927 3927 self.__buffer += data.copy()
3928 3928 self.__profIndex += 1
3929 3929 return
3930 3930
3931 3931 #Overlapping data
3932 3932 nChannels, nHeis = data.shape
3933 3933 data = numpy.reshape(data, (1, nChannels, nHeis))
3934 3934
3935 3935 #If the buffer is empty then it takes the data value
3936 3936 if self.__buffer is None:
3937 3937 self.__buffer = data
3938 3938 self.__profIndex += 1
3939 3939 return
3940 3940
3941 3941 #If the buffer length is lower than n then stack the data value
3942 3942 if self.__profIndex < self.n:
3943 3943 self.__buffer = numpy.vstack((self.__buffer, data))
3944 3944 self.__profIndex += 1
3945 3945 return
3946 3946
3947 3947 #If the buffer length is equal to n then replace the last buffer value with the data value
3948 3948 self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
3949 3949 self.__buffer[self.n-1] = data
3950 3950 self.__profIndex = self.n
3951 3951 return
3952 3952
3953 3953
3954 3954 def pushData(self):
3955 3955 """
3956 3956 Return the sum of the last profiles and the profiles used in the sum.
3957 3957
3958 3958 Affected:
3959 3959
3960 3960 self.__profileIndex
3961 3961
3962 3962 """
3963 3963
3964 3964 if not self.__withOverlapping:
3965 3965 data = self.__buffer
3966 3966 n = self.__profIndex
3967 3967
3968 3968 self.__buffer = 0
3969 3969 self.__profIndex = 0
3970 3970
3971 3971 return data, n
3972 3972
3973 3973 #Integration with Overlapping
3974 3974 data = numpy.sum(self.__buffer, axis=0)
3975 3975 # print data
3976 3976 # raise
3977 3977 n = self.__profIndex
3978 3978
3979 3979 return data, n
3980 3980
3981 3981 def byProfiles(self, data):
3982 3982
3983 3983 self.__dataReady = False
3984 3984 avgdata = None
3985 3985 # n = None
3986 3986 # print data
3987 3987 # raise
3988 3988 self.putData(data)
3989 3989
3990 3990 if self.__profIndex == self.n:
3991 3991 avgdata, n = self.pushData()
3992 3992 self.__dataReady = True
3993 3993
3994 3994 return avgdata
3995 3995
3996 3996 def byTime(self, data, datatime):
3997 3997
3998 3998 self.__dataReady = False
3999 3999 avgdata = None
4000 4000 n = None
4001 4001
4002 4002 self.putData(data)
4003 4003
4004 4004 if (datatime - self.__initime) >= self.__integrationtime:
4005 4005 avgdata, n = self.pushData()
4006 4006 self.n = n
4007 4007 self.__dataReady = True
4008 4008
4009 4009 return avgdata
4010 4010
4011 4011 def integrateByStride(self, data, datatime):
4012 4012 # print data
4013 4013 if self.__profIndex == 0:
4014 4014 self.__buffer = [[data.copy(), datatime]]
4015 4015 else:
4016 4016 self.__buffer.append([data.copy(),datatime])
4017 4017 self.__profIndex += 1
4018 4018 self.__dataReady = False
4019 4019
4020 4020 if self.__profIndex == self.n * self.stride :
4021 4021 self.__dataToPutStride = True
4022 4022 self.__profIndexStride = 0
4023 4023 self.__profIndex = 0
4024 4024 self.__bufferStride = []
4025 4025 for i in range(self.stride):
4026 4026 current = self.__buffer[i::self.stride]
4027 4027 data = numpy.sum([t[0] for t in current], axis=0)
4028 4028 avgdatatime = numpy.average([t[1] for t in current])
4029 4029 # print data
4030 4030 self.__bufferStride.append((data, avgdatatime))
4031 4031
4032 4032 if self.__dataToPutStride:
4033 4033 self.__dataReady = True
4034 4034 self.__profIndexStride += 1
4035 4035 if self.__profIndexStride == self.stride:
4036 4036 self.__dataToPutStride = False
4037 4037 # print self.__bufferStride[self.__profIndexStride - 1]
4038 4038 # raise
4039 4039 return self.__bufferStride[self.__profIndexStride - 1]
4040 4040
4041 4041
4042 4042 return None, None
4043 4043
4044 4044 def integrate(self, data, datatime=None):
4045 4045
4046 4046 if self.__initime == None:
4047 4047 self.__initime = datatime
4048 4048
4049 4049 if self.__byTime:
4050 4050 avgdata = self.byTime(data, datatime)
4051 4051 else:
4052 4052 avgdata = self.byProfiles(data)
4053 4053
4054 4054
4055 4055 self.__lastdatatime = datatime
4056 4056
4057 4057 if avgdata is None:
4058 4058 return None, None
4059 4059
4060 4060 avgdatatime = self.__initime
4061 4061
4062 4062 deltatime = datatime - self.__lastdatatime
4063 4063
4064 4064 if not self.__withOverlapping:
4065 4065 self.__initime = datatime
4066 4066 else:
4067 4067 self.__initime += deltatime
4068 4068
4069 4069 return avgdata, avgdatatime
4070 4070
4071 4071 def integrateByBlock(self, dataOut):
4072 4072
4073 4073 times = int(dataOut.data.shape[1]/self.n)
4074 4074 avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex)
4075 4075
4076 4076 id_min = 0
4077 4077 id_max = self.n
4078 4078
4079 4079 for i in range(times):
4080 4080 junk = dataOut.data[:,id_min:id_max,:]
4081 4081 avgdata[:,i,:] = junk.sum(axis=1)
4082 4082 id_min += self.n
4083 4083 id_max += self.n
4084 4084
4085 4085 timeInterval = dataOut.ippSeconds*self.n
4086 4086 avgdatatime = (times - 1) * timeInterval + dataOut.utctime
4087 4087 self.__dataReady = True
4088 4088 return avgdata, avgdatatime
4089 4089
4090 4090 def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs):
4091 4091
4092 4092 if not self.isConfig:
4093 4093 self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs)
4094 4094 self.isConfig = True
4095 4095 #print("inside")
4096 4096 if dataOut.flagDataAsBlock:
4097 4097 """
4098 4098 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
4099 4099 """
4100 4100
4101 4101 avgdata, avgdatatime = self.integrateByBlock(dataOut)
4102 4102 dataOut.nProfiles /= self.n
4103 4103 else:
4104 4104 if stride is None:
4105 4105 avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)
4106 4106 else:
4107 4107 avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime)
4108 4108
4109 4109
4110 4110 # dataOut.timeInterval *= n
4111 4111 dataOut.flagNoData = True
4112 4112
4113 4113 if self.__dataReady:
4114 4114 dataOut.data = avgdata
4115 4115 if not dataOut.flagCohInt:
4116 4116 dataOut.nCohInt *= self.n
4117 4117 dataOut.flagCohInt = True
4118 4118 dataOut.utctime = avgdatatime
4119 4119 # print avgdata, avgdatatime
4120 4120 # raise
4121 4121 # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
4122 4122 dataOut.flagNoData = False
4123 4123 return dataOut
4124 4124
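# A minimal numerical sketch of the non-overlapping integration implemented by
# putData()/pushData() above (array sizes are illustrative only): n consecutive
# complex profiles are summed sample by sample, after which nCohInt is
# multiplied by n.
#
#   import numpy
#   n = 4
#   profiles = [numpy.random.randn(2, 100) + 1j * numpy.random.randn(2, 100)
#               for _ in range(n)]            # each profile: (nChannels, nHeights)
#   integrated = sum(profiles)                # same result as n putData() calls + pushData()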
4125 4125 class TimesCode(Operation):
4126 4126 '''
4127 4127 Written by R. Flores
4128 4128 '''
4129 4129 """
4130 4130
4131 4131 """
4132 4132
4133 4133 def __init__(self, **kwargs):
4134 4134
4135 4135 Operation.__init__(self, **kwargs)
4136 4136
4137 4137 def run(self,dataOut,code):
4138 4138
4139 4139 #code = numpy.repeat(code, repeats=osamp, axis=1)
4140 4140 nCodes = numpy.shape(code)[1]
4141 4141 #nprofcode = dataOut.nProfiles//nCodes
4142 4142 code = numpy.array(code)
4143 4143 #print("nHeights",dataOut.nHeights)
4144 4144 #print("nheicode",nheicode)
4145 4145 #print("Code.Shape",numpy.shape(code))
4146 4146 #print("Code",code[0,:])
4147 4147 nheicode = dataOut.nHeights//nCodes
4148 4148 res = dataOut.nHeights%nCodes
4149 4149 '''
4150 4150 buffer = numpy.zeros((dataOut.nChannels,
4151 4151 nprofcode,
4152 4152 nCodes,
4153 4153 ndataOut.nHeights),
4154 4154 dtype='complex')
4155 4155 '''
4156 4156 #exit(1)
4157 4157 #for ipr in range(dataOut.nProfiles):
4158 4158 #print(dataOut.nHeights)
4159 4159 #print(dataOut.data[0,384-2:])
4160 4160 #print(dataOut.profileIndex)
4161 4161 #print(dataOut.data[0,:2])
4162 4162 #print(dataOut.data[0,0:64])
4163 4163 #print(dataOut.data[0,64:64+64])
4164 4164 #exit(1)
4165 4165 for ich in range(dataOut.nChannels):
4166 4166 for ihe in range(nheicode):
4167 4167 #print(ihe*nCodes)
4168 4168 #print((ihe+1)*nCodes)
4169 4169 #dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)]
4170 4170 #code[ipr,:]
4171 4171 #print("before",dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)])
4172 4172 #dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)] = numpy.prod([dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)],code[ipr,:]],axis=0)
4173 4173 dataOut.data[ich,ihe*nCodes:nCodes*(ihe+1)] = numpy.prod([dataOut.data[ich,ihe*nCodes:nCodes*(ihe+1)],code[dataOut.profileIndex,:]],axis=0)
4174 4174
4175 4175 #print("after",dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)])
4176 4176 #exit(1)
4177 4177 #print(dataOut.data[0,:2])
4178 4178 #exit(1)
4179 4179 #print(nheicode)
4180 4180 #print((nheicode)*nCodes)
4181 4181 #print(((nheicode)*nCodes)+res)
4182 4182 if res != 0:
4183 4183 for ich in range(dataOut.nChannels):
4184 4184 dataOut.data[ich,nheicode*nCodes:] = numpy.prod([dataOut.data[ich,nheicode*nCodes:],code[dataOut.profileIndex,:res]],axis=0)
4185 4185
4186 4186 #pass
4187 4187 #print(dataOut.data[0,384-2:])
4188 4188 #exit(1)
4189 4189 #dataOut.data = numpy.mean(buffer,axis=1)
4190 4190 #print(numpy.shape(dataOut.data))
4191 4191 #print(dataOut.nHeights)
4192 4192 #dataOut.heightList = dataOut.heightList[0:nheicode]
4193 4193 #print(dataOut.nHeights)
4194 4194 #dataOut.nHeights = numpy.shape(dataOut.data)[2]
4195 4195 #print(numpy.shape(dataOut.data))
4196 4196 #exit(1)
4197 4197
4198 4198 return dataOut
4199 4199
4200 4200
4201 4201 class RemoveDcHae(Operation):
4202 4202 '''
4203 4203 Written by R. Flores
4204 4204 '''
4205 4205 def __init__(self, **kwargs):
4206 4206
4207 4207 Operation.__init__(self, **kwargs)
4208 4208 self.DcCounter = 0
4209 4209
4210 4210 def run(self, dataOut):
4211 4211
4212 4212 if self.DcCounter == 0:
4213 4213 dataOut.DcHae = numpy.zeros((dataOut.data.shape[0],320),dtype='complex')
4214 4214 #dataOut.DcHae = []
4215 4215 self.DcCounter = 1
4216 4216
4217 4217 dataOut.dataaux = numpy.copy(dataOut.data)
4218 4218
4219 4219 #dataOut.DcHae += dataOut.dataaux[:,1666:1666+320]
4220 4220 dataOut.DcHae += dataOut.dataaux[:,0:0+320]
4221 4221 hei = 1666
4222 4222 hei = 2000
4223 4223 hei = 1000
4224 4224 hei = 0
4225 4225 #dataOut.DcHae = numpy.concatenate([dataOut.DcHae,dataOut.dataaux[0,hei]],axis = None)
4226 4226
4227 4227
4228 4228
4229 4229 return dataOut
4230 4230
4231 4231
4232 4232 class SSheightProfiles(Operation):
4233 4233
4234 4234 step = None
4235 4235 nsamples = None
4236 4236 bufferShape = None
4237 4237 profileShape = None
4238 4238 sshProfiles = None
4239 4239 profileIndex = None
4240 4240
4241 4241 def __init__(self, **kwargs):
4242 4242
4243 4243 Operation.__init__(self, **kwargs)
4244 4244 self.isConfig = False
4245 4245
4246 4246 def setup(self,dataOut ,step = None , nsamples = None):
4247 4247
4248 4248 if step == None and nsamples == None:
4249 4249 raise ValueError("step or nheights should be specified ...")
4250 4250
4251 4251 self.step = step
4252 4252 self.nsamples = nsamples
4253 4253 self.__nChannels = dataOut.nChannels
4254 4254 self.__nProfiles = dataOut.nProfiles
4255 4255 self.__nHeis = dataOut.nHeights
4256 4256 shape = dataOut.data.shape #nchannels, nprofiles, nsamples
4257 4257
4258 4258 residue = (shape[1] - self.nsamples) % self.step
4259 4259 if residue != 0:
4260 4260 print("The residue is %d, step=%d should be multiple of %d to avoid loss of %d samples"%(residue,step,shape[1] - self.nsamples,residue))
4261 4261
4262 4262 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
4263 4263 numberProfile = self.nsamples
4264 4264 numberSamples = (shape[1] - self.nsamples)/self.step
4265 4265
4266 4266 self.bufferShape = int(shape[0]), int(numberSamples), int(numberProfile) # nchannels, nsamples , nprofiles
4267 4267 self.profileShape = int(shape[0]), int(numberProfile), int(numberSamples) # nchannels, nprofiles, nsamples
4268 4268 print("buffer shape: ", self.bufferShape)
4269 4269 self.buffer = numpy.zeros(self.bufferShape , dtype=numpy.complex)
4270 4270 self.sshProfiles = numpy.zeros(self.profileShape, dtype=numpy.complex)
4271 4271
4272 4272 def run(self, dataOut, step, nsamples, code = None, repeat = None):
4273 4273 dataOut.flagNoData = True
4274 4274
4275 4275 profileIndex = None
4276 4276 #print(dataOut.getFreqRange(1)/1000.)
4277 4277 #exit(1)
4278 4278 '''
4279 4279 if dataOut.flagDataAsBlock:
4280 4280 dataOut.data = numpy.average(dataOut.data,axis=1)
4281 4281 #print("jee")
4282 4282 '''
4283 4283 dataOut.flagDataAsBlock = False
4284 4284 if not self.isConfig:
4285 4285 self.setup(dataOut, step=step , nsamples=nsamples)
4286 4286 self.isConfig = True
4287 4287
4288 4288 #DC_Hae = numpy.array([0.398+0.588j, -0.926+0.306j, -0.536-0.682j, -0.072+0.53j, 0.368-0.356j, 0.996+0.362j])
4289 4289 #DC_Hae = numpy.array([ 0.001025 +0.0516375j, 0.03485 +0.20923125j, -0.168 -0.02720625j,
4290 4290 #-0.1105375 +0.0707125j, -0.20309375-0.09670625j, 0.189775 +0.02716875j])*(-3.5)
4291 4291
4292 4292 #DC_Hae = numpy.array([ -32.26 +8.66j, -32.26 +8.66j])
4293 4293
4294 4294 #DC_Hae = numpy.array([-2.78500000e-01 -1.39175j, -6.63237294e+02+210.4268625j])
4295 4295
4296 4296 #print(dataOut.data[0,13:15])
4297 4297 #dataOut.data = dataOut.data - DC_Hae[:,None]
4298 4298 #print(dataOut.data[0,13:15])
4299 4299 #exit(1)
4300 4300 if code is not None:
4301 4301 code = numpy.array(code)
4302 4302 code_block = code
4303 4303 '''
4304 4304 roll = 0
4305 4305 code = numpy.roll(code,roll,axis=0)
4306 4306 code = numpy.reshape(code,(5,100,64))
4307 4307 block = dataOut.CurrentBlock%5
4308 4308
4309 4309 day_dif = 0 #day_19_Oct_2021: 3
4310 4310 code_block = code[block-1-day_dif,:,:]
4311 4311 '''
4312 4312 #print("hey i'm here!!!!!!!!!!!!!",code_block)
4313 4313 if repeat is not None:
4314 4314 code_block = numpy.repeat(code_block, repeats=repeat, axis=1)
4315 4315 #print("REPEAT!!!!!!!!!!!!!",code_block)
4316 4316
4317 4317 for i in range(self.buffer.shape[1]):
4318 4318 #exit(1)
4319 4319 if code is not None:
4320 4320 #self.buffer[:,i] = dataOut.data[:,i*self.step:i*self.step + self.nsamples]*code_block[dataOut.profileIndex,:]
4321 4321 #print("BEFORE: ", dataOut.data[:,i*self.step:i*self.step + self.nsamples])
4322 4322 self.buffer[:,i] = dataOut.data[:,i*self.step:i*self.step + self.nsamples]*code_block
4323 4323 #print("AFTER: ", self.buffer[:,i])
4324 4324
4325 4325 else:
4326 4326
4327 4327 self.buffer[:,i] = dataOut.data[:,i*self.step:i*self.step + self.nsamples]#*code[dataOut.profileIndex,:]
4328 4328
4329 4329 #self.buffer[:,j,self.__nHeis-j*self.step - self.nheights:self.__nHeis-j*self.step] = numpy.flip(dataOut.data[:,j*self.step:j*self.step + self.nheights])
4330 4330
4331 4331 for j in range(self.buffer.shape[0]):
4332 4332 self.sshProfiles[j] = numpy.transpose(self.buffer[j])
4333 4333
4334 4334 profileIndex = self.nsamples
4335 4335 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
4336 4336 ippSeconds = (deltaHeight*1.0e-6)/(0.15)
4337 4337 #print "ippSeconds",ippSeconds
4338 4338 try:
4339 4339 if dataOut.concat_m is not None:
4340 4340 ippSeconds= ippSeconds/float(dataOut.concat_m)
4341 4341 #print "Profile concat %d"%dataOut.concat_m
4342 4342 except:
4343 4343 pass
4344 4344
4345 4345 dataOut.data = self.sshProfiles
4346 4346 dataOut.flagNoData = False
4347 4347 dataOut.heightList = numpy.arange(self.buffer.shape[1]) *self.step*deltaHeight + dataOut.heightList[0]
4348 4348 dataOut.nProfiles = int(dataOut.nProfiles*self.nsamples)
4349 4349
4350 4350 dataOut.profileIndex = profileIndex
4351 4351 dataOut.flagDataAsBlock = True
4352 4352 dataOut.ippSeconds = ippSeconds
4353 4353 dataOut.step = self.step
4354 4354
4355 4355
4356 4356 #print("SSH")
4357 4357 #print(numpy.shape(dataOut.data))
4358 4358 #exit(1)
4359 4359 #print(dataOut.data[0,:,150])
4360 4360 #exit(1)
4361 4361 #print(dataOut.data[0,:,0]*numpy.conjugate(dataOut.data[0,0,0]))
4362 4362 #exit(1)
4363 4363
4364 4364 return dataOut
4365 4365
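# Shape bookkeeping sketch for SSheightProfiles (the sizes are illustrative,
# not taken from any real experiment): each incoming profile of nHeights
# samples is cut into windows of `nsamples` samples taken every `step` samples;
# the windows become the new profiles and the window count the new height axis.
#
#   import numpy
#   nChannels, nHeights, step, nsamples = 2, 1000, 1, 64
#   data = numpy.zeros((nChannels, nHeights), dtype=complex)
#   nWindows = (nHeights - nsamples) // step
#   buf = numpy.stack([data[:, i*step:i*step + nsamples] for i in range(nWindows)], axis=1)
#   out = buf.transpose(0, 2, 1)              # (nChannels, nsamples, nWindows), like dataOut.data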
4366 4366 class removeDCHAE(Operation):
4367 4367
4368 4368 def run(self, dataOut, minHei, maxHei):
4369 4369
4370 4370 heights = dataOut.heightList
4371 4371
4372 4372 inda = numpy.where(heights >= minHei)
4373 4373 indb = numpy.where(heights <= maxHei)
4374 4374
4375 4375 minIndex = inda[0][0]
4376 4376 maxIndex = indb[0][-1]
4377 4377
4378 4378 dc = numpy.average(dataOut.data[:,minIndex:maxIndex],axis=1)
4379 4379 #print(dc.shape)
4380 4380 dataOut.data = dataOut.data - dc[:,None]
4381 4381 #print(aux.shape)
4382 4382 #exit(1)
4383 4383
4384 4384 return dataOut
4385 4385
4386 4386 class Decoder_Original(Operation):
4387 4387
4388 4388 isConfig = False
4389 4389 __profIndex = 0
4390 4390
4391 4391 code = None
4392 4392
4393 4393 nCode = None
4394 4394 nBaud = None
4395 4395
4396 4396 def __init__(self, **kwargs):
4397 4397
4398 4398 Operation.__init__(self, **kwargs)
4399 4399
4400 4400 self.times = None
4401 4401 self.osamp = None
4402 4402 # self.__setValues = False
4403 4403 self.isConfig = False
4404 4404 self.setupReq = False
4405 4405 def setup(self, code, osamp, dataOut):
4406 4406
4407 4407 self.__profIndex = 0
4408 4408
4409 4409 self.code = code
4410 4410
4411 4411 self.nCode = len(code)
4412 4412 self.nBaud = len(code[0])
4413 4413
4414 4414 if (osamp != None) and (osamp >1):
4415 4415 self.osamp = osamp
4416 4416 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
4417 4417 self.nBaud = self.nBaud*self.osamp
4418 4418
4419 4419 self.__nChannels = dataOut.nChannels
4420 4420 self.__nProfiles = dataOut.nProfiles
4421 4421 self.__nHeis = dataOut.nHeights
4422 4422 #print("self.__nHeis: ", self.__nHeis)
4423 4423 #print("self.nBaud: ", self.nBaud)
4424 4424 #exit(1)
4425 4425 if self.__nHeis < self.nBaud:
4426 4426 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
4427 4427 #print("JJE")
4428 4428 #exit(1)
4429 4429 #Frequency
4430 4430 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
4431 4431
4432 4432 __codeBuffer[:,0:self.nBaud] = self.code
4433 4433
4434 4434 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
4435 4435
4436 4436 if dataOut.flagDataAsBlock:
4437 4437
4438 4438 self.ndatadec = self.__nHeis #- self.nBaud + 1
4439 4439
4440 4440 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
4441 4441
4442 4442 else:
4443 4443
4444 4444 #Time
4445 4445 self.ndatadec = self.__nHeis #- self.nBaud + 1
4446 4446
4447 4447
4448 4448 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
4449 4449
4450 4450 def __convolutionInFreq(self, data):
4451 4451
4452 4452 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4453 4453
4454 4454 fft_data = numpy.fft.fft(data, axis=1)
4455 4455
4456 4456 conv = fft_data*fft_code
4457 4457
4458 4458 data = numpy.fft.ifft(conv,axis=1)
4459 4459
4460 4460 return data
4461 4461
4462 4462 def __convolutionInFreqOpt(self, data):
4463 4463
4464 4464 raise NotImplementedError
4465 4465
4466 4466 def __convolutionInTime(self, data):
4467 4467
4468 4468 code = self.code[self.__profIndex]
4469 4469 for i in range(self.__nChannels):
4470 4470 #aux=numpy.correlate(data[i,:], code, mode='full')
4471 4471 #print(numpy.shape(aux))
4472 4472 #print(numpy.shape(data[i,:]))
4473 4473 #print(numpy.shape(code))
4474 4474 #exit(1)
4475 4475 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
4476 4476
4477 4477 return self.datadecTime
4478 4478
4479 4479 def __convolutionByBlockInTime(self, data):
4480 4480
4481 4481 repetitions = int(self.__nProfiles / self.nCode)
4482 4482 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
4483 4483 junk = junk.flatten()
4484 4484 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
4485 4485 profilesList = range(self.__nProfiles)
4486 4486 #print(numpy.shape(self.datadecTime))
4487 4487 #print(numpy.shape(data))
4488 4488 #print(profilesList)
4489 4489 for i in range(self.__nChannels):
4490 4490 for j in profilesList:
4491 4491 #print("data.shape: ", data.shape)
4492 4492 #print("code_block: ", code_block.shape)
4493 4493 #print("corr: ", numpy.shape(numpy.correlate(data[i,j,:], code_block[j,:], mode='full')))
4494 4494 #exit(1)
4495 4495 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
4496 4496 return self.datadecTime
4497 4497
4498 4498 def __convolutionByBlockInFreq(self, data):
4499 4499
4500 4500 raise NotImplementedError("Decoder by frequency fro Blocks not implemented")
4501 4501
4502 4502
4503 4503 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4504 4504
4505 4505 fft_data = numpy.fft.fft(data, axis=2)
4506 4506
4507 4507 conv = fft_data*fft_code
4508 4508
4509 4509 data = numpy.fft.ifft(conv,axis=2)
4510 4510
4511 4511 return data
4512 4512
4513 4513
4514 4514 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
4515 4515
4516 4516 if dataOut.flagDecodeData:
4517 4517 print("This data is already decoded, recoding again ...")
4518 4518 #print("code: ", numpy.shape(code))
4519 4519 #exit(1)
4520 4520 if not self.isConfig:
4521 4521
4522 4522 if code is None:
4523 4523 if dataOut.code is None:
4524 4524 raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)
4525 4525
4526 4526 code = dataOut.code
4527 4527 else:
4528 4528 code = numpy.array(code).reshape(nCode,nBaud)
4529 4529 self.setup(code, osamp, dataOut)
4530 4530
4531 4531 self.isConfig = True
4532 4532
4533 4533 if mode == 3:
4534 4534 sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)
4535 4535
4536 4536 if times != None:
4537 4537 sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")
4538 4538
4539 4539 if self.code is None:
4540 4540 print("Fail decoding: Code is not defined.")
4541 4541 return
4542 4542
4543 4543 self.__nProfiles = dataOut.nProfiles
4544 4544 datadec = None
4545 4545
4546 4546 if mode == 3:
4547 4547 mode = 0
4548 4548
4549 4549 if dataOut.flagDataAsBlock:
4550 4550 """
4551 4551 Decoding when data has been read as a block
4552 4552 """
4553 4553
4554 4554 if mode == 0:
4555 4555 datadec = self.__convolutionByBlockInTime(dataOut.data)
4556 4556 if mode == 1:
4557 4557 datadec = self.__convolutionByBlockInFreq(dataOut.data)
4558 4558 else:
4559 4559 """
4560 4560 Decoding when data have been read profile by profile
4561 4561 """
4562 4562 if mode == 0:
4563 4563 datadec = self.__convolutionInTime(dataOut.data)
4564 4564
4565 4565 if mode == 1:
4566 4566 datadec = self.__convolutionInFreq(dataOut.data)
4567 4567
4568 4568 if mode == 2:
4569 4569 datadec = self.__convolutionInFreqOpt(dataOut.data)
4570 4570
4571 4571 if datadec is None:
4572 4572 raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)
4573 4573
4574 4574 dataOut.code = self.code
4575 4575 dataOut.nCode = self.nCode
4576 4576 dataOut.nBaud = self.nBaud
4577 4577
4578 4578 dataOut.data = datadec#/self.nBaud
4579 4579
4580 4580 #print("before",dataOut.heightList)
4581 4581 dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]
4582 4582 #print("after",dataOut.heightList)
4583 4583
4584 4584 dataOut.flagDecodeData = True # assume the data is now decoded
4585 4585
4586 4586 if self.__profIndex == self.nCode-1:
4587 4587 self.__profIndex = 0
4588 4588 return dataOut
4589 4589
4590 4590 self.__profIndex += 1
4591 4591
4592 4592 #print("SHAPE",numpy.shape(dataOut.data))
4593 4593
4594 4594 return dataOut
4595 4595 # dataOut.flagDeflipData = True # assume the data is not un-flipped
4596 4596
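# A minimal sketch of the mode=0 (time-domain) decoding done by
# __convolutionInTime above: each profile is cross-correlated with the
# transmitted code and the first nBaud-1 lags are discarded. The 5-baud Barker
# code and the echo position below are examples only, not the codes used at the
# observatory.
#
#   import numpy
#   code = numpy.array([1, 1, 1, -1, 1], dtype=float)      # Barker-5 (example)
#   profile = numpy.zeros(40)
#   profile[10:15] = code                                   # a single echo starting at sample 10
#   decoded = numpy.correlate(profile, code, mode='full')[len(code) - 1:]
#   # decoded peaks at index 10 with value numpy.dot(code, code) == 5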
4597 4597 class Decoder(Operation):
4598 4598
4599 4599 isConfig = False
4600 4600 __profIndex = 0
4601 4601
4602 4602 code = None
4603 4603
4604 4604 nCode = None
4605 4605 nBaud = None
4606 4606
4607 4607 def __init__(self, **kwargs):
4608 4608
4609 4609 Operation.__init__(self, **kwargs)
4610 4610
4611 4611 self.times = None
4612 4612 self.osamp = None
4613 4613 # self.__setValues = False
4614 4614 self.isConfig = False
4615 4615 self.setupReq = False
4616 4616 def setup(self, code, osamp, dataOut):
4617 4617
4618 4618 self.__profIndex = 0
4619 4619
4620 4620 self.code = code
4621 4621
4622 4622 self.nCode = len(code)
4623 4623 #self.nBaud = len(code[0])
4624 4624 self.nBaud = int(numpy.shape(code)[-1])
4625 4625
4626 4626 if (osamp != None) and (osamp >1):
4627 4627 self.osamp = osamp
4628 4628 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
4629 4629 self.nBaud = self.nBaud*self.osamp
4630 4630
4631 4631 self.__nChannels = dataOut.nChannels
4632 4632 self.__nProfiles = dataOut.nProfiles
4633 4633 self.__nHeis = dataOut.nHeights
4634 4634 #print("self.__nHeis: ", self.__nHeis)
4635 4635 #print("self.nBaud: ", self.nBaud)
4636 4636 #exit(1)
4637 4637 if self.__nHeis < self.nBaud:
4638 4638 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
4639 4639 #print("JJE")
4640 4640 #exit(1)
4641 4641 '''
4642 4642 #Frequency
4643 4643 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
4644 4644
4645 4645 __codeBuffer[:,0:self.nBaud] = self.code
4646 4646
4647 4647 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
4648 4648 '''
4649 4649 if dataOut.flagDataAsBlock:
4650 4650
4651 4651 self.ndatadec = self.__nHeis #- self.nBaud + 1
4652 4652
4653 4653 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
4654 4654
4655 4655 else:
4656 4656
4657 4657 #Time
4658 4658 self.ndatadec = self.__nHeis #- self.nBaud + 1
4659 4659
4660 4660
4661 4661 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
4662 4662
4663 4663 def __convolutionInFreq(self, data):
4664 4664
4665 4665 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4666 4666
4667 4667 fft_data = numpy.fft.fft(data, axis=1)
4668 4668
4669 4669 conv = fft_data*fft_code
4670 4670
4671 4671 data = numpy.fft.ifft(conv,axis=1)
4672 4672
4673 4673 return data
4674 4674
4675 4675 def __convolutionInFreqOpt(self, data):
4676 4676
4677 4677 raise NotImplementedError
4678 4678
4679 4679 def __convolutionInTime(self, data):
4680 4680
4681 4681 code = self.code[self.__profIndex]
4682 4682 for i in range(self.__nChannels):
4683 4683 #aux=numpy.correlate(data[i,:], code, mode='full')
4684 4684 #print(numpy.shape(aux))
4685 4685 #print(numpy.shape(data[i,:]))
4686 4686 #print(numpy.shape(code))
4687 4687 #exit(1)
4688 4688 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
4689 4689
4690 4690 return self.datadecTime
4691 4691
4692 4692 def __convolutionByBlockInTime(self, data, AutoDecod):
4693 4693
4694 4694 if not AutoDecod:
4695 4695 repetitions = int(self.__nProfiles / self.nCode)
4696 4696 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
4697 4697 junk = junk.flatten()
4698 4698 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
4699 4699 else:
4700 4700 code_block = self.code
4701 4701 profilesList = range(self.__nProfiles)
4702 4702 #print(numpy.shape(self.datadecTime))
4703 4703 #print(numpy.shape(data))
4704 4704 #print(profilesList)
4705 4705 for i in range(self.__nChannels):
4706 4706 for j in profilesList:
4707 4707 #print("data.shape: ", data.shape)
4708 4708 #print("code_block: ", code_block.shape)
4709 4709 #print("corr: ", numpy.shape(numpy.correlate(data[i,j,:], code_block[j,:], mode='full')))
4710 4710 #exit(1)
4711 4711 if not AutoDecod:
4712 4712 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
4713 4713 else:
4714 4714 if i%2:
4715 4715 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[0,j,:], mode='full')[self.nBaud-1:]
4716 4716 else:
4717 4717 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[1,j,:], mode='full')[self.nBaud-1:]
4718 4718
4719 4719 return self.datadecTime
4720 4720
4721 4721 def __convolutionByBlockInFreq(self, data):
4722 4722
4723 4723 raise NotImplementedError("Decoder by frequency fro Blocks not implemented")
4724 4724
4725 4725
4726 4726 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4727 4727
4728 4728 fft_data = numpy.fft.fft(data, axis=2)
4729 4729
4730 4730 conv = fft_data*fft_code
4731 4731
4732 4732 data = numpy.fft.ifft(conv,axis=2)
4733 4733
4734 4734 return data
4735 4735
4736 4736
4737 4737 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None, AutoDecod = 0):
4738 4738 if dataOut.flagDecodeData:
4739 4739 print("This data is already decoded, recoding again ...")
4740 4740 #print("code: ", code, numpy.shape(code))
4741 4741
4742 4742 if not self.isConfig:
4743 4743
4744 4744 if code is None and not AutoDecod:
4745 4745 if dataOut.code is None:
4746 4746 raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)
4747 4747
4748 4748 code = dataOut.code
4749 4749 else:
4750 4750 if not AutoDecod:
4751 4751 code = numpy.array(code).reshape(nCode,nBaud)
4752 4752 else:
4753 4753 po = 0
4754 4754 print("***********AutoDecod***********")
4755 4755 code = dataOut.data[:2,:,po:po+64]
4756 4756 #print("AutoDecod Shape: ", numpy.shape(code))
4757 4757 #exit(1)
4758 4758
4759 4759 self.setup(code, osamp, dataOut)
4760 4760 self.isConfig = True
4761 4761
4762 4762 if mode == 3:
4763 4763 sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)
4764 4764
4765 4765 if times != None:
4766 4766 sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")
4767 4767
4768 4768 if self.code is None:
4769 4769 print("Fail decoding: Code is not defined.")
4770 4770 return
4771 4771
4772 4772 self.__nProfiles = dataOut.nProfiles
4773 4773 datadec = None
4774 4774
4775 4775 if mode == 3:
4776 4776 mode = 0
4777 4777
4778 4778 if dataOut.flagDataAsBlock:
4779 4779 """
4780 4780 Decoding when data has been read as a block
4781 4781 """
4782 4782
4783 4783 if mode == 0:
4784 4784 datadec = self.__convolutionByBlockInTime(dataOut.data, AutoDecod)
4785 4785 if mode == 1:
4786 4786 datadec = self.__convolutionByBlockInFreq(dataOut.data)
4787 4787 else:
4788 4788 """
4789 4789 Decoding when data have been read profile by profile
4790 4790 """
4791 4791 if mode == 0:
4792 4792 datadec = self.__convolutionInTime(dataOut.data)
4793 4793
4794 4794 if mode == 1:
4795 4795 datadec = self.__convolutionInFreq(dataOut.data)
4796 4796
4797 4797 if mode == 2:
4798 4798 datadec = self.__convolutionInFreqOpt(dataOut.data)
4799 4799
4800 4800 if datadec is None:
4801 4801 raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)
4802 4802
4803 4803 dataOut.code = self.code
4804 4804 dataOut.nCode = self.nCode
4805 4805 dataOut.nBaud = self.nBaud
4806 4806
4807 4807 dataOut.data = datadec#/self.nBaud
4808 4808
4809 4809 #print("before",dataOut.heightList)
4810 4810 dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]
4811 4811 #print("after",dataOut.heightList)
4812 4812
4813 4813 dataOut.flagDecodeData = True # assume the data is now decoded
4814 4814
4815 4815 if self.__profIndex == self.nCode-1:
4816 4816 self.__profIndex = 0
4817 4817 return dataOut
4818 4818
4819 4819 self.__profIndex += 1
4820 4820
4821 4821 return dataOut
4822 4822
4823 4823
4824 4824 class DecoderRoll(Operation):
4825 4825
4826 4826 isConfig = False
4827 4827 __profIndex = 0
4828 4828
4829 4829 code = None
4830 4830
4831 4831 nCode = None
4832 4832 nBaud = None
4833 4833
4834 4834 def __init__(self, **kwargs):
4835 4835
4836 4836 Operation.__init__(self, **kwargs)
4837 4837
4838 4838 self.times = None
4839 4839 self.osamp = None
4840 4840 # self.__setValues = False
4841 4841 self.isConfig = False
4842 4842 self.setupReq = False
4843 4843 def setup(self, code, osamp, dataOut):
4844 4844
4845 4845 self.__profIndex = 0
4846 4846
4847 4847
4848 4848 self.code = code
4849 4849
4850 4850 self.nCode = len(code)
4851 4851 self.nBaud = len(code[0])
4852 4852
4853 4853 if (osamp != None) and (osamp >1):
4854 4854 self.osamp = osamp
4855 4855 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
4856 4856 self.nBaud = self.nBaud*self.osamp
4857 4857
4858 4858 self.__nChannels = dataOut.nChannels
4859 4859 self.__nProfiles = dataOut.nProfiles
4860 4860 self.__nHeis = dataOut.nHeights
4861 4861
4862 4862 if self.__nHeis < self.nBaud:
4863 4863 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
4864 4864
4865 4865 #Frequency
4866 4866 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
4867 4867
4868 4868 __codeBuffer[:,0:self.nBaud] = self.code
4869 4869
4870 4870 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
4871 4871
4872 4872 if dataOut.flagDataAsBlock:
4873 4873
4874 4874 self.ndatadec = self.__nHeis #- self.nBaud + 1
4875 4875
4876 4876 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
4877 4877
4878 4878 else:
4879 4879
4880 4880 #Time
4881 4881 self.ndatadec = self.__nHeis #- self.nBaud + 1
4882 4882
4883 4883
4884 4884 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
4885 4885
4886 4886 def __convolutionInFreq(self, data):
4887 4887
4888 4888 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4889 4889
4890 4890 fft_data = numpy.fft.fft(data, axis=1)
4891 4891
4892 4892 conv = fft_data*fft_code
4893 4893
4894 4894 data = numpy.fft.ifft(conv,axis=1)
4895 4895
4896 4896 return data
4897 4897
4898 4898 def __convolutionInFreqOpt(self, data):
4899 4899
4900 4900 raise NotImplementedError
4901 4901
4902 4902 def __convolutionInTime(self, data):
4903 4903
4904 4904 code = self.code[self.__profIndex]
4905 4905 #print("code",code[0,0])
4906 4906 for i in range(self.__nChannels):
4907 4907 #aux=numpy.correlate(data[i,:], code, mode='full')
4908 4908 #print(numpy.shape(aux))
4909 4909 #print(numpy.shape(data[i,:]))
4910 4910 #print(numpy.shape(code))
4911 4911 #exit(1)
4912 4912 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
4913 4913
4914 4914 return self.datadecTime
4915 4915
4916 4916 def __convolutionByBlockInTime(self, data):
4917 4917
4918 4918 repetitions = int(self.__nProfiles / self.nCode)
4919 4919 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
4920 4920 junk = junk.flatten()
4921 4921 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
4922 4922 profilesList = range(self.__nProfiles)
4923 4923 #print(numpy.shape(self.datadecTime))
4924 4924 #print(numpy.shape(data))
4925 4925 for i in range(self.__nChannels):
4926 4926 for j in profilesList:
4927 4927 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
4928 4928 return self.datadecTime
4929 4929
4930 4930 def __convolutionByBlockInFreq(self, data):
4931 4931
4932 4932 raise NotImplementedError("Decoder by frequency fro Blocks not implemented")
4933 4933
4934 4934
4935 4935 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
4936 4936
4937 4937 fft_data = numpy.fft.fft(data, axis=2)
4938 4938
4939 4939 conv = fft_data*fft_code
4940 4940
4941 4941 data = numpy.fft.ifft(conv,axis=2)
4942 4942
4943 4943 return data
4944 4944
4945 4945
4946 4946 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
4947 4947
4948 4948 if dataOut.flagDecodeData:
4949 4949 print("This data is already decoded, recoding again ...")
4950 4950
4951 4951
4952 4952 roll = 0
4953 4953
4954 4954 if self.isConfig:
4955 4955 code = numpy.array(code)
4956 4956
4957 4957 code = numpy.roll(code,roll,axis=0)
4958 4958 code = numpy.reshape(code,(5,100,64))
4959 4959 block = dataOut.CurrentBlock%5
4960 4960 #code = code[block-1,:,:] #NormalizeDPPower
4961 4961 code = code[block-1-1,:,:] #Next Day
4962 4962 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
4963 4963
4964 4964
4965 4965 if not self.isConfig:
4966 4966
4967 4967 if code is None:
4968 4968 if dataOut.code is None:
4969 4969 raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)
4970 4970
4971 4971 code = dataOut.code
4972 4972 else:
4973 4973 code = numpy.array(code)
4974 4974
4975 4975 #roll = 29
4976 4976 code = numpy.roll(code,roll,axis=0)
4977 4977 code = numpy.reshape(code,(5,100,64))
4978 4978 block = dataOut.CurrentBlock%5
4979 4979 code = code[block-1-1,:,:]
4980 4980 #print(code.shape())
4981 4981 #exit(1)
4982 4982
4983 4983 code = numpy.array(code).reshape(nCode,nBaud)
4984 4984 self.setup(code, osamp, dataOut)
4985 4985
4986 4986 self.isConfig = True
4987 4987
4988 4988 if mode == 3:
4989 4989 sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)
4990 4990
4991 4991 if times != None:
4992 4992 sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")
4993 4993
4994 4994 if self.code is None:
4995 4995 print("Fail decoding: Code is not defined.")
4996 4996 return
4997 4997
4998 4998 self.__nProfiles = dataOut.nProfiles
4999 4999 datadec = None
5000 5000
5001 5001 if mode == 3:
5002 5002 mode = 0
5003 5003
5004 5004 if dataOut.flagDataAsBlock:
5005 5005 """
5006 5006 Decoding when data has been read as a block
5007 5007 """
5008 5008
5009 5009 if mode == 0:
5010 5010 datadec = self.__convolutionByBlockInTime(dataOut.data)
5011 5011 if mode == 1:
5012 5012 datadec = self.__convolutionByBlockInFreq(dataOut.data)
5013 5013 else:
5014 5014 """
5015 5015 Decoding when data have been read profile by profile
5016 5016 """
5017 5017 if mode == 0:
5018 5018 datadec = self.__convolutionInTime(dataOut.data)
5019 5019
5020 5020 if mode == 1:
5021 5021 datadec = self.__convolutionInFreq(dataOut.data)
5022 5022
5023 5023 if mode == 2:
5024 5024 datadec = self.__convolutionInFreqOpt(dataOut.data)
5025 5025
5026 5026 if datadec is None:
5027 5027 raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)
5028 5028
5029 5029 dataOut.code = self.code
5030 5030 dataOut.nCode = self.nCode
5031 5031 dataOut.nBaud = self.nBaud
5032 5032
5033 5033 dataOut.data = datadec
5034 5034 #print("before",dataOut.heightList)
5035 5035 dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]
5036 5036 #print("after",dataOut.heightList)
5037 5037
5038 5038 dataOut.flagDecodeData = True # assume the data is now decoded
5039 5039
5040 5040 if self.__profIndex == self.nCode-1:
5041 5041 self.__profIndex = 0
5042 5042 return dataOut
5043 5043
5044 5044 self.__profIndex += 1
5045 5045
5046 5046 #print("SHAPE",numpy.shape(dataOut.data))
5047 5047
5048 5048 return dataOut
5049 5049
5050 5050
5051 5051 class ProfileConcat(Operation):
5052 5052
5053 5053 isConfig = False
5054 5054 buffer = None
5055 5055
5056 5056 def __init__(self, **kwargs):
5057 5057
5058 5058 Operation.__init__(self, **kwargs)
5059 5059 self.profileIndex = 0
5060 5060
5061 5061 def reset(self):
5062 5062 self.buffer = numpy.zeros_like(self.buffer)
5063 5063 self.start_index = 0
5064 5064 self.times = 1
5065 5065
5066 5066 def setup(self, data, m, n=1):
5067 5067 self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0]))
5068 5068 self.nHeights = data.shape[1]#.nHeights
5069 5069 self.start_index = 0
5070 5070 self.times = 1
5071 5071
5072 5072 def concat(self, data):
5073 5073
5074 5074 self.buffer[:,self.start_index:self.nHeights*self.times] = data.copy()
5075 5075 self.start_index = self.start_index + self.nHeights
5076 5076
5077 5077 def run(self, dataOut, m):
5078 5078 dataOut.flagNoData = True
5079 5079
5080 5080 if not self.isConfig:
5081 5081 self.setup(dataOut.data, m, 1)
5082 5082 self.isConfig = True
5083 5083
5084 5084 if dataOut.flagDataAsBlock:
5085 5085 raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False")
5086 5086
5087 5087 else:
5088 5088 self.concat(dataOut.data)
5089 5089 self.times += 1
5090 5090 if self.times > m:
5091 5091 dataOut.data = self.buffer
5092 5092 self.reset()
5093 5093 dataOut.flagNoData = False
5094 5094 # more header and dataOut properties should be updated here, for example the heights
5095 5095 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
5096 5096 xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m
5097 5097 dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight)
5098 5098 dataOut.ippSeconds *= m
5099 5099 return dataOut
5100 5100
5101 5101 class ProfileSelector(Operation):
5102 5102
5103 5103 profileIndex = None
5104 5104 # Total number of profiles
5105 5105 nProfiles = None
5106 5106
5107 5107 def __init__(self, **kwargs):
5108 5108
5109 5109 Operation.__init__(self, **kwargs)
5110 5110 self.profileIndex = 0
5111 5111
5112 5112 def incProfileIndex(self):
5113 5113
5114 5114 self.profileIndex += 1
5115 5115
5116 5116 if self.profileIndex >= self.nProfiles:
5117 5117 self.profileIndex = 0
5118 5118
5119 5119 def isThisProfileInRange(self, profileIndex, minIndex, maxIndex):
5120 5120
5121 5121 if profileIndex < minIndex:
5122 5122 return False
5123 5123
5124 5124 if profileIndex > maxIndex:
5125 5125 return False
5126 5126
5127 5127 return True
5128 5128
5129 5129 def isThisProfileInList(self, profileIndex, profileList):
5130 5130
5131 5131 if profileIndex not in profileList:
5132 5132 return False
5133 5133
5134 5134 return True
5135 5135
5136 5136 def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None):
5137 5137
5138 5138 """
5139 5139 ProfileSelector:
5140 5140
5141 5141 Inputs:
5142 5142 profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8)
5143 5143
5144 5144 profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30)
5145 5145
5146 5146 rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256))
5147 5147
5148 5148 """
5149 5149 #print("nProfiles,", self.nProfiles)
5150 5150 #print("dataOut.flagDataAsBlock", dataOut.flagDataAsBlock)
5151 5151 #print("dataOut.data", numpy.shape(dataOut.data))
5152 5152 if rangeList is not None:
5153 5153 if type(rangeList[0]) not in (tuple, list):
5154 5154 rangeList = [rangeList]
5155 5155
5156 5156 dataOut.flagNoData = True
5157 5157
5158 5158 if dataOut.flagDataAsBlock:
5159 5159 """
5160 5160 data dimension = [nChannels, nProfiles, nHeis]
5161 5161 """
5162 5162 if profileList != None:
5163 5163 dataOut.data = dataOut.data[:,profileList,:]
5164 5164
5165 5165 if profileRangeList != None:
5166 5166 minIndex = profileRangeList[0]
5167 5167 maxIndex = profileRangeList[1]
5168 5168 profileList = list(range(minIndex, maxIndex+1))
5169 5169
5170 5170 dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:]
5171 5171
5172 5172 if rangeList != None:
5173 5173
5174 5174 profileList = []
5175 5175
5176 5176 for thisRange in rangeList:
5177 5177 minIndex = thisRange[0]
5178 5178 maxIndex = thisRange[1]
5179 5179
5180 5180 profileList.extend(list(range(minIndex, maxIndex+1)))
5181 5181
5182 5182 dataOut.data = dataOut.data[:,profileList,:]
5183 5183
5184 5184 dataOut.nProfiles = len(profileList)
5185 5185 dataOut.profileIndex = dataOut.nProfiles - 1
5186 5186 dataOut.flagNoData = False
5187 5187 #print("Shape after prof select: ", numpy.shape(dataOut.data))
5188 5188 #print(dataOut.heightList)
5189 5189 #exit(1)
5190 5190 '''
5191 5191 po = 5
5192 5192 import matplotlib.pyplot as plt
5193 5193 this_data = dataOut.data[0,0,:]
5194 5194 plt.plot(this_data*numpy.conjugate(this_data),marker='*',linestyle='-')
5195 5195 plt.axvline(po)
5196 5196 plt.axvline(po+64-1)
5197 5197 plt.grid()
5198 5198 plt.show()
5199 5199 '''
5200 5200 return dataOut
5201 5201
5202 5202 """
5203 5203 data dimension = [nChannels, nHeis]
5204 5204 """
5205 5205
5206 5206 if profileList != None:
5207 5207
5208 5208 if self.isThisProfileInList(dataOut.profileIndex, profileList):
5209 5209
5210 5210 self.nProfiles = len(profileList)
5211 5211 dataOut.nProfiles = self.nProfiles
5212 5212 dataOut.profileIndex = self.profileIndex
5213 5213 dataOut.flagNoData = False
5214 5214
5215 5215 self.incProfileIndex()
5216 5216 return dataOut
5217 5217
5218 5218 if profileRangeList != None:
5219 5219
5220 5220 minIndex = profileRangeList[0]
5221 5221 maxIndex = profileRangeList[1]
5222 5222
5223 5223 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
5224 5224
5225 5225 self.nProfiles = maxIndex - minIndex + 1
5226 5226 dataOut.nProfiles = self.nProfiles
5227 5227 dataOut.profileIndex = self.profileIndex
5228 5228 dataOut.flagNoData = False
5229 5229
5230 5230 self.incProfileIndex()
5231 5231 return dataOut
5232 5232
5233 5233 if rangeList != None:
5234 5234
5235 5235 nProfiles = 0
5236 5236
5237 5237 for thisRange in rangeList:
5238 5238 minIndex = thisRange[0]
5239 5239 maxIndex = thisRange[1]
5240 5240
5241 5241 nProfiles += maxIndex - minIndex + 1
5242 5242
5243 5243 for thisRange in rangeList:
5244 5244
5245 5245 minIndex = thisRange[0]
5246 5246 maxIndex = thisRange[1]
5247 5247
5248 5248 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
5249 5249
5250 5250 self.nProfiles = nProfiles
5251 5251 dataOut.nProfiles = self.nProfiles
5252 5252 dataOut.profileIndex = self.profileIndex
5253 5253 dataOut.flagNoData = False
5254 5254
5255 5255 self.incProfileIndex()
5256 5256
5257 5257 break
5258 5258
5259 5259 return dataOut
5260 5260
5261 5261
5262 5262 if beam != None: #beam is only for AMISR data
5263 5263 if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]):
5264 5264 dataOut.flagNoData = False
5265 5265 dataOut.profileIndex = self.profileIndex
5266 5266
5267 5267 self.incProfileIndex()
5268 5268
5269 5269 return dataOut
5270 5270
5271 5271 raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter")
5272 5272
5273 5273 #return False
5274 5274 return dataOut
5275 5275
5276 5276 class Reshaper(Operation):
5277 5277
5278 5278 def __init__(self, **kwargs):
5279 5279
5280 5280 Operation.__init__(self, **kwargs)
5281 5281
5282 5282 self.__buffer = None
5283 5283 self.__nitems = 0
5284 5284
5285 5285 def __appendProfile(self, dataOut, nTxs):
5286 5286
5287 5287 if self.__buffer is None:
5288 5288 shape = (dataOut.nChannels, int(dataOut.nHeights/nTxs) )
5289 5289 self.__buffer = numpy.empty(shape, dtype = dataOut.data.dtype)
5290 5290
5291 5291 ini = dataOut.nHeights * self.__nitems
5292 5292 end = ini + dataOut.nHeights
5293 5293
5294 5294 self.__buffer[:, ini:end] = dataOut.data
5295 5295
5296 5296 self.__nitems += 1
5297 5297
5298 5298 return int(self.__nitems*nTxs)
5299 5299
5300 5300 def __getBuffer(self):
5301 5301
5302 5302 if self.__nitems == int(1./self.__nTxs):
5303 5303
5304 5304 self.__nitems = 0
5305 5305
5306 5306 return self.__buffer.copy()
5307 5307
5308 5308 return None
5309 5309
5310 5310 def __checkInputs(self, dataOut, shape, nTxs):
5311 5311
5312 5312 if shape is None and nTxs is None:
5313 5313 raise ValueError("Reshaper: shape of factor should be defined")
5314 5314
5315 5315 if nTxs:
5316 5316 if nTxs < 0:
5317 5317 raise ValueError("nTxs should be greater than 0")
5318 5318
5319 5319 if nTxs < 1 and dataOut.nProfiles % (1./nTxs) != 0:
5320 5320 raise ValueError("nProfiles= %d is not divisibled by (1./nTxs) = %f" %(dataOut.nProfiles, (1./nTxs)))
5321 5321
5322 5322 shape = [dataOut.nChannels, dataOut.nProfiles*nTxs, dataOut.nHeights/nTxs]
5323 5323
5324 5324 return shape, nTxs
5325 5325
5326 5326 if len(shape) != 2 and len(shape) != 3:
5327 5327 raise ValueError("shape dimension should be equal to 2 or 3. shape = (nProfiles, nHeis) or (nChannels, nProfiles, nHeis). Actually shape = (%d, %d, %d)" %(dataOut.nChannels, dataOut.nProfiles, dataOut.nHeights))
5328 5328
5329 5329 if len(shape) == 2:
5330 5330 shape_tuple = [dataOut.nChannels]
5331 5331 shape_tuple.extend(shape)
5332 5332 else:
5333 5333 shape_tuple = list(shape)
5334 5334 #print(shape_tuple)
5335 5335 #print(dataOut.nProfiles)
5336 5336 nTxs = 1.0*shape_tuple[1]/dataOut.nProfiles
5337 5337
5338 5338 return shape_tuple, nTxs
5339 5339
5340 5340 def run(self, dataOut, shape=None, nTxs=None):
5341 5341 #print(numpy.shape(dataOut.data))
5342 5342 #exit(1)
5343 5343 shape_tuple, self.__nTxs = self.__checkInputs(dataOut, shape, nTxs)
5344 5344 #print(self.__nTxs)
5345 5345 #print(dataOut.flagDataAsBlock)
5346 5346 dataOut.flagNoData = True
5347 5347 profileIndex = None
5348 5348
5349 5349 if dataOut.flagDataAsBlock:
5350 5350
5351 5351 dataOut.data = numpy.reshape(dataOut.data, shape_tuple)
5352 5352 dataOut.flagNoData = False
5353 5353
5354 5354 profileIndex = int(dataOut.nProfiles*self.__nTxs) - 1
5355 5355
5356 5356 else:
5357 5357
5358 5358
5359 5359 if self.__nTxs < 1:
5360 5360
5361 5361 self.__appendProfile(dataOut, self.__nTxs)
5362 5362 new_data = self.__getBuffer()
5363 5363
5364 5364 if new_data is not None:
5365 5365 dataOut.data = new_data
5366 5366 dataOut.flagNoData = False
5367 5367
5368 5368 profileIndex = dataOut.profileIndex*self.__nTxs # use the derived factor so this branch also works when only shape was given
5369 5369
5370 5370 else:
5371 5371 raise ValueError("nTxs should be greater than 0 and less than 1, or use VoltageReader(..., getblock=True)")
5372 5372
5373 5373 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
5374 5374
5375 5375 dataOut.heightList = numpy.arange(dataOut.nHeights/self.__nTxs) * deltaHeight + dataOut.heightList[0]
5376 5376
5377 5377 dataOut.nProfiles = int(dataOut.nProfiles*self.__nTxs)
5378 5378
5379 5379 dataOut.profileIndex = profileIndex
5380 5380
5381 5381 dataOut.ippSeconds /= self.__nTxs
5382 5382 #print(numpy.shape(dataOut.data))
5383 5383 #exit(1)
5384 5384
5385 5385 return dataOut
5386 5386
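# A minimal usage sketch for Reshaper (illustrative values; proc_unit is assumed to be an
# existing voltage processing unit). Either give the target block shape directly,
#
# op = proc_unit.addOperation(name='Reshaper', optype='other')
# op.addParameter(name='shape', value='(1,100,100)', format='intlist')
#
# or give only the transmitter factor nTxs, from which the shape is derived in
# __checkInputs above.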
5387 5387 class SplitProfiles(Operation):
5388 5388
5389 5389 def __init__(self, **kwargs):
5390 5390
5391 5391 Operation.__init__(self, **kwargs)
5392 5392
5393 5393 def run(self, dataOut, n):
5394 5394
5395 5395 dataOut.flagNoData = True
5396 5396 profileIndex = None
5397 5397
5398 5398 if dataOut.flagDataAsBlock:
5399 5399
5400 5400 #nchannels, nprofiles, nsamples
5401 5401 shape = dataOut.data.shape
5402 5402
5403 5403 if shape[2] % n != 0:
5404 5404 raise ValueError("Could not split the data, the number of samples (%d) has to be a multiple of n=%d" %(shape[2], n))
5405 5405
5406 5406 new_shape = shape[0], shape[1]*n, int(shape[2]/n)
5407 5407
5408 5408 dataOut.data = numpy.reshape(dataOut.data, new_shape)
5409 5409 dataOut.flagNoData = False
5410 5410
5411 5411 profileIndex = int(dataOut.nProfiles*n) - 1 # last profile index of the new, n-times longer block
5412 5412
5413 5413 else:
5414 5414
5415 5415 raise ValueError("Could not split the data when it is read profile by profile. Use VoltageReader(..., getblock=True)")
5416 5416
5417 5417 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
5418 5418
5419 5419 dataOut.heightList = numpy.arange(dataOut.nHeights/n) * deltaHeight + dataOut.heightList[0]
5420 5420
5421 5421 dataOut.nProfiles = int(dataOut.nProfiles*n)
5422 5422
5423 5423 dataOut.profileIndex = profileIndex
5424 5424
5425 5425 dataOut.ippSeconds /= n
5426 5426
5427 5427 return dataOut
5428 5428
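# A minimal usage sketch for SplitProfiles (illustrative; proc_unit is assumed). Each block
# profile is split into n shorter profiles, so nProfiles grows by a factor of n and the
# number of heights shrinks by the same factor:
#
# op = proc_unit.addOperation(name='SplitProfiles', optype='other')
# op.addParameter(name='n', value='2', format='int')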
5429 5429 class CombineProfiles(Operation):
5430 5430 def __init__(self, **kwargs):
5431 5431
5432 5432 Operation.__init__(self, **kwargs)
5433 5433
5434 5434 self.__remData = None
5435 5435 self.__profileIndex = 0
5436 5436
5437 5437 def run(self, dataOut, n):
5438 5438
5439 5439 dataOut.flagNoData = True
5440 5440 profileIndex = None
5441 5441
5442 5442 if dataOut.flagDataAsBlock:
5443 5443
5444 5444 #nchannels, nprofiles, nsamples
5445 5445 shape = dataOut.data.shape
5446 5446 new_shape = shape[0], int(shape[1]/n), shape[2]*n
5447 5447
5448 5448 if shape[1] % n != 0:
5449 5449 raise ValueError("Could not combine the data, the number of profiles (%d) has to be a multiple of n=%d" %(shape[1], n))
5450 5450
5451 5451 dataOut.data = numpy.reshape(dataOut.data, new_shape)
5452 5452 dataOut.flagNoData = False
5453 5453
5454 5454 profileIndex = int(dataOut.nProfiles/n) - 1 # last profile index of the new, n-times shorter block
5455 5455
5456 5456 else:
5457 5457
5458 5458 #nchannels, nsamples
5459 5459 if self.__remData is None:
5460 5460 newData = dataOut.data
5461 5461 else:
5462 5462 newData = numpy.concatenate((self.__remData, dataOut.data), axis=1)
5463 5463
5464 5464 self.__profileIndex += 1
5465 5465
5466 5466 if self.__profileIndex < n:
5467 5467 self.__remData = newData
5468 5468 #continue
5469 5469 return
5470 5470
5471 5471 self.__profileIndex = 0
5472 5472 self.__remData = None
5473 5473
5474 5474 dataOut.data = newData
5475 5475 dataOut.flagNoData = False
5476 5476
5477 5477 profileIndex = int(dataOut.profileIndex/n)
5478 5478
5479 5479
5480 5480 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
5481 5481
5482 5482 dataOut.heightList = numpy.arange(dataOut.nHeights*n) * deltaHeight + dataOut.heightList[0]
5483 5483
5484 5484 dataOut.nProfiles = int(dataOut.nProfiles/n)
5485 5485
5486 5486 dataOut.profileIndex = profileIndex
5487 5487
5488 5488 dataOut.ippSeconds *= n
5489 5489
5490 5490 return dataOut
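# A minimal usage sketch for CombineProfiles (illustrative; proc_unit is assumed). Groups of
# n consecutive profiles are concatenated along the height axis, so nProfiles shrinks by a
# factor of n and ippSeconds grows by the same factor:
#
# op = proc_unit.addOperation(name='CombineProfiles', optype='other')
# op.addParameter(name='n', value='2', format='int')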
5491 5491 # import collections
5492 5492 # from scipy.stats import mode
5493 5493 #
5494 5494 # class Synchronize(Operation):
5495 5495 #
5496 5496 # isConfig = False
5497 5497 # __profIndex = 0
5498 5498 #
5499 5499 # def __init__(self, **kwargs):
5500 5500 #
5501 5501 # Operation.__init__(self, **kwargs)
5502 5502 # # self.isConfig = False
5503 5503 # self.__powBuffer = None
5504 5504 # self.__startIndex = 0
5505 5505 # self.__pulseFound = False
5506 5506 #
5507 5507 # def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
5508 5508 #
5509 5509 # #Read data
5510 5510 #
5511 5511 # powerdB = dataOut.getPower(channel = channel)
5512 5512 # noisedB = dataOut.getNoise(channel = channel)[0]
5513 5513 #
5514 5514 # self.__powBuffer.extend(powerdB.flatten())
5515 5515 #
5516 5516 # dataArray = numpy.array(self.__powBuffer)
5517 5517 #
5518 5518 # filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
5519 5519 #
5520 5520 # maxValue = numpy.nanmax(filteredPower)
5521 5521 #
5522 5522 # if maxValue < noisedB + 10:
5523 5523 # #No transmission pulse was found
5524 5524 # return None
5525 5525 #
5526 5526 # maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
5527 5527 #
5528 5528 # if len(maxValuesIndex) < 2:
5529 5529 # #Only a single one-baud transmission pulse was found, waiting for the next TX
5530 5530 # return None
5531 5531 #
5532 5532 # phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
5533 5533 #
5534 5534 # #Keep only values spaced nSamples apart
5535 5535 # pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
5536 5536 #
5537 5537 # if len(pulseIndex) < 2:
5538 5538 # #Only one transmission pulse wider than 1 was found
5539 5539 # return None
5540 5540 #
5541 5541 # spacing = pulseIndex[1:] - pulseIndex[:-1]
5542 5542 #
5543 5543 # #remove signals spaced less than 10 units or samples apart
5544 5544 # #(there should be no IPP shorter than 10 units)
5545 5545 #
5546 5546 # realIndex = numpy.where(spacing > 10 )[0]
5547 5547 #
5548 5548 # if len(realIndex) < 2:
5549 5549 # #Only one transmission pulse wider than 1 was found
5550 5550 # return None
5551 5551 #
5552 5552 # #Remove wide pulses (keep only the spacing between IPPs)
5553 5553 # realPulseIndex = pulseIndex[realIndex]
5554 5554 #
5555 5555 # period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
5556 5556 #
5557 5557 # print("IPP = %d samples" %period)
5558 5558 #
5559 5559 # self.__newNSamples = dataOut.nHeights #int(period)
5560 5560 # self.__startIndex = int(realPulseIndex[0])
5561 5561 #
5562 5562 # return 1
5563 5563 #
5564 5564 #
5565 5565 # def setup(self, nSamples, nChannels, buffer_size = 4):
5566 5566 #
5567 5567 # self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
5568 5568 # maxlen = buffer_size*nSamples)
5569 5569 #
5570 5570 # bufferList = []
5571 5571 #
5572 5572 # for i in range(nChannels):
5573 5573 # bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
5574 5574 # maxlen = buffer_size*nSamples)
5575 5575 #
5576 5576 # bufferList.append(bufferByChannel)
5577 5577 #
5578 5578 # self.__nSamples = nSamples
5579 5579 # self.__nChannels = nChannels
5580 5580 # self.__bufferList = bufferList
5581 5581 #
5582 5582 # def run(self, dataOut, channel = 0):
5583 5583 #
5584 5584 # if not self.isConfig:
5585 5585 # nSamples = dataOut.nHeights
5586 5586 # nChannels = dataOut.nChannels
5587 5587 # self.setup(nSamples, nChannels)
5588 5588 # self.isConfig = True
5589 5589 #
5590 5590 # #Append new data to internal buffer
5591 5591 # for thisChannel in range(self.__nChannels):
5592 5592 # bufferByChannel = self.__bufferList[thisChannel]
5593 5593 # bufferByChannel.extend(dataOut.data[thisChannel])
5594 5594 #
5595 5595 # if self.__pulseFound:
5596 5596 # self.__startIndex -= self.__nSamples
5597 5597 #
5598 5598 # #Finding Tx Pulse
5599 5599 # if not self.__pulseFound:
5600 5600 # indexFound = self.__findTxPulse(dataOut, channel)
5601 5601 #
5602 5602 # if indexFound == None:
5603 5603 # dataOut.flagNoData = True
5604 5604 # return
5605 5605 #
5606 5606 # self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
5607 5607 # self.__pulseFound = True
5608 5608 # self.__startIndex = indexFound
5609 5609 #
5610 5610 # #If pulse was found ...
5611 5611 # for thisChannel in range(self.__nChannels):
5612 5612 # bufferByChannel = self.__bufferList[thisChannel]
5613 5613 # #print self.__startIndex
5614 5614 # x = numpy.array(bufferByChannel)
5615 5615 # self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
5616 5616 #
5617 5617 # deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
5618 5618 # dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
5619 5619 # # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
5620 5620 #
5621 5621 # dataOut.data = self.__arrayBuffer
5622 5622 #
5623 5623 # self.__startIndex += self.__newNSamples
5624 5624 #
5625 5625 # return
5626 5626
5627 5627
5628 5628
5629 5629
5630 5630
5631 5631
5632 5632
5633 5633 ##############################LONG PULSE##############################
5634 5634
5635 5635
5636 5636
5637 5637 class CrossProdHybrid(CrossProdDP):
5638 5638 """Operation to calculate cross products of the Hybrid Experiment.
5639 5639
5640 5640 Parameters:
5641 5641 -----------
5642 5642 NLAG : int
5643 5643 Number of lags for Long Pulse.
5644 5644 NRANGE : int
5645 5645 Number of samples (heights) for Long Pulse.
5646 5646 NCAL : int
5647 5647 .*
5648 5648 DPL : int
5649 5649 Number of lags for Double Pulse.
5650 5650 NDN : int
5651 5651 .*
5652 5652 NDT : int
5653 5653 Number of heights for Double Pulse.*
5654 5654 NDP : int
5655 5655 Number of heights for Double Pulse.*
5656 5656 NSCAN : int
5657 5657 Number of profiles when the transmitter is on.
5658 5658 lagind : intlist
5659 5659 .*
5660 5660 lagfirst : intlist
5661 5661 .*
5662 5662 NAVG : int
5663 5663 Number of blocks to be "averaged".
5664 5664 nkill : int
5665 5665 Number of blocks not to be considered when averaging.
5666 5666
5667 5667 Example
5668 5668 --------
5669 5669
5670 5670 op = proc_unit.addOperation(name='CrossProdHybrid', optype='other')
5671 5671 op.addParameter(name='NLAG', value='16', format='int')
5672 5672 op.addParameter(name='NRANGE', value='200', format='int')
5673 5673 op.addParameter(name='NCAL', value='0', format='int')
5674 5674 op.addParameter(name='DPL', value='11', format='int')
5675 5675 op.addParameter(name='NDN', value='0', format='int')
5676 5676 op.addParameter(name='NDT', value='67', format='int')
5677 5677 op.addParameter(name='NDP', value='67', format='int')
5678 5678 op.addParameter(name='NSCAN', value='128', format='int')
5679 5679 op.addParameter(name='lagind', value='(0,1,2,3,4,5,6,7,0,3,4,5,6,8,9,10)', format='intlist')
5680 5680 op.addParameter(name='lagfirst', value='(1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1)', format='intlist')
5681 5681 op.addParameter(name='NAVG', value='16', format='int')
5682 5682 op.addParameter(name='nkill', value='6', format='int')
5683 5683
5684 5684 """
5685 5685
5686 5686 def __init__(self, **kwargs):
5687 5687
5688 5688 Operation.__init__(self, **kwargs)
5689 5689 self.bcounter=0
5690 5690 self.aux=1
5691 5691 self.aux_cross_lp=1
5692 5692 self.lag_products_LP_median_estimates_aux=1
5693 5693
5694 5694 def get_products_cabxys_HP(self,dataOut):
5695 5695
5696 5696 if self.aux==1:
5697 5697 self.set_header_output(dataOut)
5698 5698 self.aux=0
5699 5699
5700 5700 self.cax=numpy.zeros((dataOut.NDP,dataOut.DPL,2))# hp:67x11x2 dp: 66x11x2
5701 5701 self.cay=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5702 5702 self.cbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5703 5703 self.cby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5704 5704 self.cax2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5705 5705 self.cay2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5706 5706 self.cbx2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5707 5707 self.cby2=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5708 5708 self.caxbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5709 5709 self.caxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5710 5710 self.caybx=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5711 5711 self.cayby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5712 5712 self.caxay=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
5713 5713 self.cbxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2))
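# ax/ay below are the real/imag parts of the channel-0 sample at true range j, and bx/by
# those of the channel-1 sample taken 2*n heights further out (n = lag index from lagind);
# the c* arrays accumulate their sums and pairwise products per (height, lag, flip) cell.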
5714 5714 for i in range(2): # flipped and unflipped
5715 5715 for j in range(dataOut.NDP): # loop over true ranges # 67
5716 5716 for k in range(int(dataOut.NSCAN)): # 128
5717 5717
5718 5718 n=dataOut.lagind[k%dataOut.NLAG] # 128=16x8
5719 5719
5720 5720 ax=dataOut.data[0,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT].real#-dataOut.dc.real[0]
5721 5721 ay=dataOut.data[0,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT].imag#-dataOut.dc.imag[0]
5722 5722
5723 5723 if dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n<dataOut.read_samples:
5724 5724
5725 5725 bx=dataOut.data[1,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n].real#-dataOut.dc.real[1]
5726 5726 by=dataOut.data[1,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n].imag#-dataOut.dc.imag[1]
5727 5727 #print(bx)
5728 5728 #print(by)
5729 5729 #exit(1)
5730 5730 else:
5731 5731 #print(i,j,n)
5732 5732 #exit(1)
5733 5733
5734 5734 if k+1<int(dataOut.NSCAN):
5735 5735 bx=dataOut.data[1,k+1,(dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n)%dataOut.NDP].real
5736 5736 by=dataOut.data[1,k+1,(dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n)%dataOut.NDP].imag
5737 5737 #print(bx)
5738 5738 #print(by)
5739 5739 #exit(1)
5740 5740 if k+1==int(dataOut.NSCAN):## THIS IS A PATCH SINCE THE NEXT BLOCK IS NOT AVAILABLE
5741 5741 bx=dataOut.data[1,k,(dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n)%dataOut.NDP].real
5742 5742 by=dataOut.data[1,k,(dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n)%dataOut.NDP].imag
5743 5743 #if n == 7 and j == 65:
5744 5744 #print(k)
5745 5745 #print(bx)
5746 5746 #print(by)
5747 5747 #exit(1)
5748 5748 if(k<dataOut.NLAG and dataOut.lagfirst[k%dataOut.NLAG]==1):# if(k<16 && lagfirst[k%16]==1)
5749 5749 self.cax[j][n][i]=ax
5750 5750 self.cay[j][n][i]=ay
5751 5751 self.cbx[j][n][i]=bx
5752 5752 self.cby[j][n][i]=by
5753 5753 self.cax2[j][n][i]=ax*ax
5754 5754 self.cay2[j][n][i]=ay*ay
5755 5755 self.cbx2[j][n][i]=bx*bx
5756 5756 self.cby2[j][n][i]=by*by
5757 5757 self.caxbx[j][n][i]=ax*bx
5758 5758 self.caxby[j][n][i]=ax*by
5759 5759 self.caybx[j][n][i]=ay*bx
5760 5760 self.cayby[j][n][i]=ay*by
5761 5761 self.caxay[j][n][i]=ax*ay
5762 5762 self.cbxby[j][n][i]=bx*by
5763 5763 else:
5764 5764 self.cax[j][n][i]+=ax
5765 5765 self.cay[j][n][i]+=ay
5766 5766 self.cbx[j][n][i]+=bx
5767 5767 self.cby[j][n][i]+=by
5768 5768 self.cax2[j][n][i]+=ax*ax
5769 5769 self.cay2[j][n][i]+=ay*ay
5770 5770 self.cbx2[j][n][i]+=bx*bx
5771 5771 self.cby2[j][n][i]+=by*by
5772 5772 self.caxbx[j][n][i]+=ax*bx
5773 5773 self.caxby[j][n][i]+=ax*by
5774 5774 self.caybx[j][n][i]+=ay*bx
5775 5775 self.cayby[j][n][i]+=ay*by
5776 5776 self.caxay[j][n][i]+=ax*ay
5777 5777 self.cbxby[j][n][i]+=bx*by
5778 5778 '''
5779 5779 if j == 20 and n == 10:
5780 5780 print("i: ",i)
5781 5781 print(ax+ay*1.j)
5782 5782 print("b",bx+by*1.j)
5783 5783 '''
5784 5784 #if n ==7:
5785 5785 #print()
5786 5786
5787 5787
5788 5788 # FindMe
5789 5789 pa1 = 20
5790 5790 pa2 = 0
5791 5791 #for pa1 in range(67):
5792 5792 '''
5793 5793 print(self.cax[pa1,pa2,0]+self.cax[pa1,pa2,1]+self.cay[pa1,pa2,0]*1.j+self.cay[pa1,pa2,1]*1.j)
5794 5794 print("b",self.cbx[pa1,pa2,0]+self.cbx[pa1,pa2,1]+self.cby[pa1,pa2,0]*1.j+self.cby[pa1,pa2,1]*1.j)
5795 5795 '''
5796 5796 '''
5797 5797 print(self.caxbx[pa1,pa2,0]+self.caxbx[pa1,pa2,1]+self.cayby[pa1,pa2,0]+self.cayby[pa1,pa2,1])
5798 5798 print("c",self.caybx[pa1,pa2,0]+self.caybx[pa1,pa2,1]-self.caxby[pa1,pa2,0]-self.caxby[pa1,pa2,1])
5799 5799
5800 5800 exit(1)
5801 5801 '''
5802 5802
5803 5803
5804 5804 def lag_products_LP(self,dataOut):
5805 5805
5806 5806
5807 5807 buffer=dataOut.data
5808 5808 if self.aux_cross_lp==1:
5809 5809
5810 5810 #self.dataOut.nptsfft2=150
5811 5811 self.cnorm=float((dataOut.nProfiles-dataOut.NSCAN)/dataOut.NSCAN)
5812 5812 self.lagp0=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NAVG),'complex128')
5813 5813 ww=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NSCAN,dataOut.NAVG),'complex128')
5814 5814 self.lagp1=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NAVG),'complex128')
5815 5815 self.lagp2=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NAVG),'complex128')
5816 5816 self.lagp3=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NAVG),'complex128')
5817 5817
5818 5818 #self.lagp4=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NAVG),'complex64')
5819 5819 self.aux_cross_lp=0
5820 5820
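# lagp0/lagp1/lagp2 accumulate the transmitter-on lag products (first NSCAN profiles) for
# channels 0/1/2 of the current block, while lagp3 accumulates the off-scan products of
# channel 0 rescaled by cnorm so they are comparable to an NSCAN-profile sum (noise estimate).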
5821 5821 for i in range(dataOut.NR):
5822 5822 buffer_dc=dataOut.dc[i]
5823 5823 for j in range(dataOut.NRANGE):
5824 5824
5825 5825 range_for_n=numpy.min((dataOut.NRANGE-j,dataOut.NLAG))
5826 5826
5827 5827 buffer_aux=numpy.conj(buffer[i,:dataOut.nProfiles,j])#-buffer_dc)
5828 5828 for n in range(range_for_n):
5829 5829
5830 5830 c=(buffer_aux)*(buffer[i,:dataOut.nProfiles,j+n])#-buffer_dc)
5831 5831 #print(c.dtype)
5832 5832 #print(self.lagp0.dtype)
5833 5833 #exit(1)
5834 5834 if i==0:
5835 5835 self.lagp0[n][j][self.bcounter-1]=numpy.sum(c[:dataOut.NSCAN])
5836 5836 #ww[n,j,:,self.bcounter-1]=c[:dataOut.NSCAN]
5837 5837 self.lagp3[n][j][self.bcounter-1]=numpy.sum(c[dataOut.NSCAN:]/self.cnorm)
5838 5838 elif i==1:
5839 5839 self.lagp1[n][j][self.bcounter-1]=numpy.sum(c[:dataOut.NSCAN])
5840 5840 elif i==2:
5841 5841 self.lagp2[n][j][self.bcounter-1]=numpy.sum(c[:dataOut.NSCAN])
5842 5842 #if i==2 and j==30 and n==0:
5843 5843 #print(c[:dataOut.NSCAN])
5844 5844 #print(numpy.sum(c[:dataOut.NSCAN]))
5845 5845
5846 5846 self.lagp0[:,:,self.bcounter-1]=numpy.conj(self.lagp0[:,:,self.bcounter-1])
5847 5847 self.lagp1[:,:,self.bcounter-1]=numpy.conj(self.lagp1[:,:,self.bcounter-1])
5848 5848 self.lagp2[:,:,self.bcounter-1]=numpy.conj(self.lagp2[:,:,self.bcounter-1])
5849 5849 self.lagp3[:,:,self.bcounter-1]=numpy.conj(self.lagp3[:,:,self.bcounter-1])
5850 5850 #print(self.lagp3[2,197,0])
5851 5851 #print(self.lagp0[0,0,self.bcounter-1])
5852 5852 #print(sum(self.buffer[3,:,199,2]))
5853 5853 #print(self.cnorm)
5854 5854 #exit(1)
5855 5855 #print("self,lagp0: ", self.lagp0[0,0,self.bcounter-1])
5856 5856 #print(ww[:,0,0,self.bcounter-1])
5857 5857 #exit(1)
5858 5858
5859 5859
5860 5860 def LP_median_estimates_original(self,dataOut):
5861 5861
5862 5862 if self.bcounter==dataOut.NAVG:
5863 5863
5864 5864 if self.lag_products_LP_median_estimates_aux==1:
5865 5865 self.output=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),'complex128')
5866 5866 self.lag_products_LP_median_estimates_aux=0
5867 5867
5868 5868 #print("self,lagp0: ", numpy.sum(self.lagp0[0,0,:]))
5869 5869 for i in range(dataOut.NLAG):
5870 5870 for j in range(dataOut.NRANGE):
5871 5871 for l in range(4): #four outputs
5872 5872 for k in range(dataOut.NAVG):
5873 5873 if k==0:
5874 5874 self.output[i,j,l]=0.0+0.j
5875 5875 if l==0:
5876 5876 self.lagp0[i,j,:]=sorted(self.lagp0[i,j,:], key=lambda x: x.real) #sorted(self.lagp0[i,j,:].real)
5877 5877 if l==1:
5878 5878 self.lagp1[i,j,:]=sorted(self.lagp1[i,j,:], key=lambda x: x.real) #sorted(self.lagp1[i,j,:].real)
5879 5879 if l==2:
5880 5880 self.lagp2[i,j,:]=sorted(self.lagp2[i,j,:], key=lambda x: x.real) #sorted(self.lagp2[i,j,:].real)
5881 5881 if l==3:
5882 5882 self.lagp3[i,j,:]=sorted(self.lagp3[i,j,:], key=lambda x: x.real) #sorted(self.lagp3[i,j,:].real)
5883 5883 #print(self.lagp0[2,100,1]);exit(1)
5884 5884 if k>=dataOut.nkill/2 and k<dataOut.NAVG-dataOut.nkill/2:
5885 5885 if l==0:
5886 5886 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp0[i,j,k])
5887 5887 if l==1:
5888 5888 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp1[i,j,k])
5889 5889 if l==2:
5890 5890 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp2[i,j,k])
5891 5891 if l==3:
5892 5892 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp3[i,j,k])
5893 5893 #if j== 30 and i==2 and l==0:
5894 5894 #print(self.output[2,30,0])
5895 5895 dataOut.output_LP=self.output
5896 5896 dataOut.data_for_RTI_LP=numpy.zeros((4,dataOut.NRANGE))
5897 5897 dataOut.data_for_RTI_LP[0],dataOut.data_for_RTI_LP[1],dataOut.data_for_RTI_LP[2],dataOut.data_for_RTI_LP[3]=self.RTI_LP(dataOut.output_LP,dataOut.NRANGE)
5898 5898 #print("output:",self.output[2,30,0])
5899 5899 #exit(1)
5900 5900
5901 5901 def LP_median_estimates(self,dataOut):
5902 5902
5903 5903 if self.bcounter==dataOut.NAVG:
5904 5904
5905 5905 if self.lag_products_LP_median_estimates_aux==1:
5906 5906 self.output=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),'complex128')
5907 5907 self.lag_products_LP_median_estimates_aux=0
5908 5908 #print("self,lagp0: ", numpy.sum(self.lagp0[0,0,:]))
5909 5909
5910 5910 for i in range(dataOut.NLAG):
5911 5911 #my_list = ([0,1,2,3,4,5,6,7]) #works up to 7, no longer at 6
5912 5912 #for i in my_list:
5913 5913 for j in range(dataOut.NRANGE):
5914 5914 for l in range(4): #four outputs
5915 5915 for k in range(dataOut.NAVG):
5916 5916 if k==0:
5917 5917 self.output[i,j,l]=0.0+0.j
5918 5918 if l==0:
5919 5919 self.lagp0[i,j,:]=sorted(self.lagp0[i,j,:], key=lambda x: x.real) #sorted(self.lagp0[i,j,:].real)
5920 5920 if l==1:
5921 5921 self.lagp1[i,j,:]=sorted(self.lagp1[i,j,:], key=lambda x: x.real) #sorted(self.lagp1[i,j,:].real)
5922 5922 if l==2:
5923 5923 self.lagp2[i,j,:]=sorted(self.lagp2[i,j,:], key=lambda x: x.real) #sorted(self.lagp2[i,j,:].real)
5924 5924 if l==3:
5925 5925 self.lagp3[i,j,:]=sorted(self.lagp3[i,j,:], key=lambda x: x.real) #sorted(self.lagp3[i,j,:].real)
5926 5926 '''
5927 5927 x = 2
5928 5928 if k>=x and k<dataOut.NAVG-dataOut.nkill/2:
5929 5929 if l==0:
5930 5930 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-x-dataOut.nkill/2))*self.lagp0[i,j,k])
5931 5931 if l==1:
5932 5932 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-x-dataOut.nkill/2))*self.lagp1[i,j,k])
5933 5933 if l==2:
5934 5934 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-x-dataOut.nkill/2))*self.lagp2[i,j,k])
5935 5935 if l==3:
5936 5936 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-x-dataOut.nkill/2))*self.lagp3[i,j,k])
5937 5937 '''
5938 5938 #'''
5939 5939 if k>=dataOut.nkill/2 and k<dataOut.NAVG-dataOut.nkill/2:
5940 5940 if l==0:
5941 5941 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp0[i,j,k])
5942 5942 if l==1:
5943 5943 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp1[i,j,k])
5944 5944 if l==2:
5945 5945 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp2[i,j,k])
5946 5946 if l==3:
5947 5947 self.output[i,j,l]=self.output[i,j,l]+((float(dataOut.NAVG)/(float)(dataOut.NAVG-dataOut.nkill))*self.lagp3[i,j,k])
5948 5948 #'''
5949 5949
5950 5950
5951 5951 dataOut.output_LP=self.output
5952 5952 dataOut.data_for_RTI_LP=numpy.zeros((4,dataOut.NRANGE))
5953 5953 dataOut.data_for_RTI_LP[0],dataOut.data_for_RTI_LP[1],dataOut.data_for_RTI_LP[2],dataOut.data_for_RTI_LP[3]=self.RTI_LP(dataOut.output_LP,dataOut.NRANGE)
5954 5954
5955 5955 def get_dc(self,dataOut):
5956 5956
5957 5957 if self.bcounter==0:
5958 5958 dataOut.dc=numpy.zeros(dataOut.NR,dtype='complex64')
5959 5959
5960 5960 dataOut.dc+=numpy.sum(dataOut.data[:,:,2*dataOut.NLAG:dataOut.NRANGE],axis=(1,2))
5961 5961 dataOut.dc=dataOut.dc/float(dataOut.nProfiles*(dataOut.NRANGE-2*dataOut.NLAG))
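# Each call adds this block's sum over all profiles and over heights 2*NLAG..NRANGE-1 and
# renormalizes, leaving a per-channel complex DC estimate in dataOut.dc.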
5962 5962
5963 5963 def noise_estimation4x_HP(self,dataOut):
5964 5964 if self.bcounter==dataOut.NAVG:
5965 5965 dataOut.noise_final=numpy.zeros(dataOut.NR,'float32')
5966 5966 #snoise=numpy.zeros((NR,NAVG),'float32')
5967 5967 #nvector1=numpy.zeros((NR,NAVG,MAXNRANGENDT),'float32')
5968 5968 sorted_data=numpy.zeros((dataOut.MAXNRANGENDT,dataOut.NR,dataOut.NAVG),'float32')
5969 5969 for i in range(dataOut.NR):
5970 5970 dataOut.noise_final[i]=0.0
5971 5971 for j in range(dataOut.MAXNRANGENDT):
5972 5972 sorted_data[j,i,:]=numpy.copy(sorted(dataOut.noisevector[j,i,:]))
5973 5973 l=dataOut.MAXNRANGENDT-2
5974 5974 for k in range(dataOut.NAVG):
5975 5975 if k>=dataOut.nkill/2 and k<dataOut.NAVG-dataOut.nkill/2:
5976 5976 dataOut.noise_final[i]+=sorted_data[min(j,l),i,k]*float(dataOut.NAVG)/float(dataOut.NAVG-dataOut.nkill)
5977 5977
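# A rough vectorized sketch of the trimmed average computed in noise_estimation4x_HP above
# (it ignores the min(j, MAXNRANGENDT-2) gate clamp used in the loop, so it is only an
# illustration, not a drop-in replacement):
#
# s = numpy.sort(dataOut.noisevector, axis=2)                  # sort the NAVG blocks per (gate, channel)
# keep = slice(int(dataOut.nkill/2), dataOut.NAVG - int(dataOut.nkill/2))
# scale = float(dataOut.NAVG)/float(dataOut.NAVG - dataOut.nkill)
# noise = s[:, :, keep].sum(axis=(0, 2))*scale                 # per-channel noise, summed over gates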
5978 5978 def noisevectorizer(self,NSCAN,nProfiles,NR,MAXNRANGENDT,noisevector,data,dc):
5979 5979
5980 5980 #rnormalizer= 1./(float(nProfiles - NSCAN))
5981 5981 rnormalizer= float(NSCAN)/((float(nProfiles - NSCAN))*float(MAXNRANGENDT))
5982 5982 for i in range(NR):
5983 5983 for j in range(MAXNRANGENDT):
5984 5984 for k in range(NSCAN,nProfiles):
5985 5985 #TODO:integrate just 2nd quartile gates
5986 5986 if k==NSCAN:
5987 5987 noisevector[j][i][self.bcounter]=(abs(data[i][k][j]-dc[i])**2)*rnormalizer
5988 5988 ##noisevector[j][i][iavg]=(abs(cdata[k][j][i])**2)*rnormalizer
5989 5989 else:
5990 5990 noisevector[j][i][self.bcounter]+=(abs(data[i][k][j]-dc[i])**2)*rnormalizer
5991 5991
5992 5992
5993 5993 def RTI_LP(self,output,NRANGE):
5994 5994 x00=numpy.zeros(NRANGE,dtype='float32')
5995 5995 x01=numpy.zeros(NRANGE,dtype='float32')
5996 5996 x02=numpy.zeros(NRANGE,dtype='float32')
5997 5997 x03=numpy.zeros(NRANGE,dtype='float32')
5998 5998
5999 5999 for i in range(2): #first couple of lags
6000 6000 for j in range(NRANGE): #
6001 6001 #fx=numpy.sqrt((kaxbx[i,j,k]+kayby[i,j,k])**2+(kaybx[i,j,k]-kaxby[i,j,k])**2)
6002 6002 x00[j]+=numpy.abs(output[i,j,0]) #Ch0
6003 6003 x01[j]+=numpy.abs(output[i,j,1]) #Ch1
6004 6004 x02[j]+=numpy.abs(output[i,j,2]) #Ch2
6005 6005 x03[j]+=numpy.abs(output[i,j,3]) #Ch3
6006 6006 #x02[i]=x02[i]+fx
6007 6007
6008 6008 x00[j]=10.0*numpy.log10(x00[j]/2.)
6009 6009 x01[j]=10.0*numpy.log10(x01[j]/2.)
6010 6010 x02[j]=10.0*numpy.log10(x02[j]/2.)
6011 6011 x03[j]=10.0*numpy.log10(x03[j]/2.)
6012 6012 #x02[i]=10.0*numpy.log10(x02[i])
6013 6013 return x00,x01,x02,x03
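# Equivalent vectorized form of the RTI power intended above (a sketch only, not used by
# the pipeline): sum |output| over the first two lags per height and channel, halve, and
# convert to dB,
#
# rti_db = 10.0*numpy.log10(numpy.abs(output[:2]).sum(axis=0)/2.)   # shape (NRANGE, NR)
#
# whose first four columns correspond to x00..x03 returned above.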
6014 6014
6015 6015 def run(self, dataOut, NLAG=16, NRANGE=200, NCAL=0, DPL=11,
6016 6016 NDN=0, NDT=67, NDP=67, NSCAN=128,
6017 6017 lagind=(0,1,2,3,4,5,6,7,0,3,4,5,6,8,9,10), lagfirst=(1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1),
6018 6018 NAVG=16, nkill=6):
6019 6019 #print(dataOut.data[1,:12,200:200+15])
6020 6020 #exit(1)
6021 6021 dataOut.NLAG=NLAG
6022 6022 dataOut.NR=len(dataOut.channelList)
6023 6023 dataOut.NRANGE=NRANGE
6024 6024 dataOut.NCAL=NCAL
6025 6025 dataOut.DPL=DPL
6026 6026 dataOut.NDN=NDN
6027 6027 dataOut.NDT=NDT
6028 6028 dataOut.NDP=NDP
6029 6029 dataOut.NSCAN=NSCAN
6030 6030 dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0]
6031 6031 dataOut.H0=int(dataOut.heightList[0])
6032 6032 dataOut.lagind=lagind
6033 6033 dataOut.lagfirst=lagfirst
6034 6034 dataOut.NAVG=NAVG
6035 6035 dataOut.nkill=nkill
6036 6036
6037 6037 dataOut.flagNoData = True
6038 6038
6039 6039 self.get_dc(dataOut)
6040 6040 self.get_products_cabxys_HP(dataOut)
6041 6041 self.cabxys_navg(dataOut)
6042 6042 self.lag_products_LP(dataOut)
6043 6043 self.LP_median_estimates(dataOut)
6044 6044 self.noise_estimation4x_HP(dataOut)
6045 6045 self.kabxys(dataOut)
6046 6046
6047 6047 return dataOut
6048 6048
6049 6049
6050 6050 class RemoveDebris(Operation):
6051 6051 """Operation to remove blocks where an outlier is found for Double (Long) Pulse.
6052 6052
6053 6053 Parameters:
6054 6054 -----------
6055 6055 None
6056 6056
6057 6057 Example
6058 6058 --------
6059 6059
6060 6060 op = proc_unit.addOperation(name='RemoveDebris', optype='other')
6061 6061
6062 6062 """
6063 6063
6064 6064 def __init__(self, **kwargs):
6065 6065
6066 6066 Operation.__init__(self, **kwargs)
6067 6067
6068 6068 def run(self,dataOut):
6069 6069 debris=numpy.zeros(dataOut.NRANGE,'float32')
6070 6070
6071 6071 for j in range(0,3):
6072 6072 for i in range(dataOut.NRANGE):
6073 6073 if j==0:
6074 6074 debris[i]=10*numpy.log10(numpy.abs(dataOut.output_LP[j,i,0]))
6075 6075 else:
6076 6076 debris[i]+=10*numpy.log10(numpy.abs(dataOut.output_LP[j,i,0]))
6077 6077
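# The test below flags the block when the summed LP power (in dB, over the first three lags)
# of four consecutive gates exceeds the average of the corresponding four-gate sums taken
# 9-12 gates away on either side by more than thresh dB, i.e. a localized spike consistent
# with debris.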
6078 6078 thresh=8.0+4+4+4
6079 6079 for i in range(47,100):
6080 6080 if ((debris[i-2]+debris[i-1]+debris[i]+debris[i+1])>
6081 6081 ((debris[i-12]+debris[i-11]+debris[i-10]+debris[i-9]+
6082 6082 debris[i+12]+debris[i+11]+debris[i+10]+debris[i+9])/2.0+
6083 6083 thresh)):
6084 6084
6085 6085 dataOut.flagNoData=True
6086 6086 print("LP Debris detected at",i*15,"km")
6087 6087
6088 6088 debris=numpy.zeros(dataOut.NDP,dtype='float32')
6089 6089 Range=numpy.arange(0,3000,15)
6090 6090 for k in range(2): #flip
6091 6091 for i in range(dataOut.NDP): #
6092 6092 debris[i]+=numpy.sqrt((dataOut.kaxbx[i,0,k]+dataOut.kayby[i,0,k])**2+(dataOut.kaybx[i,0,k]-dataOut.kaxby[i,0,k])**2)
6093 6093
6094 6094 if gmtime(dataOut.utctime).tm_hour > 11:
6095 6095 for i in range(2,dataOut.NDP-2):
6096 6096 if (debris[i]>3.0*debris[i-2] and
6097 6097 debris[i]>3.0*debris[i+2] and
6098 6098 Range[i]>200.0 and Range[i]<=540.0):
6099 6099 dataOut.flagNoData=True
6100 6100 print("DP Debris detected at",i*15,"km")
6101 6101
6102 6102 return dataOut
6103 6103
6104 6104
6105 6105 class IntegrationHP(IntegrationDP):
6106 6106 """Operation to integrate Double Pulse and Long Pulse data.
6107 6107
6108 6108 Parameters:
6109 6109 -----------
6110 6110 nint : int
6111 6111 Number of integrations.
6112 6112
6113 6113 Example
6114 6114 --------
6115 6115
6116 6116 op = proc_unit.addOperation(name='IntegrationHP', optype='other')
6117 6117 op.addParameter(name='nint', value='30', format='int')
6118 6118
6119 6119 """
6120 6120
6121 6121 def __init__(self, **kwargs):
6122 6122
6123 6123 Operation.__init__(self, **kwargs)
6124 6124
6125 6125 self.counter = 0
6126 6126 self.aux = 0
6127 6127
6128 6128 def integration_noise(self,dataOut):
6129 6129
6130 6130 if self.counter == 0:
6131 6131 dataOut.tnoise=numpy.zeros((dataOut.NR),dtype='float32')
6132 6132
6133 6133 dataOut.tnoise+=dataOut.noise_final
6134 6134
6135 6135 def integration_for_long_pulse(self,dataOut):
6136 6136
6137 6137 if self.counter == 0:
6138 6138 dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex128')
6139 6139
6140 6140 dataOut.output_LP_integrated+=dataOut.output_LP
6141 6141
6142 6142 def run(self,dataOut,nint=None):
6143 6143
6144 6144 dataOut.flagNoData=True
6145 6145
6146 6146 dataOut.nint=nint
6147 6147 dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 )
6148 6148 dataOut.lat=-11.95
6149 6149 dataOut.lon=-76.87
6150 6150
6151 6151 self.integration_for_long_pulse(dataOut)
6152 6152
6153 6153 self.integration_noise(dataOut)
6154 6154
6155 6155 if self.counter==dataOut.nint-1:
6156 6156 dataOut.nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10
6157 6157 #print(dataOut.tnoise)
6158 6158 #exit(1)
6159 6159 dataOut.tnoise[0]*=0.995
6160 6160 dataOut.tnoise[1]*=0.995
6161 6161 dataOut.pan=dataOut.tnoise[0]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG)
6162 6162 dataOut.pbn=dataOut.tnoise[1]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG)
6163 6163 #print(self.counter) ToDo: Fix when nint = 1
6164 6164 '''
6165 6165 print("pan: ",dataOut.pan)
6166 6166 print("pbn: ",dataOut.pbn)
6167 6167 #print("tnoise: ",dataOut.tnoise)
6168 6168 exit(1)
6169 6169 '''
6170 6170 #print(dataOut.output_LP_integrated[0,30,2])
6171 6171 #exit(1)
6172 6172 self.integration_for_double_pulse(dataOut)
6173 6173 #if self.counter==dataOut.nint:
6174 6174 #print(dataOut.kabxys_integrated[8][53,6,0]+dataOut.kabxys_integrated[11][53,6,0])
6175 6175 #print(dataOut.kabxys_integrated[8][53,9,0]+dataOut.kabxys_integrated[11][53,9,0])
6176 6176 #exit(1)
6177 6177 #print(dataOut.flagNoData)
6178 6178 return dataOut
6179 6179
6180 6180 class SumFlipsHP(SumFlips):
6181 6181 """Operation to sum the flip and unflip part of certain cross products of the Double Pulse.
6182 6182
6183 6183 Parameters:
6184 6184 -----------
6185 6185 None
6186 6186
6187 6187 Example
6188 6188 --------
6189 6189
6190 6190 op = proc_unit.addOperation(name='SumFlipsHP', optype='other')
6191 6191
6192 6192 """
6193 6193
6194 6194 def __init__(self, **kwargs):
6195 6195
6196 6196 Operation.__init__(self, **kwargs)
6197 6197
6198 6198 def rint2HP(self,dataOut):
6199 6199
6200 6200 dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32')
6201 6201 #print(dataOut.nint,dataOut.NAVG)
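# With the default lagind used by CrossProdHybrid, lag 0 and lags 3-6 each appear twice per
# 16-profile pattern and so accumulate 16 samples per 128-profile scan, while the remaining
# lags accumulate 8; hence the 16.0/8.0 normalization below (this reading of the constants
# is an interpretation, not taken from the original comments).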
6202 6202 for l in range(dataOut.DPL):
6203 6203 if(l==0 or (l>=3 and l <=6)):
6204 6204 dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*16.0)
6205 6205 else:
6206 6206 dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*8.0)
6207 6207
6208 6208 def run(self,dataOut):
6209 6209
6210 6210 self.rint2HP(dataOut)
6211 6211 self.SumLags(dataOut)
6212 6212
6213 6213 hei = 2
6214 6214 lag = 0
6215 6215 '''
6216 6216 for hei in range(67):
6217 6217 print("hei",hei)
6218 6218 print(dataOut.kabxys_integrated[8][hei,:,0]+dataOut.kabxys_integrated[11][hei,:,0])
6219 6219 print(dataOut.kabxys_integrated[10][hei,:,0]-dataOut.kabxys_integrated[9][hei,:,0])
6220 6220 exit(1)
6221 6221 '''
6222 6222 '''
6223 6223 print("b",(dataOut.kabxys_integrated[4][hei,lag,0]+dataOut.kabxys_integrated[5][hei,lag,0]))
6224 6224 print((dataOut.kabxys_integrated[6][hei,lag,0]+dataOut.kabxys_integrated[7][hei,lag,0]))
6225 6225 print("c",(dataOut.kabxys_integrated[8][hei,lag,0]+dataOut.kabxys_integrated[11][hei,lag,0]))
6226 6226 print((dataOut.kabxys_integrated[10][hei,lag,0]-dataOut.kabxys_integrated[9][hei,lag,0]))
6227 6227 exit(1)
6228 6228 '''
6229 6229 #print(dataOut.rnint2)
6230 6230 #print(numpy.sum(dataOut.kabxys_integrated[4][:,1,0]+dataOut.kabxys_integrated[5][:,1,0]))
6231 6231 #print(dataOut.nis)
6232 6232 #exit(1)
6233 6233 return dataOut
6234 6234
6235 6235
6236 6236 class LongPulseAnalysis(Operation):
6237 6237 """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data.
6238 6238
6239 6239 Parameters:
6240 6240 -----------
6241 6241 NACF : int
6242 6242 .*
6243 6243
6244 6244 Example
6245 6245 --------
6246 6246
6247 6247 op = proc_unit.addOperation(name='LongPulseAnalysis', optype='other')
6248 6248 op.addParameter(name='NACF', value='16', format='int')
6249 6249
6250 6250 """
6251 6251
6252 6252 def __init__(self, **kwargs):
6253 6253
6254 6254 Operation.__init__(self, **kwargs)
6255 6255 self.aux=1
6256 6256
6257 6257 def run(self,dataOut,NACF):
6258 6258
6259 6259 dataOut.NACF=NACF
6260 6260 dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF))
6261 6261 anoise0=dataOut.tnoise[0]
6262 6262 anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14
6263 6263 #print(anoise0)
6264 6264 #exit(1)
6265 6265 if self.aux:
6266 6266 #dataOut.cut=31#26#height=31*15=465
6267 6267 self.cal=numpy.zeros((dataOut.NLAG),'float32')
6268 6268 self.drift=numpy.zeros((200),'float32')
6269 6269 self.rdrift=numpy.zeros((200),'float32')
6270 6270 self.ddrift=numpy.zeros((200),'float32')
6271 6271 self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6272 6272 self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6273 6273 self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6274 6274 self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6275 6275 dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32')
6276 6276 self.dpulse=numpy.zeros((dataOut.NACF),'float32')
6277 6277 self.lpulse=numpy.zeros((dataOut.NACF),'float32')
6278 6278 dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32')
6279 6279 self.lagp=numpy.zeros((dataOut.NACF),'float32')
6280 6280 self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32')
6281 6281 dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6282 6282 dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6283 6283 dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6284 6284 dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6285 6285 dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6286 6286 dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6287 6287 dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6288 6288 dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6289 6289 dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6290 6290 dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,dataOut.NSHTS)),order='F',dtype='float32')
6291 6291 dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,dataOut.NSHTS),dataOut.NLAG),order='F',dtype='float32')
6292 6292 dataOut.status=numpy.zeros(1,'float32')
6293 6293 dataOut.tx=240.0 #should come from the header #hybrid
6294 6294
6295 6295 for i in range(dataOut.IBITS):
6296 6296 dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS;
6297 6297
6298 6298 self.aux=0
6299 6299
6300 6300 dataOut.cut=30
6301 6301 for i in range(30,15,-1): #Here the height at which DP and LP will be merged in the final step is determined
6302 6302 if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0:
6303 6303 dataOut.cut=i-1
6304 6304
6305 6305 for i in range(dataOut.NLAG):
6306 6306 self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real) #Lag x Height x Channel
6307 6307
6308 6308 #print(numpy.sum(self.cal)) #they match
6309 6309 #exit(1)
6310 6310 self.cal/=float(dataOut.NRANGE)
6311 6311 #print(anoise0)
6312 6312 #print(anoise1)
6313 6313 #exit(1)
6314 6314 #print("nis: ", dataOut.nis)
6315 6315 #print("pan: ", dataOut.pan)
6316 6316 #print("pbn: ", dataOut.pbn)
6317 6317 #print(numpy.sum(dataOut.output_LP_integrated[0,:,0]))
6318 6318 '''
6319 6319 import matplotlib.pyplot as plt
6320 6320 plt.plot(dataOut.output_LP_integrated[:,40,0])
6321 6321 plt.show()
6322 6322 '''
6323 6323 #print(dataOut.output_LP_integrated[0,40,0])
6324 6324 #print(numpy.sum(dataOut.output_LP_integrated[:,0,0]))
6325 6325 #exit(1)
6326 6326
6327 6327 #################### TRY MORE INTEGRATION, OTHERWISE ADJUST THE VALUE OF "NIS" ####################
6328 6328 # SEE dataOut.nProfiles_LP #
6329 6329
6330 6330 '''
6331 6331 #PLOT POWER VS NOISE, TOO MUCH SIGNAL MIGHT BE BEING REMOVED
6332 6332 #print(dataOut.heightList)
6333 6333 import matplotlib.pyplot as plt
6334 6334 plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]),dataOut.range1)
6335 6335 #plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]/dataOut.nProfiles_LP),dataOut.range1)
6336 6336 plt.axvline(10*numpy.log10(anoise0),color='k',linestyle='dashed')
6337 6337 plt.grid()
6338 6338 plt.xlim(20,100)
6339 6339 plt.show()
6340 6340 '''
6341 6341
6342 6342
6343 6343 for j in range(dataOut.NACF+2*dataOut.IBITS+2):
6344 6344
6345 6345 dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0
6346 6346 dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0
6347 6347
6348 6348 for i in range(1,dataOut.NLAG): #remove cal data from certain lags
6349 6349 dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i]
6350 6350 k=max(j,26) #constant power below range 26
6351 6351 self.powera[j]=dataOut.output_LP_integrated.real[0,k,0] #Lag0 and Channel 0
6352 6352
6353 6353 ## examine drifts here - based on 60 'indep.' estimates
6354 6354 #print(numpy.sum(self.powera))
6355 6355 #exit(1)
6356 6356 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10
6357 6357 nis = dataOut.nis
6358 6358 #print("nis",nis)
6359 6359 alpha=beta=delta=0.0
6360 6360 nest=0
6361 6361 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3)
6362 6362 beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0
6363 6363 #print(gamma,beta)
6364 6364 #exit(1)
6365 6365 for i in range(1,3):
6366 6366 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3)
6367 6367 #print("gamma",gamma)
6368 6368 for j in range(34,44):
6369 6369 rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0])
6370 6370 dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis))
6371 6371 dataOut.dphi2*=gamma**2
6372 6372 pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0])
6373 6373 #print("1",dataOut.output_LP_integrated.imag[i,j,0])
6374 6374 #print("2",dataOut.output_LP_integrated.real[i,j,0])
6375 6375 self.drift[nest]=pest
6376 6376 self.ddrift[nest]=dataOut.dphi2
6377 6377 self.rdrift[nest]=float(nest)
6378 6378 nest+=1
6379 6379
6380 6380 sorted(self.drift[:nest]) # note: sorted() returns a new list; self.drift itself is left unsorted here
6381 6381
6382 6382 #print(dataOut.dphi2)
6383 6383 #exit(1)
6384 6384
6385 6385 for j in range(int(nest/4),int(3*nest/4)):
6386 6386 #i=int(self.rdrift[j])
6387 6387 alpha+=self.drift[j]/self.ddrift[j]
6388 6388 delta+=1.0/self.ddrift[j]
6389 6389
6390 6390 alpha/=delta
6391 6391 delta=1./numpy.sqrt(delta)
6392 6392 vdrift=alpha-beta
6393 6393 dvdrift=delta
6394 6394
6395 6395 #need to develop estimate of complete density profile using all
6396 6396 #available data
6397 6397
6398 6398 #estimate sample variances for long-pulse power profile
6399 6399
6400 6400 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint
6401 6401 nis = dataOut.nis/10
6402 6402 #print("nis",nis)
6403 6403
6404 6404 self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis)
6405 6405 #print(self.sigma)
6406 6406 #exit(1)
6407 6407 ioff=1
6408 6408
6409 6409 #deconvolve rectangular pulse shape from profile ==> powerb, perror
6410 6410
6411 6411
6412 6412 ############# START nnlswrap#############
6413 6413
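# The block below deconvolves the IBITS-sample boxcar pulse from the power profile: G is the
# (range1 x range2) convolution matrix with ones along IBITS diagonals, and the profile is
# recovered by solving the regularized normal equations (G^T G + alpha^2 I) x ~ G^T p under a
# non-negativity constraint via nnls (presumably scipy.optimize.nnls, imported elsewhere in
# this module); alpha depends on local time through ut_Faraday.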
6414 6414 if dataOut.ut_Faraday>14.0:
6415 6415 alpha_nnlswrap=20.0
6416 6416 else:
6417 6417 alpha_nnlswrap=30.0
6418 6418
6419 6419 range1_nnls=dataOut.NACF
6420 6420 range2_nnls=dataOut.NACF+dataOut.IBITS-1
6421 6421
6422 6422 g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32')
6423 6423 a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64')
6424 6424
6425 6425 for i in range(range1_nnls):
6426 6426 for j in range(range2_nnls):
6427 6427 if j>=i and j<i+dataOut.IBITS:
6428 6428 g_nnlswrap[i,j]=1.0
6429 6429 else:
6430 6430 g_nnlswrap[i,j]=0.0
6431 6431
6432 6432 a_nnlswrap[:]=numpy.matmul(numpy.transpose(g_nnlswrap),g_nnlswrap)
6433 6433
6434 6434 numpy.fill_diagonal(a_nnlswrap,a_nnlswrap.diagonal()+alpha_nnlswrap**2)
6435 6435
6436 6436 #ERROR ANALYSIS#
6437 6437
6438 6438 self.perror[:range2_nnls]=0.0
6439 6439 self.perror[:range2_nnls]=numpy.matmul(1./(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff]),g_nnlswrap**2)
6440 6440 self.perror[:range1_nnls]+=(alpha_nnlswrap**2)/(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff])
6441 6441 self.perror[:range2_nnls]=1.00/self.perror[:range2_nnls]
6442 6442
6443 6443 b_nnlswrap=numpy.zeros(range2_nnls,'float64')
6444 6444 b_nnlswrap[:]=numpy.matmul(self.powera[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff],g_nnlswrap) #matched filter over heights
6445 6445
6446 6446 x_nnlswrap=numpy.zeros(range2_nnls,'float64')
6447 6447 x_nnlswrap[:]=nnls(a_nnlswrap,b_nnlswrap)[0]
6448 6448
6449 6449 self.powerb[:range2_nnls]=x_nnlswrap
6450 6450 #print(self.powerb[40])
6451 6451 #print(self.powerb[66])
6452 6452 #exit(1)
6453 6453 #############END nnlswrap#############
6454 6454 #print(numpy.sum(numpy.sqrt(self.perror[0:dataOut.NACF])))
6455 6455 #print(self.powerb[0:dataOut.NACF])
6456 6456 #exit(1)
6457 6457 #estimate relative error for deconvolved profile (scaling irrelevant)
6458 6458 #print(dataOut.NACF)
6459 6459 dataOut.ene[0:dataOut.NACF]=numpy.sqrt(self.perror[0:dataOut.NACF])/self.powerb[0:dataOut.NACF]
6460 6460 #print(numpy.sum(dataOut.ene))
6461 6461 #exit(1)
6462 6462 aux=0
6463 6463
6464 6464 for i in range(dataOut.IBITS,dataOut.NACF):
6465 6465 self.dpulse[i]=self.lpulse[i]=0.0
6466 6466 for j in range(dataOut.IBITS):
6467 6467 k=int(i-j)
6468 6468 if k<36-aux and k>16:
6469 6469 self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k]
6470 6470 elif k>=36-aux:
6471 6471 self.lpulse[i]+=self.powerb[k]
6472 6472 self.lagp[i]=self.powera[i]
6473 6473
6474 6474 #find scale factor that best merges profiles
6475 6475
6476 6476 qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6477 6477 ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6478 6478 si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6479 6479 ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6480 6480 vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6481 6481
6482 6482 alpha=(si*ui-vi*ri)/(qi*ui-ri*ri)
6483 6483 beta=(qi*vi-ri*si)/(qi*ui-ri*ri)
6484 6484
6485 6485 #form density profile estimate, merging rescaled power profiles
6486 6486 #print(dataOut.h2)
6487 6487 #print(numpy.sum(alpha))
6488 6488 #print(numpy.sum(dataOut.ph2))
6489 6489 self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux]
6490 6490 self.powerb[36-aux:dataOut.NACF]*=beta
6491 6491
6492 6492 #form Ne estimate, fill in error estimate at low altitudes
6493 6493
6494 6494 dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux]
6495 6495 dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha
6496 6496 #print(numpy.sum(self.powerb))
6497 6497 #print(numpy.sum(dataOut.ene))
6498 6498 #print(numpy.sum(dataOut.ne))
6499 6499 #exit(1)
6500 6500 #now do error propagation: store zero lag error covariance in u
6501 6501
6502 6502 nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal
6503 6503
6504 6504 for i in range(dataOut.NACF):
6505 6505 for j in range(i,dataOut.NACF):
6506 6506 if j-i>=dataOut.IBITS:
6507 6507 self.u[i,j]=0.0
6508 6508 else:
6509 6509 self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis)
6510 6510 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0]
6511 6511 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0]
6512 6512
6513 6513 self.u[j,i]=self.u[i,j]
6514 6514
6515 6515 #now error analyis for lag product matrix (diag), place in acf_err
6516 6516
6517 6517 for i in range(dataOut.NACF):
6518 6518 for j in range(dataOut.IBITS):
6519 6519 if j==0:
6520 6520 dataOut.errors[0,i]=numpy.sqrt(self.u[i,i])
6521 6521 else:
6522 6522 dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis))
6523 6523 '''
6524 6524 print(numpy.sum(dataOut.output_LP_integrated))
6525 6525 print(numpy.sum(dataOut.errors))
6526 6526 print(numpy.sum(self.powerb))
6527 6527 print(numpy.sum(dataOut.ne))
6528 6528 print(numpy.sum(dataOut.lags_LP))
6529 6529 print(numpy.sum(dataOut.thb))
6530 6530 print(numpy.sum(dataOut.bfm))
6531 6531 print(numpy.sum(dataOut.te))
6532 6532 print(numpy.sum(dataOut.ete))
6533 6533 print(numpy.sum(dataOut.ti))
6534 6534 print(numpy.sum(dataOut.eti))
6535 6535 print(numpy.sum(dataOut.ph))
6536 6536 print(numpy.sum(dataOut.eph))
6537 6537 print(numpy.sum(dataOut.phe))
6538 6538 print(numpy.sum(dataOut.ephe))
6539 6539 print(numpy.sum(dataOut.range1))
6540 6540 print(numpy.sum(dataOut.ut))
6541 6541 print(numpy.sum(dataOut.NACF))
6542 6542 print(numpy.sum(dataOut.fit_array_real))
6543 6543 print(numpy.sum(dataOut.status))
6544 6544 print(numpy.sum(dataOut.NRANGE))
6545 6545 print(numpy.sum(dataOut.IBITS))
6546 6546 exit(1)
6547 6547 '''
6548 6548 '''
6549 6549 print(dataOut.te2[13:16])
6550 6550 print(numpy.sum(dataOut.te2))
6551 6551 exit(1)
6552 6552 '''
6553 6553 #print("Success 1")
6554 6554 ###################Correlation pulse and itself
6555 6555
6556 6556 #print(dataOut.NRANGE)
6557 6557 print("LP Estimation")
6558 6558 with suppress_stdout_stderr():
6559 6559 #pass
6560 6560 full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS)
6561 6561
6562 6562 print("status: ",dataOut.status)
6563 6563
6564 6564 if dataOut.status>=3.5:
6565 6565 dataOut.te[:]=numpy.nan
6566 6566 dataOut.ete[:]=numpy.nan
6567 6567 dataOut.ti[:]=numpy.nan
6568 6568 dataOut.eti[:]=numpy.nan
6569 6569 dataOut.ph[:]=numpy.nan
6570 6570 dataOut.eph[:]=numpy.nan
6571 6571 dataOut.phe[:]=numpy.nan
6572 6572 dataOut.ephe[:]=numpy.nan
6573 6573
6574 6574 return dataOut
6575 6575
6576 6576 class LongPulseAnalysisSpectra(Operation):
6577 6577 """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data.
6578 6578
6579 6579 Parameters:
6580 6580 -----------
6581 6581 NACF : int
6582 6582 .*
6583 6583
6584 6584 Example
6585 6585 --------
6586 6586
6587 6587 op = proc_unit.addOperation(name='LongPulseAnalysisSpectra', optype='other')
6588 6588 op.addParameter(name='NACF', value='16', format='int')
6589 6589
6590 6590 """
6591 6591
6592 6592 def __init__(self, **kwargs):
6593 6593
6594 6594 Operation.__init__(self, **kwargs)
6595 6595 self.aux=1
6596 6596
6597 6597 def run(self,dataOut,NACF):
6598 6598
6599 6599 dataOut.NACF=NACF
6600 6600 dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF))
6601 6601 anoise0=dataOut.tnoise[0]
6602 6602 anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14
6603 6603 #print(anoise0)
6604 6604 #exit(1)
6605 6605 if self.aux:
6606 6606 #dataOut.cut=31#26#height=31*15=465
6607 6607 self.cal=numpy.zeros((dataOut.NLAG),'float32')
6608 6608 self.drift=numpy.zeros((200),'float32')
6609 6609 self.rdrift=numpy.zeros((200),'float32')
6610 6610 self.ddrift=numpy.zeros((200),'float32')
6611 6611 self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6612 6612 self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6613 6613 self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6614 6614 self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6615 6615 dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32')
6616 6616 self.dpulse=numpy.zeros((dataOut.NACF),'float32')
6617 6617 self.lpulse=numpy.zeros((dataOut.NACF),'float32')
6618 6618 dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32')
6619 6619 self.lagp=numpy.zeros((dataOut.NACF),'float32')
6620 6620 self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32')
6621 6621 dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6622 6622 dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6623 6623 dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6624 6624 dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6625 6625 dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6626 6626 dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6627 6627 dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6628 6628 dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6629 6629 dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6630 6630 dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,dataOut.NSHTS)),order='F',dtype='float32')
6631 6631 dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,dataOut.NSHTS),dataOut.NLAG),order='F',dtype='float32')
6632 6632 dataOut.status=numpy.zeros(1,'float32')
6633 6633 dataOut.tx=240.0 #should come from the header #hybrid
6634 6634
6635 6635 for i in range(dataOut.IBITS):
6636 6636 dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS;
6637 6637
6638 6638 self.aux=0
6639 6639
6640 6640 dataOut.cut=30
6641 6641 for i in range(30,15,-1): #Here the height at which DP and LP will be merged in the final step is determined
6642 6642 if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0:
6643 6643 dataOut.cut=i-1
6644 6644
6645 6645 for i in range(dataOut.NLAG):
6646 6646 self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real) #Lag x Height x Channel
6647 6647
6648 6648 #print(numpy.sum(self.cal)) #they match
6649 6649 #exit(1)
6650 6650 self.cal/=float(dataOut.NRANGE)
6651 6651
6652 6652
6653 6653 #################### TRY MORE INTEGRATION, OTHERWISE ADJUST THE VALUE OF "NIS" ####################
6654 6654 # SEE dataOut.nProfiles_LP #
6655 6655
6656 6656 '''
6657 6657 #PLOT POWER VS NOISE, TOO MUCH SIGNAL MIGHT BE BEING REMOVED
6658 6658 #print(dataOut.heightList)
6659 6659 import matplotlib.pyplot as plt
6660 6660 plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]),dataOut.range1)
6661 6661 #plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]/dataOut.nProfiles_LP),dataOut.range1)
6662 6662 plt.axvline(10*numpy.log10(anoise0),color='k',linestyle='dashed')
6663 6663 plt.grid()
6664 6664 plt.xlim(20,100)
6665 6665 plt.show()
6666 6666 '''
6667 6667
6668 6668
6669 6669 for j in range(dataOut.NACF+2*dataOut.IBITS+2):
6670 6670
6671 6671 dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0
6672 6672 dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0
6673 6673
6674 6674 for i in range(1,dataOut.NLAG): #remove cal data from certain lags
6675 6675 dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i]
6676 6676 k=max(j,26) #constant power below range 26
6677 6677 self.powera[j]=dataOut.output_LP_integrated.real[0,k,0] #Lag0 and Channel 0
6678 6678
6679 6679 ## examine drifts here - based on 60 'indep.' estimates
6680 6680 #print(numpy.sum(self.powera))
6681 6681 #exit(1)
6682 6682 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10
6683 6683 nis = dataOut.nis
6684 6684 #print("nis",nis)
6685 6685 alpha=beta=delta=0.0
6686 6686 nest=0
6687 6687 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3)
6688 6688 beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0
6689 6689 #print(gamma,beta)
6690 6690 #exit(1)
6691 6691 for i in range(1,3):
6692 6692 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3)
6693 6693 #print("gamma",gamma)
6694 6694 for j in range(34,44):
6695 6695 rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0])
6696 6696 dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis))
6697 6697 dataOut.dphi2*=gamma**2
6698 6698 pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0])
6699 6699 #print("1",dataOut.output_LP_integrated.imag[i,j,0])
6700 6700 #print("2",dataOut.output_LP_integrated.real[i,j,0])
6701 6701 self.drift[nest]=pest
6702 6702 self.ddrift[nest]=dataOut.dphi2
6703 6703 self.rdrift[nest]=float(nest)
6704 6704 nest+=1
6705 6705
6706 6706 sorted(self.drift[:nest]) # note: sorted() returns a new list; self.drift itself is left unsorted here
6707 6707
6708 6708 #print(dataOut.dphi2)
6709 6709 #exit(1)
6710 6710
6711 6711 for j in range(int(nest/4),int(3*nest/4)):
6712 6712 #i=int(self.rdrift[j])
6713 6713 alpha+=self.drift[j]/self.ddrift[j]
6714 6714 delta+=1.0/self.ddrift[j]
6715 6715
6716 6716 alpha/=delta
6717 6717 delta=1./numpy.sqrt(delta)
6718 6718 vdrift=alpha-beta
6719 6719 dvdrift=delta
6720 6720
6721 6721 #need to develop estimate of complete density profile using all
6722 6722 #available data
6723 6723
6724 6724 #estimate sample variances for long-pulse power profile
6725 6725
6726 6726 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint
6727 6727 nis = dataOut.nis/10
6728 6728 #print("nis",nis)
6729 6729
6730 6730 self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis)
6731 6731 #print(self.sigma)
6732 6732 #exit(1)
6733 6733 ioff=1
6734 6734
6735 6735 #deconvolve rectangular pulse shape from profile ==> powerb, perror
6736 6736
6737 6737 '''
6738 6738 ############# START nnlswrap#############
6739 6739
6740 6740 if dataOut.ut_Faraday>14.0:
6741 6741 alpha_nnlswrap=20.0
6742 6742 else:
6743 6743 alpha_nnlswrap=30.0
6744 6744
6745 6745 range1_nnls=dataOut.NACF
6746 6746 range2_nnls=dataOut.NACF+dataOut.IBITS-1
6747 6747
6748 6748 g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32')
6749 6749 a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64')
6750 6750
6751 6751 for i in range(range1_nnls):
6752 6752 for j in range(range2_nnls):
6753 6753 if j>=i and j<i+dataOut.IBITS:
6754 6754 g_nnlswrap[i,j]=1.0
6755 6755 else:
6756 6756 g_nnlswrap[i,j]=0.0
6757 6757
6758 6758 a_nnlswrap[:]=numpy.matmul(numpy.transpose(g_nnlswrap),g_nnlswrap)
6759 6759
6760 6760 numpy.fill_diagonal(a_nnlswrap,a_nnlswrap.diagonal()+alpha_nnlswrap**2)
6761 6761
6762 6762 #ERROR ANALYSIS#
6763 6763
6764 6764 self.perror[:range2_nnls]=0.0
6765 6765 self.perror[:range2_nnls]=numpy.matmul(1./(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff]),g_nnlswrap**2)
6766 6766 self.perror[:range1_nnls]+=(alpha_nnlswrap**2)/(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff])
6767 6767 self.perror[:range2_nnls]=1.00/self.perror[:range2_nnls]
6768 6768
6769 6769 b_nnlswrap=numpy.zeros(range2_nnls,'float64')
6770 6770 b_nnlswrap[:]=numpy.matmul(self.powera[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff],g_nnlswrap)
6771 6771
6772 6772 x_nnlswrap=numpy.zeros(range2_nnls,'float64')
6773 6773 x_nnlswrap[:]=nnls(a_nnlswrap,b_nnlswrap)[0]
6774 6774
6775 6775 self.powerb[:range2_nnls]=x_nnlswrap
6776 6776 #print(self.powerb[40])
6777 6777 #print(self.powerb[66])
6778 6778 #exit(1)
6779 6779 #############END nnlswrap#############
6780 6780 '''
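# In this version the NNLS deconvolution above is disabled: the raw lag-0 profile is
# used directly as powerb and its deconvolution error is set to zero.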
6781 6781 self.powerb[:] = self.powera
6782 6782 self.perror[:] = 0.
6783 6783 #print(numpy.sum(numpy.sqrt(self.perror[0:dataOut.NACF])))
6784 6784 #print(self.powerb[0:dataOut.NACF])
6785 6785 #exit(1)
6786 6786 #estimate relative error for deconvolved profile (scaling irrelevant)
6787 6787 #print(dataOut.NACF)
6788 6788 dataOut.ene[0:dataOut.NACF]=numpy.sqrt(self.perror[0:dataOut.NACF])/self.powerb[0:dataOut.NACF]
6789 6789 #print(numpy.sum(dataOut.ene))
6790 6790 #exit(1)
6791 6791 aux=0
6792 6792
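# Convolve the candidate profiles with the rectangular transmit pulse (IBITS bauds):
# dpulse accumulates the DP profile ph2/h2 below gate 36, lpulse the deconvolved
# long-pulse profile above it, and lagp keeps the measured lag-0 long-pulse power,
# so the three can be compared on the same footing.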
6793 6793 for i in range(dataOut.IBITS,dataOut.NACF):
6794 6794 self.dpulse[i]=self.lpulse[i]=0.0
6795 6795 for j in range(dataOut.IBITS):
6796 6796 k=int(i-j)
6797 6797 if k<36-aux and k>16:
6798 6798 self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k]
6799 6799 elif k>=36-aux:
6800 6800 self.lpulse[i]+=self.powerb[k]
6801 6801 self.lagp[i]=self.powera[i]
6802 6802
6803 6803 #find scale factor that best merges profiles
6804 6804
6805 6805 qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6806 6806 ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6807 6807 si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6808 6808 ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6809 6809 vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
6810 6810
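# qi..vi are the weighted sums of a 2x2 least-squares system: alpha and beta below are
# the scale factors that minimize |alpha*dpulse + beta*lpulse - lagp|^2 with weights
# 1/(lagp+noise)^2, i.e. the solution of the corresponding normal equations.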
6811 6811 alpha=(si*ui-vi*ri)/(qi*ui-ri*ri)
6812 6812 beta=(qi*vi-ri*si)/(qi*ui-ri*ri)
6813 6813
6814 6814 #form density profile estimate, merging rescaled power profiles
6815 6815 #print(dataOut.h2)
6816 6816 #print(numpy.sum(alpha))
6817 6817 #print(numpy.sum(dataOut.ph2))
6818 6818 self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux]
6819 6819 self.powerb[36-aux:dataOut.NACF]*=beta
6820 6820
6821 6821 #form Ne estimate, fill in error estimate at low altitudes
6822 6822
6823 6823 dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux]
6824 6824 dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha
6825 6825 #print(numpy.sum(self.powerb))
6826 6826 #print(numpy.sum(dataOut.ene))
6827 6827 #print(numpy.sum(dataOut.ne))
6828 6828 #exit(1)
6829 6829 #now do error propagation: store zero lag error covariance in u
6830 6830
6831 6831 nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal
6832 6832
6833 6833 for i in range(dataOut.NACF):
6834 6834 for j in range(i,dataOut.NACF):
6835 6835 if j-i>=dataOut.IBITS:
6836 6836 self.u[i,j]=0.0
6837 6837 else:
6838 6838 self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis)
6839 6839 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0]
6840 6840 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0]
6841 6841
6842 6842 self.u[j,i]=self.u[i,j]
6843 6843
6844 6844 #now error analysis for lag product matrix (diag), place in acf_err
6845 6845
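# errors[j,i] is the standard deviation of the lag-j product at gate i, using a
# standard ACF variance expression ((P_i+N)*(P_{i+j}+N) + rho_ij^2)/(2*nis); for j=0
# it is taken from the diagonal of the covariance matrix u built above.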
6846 6846 for i in range(dataOut.NACF):
6847 6847 for j in range(dataOut.IBITS):
6848 6848 if j==0:
6849 6849 dataOut.errors[0,i]=numpy.sqrt(self.u[i,i])
6850 6850 else:
6851 6851 dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis))
6852 6852
6853 6853 print("Success")
6854 6854 #print(dataOut.NRANGE)
6855 6855 with suppress_stdout_stderr():
6856 6856 pass
6857 6857 #full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS)
6858 6858
6859 6859 print("status: ",dataOut.status)
6860 6860
6861 6861 if dataOut.status>=3.5:
6862 6862 dataOut.te[:]=numpy.nan
6863 6863 dataOut.ete[:]=numpy.nan
6864 6864 dataOut.ti[:]=numpy.nan
6865 6865 dataOut.eti[:]=numpy.nan
6866 6866 dataOut.ph[:]=numpy.nan
6867 6867 dataOut.eph[:]=numpy.nan
6868 6868 dataOut.phe[:]=numpy.nan
6869 6869 dataOut.ephe[:]=numpy.nan
6870 6870
6871 6871 return dataOut
6872 6872
6873 6873 class LongPulseAnalysis_V2(Operation):
6874 6874 """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data.
6875 6875
6876 6876 Parameters:
6877 6877 -----------
6878 6878 NACF : int
6879 6879 Number of ACF range gates (heights) to analyze.
6880 6880
6881 6881 Example
6882 6882 --------
6883 6883
6884 6884 op = proc_unit.addOperation(name='LongPulseAnalysis', optype='other')
6885 6885 op.addParameter(name='NACF', value='16', format='int')
6886 6886
6887 6887 """
6888 6888
6889 6889 def __init__(self, **kwargs):
6890 6890
6891 6891 Operation.__init__(self, **kwargs)
6892 6892 self.aux=1
6893 6893
6894 6894 def run(self,dataOut,NACF):
6895 6895
6896 6896 dataOut.NACF=NACF
6897 6897 dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF))
6898 6898 anoise0=dataOut.tnoise[0]
6899 6899 anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14
6900 6900 #print(anoise0)
6901 6901 #exit(1)
6902 6902 if self.aux:
6903 6903 #dataOut.cut=31#26#height=31*15=465
6904 6904 self.cal=numpy.zeros((dataOut.NLAG),'float32')
6905 6905 self.drift=numpy.zeros((200),'float32')
6906 6906 self.rdrift=numpy.zeros((200),'float32')
6907 6907 self.ddrift=numpy.zeros((200),'float32')
6908 6908 self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6909 6909 self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6910 6910 self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6911 6911 self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6912 6912 dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32')
6913 6913 self.dpulse=numpy.zeros((dataOut.NACF),'float32')
6914 6914 self.lpulse=numpy.zeros((dataOut.NACF),'float32')
6915 6915 dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32')
6916 6916 self.lagp=numpy.zeros((dataOut.NACF),'float32')
6917 6917 self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32')
6918 6918 dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
6919 6919 dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6920 6920 dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6921 6921 dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6922 6922 dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6923 6923 dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6924 6924 dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6925 6925 dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6926 6926 dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
6927 6927 dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,dataOut.NSHTS)),order='F',dtype='float32')
6928 6928 dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,dataOut.NSHTS),dataOut.NLAG),order='F',dtype='float32')
6929 6929 dataOut.status=numpy.zeros(1,'float32')
6930 6930 dataOut.tx=240.0 #should come from the header #hybrid
6931 6931
6932 6932 for i in range(dataOut.IBITS):
6933 6933 dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS;
6934 6934
6935 6935 self.aux=0
6936 6936
6937 6937 dataOut.cut=30
6938 6938 for i in range(30,15,-1):
6939 6939 if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0:
6940 6940 dataOut.cut=i-1
6941 6941
6942 6942 for i in range(dataOut.NLAG):
6943 6943 self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real)
6944 6944
6945 6945 #print(numpy.sum(self.cal)) #Values match
6946 6946 #exit(1)
6947 6947 self.cal/=float(dataOut.NRANGE)
6948 6948 #print(anoise0)
6949 6949 #print(anoise1)
6950 6950 #exit(1)
6951 6951
6952 6952 for j in range(dataOut.NACF+2*dataOut.IBITS+2):
6953 6953
6954 6954 dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0
6955 6955 dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0
6956 6956
6957 6957 for i in range(1,dataOut.NLAG): #remove cal data from certain lags
6958 6958 dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i]
6959 6959 k=max(j,26) #constant power below range 26
6960 6960 self.powera[j]=dataOut.output_LP_integrated.real[0,k,0]
6961 6961
6962 6962 ## examine drifts here - based on 60 'indep.' estimates
6963 6963 #print(numpy.sum(self.powera))
6964 6964 #exit(1)
6965 6965 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10
6966 6966 nis = dataOut.nis
6967 6967 #print("nis",nis)
6968 6968 alpha=beta=delta=0.0
6969 6969 nest=0
6970 6970 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3)
6971 6971 beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0
6972 6972 #print(gamma,beta)
6973 6973 #exit(1)
6974 6974 for i in range(1,3):
6975 6975 gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3)
6976 6976 #print("gamma",gamma)
6977 6977 for j in range(34,44):
6978 6978 rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0])
6979 6979 dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis))
6980 6980 dataOut.dphi2*=gamma**2
6981 6981 pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0])
6982 6982 #print("1",dataOut.output_LP_integrated.imag[i,j,0])
6983 6983 #print("2",dataOut.output_LP_integrated.real[i,j,0])
6984 6984 self.drift[nest]=pest
6985 6985 self.ddrift[nest]=dataOut.dphi2
6986 6986 self.rdrift[nest]=float(nest)
6987 6987 nest+=1
6988 6988
6989 6989 idx=numpy.argsort(self.drift[:nest]); self.drift[:nest]=self.drift[idx]; self.ddrift[:nest]=self.ddrift[idx] #sort estimates with their variances; a bare sorted() call discards its result
6990 6990
6991 6991 #print(dataOut.dphi2)
6992 6992 #exit(1)
6993 6993
6994 6994 for j in range(int(nest/4),int(3*nest/4)):
6995 6995 #i=int(self.rdrift[j])
6996 6996 alpha+=self.drift[j]/self.ddrift[j]
6997 6997 delta+=1.0/self.ddrift[j]
6998 6998
6999 6999 alpha/=delta
7000 7000 delta=1./numpy.sqrt(delta)
7001 7001 vdrift=alpha-beta
7002 7002 dvdrift=delta
7003 7003
7004 7004 #need to develop estimate of complete density profile using all
7005 7005 #available data
7006 7006
7007 7007 #estimate sample variances for long-pulse power profile
7008 7008
7009 7009 #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint
7010 7010 nis = dataOut.nis/10
7011 7011 #print("nis",nis)
7012 7012
7013 7013 self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis)
7014 7014 #print(self.sigma)
7015 7015 #exit(1)
7016 7016 ioff=1
7017 7017
7018 7018 #deconvolve rectangular pulse shape from profile ==> powerb, perror
7019 7019
7020 7020
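# Deconvolution of the rectangular transmit pulse (IBITS bauds) from the power profile
# via Tikhonov-regularized non-negative least squares: g_nnlswrap is the boxcar
# convolution matrix, the normal-equation matrix G^T G gets alpha_nnlswrap^2 added to
# its diagonal, and nnls (presumably scipy.optimize.nnls) solves
# (G^T G + alpha^2 I) x = G^T p for the non-negative profile x stored in powerb;
# perror holds the associated error estimates.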
7021 7021 ############# START nnlswrap#############
7022 7022
7023 7023 if dataOut.ut_Faraday>14.0:
7024 7024 alpha_nnlswrap=20.0
7025 7025 else:
7026 7026 alpha_nnlswrap=30.0
7027 7027
7028 7028 range1_nnls=dataOut.NACF
7029 7029 range2_nnls=dataOut.NACF+dataOut.IBITS-1
7030 7030
7031 7031 g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32')
7032 7032 a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64')
7033 7033
7034 7034 for i in range(range1_nnls):
7035 7035 for j in range(range2_nnls):
7036 7036 if j>=i and j<i+dataOut.IBITS:
7037 7037 g_nnlswrap[i,j]=1.0
7038 7038 else:
7039 7039 g_nnlswrap[i,j]=0.0
7040 7040
7041 7041 a_nnlswrap[:]=numpy.matmul(numpy.transpose(g_nnlswrap),g_nnlswrap)
7042 7042
7043 7043 numpy.fill_diagonal(a_nnlswrap,a_nnlswrap.diagonal()+alpha_nnlswrap**2)
7044 7044
7045 7045 #ERROR ANALYSIS#
7046 7046
7047 7047 self.perror[:range2_nnls]=0.0
7048 7048 self.perror[:range2_nnls]=numpy.matmul(1./(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff]),g_nnlswrap**2)
7049 7049 self.perror[:range1_nnls]+=(alpha_nnlswrap**2)/(self.sigma[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff])
7050 7050 self.perror[:range2_nnls]=1.00/self.perror[:range2_nnls]
7051 7051
7052 7052 b_nnlswrap=numpy.zeros(range2_nnls,'float64')
7053 7053 b_nnlswrap[:]=numpy.matmul(self.powera[dataOut.IBITS+ioff:range1_nnls+dataOut.IBITS+ioff],g_nnlswrap)
7054 7054
7055 7055 x_nnlswrap=numpy.zeros(range2_nnls,'float64')
7056 7056 x_nnlswrap[:]=nnls(a_nnlswrap,b_nnlswrap)[0]
7057 7057
7058 7058 self.powerb[:range2_nnls]=x_nnlswrap
7059 7059 #print(self.powerb[40])
7060 7060 #print(self.powerb[66])
7061 7061 #exit(1)
7062 7062 #############END nnlswrap#############
7063 7063 #print(numpy.sum(numpy.sqrt(self.perror[0:dataOut.NACF])))
7064 7064 #print(self.powerb[0:dataOut.NACF])
7065 7065 #exit(1)
7066 7066 #estimate relative error for deconvolved profile (scaling irrelevant)
7067 7067 #print(dataOut.NACF)
7068 7068 dataOut.ene[0:dataOut.NACF]=numpy.sqrt(self.perror[0:dataOut.NACF])/self.powerb[0:dataOut.NACF]
7069 7069 #print(numpy.sum(dataOut.ene))
7070 7070 #exit(1)
7071 7071 aux=0
7072 7072
7073 7073 for i in range(dataOut.IBITS,dataOut.NACF):
7074 7074 self.dpulse[i]=self.lpulse[i]=0.0
7075 7075 for j in range(dataOut.IBITS):
7076 7076 k=int(i-j)
7077 7077 if k<36-aux and k>16:
7078 7078 self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k]
7079 7079 elif k>=36-aux:
7080 7080 self.lpulse[i]+=self.powerb[k]
7081 7081 self.lagp[i]=self.powera[i]
7082 7082
7083 7083 #find scale factor that best merges profiles
7084 7084
7085 7085 qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
7086 7086 ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
7087 7087 si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
7088 7088 ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2)
7089 7089 vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2)
7090 7090
7091 7091 alpha=(si*ui-vi*ri)/(qi*ui-ri*ri)
7092 7092 beta=(qi*vi-ri*si)/(qi*ui-ri*ri)
7093 7093
7094 7094 #form density profile estimate, merging rescaled power profiles
7095 7095 #print(dataOut.h2)
7096 7096 #print(numpy.sum(alpha))
7097 7097 #print(numpy.sum(dataOut.ph2))
7098 7098 self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux]
7099 7099 self.powerb[36-aux:dataOut.NACF]*=beta
7100 7100
7101 7101 #form Ne estimate, fill in error estimate at low altitudes
7102 7102
7103 7103 dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux]
7104 7104 dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha
7105 7105 #print(numpy.sum(self.powerb))
7106 7106 #print(numpy.sum(dataOut.ene))
7107 7107 #print(numpy.sum(dataOut.ne))
7108 7108 #exit(1)
7109 7109 #now do error propagation: store zero lag error covariance in u
7110 7110
7111 7111 nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal
7112 7112
7113 7113 for i in range(dataOut.NACF):
7114 7114 for j in range(i,dataOut.NACF):
7115 7115 if j-i>=dataOut.IBITS:
7116 7116 self.u[i,j]=0.0
7117 7117 else:
7118 7118 self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis)
7119 7119 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0]
7120 7120 self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0]
7121 7121
7122 7122 self.u[j,i]=self.u[i,j]
7123 7123
7124 7124 #now error analysis for lag product matrix (diag), place in acf_err
7125 7125
7126 7126 for i in range(dataOut.NACF):
7127 7127 for j in range(dataOut.IBITS):
7128 7128 if j==0:
7129 7129 dataOut.errors[0,i]=numpy.sqrt(self.u[i,i])
7130 7130 else:
7131 7131 dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis))
7132 7132
7133 7133 print("Success")
7134 7134 with suppress_stdout_stderr():
7135 7135 #pass
7136 7136 full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS)
7137 7137
7138 7138 if dataOut.status>=3.5:
7139 7139 dataOut.te[:]=numpy.nan
7140 7140 dataOut.ete[:]=numpy.nan
7141 7141 dataOut.ti[:]=numpy.nan
7142 7142 dataOut.eti[:]=numpy.nan
7143 7143 dataOut.ph[:]=numpy.nan
7144 7144 dataOut.eph[:]=numpy.nan
7145 7145 dataOut.phe[:]=numpy.nan
7146 7146 dataOut.ephe[:]=numpy.nan
7147 7147
7148 7148 return dataOut
7149 7149
7150 7150 class PulsePairVoltage(Operation):
7151 7151 '''
7152 7152 Function PulsePair(Signal Power, Velocity)
7153 7153 The real component of Lag[0] provides the intensity (power) information
7154 7154 The phase of Lag[1] provides the velocity information
7155 7155
7156 7156 Configuration Parameters:
7157 7157 nPRF = Number of PRFs (profiles) to accumulate
7158 7158 theta = Azimuth angle boundaries in degrees
7159 7159
7160 7160 Input:
7161 7161 self.dataOut
7162 7162 lag[N]
7163 7163 Affected:
7164 7164 self.dataOut.dataPP_POW, dataPP_POWER, dataPP_DOP, dataPP_SNR, dataPP_WIDTH
7165 7165 '''
7166 7166 isConfig = False
7167 7167 __profIndex = 0
7168 7168 __initime = None
7169 7169 __lastdatatime = None
7170 7170 __buffer = None
7171 7171 noise = None
7172 7172 __dataReady = False
7173 7173 n = None
7174 7174 __nch = 0
7175 7175 __nHeis = 0
7176 7176 removeDC = False
7177 7177 ipp = None
7178 7178 lambda_ = 0
7179 7179
7180 7180 def __init__(self,**kwargs):
7181 7181 Operation.__init__(self,**kwargs)
7182 7182
7183 7183 def setup(self, dataOut, n = None, removeDC=False):
7184 7184 '''
7185 7185 n = Number of input PRFs (profiles) to accumulate
7186 7186 '''
7187 7187 self.__initime = None
7188 7188 self.__lastdatatime = 0
7189 7189 self.__dataReady = False
7190 7190 self.__buffer = 0
7191 7191 self.__profIndex = 0
7192 7192 self.noise = None
7193 7193 self.__nch = dataOut.nChannels
7194 7194 self.__nHeis = dataOut.nHeights
7195 7195 self.removeDC = removeDC
7196 7196 self.lambda_ = 3.0e8/(9345.0e6)
7197 7197 self.ippSec = dataOut.ippSeconds
7198 7198 self.nCohInt = dataOut.nCohInt
7199 7199 print("IPPseconds",dataOut.ippSeconds)
7200 7200
7201 7201 print("ELVALOR DE n es:", n)
7202 7202 if n is None:
7203 7203 raise ValueError("n should be specified.")
7204 7204
7205 7205 if n is not None:
7206 7206 if n<2:
7207 7207 raise ValueError("n should be at least 2")
7208 7208
7209 7209 self.n = n
7210 7210 self.__nProf = n
7211 7211
7212 7212 self.__buffer = numpy.zeros((dataOut.nChannels,
7213 7213 n,
7214 7214 dataOut.nHeights),
7215 7215 dtype='complex')
7216 7216
7217 7217 def putData(self,data):
7218 7218 '''
7219 7219 Add a profile to the __buffer and increase the __profIndex by one
7220 7220 '''
7221 7221 self.__buffer[:,self.__profIndex,:]= data
7222 7222 self.__profIndex += 1
7223 7223 return
7224 7224
7225 7225 def pushData(self,dataOut):
7226 7226 '''
7227 7227 Return the PULSEPAIR and the profiles used in the operation
7228 7228 Affected : self.__profIndex
7229 7229 '''
7230 7230 #----------------- Remove DC-----------------------------------
7231 7231 if self.removeDC==True:
7232 7232 mean = numpy.mean(self.__buffer,1)
7233 7233 tmp = mean.reshape(self.__nch,1,self.__nHeis)
7234 7234 dc= numpy.tile(tmp,[1,self.__nProf,1])
7235 7235 self.__buffer = self.__buffer - dc
7236 7236 #------------------Power computation ------------------------
7237 7237 pair0 = self.__buffer*numpy.conj(self.__buffer)
7238 7238 pair0 = pair0.real
7239 7239 lag_0 = numpy.sum(pair0,1)
7240 7240 #------------------Noise estimation per channel (Hildebrand-Sekhon)--------------------
7241 7241 self.noise = numpy.zeros(self.__nch)
7242 7242 for i in range(self.__nch):
7243 7243 daux = numpy.sort(pair0[i,:,:],axis= None)
7244 7244 self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)
7245 7245
7246 7246 self.noise = self.noise.reshape(self.__nch,1)
7247 7247 self.noise = numpy.tile(self.noise,[1,self.__nHeis])
7248 7248 noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
7249 7249 noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
7250 7250 #------------------ Received power = P , Signal power = S , Noise = N--
7251 7251 #------------------ P = S+N , P = lag_0/n ---------------------------------
7252 7252 #-------------------- Power --------------------------------------------------
7253 7253 data_power = lag_0/(self.n*self.nCohInt)
7254 7254 #------------------ Signal ---------------------------------------------------
7255 7255 data_intensity = pair0 - noise_buffer
7256 7256 data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
7257 7257 #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
7258 7258 for i in range(self.__nch):
7259 7259 for j in range(self.__nHeis):
7260 7260 if data_intensity[i][j] < 0:
7261 7261 data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))
7262 7262
7263 7263 #----------------- Doppler frequency and velocity computation--------
7264 7264 pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
7265 7265 lag_1 = numpy.sum(pair1,1)
7266 7266 data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
7267 7267 data_velocity = (self.lambda_/2.0)*data_freq
7268 7268
7269 7269 #---------------- Estimated mean signal power-----------
7270 7270 lag_0 = lag_0/self.n
7271 7271 S = lag_0-self.noise
7272 7272
7273 7273 #---------------- Mean Doppler frequency ---------------------
7274 7274 lag_1 = lag_1/(self.n-1)
7275 7275 R1 = numpy.abs(lag_1)
7276 7276
7277 7277 #---------------- SNR computation----------------------------------
7278 7278 data_snrPP = S/self.noise
7279 7279 for i in range(self.__nch):
7280 7280 for j in range(self.__nHeis):
7281 7281 if data_snrPP[i][j] < 1.e-20:
7282 7282 data_snrPP[i][j] = 1.e-20
7283 7283
7284 7284 #----------------- Spectral width computation ----------------------
7285 7285 L = S/R1
7286 7286 L = numpy.where(L<0,1,L)
7287 7287 L = numpy.log(L)
7288 7288 tmp = numpy.sqrt(numpy.absolute(L))
7289 7289 data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
7290 7290 n = self.__profIndex
7291 7291
7292 7292 self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
7293 7293 self.__profIndex = 0
7294 7294 return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,n
7295 7295
7296 7296
7297 7297 def pulsePairbyProfiles(self,dataOut):
7298 7298
7299 7299 self.__dataReady = False
7300 7300 data_power = None
7301 7301 data_intensity = None
7302 7302 data_velocity = None
7303 7303 data_specwidth = None
7304 7304 data_snrPP = None
7305 7305 self.putData(data=dataOut.data)
7306 7306 if self.__profIndex == self.n:
7307 7307 data_power,data_intensity, data_velocity,data_snrPP,data_specwidth, n = self.pushData(dataOut=dataOut)
7308 7308 self.__dataReady = True
7309 7309
7310 7310 return data_power, data_intensity, data_velocity, data_snrPP, data_specwidth
7311 7311
7312 7312
7313 7313 def pulsePairOp(self, dataOut, datatime= None):
7314 7314
7315 7315 if self.__initime is None:
7316 7316 self.__initime = datatime
7317 7317 data_power, data_intensity, data_velocity, data_snrPP, data_specwidth = self.pulsePairbyProfiles(dataOut)
7318 7318 self.__lastdatatime = datatime
7319 7319
7320 7320 if data_power is None:
7321 7321 return None, None, None,None,None,None
7322 7322
7323 7323 avgdatatime = self.__initime
7324 7324 deltatime = datatime - self.__lastdatatime
7325 7325 self.__initime = datatime
7326 7326
7327 7327 return data_power, data_intensity, data_velocity, data_snrPP, data_specwidth, avgdatatime
7328 7328
7329 7329 def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
7330 7330
7331 7331 if not self.isConfig:
7332 7332 self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
7333 7333 self.isConfig = True
7334 7334 data_power, data_intensity, data_velocity,data_snrPP,data_specwidth, avgdatatime = self.pulsePairOp(dataOut, dataOut.utctime)
7335 7335 dataOut.flagNoData = True
7336 7336
7337 7337 if self.__dataReady:
7338 7338 dataOut.nCohInt *= self.n
7339 7339 dataOut.dataPP_POW = data_intensity # S
7340 7340 dataOut.dataPP_POWER = data_power # P
7341 7341 dataOut.dataPP_DOP = data_velocity
7342 7342 dataOut.dataPP_SNR = data_snrPP
7343 7343 dataOut.dataPP_WIDTH = data_specwidth
7344 7344 dataOut.PRFbyAngle = self.n #number of PRFs per rotated angle, equivalent to one time step.
7345 7345 dataOut.utctime = avgdatatime
7346 7346 dataOut.flagNoData = False
7347 7347 return dataOut
7348 7348
7349 7349
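# The following is an illustrative, self-contained sketch (not wired into the chain) of the
# pulse-pair moment estimates computed by PulsePairVoltage.pushData above. The function name,
# its arguments and the externally supplied noise level are assumptions for this example only;
# the operation above estimates the noise internally with hildebrand_sekhon and also handles
# DC removal, coherent-integration factors and the signed spectral width, which are omitted here.
def _pulse_pair_moments_sketch(voltages, ipp_sec, wavelength, noise):
    '''voltages: complex array (nChannels, nProfiles, nHeights); ipp_sec: effective IPP in s;
    wavelength: radar wavelength in m; noise: noise power per channel, shape (nChannels,).'''
    nprof = voltages.shape[1]
    lag0 = numpy.sum(numpy.abs(voltages)**2, axis=1)                       # total power per (channel, height)
    lag1 = numpy.sum(voltages[:, :-1, :]*numpy.conjugate(voltages[:, 1:, :]), axis=1)
    power = lag0/nprof                                                     # mean received power P = S + N
    signal = power - noise[:, None]                                        # estimated signal power S
    velocity = -(wavelength/2.0)*numpy.angle(lag1)/(2.0*numpy.pi*ipp_sec)  # radial velocity (m/s)
    ratio = signal/(numpy.abs(lag1)/(nprof - 1))                           # S/|R1|, clipped to >= 1 below
    width = (wavelength/(2.0*numpy.sqrt(2.0)*numpy.pi*ipp_sec))*numpy.sqrt(numpy.log(numpy.maximum(ratio, 1.0)))
    return power, signal, velocity, width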
7350 7350
7351 7351 # import collections
7352 7352 # from scipy.stats import mode
7353 7353 #
7354 7354 # class Synchronize(Operation):
7355 7355 #
7356 7356 # isConfig = False
7357 7357 # __profIndex = 0
7358 7358 #
7359 7359 # def __init__(self, **kwargs):
7360 7360 #
7361 7361 # Operation.__init__(self, **kwargs)
7362 7362 # # self.isConfig = False
7363 7363 # self.__powBuffer = None
7364 7364 # self.__startIndex = 0
7365 7365 # self.__pulseFound = False
7366 7366 #
7367 7367 # def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
7368 7368 #
7369 7369 # #Read data
7370 7370 #
7371 7371 # powerdB = dataOut.getPower(channel = channel)
7372 7372 # noisedB = dataOut.getNoise(channel = channel)[0]
7373 7373 #
7374 7374 # self.__powBuffer.extend(powerdB.flatten())
7375 7375 #
7376 7376 # dataArray = numpy.array(self.__powBuffer)
7377 7377 #
7378 7378 # filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
7379 7379 #
7380 7380 # maxValue = numpy.nanmax(filteredPower)
7381 7381 #
7382 7382 # if maxValue < noisedB + 10:
7383 7383 # #No transmission pulse was found
7384 7384 # return None
7385 7385 #
7386 7386 # maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
7387 7387 #
7388 7388 # if len(maxValuesIndex) < 2:
7389 7389 # #Only a single one-baud transmission pulse was found, waiting for the next TX
7390 7390 # return None
7391 7391 #
7392 7392 # phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
7393 7393 #
7394 7394 # #Keep only values spaced by nSamples
7395 7395 # pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
7396 7396 #
7397 7397 # if len(pulseIndex) < 2:
7398 7398 # #Only one transmission pulse wider than 1 was found
7399 7399 # return None
7400 7400 #
7401 7401 # spacing = pulseIndex[1:] - pulseIndex[:-1]
7402 7402 #
7403 7403 # #remove signals closer than 10 units or samples
7404 7404 # #(there should be no IPP shorter than 10 units)
7405 7405 #
7406 7406 # realIndex = numpy.where(spacing > 10 )[0]
7407 7407 #
7408 7408 # if len(realIndex) < 2:
7409 7409 # #Only one transmission pulse wider than 1 was found
7410 7410 # return None
7411 7411 #
7412 7412 # #Remove wide pulses (keep only the difference between IPPs)
7413 7413 # realPulseIndex = pulseIndex[realIndex]
7414 7414 #
7415 7415 # period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
7416 7416 #
7417 7417 # print "IPP = %d samples" %period
7418 7418 #
7419 7419 # self.__newNSamples = dataOut.nHeights #int(period)
7420 7420 # self.__startIndex = int(realPulseIndex[0])
7421 7421 #
7422 7422 # return 1
7423 7423 #
7424 7424 #
7425 7425 # def setup(self, nSamples, nChannels, buffer_size = 4):
7426 7426 #
7427 7427 # self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
7428 7428 # maxlen = buffer_size*nSamples)
7429 7429 #
7430 7430 # bufferList = []
7431 7431 #
7432 7432 # for i in range(nChannels):
7433 7433 # bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
7434 7434 # maxlen = buffer_size*nSamples)
7435 7435 #
7436 7436 # bufferList.append(bufferByChannel)
7437 7437 #
7438 7438 # self.__nSamples = nSamples
7439 7439 # self.__nChannels = nChannels
7440 7440 # self.__bufferList = bufferList
7441 7441 #
7442 7442 # def run(self, dataOut, channel = 0):
7443 7443 #
7444 7444 # if not self.isConfig:
7445 7445 # nSamples = dataOut.nHeights
7446 7446 # nChannels = dataOut.nChannels
7447 7447 # self.setup(nSamples, nChannels)
7448 7448 # self.isConfig = True
7449 7449 #
7450 7450 # #Append new data to internal buffer
7451 7451 # for thisChannel in range(self.__nChannels):
7452 7452 # bufferByChannel = self.__bufferList[thisChannel]
7453 7453 # bufferByChannel.extend(dataOut.data[thisChannel])
7454 7454 #
7455 7455 # if self.__pulseFound:
7456 7456 # self.__startIndex -= self.__nSamples
7457 7457 #
7458 7458 # #Finding Tx Pulse
7459 7459 # if not self.__pulseFound:
7460 7460 # indexFound = self.__findTxPulse(dataOut, channel)
7461 7461 #
7462 7462 # if indexFound == None:
7463 7463 # dataOut.flagNoData = True
7464 7464 # return
7465 7465 #
7466 7466 # self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
7467 7467 # self.__pulseFound = True
7468 7468 # self.__startIndex = indexFound
7469 7469 #
7470 7470 # #If pulse was found ...
7471 7471 # for thisChannel in range(self.__nChannels):
7472 7472 # bufferByChannel = self.__bufferList[thisChannel]
7473 7473 # #print self.__startIndex
7474 7474 # x = numpy.array(bufferByChannel)
7475 7475 # self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
7476 7476 #
7477 7477 # deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
7478 7478 # dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
7479 7479 # # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
7480 7480 #
7481 7481 # dataOut.data = self.__arrayBuffer
7482 7482 #
7483 7483 # self.__startIndex += self.__newNSamples
7484 7484 #
7485 7485 # return
7486
7487 class ToLilBlock(Operation):
7488
7489 '''
7490 Operation to split a buffered data block into smaller blocks of nProfilesOut profiles each
7491
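Example (following the conventions of the other operations in this file; the
processing-unit variable name is illustrative):

op = proc_unit.addOperation(name='ToLilBlock', optype='other')
op.addParameter(name='nProfilesOut', value='100', format='int')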
7492 '''
7493
7494 isConfig = False
7495 n = None
7496 __timeInterval = None
7497 __profIndex = 0
7498 __byTime = False
7499 __dataReady = False
7500 __buffer_data = []
7501 __buffer_times = []
7502 __initime = None
7503 __count_exec = 0
7504 __profIndex = 0
7505 buffer = None
7506 lenProfileOut = 1
7507 init_prof = 0
7508 end_prof = 0
7509 n_profiles = 0
7510 first_utcBlock = None
7511 __dh = 0
7512
7513
7514 def __init__(self, **kwargs):
7515
7516 Operation.__init__(self, **kwargs)
7517
7518 self.isConfig = False
7519
7520
7521 def setup(self,dataOut):
7522
7523 self.init_prof = 0
7524 self.end_prof = 0
7525
7526 def releaseBlock(self, dataOut):
7527
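# Emit the next slice of lenProfileOut profiles from the buffered block; once the whole
# block has been released, reset the indices, drop the buffer and mark buffer_empty so
# that run() starts buffering a new incoming block.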
7528 if self.n % self.lenProfileOut != 0:
7529 raise ValueError("lenProfileOut %d must be submultiple of nProfiles %d" %(self.lenProfileOut, self.n_profiles))
7530 return None
7531
7532 dataOut.data = self.buffer[:,self.init_prof:self.end_prof,:] #ch, prof, alt
7533 self.init_prof = self.end_prof
7534 self.end_prof += self.lenProfileOut
7535 if self.init_prof == self.n:
7536 #if self.end_prof >= (self.n +self.lenProfileOut):
7537 self.init_prof = 0
7538 self.__profIndex = 0
7539 self.buffer = None
7540 dataOut.buffer_empty = True
7541 #print("done")
7542 return dataOut
7543
7544
7545 def run(self, dataOut, nProfilesOut=1):
7546
7547 self.n = dataOut.nProfiles
7548 self.nChannels = dataOut.nChannels
7549 self.nHeights = dataOut.nHeights
7550
7551 #print(dataOut.data.shape)
7552 #exit(1)
7553 if not self.isConfig:
7554 self.setup(dataOut)
7555 self.isConfig = True
7556
7557 dataBlock = None
7558
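# Two modes: while a block is buffered (buffer_empty is False) keep releasing sub-blocks
# of nProfilesOut profiles; otherwise copy the incoming block into the buffer, flag it as
# not empty and set useInputBuffer, presumably so the current input block stays available
# while it is consumed in slices.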
7559 if not dataOut.buffer_empty:
7560 if self.init_prof == 0:
7561 self.lenProfileOut = nProfilesOut
7562 dataOut.flagNoData = False
7563 self.init_prof = 0
7564 self.end_prof = self.lenProfileOut
7565 dataOut.nProfiles = self.lenProfileOut
7566 dataOut.error = False
7567
7568 dataOut.flagNoData = False
7569
7570 return self.releaseBlock(dataOut)
7571
7572 dataOut.flagNoData = True
7573 self.buffer = dataOut.data.copy()
7574 dataOut.error = False
7575 dataOut.useInputBuffer = True
7576 dataOut.buffer_empty = False
7577
7578
7579 return dataOut
\ No newline at end of file