Writing Unit for Madrigal decorated (just for Python 2.x)
George Yong -
r1206:59caf7a2130e
@@ -1,642 +1,645
1 1 '''
2 2 Created on Aug 1, 2017
3 3
4 4 @author: Juan C. Espinoza
5 5 '''
6 6
7 7 import os
8 8 import sys
9 9 import time
10 10 import json
11 11 import glob
12 12 import datetime
13 13
14 14 import numpy
15 15 import h5py
16
17 from schainpy.model.io.jroIO_base import JRODataReader
16 from schainpy.model.io.jroIO_base import LOCALTIME, JRODataReader, JRODataWriter
18 17 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
19 18 from schainpy.model.data.jrodata import Parameters
20 19 from schainpy.utils import log
21 20
22 21 try:
23 22 import madrigal.cedar
24 23 except:
25 24 log.warning(
26 25 'You should install "madrigal library" module if you want to read/write Madrigal data'
27 26 )
28 27
29 28 DEF_CATALOG = {
30 29 'principleInvestigator': 'Marco Milla',
31 'expPurpose': None,
32 'cycleTime': None,
33 'correlativeExp': None,
34 'sciRemarks': None,
35 'instRemarks': None
30 'expPurpose': '',
31 'cycleTime': '',
32 'correlativeExp': '',
33 'sciRemarks': '',
34 'instRemarks': ''
36 35 }
36
37 37 DEF_HEADER = {
38 'kindatDesc': None,
38 'kindatDesc': '',
39 39 'analyst': 'Jicamarca User',
40 'comments': None,
41 'history': None
40 'comments': '',
41 'history': ''
42 42 }
43
43 44 MNEMONICS = {
44 45 10: 'jro',
45 46 11: 'jbr',
46 47 840: 'jul',
47 48 13: 'jas',
48 49 1000: 'pbr',
49 50 1001: 'hbr',
50 51 1002: 'obr',
52 400: 'clr'
53
51 54 }
52 55
53 56 UT1970 = datetime.datetime(1970, 1, 1) - datetime.timedelta(seconds=time.timezone)
54 57
55 58 def load_json(obj):
56 59 '''
57 60 Parse json as string instead of unicode
58 61 '''
59 62
60 63 if isinstance(obj, str):
61 64 iterable = json.loads(obj)
62 65 else:
63 66 iterable = obj
64 67
65 68 if isinstance(iterable, dict):
66 return {str(k): load_json(v) if isinstance(v, dict) else str(v) if isinstance(v, str) else v
69 return {str(k): load_json(v) if isinstance(v, dict) else str(v) if isinstance(v, (str,unicode)) else v
67 70 for k, v in list(iterable.items())}
68 71 elif isinstance(iterable, (list, tuple)):
69 72 return [str(v) if isinstance(v, str) else v for v in iterable]
70 73
71 74 return iterable
72 75
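As a quick illustration of the helper above, a minimal sketch (the values are taken from the default kwargs used later in MADReader.setup) of how load_json normalizes either a JSON string or an already-parsed object into plain-str keys and values:

    one_d = load_json('{"GDLATR": "lat", "GDLONR": "lon"}')   # JSON string input
    two_d = load_json({"GDALT": "heightList"})                # already-parsed input
    print(one_d['GDLATR'])   # -> 'lat'
    print(two_d)             # -> {'GDALT': 'heightList'}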
73 76 @MPDecorator
74 77 class MADReader(JRODataReader, ProcessingUnit):
75 78
76 79 def __init__(self):
77 80
78 81 ProcessingUnit.__init__(self)
79 82
80 83 self.dataOut = Parameters()
81 84 self.counter_records = 0
82 85 self.nrecords = None
83 86 self.flagNoMoreFiles = 0
84 87 self.isConfig = False
85 88 self.filename = None
86 89 self.intervals = set()
87 90
88 91 def setup(self,
89 92 path=None,
90 93 startDate=None,
91 94 endDate=None,
92 95 format=None,
93 96 startTime=datetime.time(0, 0, 0),
94 97 endTime=datetime.time(23, 59, 59),
95 98 **kwargs):
96 99
97 100 self.path = path
98 101 self.startDate = startDate
99 102 self.endDate = endDate
100 103 self.startTime = startTime
101 104 self.endTime = endTime
102 105 self.datatime = datetime.datetime(1900,1,1)
103 106 self.oneDDict = load_json(kwargs.get('oneDDict',
104 107 "{\"GDLATR\":\"lat\", \"GDLONR\":\"lon\"}"))
105 108 self.twoDDict = load_json(kwargs.get('twoDDict',
106 109 "{\"GDALT\": \"heightList\"}"))
107 110 self.ind2DList = load_json(kwargs.get('ind2DList',
108 111 "[\"GDALT\"]"))
109 112 if self.path is None:
110 113 raise ValueError('The path is not valid')
111 114
112 115 if format is None:
113 116 raise ValueError('The format is not valid, choose simple or hdf5')
114 117 elif format.lower() in ('simple', 'txt'):
115 118 self.ext = '.txt'
116 119 elif format.lower() in ('cedar',):
117 120 self.ext = '.001'
118 121 else:
119 122 self.ext = '.hdf5'
120 123
121 124 self.search_files(self.path)
122 125 self.fileId = 0
123 126
124 127 if not self.fileList:
125 128 raise Warning('There are no files matching these dates in the folder: {}. \n Check startDate and endDate'.format(path))
126 129
127 130 self.setNextFile()
128 131
129 132 def search_files(self, path):
130 133 '''
131 134 Searching for madrigal files in path
132 135 Creating a list of files to process included in [startDate,endDate]
133 136
134 137 Input:
135 138 path - Path to find files
136 139 '''
137 140
138 141 log.log('Searching files {} in {} '.format(self.ext, path), 'MADReader')
139 142 foldercounter = 0
140 143 fileList0 = glob.glob1(path, '*{}'.format(self.ext))
141 144 fileList0.sort()
142 145
143 146 self.fileList = []
144 147 self.dateFileList = []
145 148
146 149 startDate = self.startDate - datetime.timedelta(1)
147 150 endDate = self.endDate + datetime.timedelta(1)
148 151
149 152 for thisFile in fileList0:
150 153 year = thisFile[3:7]
151 154 if not year.isdigit():
152 155 continue
153 156
154 157 month = thisFile[7:9]
155 158 if not month.isdigit():
156 159 continue
157 160
158 161 day = thisFile[9:11]
159 162 if not day.isdigit():
160 163 continue
161 164
162 165 year, month, day = int(year), int(month), int(day)
163 166 dateFile = datetime.date(year, month, day)
164 167
165 168 if (startDate > dateFile) or (endDate < dateFile):
166 169 continue
167 170
168 171 self.fileList.append(thisFile)
169 172 self.dateFileList.append(dateFile)
170 173
171 174 return
172 175
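For reference, a short sketch of the file-name layout that search_files assumes (the example name below is hypothetical): a three-character station mnemonic followed by the date as YYYYMMDD, so characters [3:7], [7:9] and [9:11] parse as year, month and day:

    name = 'jro20170801_000000.hdf5'                       # hypothetical Madrigal file name
    year, month, day = int(name[3:7]), int(name[7:9]), int(name[9:11])
    print(datetime.date(year, month, day))                 # -> 2017-08-01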
173 176 def parseHeader(self):
174 177 '''
175 178 '''
176 179
177 180 self.output = {}
178 181 self.version = '2'
179 182 s_parameters = None
180 183 if self.ext == '.txt':
181 184 self.parameters = [s.strip().lower() for s in self.fp.readline().strip().split(' ') if s]
182 185 elif self.ext == '.hdf5':
183 186 metadata = self.fp['Metadata']
184 187 data = self.fp['Data']['Array Layout']
185 188 if 'Independent Spatial Parameters' in metadata:
186 189 s_parameters = [s[0].lower() for s in metadata['Independent Spatial Parameters']]
187 190 self.version = '3'
188 191 one = [s[0].lower() for s in data['1D Parameters']['Data Parameters']]
189 192 one_d = [1 for s in one]
190 193 two = [s[0].lower() for s in data['2D Parameters']['Data Parameters']]
191 194 two_d = [2 for s in two]
192 195 self.parameters = one + two
193 196 self.parameters_d = one_d + two_d
194 197
195 log.success('Parameters found: {}'.format(','.join(str(self.parameters))),
198 log.success('Parameters found: {}'.format(self.parameters),
196 199 'MADReader')
197 200 if s_parameters:
198 201 log.success('Spatial parameters: {}'.format(s_parameters),
199 202 'MADReader')
200 203
201 204 for param in list(self.oneDDict.keys()):
202 205 if param.lower() not in self.parameters:
203 206 log.warning(
204 207 'Parameter {} not found, it will be ignored'.format(
205 208 param),
206 209 'MADReader')
207 210 self.oneDDict.pop(param, None)
208 211
209 212 for param, value in list(self.twoDDict.items()):
210 213 if param.lower() not in self.parameters:
211 214 log.warning(
212 215 'Parameter {} not found, it will be ignored'.format(
213 216 param),
214 217 'MADReader')
215 218 self.twoDDict.pop(param, None)
216 219 continue
217 220 if isinstance(value, list):
218 221 if value[0] not in self.output:
219 222 self.output[value[0]] = []
220 223 self.output[value[0]].append(None)
221 224
222 225 def parseData(self):
223 226 '''
224 227 '''
225 228
226 229 if self.ext == '.txt':
227 230 self.data = numpy.genfromtxt(self.fp, missing_values=('missing'))
228 231 self.nrecords = self.data.shape[0]
229 232 self.ranges = numpy.unique(self.data[:,self.parameters.index(self.ind2DList[0].lower())])
230 233 elif self.ext == '.hdf5':
231 234 self.data = self.fp['Data']['Array Layout']
232 235 self.nrecords = len(self.data['timestamps'].value)
233 236 self.ranges = self.data['range'].value
234 237
235 238 def setNextFile(self):
236 239 '''
237 240 '''
238 241
239 242 file_id = self.fileId
240 243
241 244 if file_id == len(self.fileList):
242 245 log.success('No more files', 'MADReader')
243 246 self.flagNoMoreFiles = 1
244 247 return 0
245 248
246 249 log.success(
247 250 'Opening: {}'.format(self.fileList[file_id]),
248 251 'MADReader'
249 252 )
250 253
251 254 filename = os.path.join(self.path, self.fileList[file_id])
252 255
253 256 if self.filename is not None:
254 257 self.fp.close()
255 258
256 259 self.filename = filename
257 260 self.filedate = self.dateFileList[file_id]
258 261
259 262 if self.ext=='.hdf5':
260 263 self.fp = h5py.File(self.filename, 'r')
261 264 else:
262 265 self.fp = open(self.filename, 'rb')
263 266
264 267 self.parseHeader()
265 268 self.parseData()
266 269 self.sizeOfFile = os.path.getsize(self.filename)
267 270 self.counter_records = 0
268 271 self.flagIsNewFile = 0
269 272 self.fileId += 1
270 273
271 274 return 1
272 275
273 276 def readNextBlock(self):
274 277
275 278 while True:
276 279 self.flagDiscontinuousBlock = 0
277 280 if self.flagIsNewFile:
278 281 if not self.setNextFile():
279 282 return 0
280 283
281 284 self.readBlock()
282 285
283 286 if (self.datatime < datetime.datetime.combine(self.startDate, self.startTime)) or \
284 287 (self.datatime > datetime.datetime.combine(self.endDate, self.endTime)):
285 288 log.warning(
286 289 'Reading Record No. {}/{} -> {} [Skipping]'.format(
287 290 self.counter_records,
288 291 self.nrecords,
289 292 self.datatime.ctime()),
290 293 'MADReader')
291 294 continue
292 295 break
293 296
294 297 log.log(
295 298 'Reading Record No. {}/{} -> {}'.format(
296 299 self.counter_records,
297 300 self.nrecords,
298 301 self.datatime.ctime()),
299 302 'MADReader')
300 303
301 304 return 1
302 305
303 306 def readBlock(self):
304 307 '''
305 308 '''
306 309 dum = []
307 310 if self.ext == '.txt':
308 311 dt = self.data[self.counter_records][:6].astype(int)
309 312 if datetime.datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5]).date() > self.datatime.date():
310 313 self.flagDiscontinuousBlock = 1
311 314 self.datatime = datetime.datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
312 315 while True:
313 316 dt = self.data[self.counter_records][:6].astype(int)
314 317 datatime = datetime.datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
315 318 if datatime == self.datatime:
316 319 dum.append(self.data[self.counter_records])
317 320 self.counter_records += 1
318 321 if self.counter_records == self.nrecords:
319 322 self.flagIsNewFile = True
320 323 break
321 324 continue
322 325 self.intervals.add((datatime-self.datatime).seconds)
323 326 break
324 327 elif self.ext == '.hdf5':
325 328 datatime = datetime.datetime.utcfromtimestamp(
326 329 self.data['timestamps'][self.counter_records])
327 330 nHeights = len(self.ranges)
328 331 for n, param in enumerate(self.parameters):
329 332 if self.parameters_d[n] == 1:
330 333 dum.append(numpy.ones(nHeights)*self.data['1D Parameters'][param][self.counter_records])
331 334 else:
332 335 if self.version == '2':
333 336 dum.append(self.data['2D Parameters'][param][self.counter_records])
334 337 else:
335 338 tmp = self.data['2D Parameters'][param].value.T
336 339 dum.append(tmp[self.counter_records])
337 340 self.intervals.add((datatime-self.datatime).seconds)
338 341 if datatime.date()>self.datatime.date():
339 342 self.flagDiscontinuousBlock = 1
340 343 self.datatime = datatime
341 344 self.counter_records += 1
342 345 if self.counter_records == self.nrecords:
343 346 self.flagIsNewFile = True
344 347
345 348 self.buffer = numpy.array(dum)
346 349 return
347 350
348 351 def set_output(self):
349 352 '''
350 353 Storing data from buffer to dataOut object
351 354 '''
352 355
353 356 parameters = [None for __ in self.parameters]
354 357
355 358 for param, attr in list(self.oneDDict.items()):
356 359 x = self.parameters.index(param.lower())
357 360 setattr(self.dataOut, attr, self.buffer[0][x])
358 361
359 for param, value in list(self.twoDDict.items()):
362 for param, value in list(self.twoDDict.items()):
360 363 x = self.parameters.index(param.lower())
361 364 if self.ext == '.txt':
362 365 y = self.parameters.index(self.ind2DList[0].lower())
363 366 ranges = self.buffer[:,y]
364 if self.ranges.size == ranges.size:
365 continue
367 #if self.ranges.size == ranges.size:
368 # continue
366 369 index = numpy.where(numpy.in1d(self.ranges, ranges))[0]
367 370 dummy = numpy.zeros(self.ranges.shape) + numpy.nan
368 371 dummy[index] = self.buffer[:,x]
369 372 else:
370 373 dummy = self.buffer[x]
371
374
372 375 if isinstance(value, str):
373 376 if value not in self.ind2DList:
374 377 setattr(self.dataOut, value, dummy.reshape(1,-1))
375 378 elif isinstance(value, list):
376 379 self.output[value[0]][value[1]] = dummy
377 380 parameters[value[1]] = param
378 381
379 382 for key, value in list(self.output.items()):
380 383 setattr(self.dataOut, key, numpy.array(value))
381 384
382 385 self.dataOut.parameters = [s for s in parameters if s]
383 386 self.dataOut.heightList = self.ranges
384 387 self.dataOut.utctime = (self.datatime - datetime.datetime(1970, 1, 1)).total_seconds()
385 388 self.dataOut.utctimeInit = self.dataOut.utctime
386 389 self.dataOut.paramInterval = min(self.intervals)
387 390 self.dataOut.useLocalTime = False
388 391 self.dataOut.flagNoData = False
389 392 self.dataOut.nrecords = self.nrecords
390 393 self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock
391 394
392 395 def getData(self):
393 396 '''
394 397 Storing data from data buffer to dataOut object
395 398 '''
396 399 if self.flagNoMoreFiles:
397 400 self.dataOut.flagNoData = True
398 401 self.dataOut.error = 'No file left to process'
399 402 return 0
400 403
401 404 if not self.readNextBlock():
402 405 self.dataOut.flagNoData = True
403 406 return 0
404 407
405 408 self.set_output()
406 409
407 410 return 1
408 411
409
412 @MPDecorator
410 413 class MADWriter(Operation):
411 414
412 415 missing = -32767
413 416
414 def __init__(self, **kwargs):
417 def __init__(self):
415 418
416 Operation.__init__(self, **kwargs)
419 Operation.__init__(self)
417 420 self.dataOut = Parameters()
418 421 self.counter = 0
419 422 self.path = None
420 423 self.fp = None
421 424
422 425 def run(self, dataOut, path, oneDDict, ind2DList='[]', twoDDict='{}',
423 426 metadata='{}', format='cedar', **kwargs):
424 427 '''
425 428 Inputs:
426 429 path - path where files will be created
427 430 oneDDict - json of one-dimensional parameters in record where keys
428 431 are Madrigal codes (integers or mnemonics) and values are the corresponding
429 432 dataOut attribute e.g: {
430 433 'gdlatr': 'lat',
431 434 'gdlonr': 'lon',
432 435 'gdlat2':'lat',
433 436 'glon2':'lon'}
434 437 ind2DList - list of independent spatial two-dimensional parameters e.g:
435 438 ['heightList']
436 439 twoDDict - json of two-dimensional parameters in record where keys
437 440 are Madrigal codes (integers or mnemonics) and values are the corresponding
438 441 dataOut attribute; if it is a multidimensional array, specify it as a tuple
439 442 ('attr', pos) e.g: {
440 443 'gdalt': 'heightList',
441 444 'vn1p2': ('data_output', 0),
442 445 'vn2p2': ('data_output', 1),
443 446 'vn3': ('data_output', 2),
444 447 'snl': ('data_SNR', 'db')
445 448 }
446 449 metadata - json of madrigal metadata (kinst, kindat, catalog and header)
447 450 '''
448 451 if not self.isConfig:
449 452 self.setup(path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs)
450 453 self.isConfig = True
451 454
452 455 self.dataOut = dataOut
453 456 self.putData()
454 return
457 return 1
455 458
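To make the run() docstring above concrete, a minimal configuration sketch: the oneDDict/twoDDict strings are built from the examples given in the docstring, while the kindat code is a hypothetical value for illustration only (kinst 10 corresponds to 'jro' in MNEMONICS); tuples are written as JSON lists, which load_json and writeBlock both accept:

    one_d = '{"gdlatr": "lat", "gdlonr": "lon"}'
    two_d = ('{"gdalt": "heightList", "vn1p2": ["data_output", 0], '
             '"vn2p2": ["data_output", 1], "vn3": ["data_output", 2]}')
    meta  = '{"kinst": 10, "kindat": 1910}'               # kindat value is hypothetical
    # These strings are what run()/setup() expect for the oneDDict, twoDDict and
    # metadata parameters of MADWriter.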
456 459 def setup(self, path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs):
457 460 '''
458 461 Configure Operation
459 462 '''
460 463
461 464 self.path = path
462 465 self.blocks = kwargs.get('blocks', None)
463 466 self.counter = 0
464 467 self.oneDDict = load_json(oneDDict)
465 468 self.twoDDict = load_json(twoDDict)
466 469 self.ind2DList = load_json(ind2DList)
467 470 meta = load_json(metadata)
468 471 self.kinst = meta.get('kinst')
469 472 self.kindat = meta.get('kindat')
470 473 self.catalog = meta.get('catalog', DEF_CATALOG)
471 474 self.header = meta.get('header', DEF_HEADER)
472 475 if format == 'cedar':
473 476 self.ext = '.dat'
474 477 self.extra_args = {}
475 478 elif format == 'hdf5':
476 479 self.ext = '.hdf5'
477 480 self.extra_args = {'ind2DList': self.ind2DList}
478 481
479 482 self.keys = [k.lower() for k in self.twoDDict]
480 483 if 'range' in self.keys:
481 484 self.keys.remove('range')
482 485 if 'gdalt' in self.keys:
483 486 self.keys.remove('gdalt')
484 487
485 488 def setFile(self):
486 489 '''
487 490 Create new cedar file object
488 491 '''
489 492
490 493 self.mnemonic = MNEMONICS[self.kinst] #TODO get mnemonic from madrigal
491 494 date = datetime.datetime.utcfromtimestamp(self.dataOut.utctime)
492 495
493 496 filename = '{}{}{}'.format(self.mnemonic,
494 497 date.strftime('%Y%m%d_%H%M%S'),
495 498 self.ext)
496 499
497 500 self.fullname = os.path.join(self.path, filename)
498 501
499 502 if os.path.isfile(self.fullname) :
500 503 log.warning(
501 504 'Destination file {} already exists, previous file deleted.'.format(
502 505 self.fullname),
503 506 'MADWriter')
504 507 os.remove(self.fullname)
505 508
506 509 try:
507 510 log.success(
508 511 'Creating file: {}'.format(self.fullname),
509 512 'MADWriter')
510 513 self.fp = madrigal.cedar.MadrigalCedarFile(self.fullname, True)
511 514 except ValueError as e:
512 515 log.error(
513 516 'Impossible to create a cedar object with "madrigal.cedar.MadrigalCedarFile"',
514 517 'MADWriter')
515 518 return
516 519
517 520 return 1
518 521
519 522 def writeBlock(self):
520 523 '''
521 524 Add data records to cedar file taking data from oneDDict and twoDDict
522 525 attributes.
523 526 Allowed parameters in: parcodes.tab
524 527 '''
525 528
526 529 startTime = datetime.datetime.utcfromtimestamp(self.dataOut.utctime)
527 530 endTime = startTime + datetime.timedelta(seconds=self.dataOut.paramInterval)
528 531 heights = self.dataOut.heightList
529 532
530 533 if self.ext == '.dat':
531 534 for key, value in list(self.twoDDict.items()):
532 535 if isinstance(value, str):
533 536 data = getattr(self.dataOut, value)
534 537 invalid = numpy.isnan(data)
535 538 data[invalid] = self.missing
536 539 elif isinstance(value, (tuple, list)):
537 540 attr, key = value
538 541 data = getattr(self.dataOut, attr)
539 542 invalid = numpy.isnan(data)
540 543 data[invalid] = self.missing
541 544
542 545 out = {}
543 546 for key, value in list(self.twoDDict.items()):
544 547 key = key.lower()
545 548 if isinstance(value, str):
546 549 if 'db' in value.lower():
547 550 tmp = getattr(self.dataOut, value.replace('_db', ''))
548 551 SNRavg = numpy.average(tmp, axis=0)
549 552 tmp = 10*numpy.log10(SNRavg)
550 553 else:
551 554 tmp = getattr(self.dataOut, value)
552 555 out[key] = tmp.flatten()
553 556 elif isinstance(value, (tuple, list)):
554 557 attr, x = value
555 558 data = getattr(self.dataOut, attr)
556 559 out[key] = data[int(x)]
557 560
558 561 a = numpy.array([out[k] for k in self.keys])
559 562 nrows = numpy.array([numpy.isnan(a[:, x]).all() for x in range(len(heights))])
560 563 index = numpy.where(nrows == False)[0]
561 564
562 565 rec = madrigal.cedar.MadrigalDataRecord(
563 566 self.kinst,
564 567 self.kindat,
565 568 startTime.year,
566 569 startTime.month,
567 570 startTime.day,
568 571 startTime.hour,
569 572 startTime.minute,
570 573 startTime.second,
571 574 startTime.microsecond/10000,
572 575 endTime.year,
573 576 endTime.month,
574 577 endTime.day,
575 578 endTime.hour,
576 579 endTime.minute,
577 580 endTime.second,
578 581 endTime.microsecond/10000,
579 582 list(self.oneDDict.keys()),
580 583 list(self.twoDDict.keys()),
581 584 len(index),
582 585 **self.extra_args
583 586 )
584 587
585 588 # Setting 1d values
586 589 for key in self.oneDDict:
587 590 rec.set1D(key, getattr(self.dataOut, self.oneDDict[key]))
588 591
589 592 # Setting 2d values
590 593 nrec = 0
591 594 for n in index:
592 595 for key in out:
593 596 rec.set2D(key, nrec, out[key][n])
594 597 nrec += 1
595 598
596 599 self.fp.append(rec)
597 600 if self.ext == '.hdf5' and self.counter % 500 == 0 and self.counter > 0:
598 601 self.fp.dump()
599 if self.counter % 100 == 0 and self.counter > 0:
602 if self.counter % 20 == 0 and self.counter > 0:
600 603 log.log(
601 604 'Writing {} records'.format(
602 605 self.counter),
603 606 'MADWriter')
604 607
605 608 def setHeader(self):
606 609 '''
607 610 Create and add catalog and header to cedar file
608 611 '''
609 612
610 613 log.success('Closing file {}'.format(self.fullname), 'MADWriter')
611 614
612 615 if self.ext == '.dat':
613 616 self.fp.write()
614 617 else:
615 618 self.fp.dump()
616 619 self.fp.close()
617 620
618 621 header = madrigal.cedar.CatalogHeaderCreator(self.fullname)
619 622 header.createCatalog(**self.catalog)
620 623 header.createHeader(**self.header)
621 624 header.write()
622 625
623 626 def putData(self):
624 627
625 628 if self.dataOut.flagNoData:
626 629 return 0
627 630
628 631 if self.dataOut.flagDiscontinuousBlock or self.counter == self.blocks:
629 632 if self.counter > 0:
630 633 self.setHeader()
631 634 self.counter = 0
632 635
633 636 if self.counter == 0:
634 637 self.setFile()
635 638
636 639 self.writeBlock()
637 640 self.counter += 1
638 641
639 642 def close(self):
640 643
641 644 if self.counter > 0:
642 645 self.setHeader() No newline at end of file
@@ -1,384 +1,390
1 1 '''
2 2 Updated for multiprocessing
3 3 Author : Sergio Cortez
4 4 Jan 2018
5 5 Abstract:
6 6 Base class for processing units and operations. A decorator provides multiprocessing features and interconnects the created processes.
7 7 The arguments (kwargs) sent from the controller are parsed and filtered via the decorator for each processing unit or operation instantiated.
8 8 The decorator also handles the methods inside the processing unit that are called from the main script (not as operations) (OPERATION -> type ='self').
9 9
10 10 Based on:
11 11 $Author: murco $
12 12 $Id: jroproc_base.py 1 2012-11-12 18:56:07Z murco $
13 13 '''
14 14
15 15 import inspect
16 16 import zmq
17 17 import time
18 18 import pickle
19 19 import os
20 20 from multiprocessing import Process
21 21 from zmq.utils.monitor import recv_monitor_message
22 22
23 23 from schainpy.utils import log
24 24
25 25
26 26 class ProcessingUnit(object):
27 27
28 28 """
29 29 Update - Jan 2018 - MULTIPROCESSING
30 30 All the "call" methods present in the previous base were removed.
31 31 The majority of operations are independent processes, thus
32 32 the decorator is in charge of communicating the operation processes
33 33 with the processing unit via IPC.
34 34
35 35 The constructor does not receive any arguments. The remaining methods
36 36 are related to the operations to execute.
37 37
38 38
39 39 """
40 40
41 41 def __init__(self):
42 42
43 43 self.dataIn = None
44 44 self.dataOut = None
45 45 self.isConfig = False
46 46 self.operations = []
47 47 self.plots = []
48 48
49 49 def getAllowedArgs(self):
50 50 if hasattr(self, '__attrs__'):
51 51 return self.__attrs__
52 52 else:
53 53 return inspect.getargspec(self.run).args
54 54
55 55 def addOperation(self, conf, operation):
56 56 """
57 57 This method is used in the controller and updates the dictionary containing the operations to execute. The dict
58 58 holds the id of the operation process (for IPC purposes).
59 59
60 60 Adds an object of type "Operation" (opObj) to the list of objects "self.objectList" and returns the
61 61 identifier associated with this object.
62 62
63 63 Input:
64 64
65 65 object : object of the "Operation" class
66 66
67 67 Return:
68 68
69 69 objId : identifier of the object, needed to communicate with the master (procUnit)
70 70 """
71 71
72 72 self.operations.append(
73 73 (operation, conf.type, conf.id, conf.getKwargs()))
74 74
75 75 if 'plot' in self.name.lower():
76 76 self.plots.append(operation.CODE)
77 77
78 78 def getOperationObj(self, objId):
79 79
80 80 if objId not in list(self.operations.keys()):
81 81 return None
82 82
83 83 return self.operations[objId]
84 84
85 85 def operation(self, **kwargs):
86 86 """
87 87 Direct operation on the data (dataOut.data). The values of the attributes of the
88 88 dataOut object must be updated.
89 89
90 90 Input:
91 91
92 92 **kwargs : dictionary of arguments for the function to execute
93 93 """
94 94
95 95 raise NotImplementedError
96 96
97 97 def setup(self):
98 98
99 99 raise NotImplementedError
100 100
101 101 def run(self):
102 102
103 103 raise NotImplementedError
104 104
105 105 def close(self):
106 106
107 107 return
108 108
109 109
110 110 class Operation(object):
111 111
112 112 """
113 113 Update - Jan 2018 - MULTIPROCESSING
114 114
115 115 Most of the methods remained the same. The decorator parses the arguments and executes the run() method for each process.
116 116 The constructor does not receive any arguments, nor does the base class.
117 117
118 118
119 119 Base class for defining the additional operations that can be added to the ProcessingUnit class
120 120 and that need to accumulate previous information from the data to be processed. Preferably use an
121 121 accumulation buffer inside this class.
122 122
123 123 Example: coherent integrations need the previous information of the n preceding profiles (buffer)
124 124
125 125 """
126 126
127 127 def __init__(self):
128 128
129 129 self.id = None
130 130 self.isConfig = False
131 131
132 132 if not hasattr(self, 'name'):
133 133 self.name = self.__class__.__name__
134 134
135 135 def getAllowedArgs(self):
136 136 if hasattr(self, '__attrs__'):
137 137 return self.__attrs__
138 138 else:
139 139 return inspect.getargspec(self.run).args
140 140
141 141 def setup(self):
142 142
143 143 self.isConfig = True
144 144
145 145 raise NotImplementedError
146 146
147 147 def run(self, dataIn, **kwargs):
148 148 """
149 149 Performs the required operations on dataIn.data and updates the
150 150 attributes of the dataIn object.
151 151
152 152 Input:
153 153
154 154 dataIn : object of type JROData
155 155
156 156 Return:
157 157
158 158 None
159 159
160 160 Affected:
161 161 __buffer : data reception buffer.
162 162
163 163 """
164 164 if not self.isConfig:
165 165 self.setup(**kwargs)
166 166
167 167 raise NotImplementedError
168 168
169 169 def close(self):
170 170
171 171 return
172 172
173 173
174 174 def MPDecorator(BaseClass):
175 175 """
176 176 Multiprocessing class decorator
177 177
178 178 This function adds multiprocessing features to a BaseClass. It also handles
179 179 the communication between processes (readers, procUnits and operations).
180 180 """
181 181
182 182 class MPClass(BaseClass, Process):
183 183
184 184 def __init__(self, *args, **kwargs):
185 185 super(MPClass, self).__init__()
186 186 Process.__init__(self)
187 187 self.operationKwargs = {}
188 188 self.args = args
189 189 self.kwargs = kwargs
190 190 self.sender = None
191 191 self.receiver = None
192 192 self.name = BaseClass.__name__
193 193 if 'plot' in self.name.lower() and not self.name.endswith('_'):
194 194 self.name = '{}{}'.format(self.CODE.upper(), 'Plot')
195 195 self.start_time = time.time()
196 196
197 197 if len(self.args) == 3:
198 198 self.typeProc = "ProcUnit"
199 199 self.id = args[0]
200 200 self.inputId = args[1]
201 201 self.project_id = args[2]
202 202 elif len(self.args) == 2:
203 203 self.id = args[0]
204 204 self.inputId = args[0]
205 205 self.project_id = args[1]
206 206 self.typeProc = "Operation"
207 207
208 208 def subscribe(self):
209 209 '''
210 210 This function creates a socket to receive objects from the
211 211 topic `inputId`.
212 212 '''
213 213
214 214 c = zmq.Context()
215 215 self.receiver = c.socket(zmq.SUB)
216 216 self.receiver.connect(
217 217 'ipc:///tmp/schain/{}_pub'.format(self.project_id))
218 218 self.receiver.setsockopt(zmq.SUBSCRIBE, self.inputId.encode())
219 219
220 220 def listen(self):
221 221 '''
222 222 This function waits for objects and deserializes them using pickle
223 223 '''
224 224
225 225 data = pickle.loads(self.receiver.recv_multipart()[1])
226 226
227 227 return data
228 228
229 229 def set_publisher(self):
230 230 '''
231 231 This function creates a socket for publishing purposes.
232 232 '''
233 233
234 234 time.sleep(1)
235 235 c = zmq.Context()
236 236 self.sender = c.socket(zmq.PUB)
237 237 self.sender.connect(
238 238 'ipc:///tmp/schain/{}_sub'.format(self.project_id))
239 239
240 240 def publish(self, data, id):
241 241 '''
242 242 This function publishes an object to a specific topic.
243 243 '''
244 244 self.sender.send_multipart([str(id).encode(), pickle.dumps(data)])
245 245
246 246 def runReader(self):
247 247 '''
248 248 Run function for reader units
249 249 '''
250 250 while True:
251 251
252 252 BaseClass.run(self, **self.kwargs)
253 253
254 254 for op, optype, opId, kwargs in self.operations:
255 255 if optype == 'self' and not self.dataOut.flagNoData:
256 256 op(**kwargs)
257 257 elif optype == 'other' and not self.dataOut.flagNoData:
258 258 self.dataOut = op.run(self.dataOut, **self.kwargs)
259 259 elif optype == 'external':
260 260 self.publish(self.dataOut, opId)
261 261
262 262 if self.dataOut.flagNoData and not self.dataOut.error:
263 263 continue
264 264
265 265 self.publish(self.dataOut, self.id)
266 266
267 267 if self.dataOut.error:
268 268 log.error(self.dataOut.error, self.name)
269 269 # self.sender.send_multipart([str(self.project_id).encode(), 'end'.encode()])
270 270 break
271 271
272 272 time.sleep(1)
273 273
274 274 def runProc(self):
275 275 '''
276 276 Run function for processing units
277 277 '''
278 278
279 279 while True:
280 280 self.dataIn = self.listen()
281 281
282 282 if self.dataIn.flagNoData and self.dataIn.error is None:
283 283 continue
284 284
285 285 BaseClass.run(self, **self.kwargs)
286 286
287 287 if self.dataIn.error:
288 288 self.dataOut.error = self.dataIn.error
289 self.dataOut.flagNoData = True
290
289 self.dataOut.flagNoData = True
290
291 291 for op, optype, opId, kwargs in self.operations:
292 292 if optype == 'self' and not self.dataOut.flagNoData:
293 293 op(**kwargs)
294 294 elif optype == 'other' and not self.dataOut.flagNoData:
295 295 self.dataOut = op.run(self.dataOut, **kwargs)
296 elif optype == 'external' and not self.dataOut.flagNoData:
297 if not self.dataOut.flagNoData or self.dataOut.error:
298 self.publish(self.dataOut, opId)
296 elif optype == 'external' and not self.dataOut.flagNoData:
297 self.publish(self.dataOut, opId)
299 298
300 299 if not self.dataOut.flagNoData or self.dataOut.error:
301 300 self.publish(self.dataOut, self.id)
301 for op, optype, opId, kwargs in self.operations:
302 if optype == 'self' and self.dataOut.error:
303 op(**kwargs)
304 elif optype == 'other' and self.dataOut.error:
305 self.dataOut = op.run(self.dataOut, **kwargs)
306 elif optype == 'external' and self.dataOut.error:
307 self.publish(self.dataOut, opId)
302 308
303 309 if self.dataIn.error:
304 310 break
305 311
306 312 time.sleep(1)
307 313
308 314 def runOp(self):
309 315 '''
310 316 Run function for external operations (these operations just receive data,
311 317 e.g.: plots, writers, publishers)
312 318 '''
313 319
314 320 while True:
315 321
316 322 dataOut = self.listen()
317 323
318 324 BaseClass.run(self, dataOut, **self.kwargs)
319 325
320 326 if dataOut.error:
321 327 break
322 328
323 329 time.sleep(1)
324 330
325 331 def run(self):
326 332 if self.typeProc == "ProcUnit":
327 333
328 334 if self.inputId is not None:
329 335
330 336 self.subscribe()
331 337
332 338 self.set_publisher()
333 339
334 340 if 'Reader' not in BaseClass.__name__:
335 341 self.runProc()
336 342 else:
337 343 self.runReader()
338 344
339 345 elif self.typeProc == "Operation":
340 346
341 347 self.subscribe()
342 348 self.runOp()
343 349
344 350 else:
345 351 raise ValueError("Unknown type")
346 352
347 353 self.close()
348 354
349 355 def event_monitor(self, monitor):
350 356
351 357 events = {}
352 358
353 359 for name in dir(zmq):
354 360 if name.startswith('EVENT_'):
355 361 value = getattr(zmq, name)
356 362 events[value] = name
357 363
358 364 while monitor.poll():
359 365 evt = recv_monitor_message(monitor)
360 366 if evt['event'] == 32:
361 367 self.connections += 1
362 368 if evt['event'] == 512:
363 369 pass
364 370
365 371 evt.update({'description': events[evt['event']]})
366 372
367 373 if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
368 374 break
369 375 monitor.close()
370 376 print('event monitor thread done!')
371 377
372 378 def close(self):
373 379
374 380 BaseClass.close(self)
375 381
376 382 if self.sender:
377 383 self.sender.close()
378 384
379 385 if self.receiver:
380 386 self.receiver.close()
381 387
382 388 log.success('Done...(Time:{:4.2f} secs)'.format(time.time()-self.start_time), self.name)
383 389
384 390 return MPClass
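A conceptual usage sketch of the decorator above (not part of the file): a decorated class becomes a Process that subscribes to its inputId topic and publishes on its own id; passing two positional constructor arguments makes it an Operation, as handled in MPClass.__init__. The class name, topics and the commented instantiation are illustrative assumptions only:

    @MPDecorator
    class DummyWriter(Operation):
        def run(self, dataOut, **kwargs):
            # called by runOp() for every object received on the ZMQ SUB socket
            log.log('got block at {}'.format(dataOut.utctime), self.name)

    # op = DummyWriter('worker_id', 'project_id')   # two args -> typeProc == "Operation"
    # op.start()                                    # Process.start() enters MPClass.run()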
@@ -1,3857 +1,3858
1 1 import numpy
2 2 import math
3 3 from scipy import optimize, interpolate, signal, stats, ndimage
4 4 import scipy
5 5 import re
6 6 import datetime
7 7 import copy
8 8 import sys
9 9 import importlib
10 10 import itertools
11 11 from multiprocessing import Pool, TimeoutError
12 12 from multiprocessing.pool import ThreadPool
13 13 import time
14 14
15 15 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state papameters
16 16 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
17 17 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
18 18 from scipy import asarray as ar,exp
19 19 from scipy.optimize import curve_fit
20 20 from schainpy.utils import log
21 21 import warnings
22 22 from numpy import NaN
23 23 from scipy.optimize.optimize import OptimizeWarning
24 24 warnings.filterwarnings('ignore')
25 25
26 26
27 27 SPEED_OF_LIGHT = 299792458
28 28
29 29
30 30 '''solving pickling issue'''
31 31
32 32 def _pickle_method(method):
33 33 func_name = method.__func__.__name__
34 34 obj = method.__self__
35 35 cls = method.__self__.__class__
36 36 return _unpickle_method, (func_name, obj, cls)
37 37
38 38 def _unpickle_method(func_name, obj, cls):
39 39 for cls in cls.mro():
40 40 try:
41 41 func = cls.__dict__[func_name]
42 42 except KeyError:
43 43 pass
44 44 else:
45 45 break
46 46 return func.__get__(obj, cls)
47 47
48 48 @MPDecorator
49 49 class ParametersProc(ProcessingUnit):
50 50
51 51 METHODS = {}
52 52 nSeconds = None
53 53
54 54 def __init__(self):
55 55 ProcessingUnit.__init__(self)
56 56
57 57 # self.objectDict = {}
58 58 self.buffer = None
59 59 self.firstdatatime = None
60 60 self.profIndex = 0
61 61 self.dataOut = Parameters()
62 63 self.setupReq = False #Add to all processing units
63 63
64 64 def __updateObjFromInput(self):
65 65
66 66 self.dataOut.inputUnit = self.dataIn.type
67 67
68 68 self.dataOut.timeZone = self.dataIn.timeZone
69 69 self.dataOut.dstFlag = self.dataIn.dstFlag
70 70 self.dataOut.errorCount = self.dataIn.errorCount
71 71 self.dataOut.useLocalTime = self.dataIn.useLocalTime
72 72
73 73 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
74 74 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
75 75 self.dataOut.channelList = self.dataIn.channelList
76 76 self.dataOut.heightList = self.dataIn.heightList
77 77 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
78 78 # self.dataOut.nHeights = self.dataIn.nHeights
79 79 # self.dataOut.nChannels = self.dataIn.nChannels
80 80 self.dataOut.nBaud = self.dataIn.nBaud
81 81 self.dataOut.nCode = self.dataIn.nCode
82 82 self.dataOut.code = self.dataIn.code
83 83 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
84 84 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
85 85 # self.dataOut.utctime = self.firstdatatime
86 86 self.dataOut.utctime = self.dataIn.utctime
87 87 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is already decoded
88 88 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data is not flipped
89 89 self.dataOut.nCohInt = self.dataIn.nCohInt
90 90 # self.dataOut.nIncohInt = 1
91 91 self.dataOut.ippSeconds = self.dataIn.ippSeconds
92 92 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
93 93 self.dataOut.timeInterval1 = self.dataIn.timeInterval
94 94 self.dataOut.heightList = self.dataIn.getHeiRange()
95 95 self.dataOut.frequency = self.dataIn.frequency
96 96 # self.dataOut.noise = self.dataIn.noise
97 self.dataOut.error = self.dataIn.error
97 98
98 99 def run(self):
99 100
100 101
101 102
102 103 #---------------------- Voltage Data ---------------------------
103 104
104 105 if self.dataIn.type == "Voltage":
105 106
106 107 self.__updateObjFromInput()
107 108 self.dataOut.data_pre = self.dataIn.data.copy()
108 109 self.dataOut.flagNoData = False
109 110 self.dataOut.utctimeInit = self.dataIn.utctime
110 111 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
111 112 return
112 113
113 114 #---------------------- Spectra Data ---------------------------
114 115
115 116 if self.dataIn.type == "Spectra":
116 117
117 118 self.dataOut.data_pre = (self.dataIn.data_spc, self.dataIn.data_cspc)
118 119 self.dataOut.data_spc = self.dataIn.data_spc
119 120 self.dataOut.data_cspc = self.dataIn.data_cspc
120 121 self.dataOut.nProfiles = self.dataIn.nProfiles
121 122 self.dataOut.nIncohInt = self.dataIn.nIncohInt
122 123 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
123 124 self.dataOut.ippFactor = self.dataIn.ippFactor
124 125 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
125 126 self.dataOut.spc_noise = self.dataIn.getNoise()
126 127 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
127 128 # self.dataOut.normFactor = self.dataIn.normFactor
128 129 self.dataOut.pairsList = self.dataIn.pairsList
129 130 self.dataOut.groupList = self.dataIn.pairsList
130 self.dataOut.flagNoData = False
131 self.dataOut.flagNoData = False
131 132
132 133 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
133 134 self.dataOut.ChanDist = self.dataIn.ChanDist
134 135 else: self.dataOut.ChanDist = None
135 136
136 137 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
137 138 # self.dataOut.VelRange = self.dataIn.VelRange
138 139 #else: self.dataOut.VelRange = None
139 140
140 141 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
141 142 self.dataOut.RadarConst = self.dataIn.RadarConst
142 143
143 144 if hasattr(self.dataIn, 'NPW'): #NPW
144 145 self.dataOut.NPW = self.dataIn.NPW
145 146
146 147 if hasattr(self.dataIn, 'COFA'): #COFA
147 148 self.dataOut.COFA = self.dataIn.COFA
148 149
149 150
150 151
151 152 #---------------------- Correlation Data ---------------------------
152 153
153 154 if self.dataIn.type == "Correlation":
154 155 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
155 156
156 157 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
157 158 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
158 159 self.dataOut.groupList = (acf_pairs, ccf_pairs)
159 160
160 161 self.dataOut.abscissaList = self.dataIn.lagRange
161 162 self.dataOut.noise = self.dataIn.noise
162 163 self.dataOut.data_SNR = self.dataIn.SNR
163 164 self.dataOut.flagNoData = False
164 165 self.dataOut.nAvg = self.dataIn.nAvg
165 166
166 167 #---------------------- Parameters Data ---------------------------
167 168
168 169 if self.dataIn.type == "Parameters":
169 170 self.dataOut.copy(self.dataIn)
170 171 self.dataOut.flagNoData = False
171 172
172 173 return True
173 174
174 175 self.__updateObjFromInput()
175 176 self.dataOut.utctimeInit = self.dataIn.utctime
176 177 self.dataOut.paramInterval = self.dataIn.timeInterval
177 178
178 179 return
179 180
180 181
181 182 def target(tups):
182 183
183 184 obj, args = tups
184 185
185 186 return obj.FitGau(args)
186 187
187 188
188 189 class SpectralFilters(Operation):
189 190
190 191 '''This class allows the Rainfall / Wind Selection for CLAIRE RADAR
191 192
192 193 LimitR : It is the limit in m/s of Rainfall
193 194 LimitW : It is the limit in m/s for Winds
194 195
195 196 Input:
196 197
197 198 self.dataOut.data_pre : SPC and CSPC
198 199 self.dataOut.spc_range : To select wind and rainfall velocities
199 200
200 201 Affected:
201 202
202 203 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
203 204 self.dataOut.spcparam_range : Used in SpcParamPlot
204 205 self.dataOut.SPCparam : Used in PrecipitationProc
205 206
206 207
207 208 '''
208 209
209 210 def __init__(self):
210 211 Operation.__init__(self)
211 212 self.i=0
212 213
213 214 def run(self, dataOut, PositiveLimit=1.5, NegativeLimit=2.5):
214 215
215 216
216 217 #Wind limits
217 218 LimitR = PositiveLimit
218 219 LimitN = NegativeLimit
219 220
220 221 self.spc = dataOut.data_pre[0].copy()
221 222 self.cspc = dataOut.data_pre[1].copy()
222 223
223 224 self.Num_Hei = self.spc.shape[2]
224 225 self.Num_Bin = self.spc.shape[1]
225 226 self.Num_Chn = self.spc.shape[0]
226 227
227 228 VelRange = dataOut.spc_range[2]
228 229 TimeRange = dataOut.spc_range[1]
229 230 FrecRange = dataOut.spc_range[0]
230 231
231 232 Vmax= 2*numpy.max(dataOut.spc_range[2])
232 233 Tmax= 2*numpy.max(dataOut.spc_range[1])
233 234 Fmax= 2*numpy.max(dataOut.spc_range[0])
234 235
235 236 Breaker1R=VelRange[numpy.abs(VelRange-(-LimitN)).argmin()]
236 237 Breaker1R=numpy.where(VelRange == Breaker1R)
237 238
238 239 Delta = self.Num_Bin/2 - Breaker1R[0]
239 240
240 241
241 242 '''Rearranging SPCrange'''
242 243
243 244 VelRange=numpy.roll(VelRange,-(int(self.Num_Bin/2)) ,axis=0)
244 245
245 246 VelRange[-(int(self.Num_Bin/2)):]+= Vmax
246 247
247 248 FrecRange=numpy.roll(FrecRange,-(int(self.Num_Bin/2)),axis=0)
248 249
249 250 FrecRange[-(int(self.Num_Bin/2)):]+= Fmax
250 251
251 252 TimeRange=numpy.roll(TimeRange,-(int(self.Num_Bin/2)),axis=0)
252 253
253 254 TimeRange[-(int(self.Num_Bin/2)):]+= Tmax
254 255
255 256 ''' ------------------ '''
256 257
257 258 Breaker2R=VelRange[numpy.abs(VelRange-(LimitR)).argmin()]
258 259 Breaker2R=numpy.where(VelRange == Breaker2R)
259 260
260 261
261 262 SPCroll = numpy.roll(self.spc,-(int(self.Num_Bin/2)) ,axis=1)
262 263
263 264 SPCcut = SPCroll.copy()
264 265 for i in range(self.Num_Chn):
265 266
266 267 SPCcut[i,0:int(Breaker2R[0]),:] = dataOut.noise[i]
267 268 SPCcut[i,-int(Delta):,:] = dataOut.noise[i]
268 269
269 270 SPCcut[i]=SPCcut[i]- dataOut.noise[i]
270 271 SPCcut[ numpy.where( SPCcut<0 ) ] = 1e-20
271 272
272 273 SPCroll[i]=SPCroll[i]-dataOut.noise[i]
273 274 SPCroll[ numpy.where( SPCroll<0 ) ] = 1e-20
274 275
275 276 SPC_ch1 = SPCroll
276 277
277 278 SPC_ch2 = SPCcut
278 279
279 280 SPCparam = (SPC_ch1, SPC_ch2, self.spc)
280 281 dataOut.SPCparam = numpy.asarray(SPCparam)
281 282
282 283
283 284 dataOut.spcparam_range=numpy.zeros([self.Num_Chn,self.Num_Bin+1])
284 285
285 286 dataOut.spcparam_range[2]=VelRange
286 287 dataOut.spcparam_range[1]=TimeRange
287 288 dataOut.spcparam_range[0]=FrecRange
288 289 return dataOut
289 290
290 291 class GaussianFit(Operation):
291 292
292 293 '''
293 294 Function that fits one and two generalized Gaussians (gg) based
294 295 on the PSD shape across a "power band" identified from a cumsum of
295 296 the measured spectrum minus noise.
296 297
297 298 Input:
298 299 self.dataOut.data_pre : SelfSpectra
299 300
300 301 Output:
301 302 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
302 303
303 304 '''
304 305 def __init__(self):
305 306 Operation.__init__(self)
306 307 self.i=0
307 308
308 309
309 310 def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the fft points
310 311 """This routine will fit a pair of generalized Gaussians to a power spectrum
311 312 input: spc
312 313 output:
313 314 Amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1,noise
314 315 """
315 316
316 317 self.spc = dataOut.data_pre[0].copy()
317 318 self.Num_Hei = self.spc.shape[2]
318 319 self.Num_Bin = self.spc.shape[1]
319 320 self.Num_Chn = self.spc.shape[0]
320 321 Vrange = dataOut.abscissaList
321 322
322 323 GauSPC = numpy.empty([self.Num_Chn,self.Num_Bin,self.Num_Hei])
323 324 SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei])
324 325 SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei])
325 326 SPC_ch1[:] = numpy.NaN
326 327 SPC_ch2[:] = numpy.NaN
327 328
328 329
329 330 start_time = time.time()
330 331
331 332 noise_ = dataOut.spc_noise[0].copy()
332 333
333 334
334 335 pool = Pool(processes=self.Num_Chn)
335 336 args = [(Vrange, Ch, pnoise, noise_, num_intg, SNRlimit) for Ch in range(self.Num_Chn)]
336 337 objs = [self for __ in range(self.Num_Chn)]
337 338 attrs = list(zip(objs, args))
338 339 gauSPC = pool.map(target, attrs)
339 340 dataOut.SPCparam = numpy.asarray(gauSPC)
340 341
341 342 ''' Parameters:
342 343 1. Amplitude
343 344 2. Shift
344 345 3. Width
345 346 4. Power
346 347 '''
347 348
348 349 def FitGau(self, X):
349 350
350 351 Vrange, ch, pnoise, noise_, num_intg, SNRlimit = X
351 352
352 353 SPCparam = []
353 354 SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei])
354 355 SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei])
355 356 SPC_ch1[:] = 0#numpy.NaN
356 357 SPC_ch2[:] = 0#numpy.NaN
357 358
358 359
359 360
360 361 for ht in range(self.Num_Hei):
361 362
362 363
363 364 spc = numpy.asarray(self.spc)[ch,:,ht]
364 365
365 366 #############################################
366 367 # normalizing spc and noise
367 368 # This part differs from gg1
368 369 spc_norm_max = max(spc)
369 370 #spc = spc / spc_norm_max
370 371 pnoise = pnoise #/ spc_norm_max
371 372 #############################################
372 373
373 374 fatspectra=1.0
374 375
375 376 wnoise = noise_ #/ spc_norm_max
376 377 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
377 378 #if wnoise>1.1*pnoise: # to be tested later
378 379 # wnoise=pnoise
379 380 noisebl=wnoise*0.9;
380 381 noisebh=wnoise*1.1
381 382 spc=spc-wnoise
382 383
383 384 minx=numpy.argmin(spc)
384 385 #spcs=spc.copy()
385 386 spcs=numpy.roll(spc,-minx)
386 387 cum=numpy.cumsum(spcs)
387 388 tot_noise=wnoise * self.Num_Bin #64;
388 389
389 390 snr = sum(spcs)/tot_noise
390 391 snrdB=10.*numpy.log10(snr)
391 392
392 393 if snrdB < SNRlimit :
393 394 snr = numpy.NaN
394 395 SPC_ch1[:,ht] = 0#numpy.NaN
395 396 SPC_ch1[:,ht] = 0#numpy.NaN
396 397 SPCparam = (SPC_ch1,SPC_ch2)
397 398 continue
398 399
399 400
400 401 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
401 402 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
402 403
403 404 cummax=max(cum);
404 405 epsi=0.08*fatspectra # cumsum to narrow down the energy region
405 406 cumlo=cummax*epsi;
406 407 cumhi=cummax*(1-epsi)
407 408 powerindex=numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
408 409
409 410
410 411 if len(powerindex) < 1:# case for powerindex 0
411 412 continue
412 413 powerlo=powerindex[0]
413 414 powerhi=powerindex[-1]
414 415 powerwidth=powerhi-powerlo
415 416
416 417 firstpeak=powerlo+powerwidth/10.# first gaussian energy location
417 418 secondpeak=powerhi-powerwidth/10.#second gaussian energy location
418 419 midpeak=(firstpeak+secondpeak)/2.
419 420 firstamp=spcs[int(firstpeak)]
420 421 secondamp=spcs[int(secondpeak)]
421 422 midamp=spcs[int(midpeak)]
422 423
423 424 x=numpy.arange( self.Num_Bin )
424 425 y_data=spc+wnoise
425 426
426 427 ''' single Gaussian '''
427 428 shift0=numpy.mod(midpeak+minx, self.Num_Bin )
428 429 width0=powerwidth/4.#Initialization entire power of spectrum divided by 4
429 430 power0=2.
430 431 amplitude0=midamp
431 432 state0=[shift0,width0,amplitude0,power0,wnoise]
432 433 bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
433 434 lsq1=fmin_l_bfgs_b(self.misfit1,state0,args=(y_data,x,num_intg),bounds=bnds,approx_grad=True)
434 435
435 436 chiSq1=lsq1[1];
436 437
437 438
438 439 if fatspectra<1.0 and powerwidth<4:
439 440 choice=0
440 441 Amplitude0=lsq1[0][2]
441 442 shift0=lsq1[0][0]
442 443 width0=lsq1[0][1]
443 444 p0=lsq1[0][3]
444 445 Amplitude1=0.
445 446 shift1=0.
446 447 width1=0.
447 448 p1=0.
448 449 noise=lsq1[0][4]
449 450 #return (numpy.array([shift0,width0,Amplitude0,p0]),
450 451 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
451 452
452 453 ''' two gaussians '''
453 454 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
454 455 shift0=numpy.mod(firstpeak+minx, self.Num_Bin );
455 456 shift1=numpy.mod(secondpeak+minx, self.Num_Bin )
456 457 width0=powerwidth/6.;
457 458 width1=width0
458 459 power0=2.;
459 460 power1=power0
460 461 amplitude0=firstamp;
461 462 amplitude1=secondamp
462 463 state0=[shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
463 464 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
464 465 bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
465 466 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
466 467
467 468 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
468 469
469 470
470 471 chiSq2=lsq2[1];
471 472
472 473
473 474
474 475 oneG=(chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
475 476
476 477 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
477 478 if oneG:
478 479 choice=0
479 480 else:
480 481 w1=lsq2[0][1]; w2=lsq2[0][5]
481 482 a1=lsq2[0][2]; a2=lsq2[0][6]
482 483 p1=lsq2[0][3]; p2=lsq2[0][7]
483 484 s1=(2**(1+1./p1))*scipy.special.gamma(1./p1)/p1;
484 485 s2=(2**(1+1./p2))*scipy.special.gamma(1./p2)/p2;
485 486 gp1=a1*w1*s1; gp2=a2*w2*s2 # power content of each ggaussian with proper p scaling
486 487
487 488 if gp1>gp2:
488 489 if a1>0.7*a2:
489 490 choice=1
490 491 else:
491 492 choice=2
492 493 elif gp2>gp1:
493 494 if a2>0.7*a1:
494 495 choice=2
495 496 else:
496 497 choice=1
497 498 else:
498 499 choice=numpy.argmax([a1,a2])+1
499 500 #else:
500 501 #choice=argmin([std2a,std2b])+1
501 502
502 503 else: # with low SNR go to the most energetic peak
503 504 choice=numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
504 505
505 506
506 507 shift0=lsq2[0][0];
507 508 vel0=Vrange[0] + shift0*(Vrange[1]-Vrange[0])
508 509 shift1=lsq2[0][4];
509 510 vel1=Vrange[0] + shift1*(Vrange[1]-Vrange[0])
510 511
511 512 max_vel = 1.0
512 513
513 514 #first peak will be 0, second peak will be 1
514 515 if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range
515 516 shift0=lsq2[0][0]
516 517 width0=lsq2[0][1]
517 518 Amplitude0=lsq2[0][2]
518 519 p0=lsq2[0][3]
519 520
520 521 shift1=lsq2[0][4]
521 522 width1=lsq2[0][5]
522 523 Amplitude1=lsq2[0][6]
523 524 p1=lsq2[0][7]
524 525 noise=lsq2[0][8]
525 526 else:
526 527 shift1=lsq2[0][0]
527 528 width1=lsq2[0][1]
528 529 Amplitude1=lsq2[0][2]
529 530 p1=lsq2[0][3]
530 531
531 532 shift0=lsq2[0][4]
532 533 width0=lsq2[0][5]
533 534 Amplitude0=lsq2[0][6]
534 535 p0=lsq2[0][7]
535 536 noise=lsq2[0][8]
536 537
537 538 if Amplitude0<0.05: # in case the peak is noise
538 539 shift0,width0,Amplitude0,p0 = [0,0,0,0]#4*[numpy.NaN]
539 540 if Amplitude1<0.05:
540 541 shift1,width1,Amplitude1,p1 = [0,0,0,0]#4*[numpy.NaN]
541 542
542 543
543 544 SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0))/width0)**p0
544 545 SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1))/width1)**p1
545 546 SPCparam = (SPC_ch1,SPC_ch2)
546 547
547 548
548 549 return SPCparam
549 550
550 551 def y_model1(self,x,state):
551 552 shift0,width0,amplitude0,power0,noise=state
552 553 model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
553 554
554 555 model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0)
555 556
556 557 model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0)
557 558 return model0+model0u+model0d+noise
558 559
559 560 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
560 561 shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,noise=state
561 562 model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
562 563
563 564 model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0)
564 565
565 566 model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0)
566 567 model1=amplitude1*numpy.exp(-0.5*abs((x-shift1)/width1)**power1)
567 568
568 569 model1u=amplitude1*numpy.exp(-0.5*abs((x-shift1- self.Num_Bin )/width1)**power1)
569 570
570 571 model1d=amplitude1*numpy.exp(-0.5*abs((x-shift1+ self.Num_Bin )/width1)**power1)
571 572 return model0+model0u+model0d+model1+model1u+model1d+noise
572 573
573 574 def misfit1(self,state,y_data,x,num_intg): # This function compares how close real data is with the model data, the close it is, the better it is.
574 575
575 576 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
576 577
577 578 def misfit2(self,state,y_data,x,num_intg):
578 579 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
579 580
580 581
581 582
582 583 class PrecipitationProc(Operation):
583 584
584 585 '''
585 586 Operator that estimates Reflectivity factor (Z), and estimates rainfall Rate (R)
586 587
587 588 Input:
588 589 self.dataOut.data_pre : SelfSpectra
589 590
590 591 Output:
591 592
592 593 self.dataOut.data_output : Reflectivity factor, rainfall Rate
593 594
594 595
595 596 Parameters affected:
596 597 '''
597 598
598 599 def __init__(self):
599 600 Operation.__init__(self)
600 601 self.i=0
601 602
602 603
603 604 def gaus(self,xSamples,Amp,Mu,Sigma):
604 605 return ( Amp / ((2*numpy.pi)**0.5 * Sigma) ) * numpy.exp( -( xSamples - Mu )**2 / ( 2 * (Sigma**2) ))
605 606
606 607
607 608
608 609 def Moments(self, ySamples, xSamples):
609 610 Pot = numpy.nansum( ySamples ) # Power, zeroth moment
610 611 yNorm = ySamples / Pot
611 612
612 613 Vr = numpy.nansum( yNorm * xSamples ) # Radial velocity, mu, Doppler shift, first moment
613 614 Sigma2 = abs(numpy.nansum( yNorm * ( xSamples - Vr )**2 )) # Second moment
614 615 Desv = Sigma2**0.5 # Standard deviation, spectral width
615 616
616 617 return numpy.array([Pot, Vr, Desv])
617 618
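A toy check of the Moments() helper above, assuming a synthetic Gaussian Doppler spectrum (all numbers are illustrative): the returned values are total power, mean radial velocity and spectral width:

    vels = numpy.linspace(-10., 10., 64)                  # velocity axis in m/s
    spec = numpy.exp(-0.5 * ((vels - 2.0) / 1.5)**2)      # synthetic spectrum centred at 2 m/s
    power, vr, width = PrecipitationProc().Moments(spec, vels)
    # vr comes out near 2.0 m/s and width near 1.5 m/s for this synthetic case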
618 619 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
619 620 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km = 0.93, Altitude=3350):
620 621
621 622
622 623 Velrange = dataOut.spcparam_range[2]
623 624 FrecRange = dataOut.spcparam_range[0]
624 625
625 626 dV= Velrange[1]-Velrange[0]
626 627 dF= FrecRange[1]-FrecRange[0]
627 628
628 629 if radar == "MIRA35C" :
629 630
630 631 self.spc = dataOut.data_pre[0].copy()
631 632 self.Num_Hei = self.spc.shape[2]
632 633 self.Num_Bin = self.spc.shape[1]
633 634 self.Num_Chn = self.spc.shape[0]
634 635 Ze = self.dBZeMODE2(dataOut)
635 636
636 637 else:
637 638
638 639 self.spc = dataOut.SPCparam[1].copy() #dataOut.data_pre[0].copy() #
639 640
640 641 """NOTA SE DEBE REMOVER EL RANGO DEL PULSO TX"""
641 642
642 643 self.spc[:,:,0:7]= numpy.NaN
643 644
644 645 """##########################################"""
645 646
646 647 self.Num_Hei = self.spc.shape[2]
647 648 self.Num_Bin = self.spc.shape[1]
648 649 self.Num_Chn = self.spc.shape[0]
649 650
650 651 ''' Obtain the radar constant '''
651 652
652 653 self.Pt = Pt
653 654 self.Gt = Gt
654 655 self.Gr = Gr
655 656 self.Lambda = Lambda
656 657 self.aL = aL
657 658 self.tauW = tauW
658 659 self.ThetaT = ThetaT
659 660 self.ThetaR = ThetaR
660 661
661 662 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
662 663 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
663 664 RadarConstant = 10e-26 * Numerator / Denominator #
664 665
665 666 ''' ============================= '''
666 667
667 668 self.spc[0] = (self.spc[0]-dataOut.noise[0])
668 669 self.spc[1] = (self.spc[1]-dataOut.noise[1])
669 670 self.spc[2] = (self.spc[2]-dataOut.noise[2])
670 671
671 672 self.spc[ numpy.where(self.spc < 0)] = 0
672 673
673 674 SPCmean = (numpy.mean(self.spc,0) - numpy.mean(dataOut.noise))
674 675 SPCmean[ numpy.where(SPCmean < 0)] = 0
675 676
676 677 ETAn = numpy.zeros([self.Num_Bin,self.Num_Hei])
677 678 ETAv = numpy.zeros([self.Num_Bin,self.Num_Hei])
678 679 ETAd = numpy.zeros([self.Num_Bin,self.Num_Hei])
679 680
680 681 Pr = SPCmean[:,:]
681 682
682 683 VelMeteoro = numpy.mean(SPCmean,axis=0)
683 684
684 685 D_range = numpy.zeros([self.Num_Bin,self.Num_Hei])
685 686 SIGMA = numpy.zeros([self.Num_Bin,self.Num_Hei])
686 687 N_dist = numpy.zeros([self.Num_Bin,self.Num_Hei])
687 688 V_mean = numpy.zeros(self.Num_Hei)
688 689 del_V = numpy.zeros(self.Num_Hei)
689 690 Z = numpy.zeros(self.Num_Hei)
690 691 Ze = numpy.zeros(self.Num_Hei)
691 692 RR = numpy.zeros(self.Num_Hei)
692 693
693 694 Range = dataOut.heightList*1000.
694 695
695 696 for R in range(self.Num_Hei):
696 697
697 698 h = Range[R] + Altitude #Range from ground to radar pulse altitude
698 699 del_V[R] = 1 + 3.68 * 10**-5 * h + 1.71 * 10**-9 * h**2 #Density change correction for velocity
699 700
700 701 D_range[:,R] = numpy.log( (9.65 - (Velrange[0:self.Num_Bin] / del_V[R])) / 10.3 ) / -0.6 #Diameter range [m]x10**-3
701 702
702 703 '''NOTE: ETA(n) dn = ETA(f) df
703 704
704 705 dn = 1 Sampling differential
705 706 df = ETA(n) / ETA(f)
706 707
707 708 '''
708 709
709 710 ETAn[:,R] = RadarConstant * Pr[:,R] * (Range[R] )**2 #Reflectivity (ETA)
710 711
711 712 ETAv[:,R]=ETAn[:,R]/dV
712 713
713 714 ETAd[:,R]=ETAv[:,R]*6.18*numpy.exp(-0.6*D_range[:,R])
714 715
715 716 SIGMA[:,R] = Km * (D_range[:,R] * 1e-3 )**6 * numpy.pi**5 / Lambda**4 #Equivalent Section of drops (sigma)
716 717
717 718 N_dist[:,R] = ETAn[:,R] / SIGMA[:,R]
718 719
719 720 DMoments = self.Moments(Pr[:,R], Velrange[0:self.Num_Bin])
720 721
721 722 try:
722 723 popt01,pcov = curve_fit(self.gaus, Velrange[0:self.Num_Bin] , Pr[:,R] , p0=DMoments)
723 724 except:
724 725 popt01=numpy.zeros(3)
725 726 popt01[1]= DMoments[1]
726 727
727 728 if popt01[1]<0 or popt01[1]>20:
728 729 popt01[1]=numpy.NaN
729 730
730 731
731 732 V_mean[R]=popt01[1]
732 733
733 734 Z[R] = numpy.nansum( N_dist[:,R] * (D_range[:,R])**6 )#*10**-18
734 735
735 736 RR[R] = 0.0006*numpy.pi * numpy.nansum( D_range[:,R]**3 * N_dist[:,R] * Velrange[0:self.Num_Bin] ) #Rainfall rate
736 737
737 738 Ze[R] = (numpy.nansum( ETAn[:,R]) * Lambda**4) / ( 10**-18*numpy.pi**5 * Km)
738 739
739 740
740 741
741 742 RR2 = (Z/200)**(1/1.6)
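    # RR2 inverts the classical Marshall-Palmer Z-R relation, Z = 200 * R**1.6, i.e.
    # R = (Z/200)**(1/1.6); it is computed alongside the drop-size-distribution estimate
    # RR obtained above (RR2 itself is not written to dataOut).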
742 743 dBRR = 10*numpy.log10(RR)
743 744 dBRR2 = 10*numpy.log10(RR2)
744 745
745 746 dBZe = 10*numpy.log10(Ze)
746 747 dBZ = 10*numpy.log10(Z)
747 748
748 749 dataOut.data_output = RR[8]
749 750 dataOut.data_param = numpy.ones([3,self.Num_Hei])
750 751 dataOut.channelList = [0,1,2]
751 752
752 753 dataOut.data_param[0]=dBZ
753 754 dataOut.data_param[1]=V_mean
754 755 dataOut.data_param[2]=RR
755
756
756 757 return dataOut
757 758
758 759 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
759 760
760 761 NPW = dataOut.NPW
761 762 COFA = dataOut.COFA
762 763
763 764 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
764 765 RadarConst = dataOut.RadarConst
765 766 #frequency = 34.85*10**9
766 767
767 768 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
768 769 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
769 770
770 771 ETA = numpy.sum(SNR,1)
771 772
772 773 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
773 774
774 775 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
775 776
776 777 for r in range(self.Num_Hei):
777 778
778 779 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
779 780 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
780 781
781 782 return Ze
782 783
783 784 # def GetRadarConstant(self):
784 785 #
785 786 # """
786 787 # Constants:
787 788 #
788 789 # Pt: Transmission Power dB 5kW 5000
789 790 # Gt: Transmission Gain dB 24.7 dB 295.1209
790 791 # Gr: Reception Gain dB 18.5 dB 70.7945
791 792 # Lambda: Wavelength m 0.6741 m 0.6741
792 793 # aL: Attenuation losses dB 4dB 2.5118
793 794 # tauW: Width of transmission pulse s 4us 4e-6
794 795 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
795 796 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
796 797 #
797 798 # """
798 799 #
799 800 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
800 801 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
801 802 # RadarConstant = Numerator / Denominator
802 803 #
803 804 # return RadarConstant
804 805
805 806
806 807
807 808 class FullSpectralAnalysis(Operation):
808 809
809 810 """
810 811 Function that implements the Full Spectral Analysis technique.
811 812
812 813 Input:
813 814 self.dataOut.data_pre : Self-spectra and cross-spectra data
814 815 self.dataOut.groupList : Pair list of channels
815 816 self.dataOut.ChanDist : Physical distance between receivers
816 817
817 818
818 819 Output:
819 820
820 821 self.dataOut.data_output : Zonal wind, Meridional wind and Vertical wind
821 822
822 823
823 824 Parameters affected: Winds, height range, SNR
824 825
825 826 """
826 827 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRlimit=7):
827 828
828 829 self.indice=int(numpy.random.rand()*1000)
829 830
830 831 spc = dataOut.data_pre[0].copy()
831 832 cspc = dataOut.data_pre[1]
832 833
833 834 """NOTA SE DEBE REMOVER EL RANGO DEL PULSO TX"""
834 835
835 836 SNRspc = spc.copy()
836 837 SNRspc[:,:,0:7]= numpy.NaN
837 838
838 839 """##########################################"""
839 840
840 841
841 842 nChannel = spc.shape[0]
842 843 nProfiles = spc.shape[1]
843 844 nHeights = spc.shape[2]
844 845
845 846 pairsList = dataOut.groupList
846 847 if dataOut.ChanDist is not None :
847 848 ChanDist = dataOut.ChanDist
848 849 else:
849 850 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
850 851
851 852 FrecRange = dataOut.spc_range[0]
852 853
853 854 ySamples=numpy.ones([nChannel,nProfiles])
854 855 phase=numpy.ones([nChannel,nProfiles])
855 856 CSPCSamples=numpy.ones([nChannel,nProfiles],dtype=numpy.complex_)
856 857 coherence=numpy.ones([nChannel,nProfiles])
857 858 PhaseSlope=numpy.ones(nChannel)
858 859 PhaseInter=numpy.ones(nChannel)
859 860 data_SNR=numpy.zeros([nProfiles])
860 861
861 862 data = dataOut.data_pre
862 863 noise = dataOut.noise
863 864
864 865 dataOut.data_SNR = (numpy.mean(SNRspc,axis=1)- noise[0]) / noise[0]
865 866
866 867 dataOut.data_SNR[numpy.where( dataOut.data_SNR <0 )] = 1e-20
867 868
868 869
869 870 data_output=numpy.ones([spc.shape[0],spc.shape[2]])*numpy.NaN
870 871
871 872 velocityX=[]
872 873 velocityY=[]
873 874 velocityV=[]
874 875 PhaseLine=[]
875 876
876 877 dbSNR = 10*numpy.log10(dataOut.data_SNR)
877 878 dbSNR = numpy.average(dbSNR,0)
878 879
879 880 for Height in range(nHeights):
880 881
881 882 [Vzon,Vmer,Vver, GaussCenter, PhaseSlope, FitGaussCSPC]= self.WindEstimation(spc, cspc, pairsList, ChanDist, Height, noise, dataOut.spc_range, dbSNR[Height], SNRlimit)
882 883 PhaseLine = numpy.append(PhaseLine, PhaseSlope)
883 884
884 885 if abs(Vzon)<100. and abs(Vzon)> 0.:
885 886 velocityX=numpy.append(velocityX, Vzon)#Vmag
886 887
887 888 else:
888 889 velocityX=numpy.append(velocityX, numpy.NaN)
889 890
890 891 if abs(Vmer)<100. and abs(Vmer) > 0.:
891 892 velocityY=numpy.append(velocityY, -Vmer)#Vang
892 893
893 894 else:
894 895 velocityY=numpy.append(velocityY, numpy.NaN)
895 896
896 897 if dbSNR[Height] > SNRlimit:
897 898 velocityV=numpy.append(velocityV, -Vver)#FirstMoment[Height])
898 899 else:
899 900 velocityV=numpy.append(velocityV, numpy.NaN)
900 901
901 902
902 903
903 904 '''Note: change the sign of numpy.array(velocityX) when processing BLTR data'''
904 905 data_output[0] = numpy.array(velocityX) #self.moving_average(numpy.array(velocityX) , N=1)
905 906 data_output[1] = numpy.array(velocityY) #self.moving_average(numpy.array(velocityY) , N=1)
906 907 data_output[2] = velocityV#FirstMoment
907 908
908 909 xFrec=FrecRange[0:spc.shape[1]]
909 910
910 911 dataOut.data_output=data_output
911 912
912 913 return dataOut
913 914
914 915
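    # moving_average() below is a boxcar smoother built from numpy.convolve in 'full' mode,
    # sliced so the output keeps len(x) samples; the trailing N-1 samples are edge-affected
    # and N=1 returns the input unchanged.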
915 916 def moving_average(self,x, N=2):
916 917 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
917 918
918 919 def gaus(self,xSamples,Amp,Mu,Sigma):
919 920 return ( Amp / ((2*numpy.pi)**0.5 * Sigma) ) * numpy.exp( -( xSamples - Mu )**2 / ( 2 * (Sigma**2) ))
920 921
921 922
922 923
923 924 def Moments(self, ySamples, xSamples):
924 925 Pot = numpy.nansum( ySamples ) # Power, zeroth moment
925 926 yNorm = ySamples / Pot
926 927 Vr = numpy.nansum( yNorm * xSamples ) # Radial velocity, mu, Doppler shift, first moment
927 928 Sigma2 = abs(numpy.nansum( yNorm * ( xSamples - Vr )**2 )) # Second moment
928 929 Desv = Sigma2**0.5 # Standard deviation, spectral width
929 930
930 931 return numpy.array([Pot, Vr, Desv])
931 932
932 933 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit):
933 934
934 935
935 936
936 937 ySamples=numpy.ones([spc.shape[0],spc.shape[1]])
937 938 phase=numpy.ones([spc.shape[0],spc.shape[1]])
938 939 CSPCSamples=numpy.ones([spc.shape[0],spc.shape[1]],dtype=numpy.complex_)
939 940 coherence=numpy.ones([spc.shape[0],spc.shape[1]])
940 941 PhaseSlope=numpy.zeros(spc.shape[0])
941 942 PhaseInter=numpy.ones(spc.shape[0])
942 943 xFrec=AbbsisaRange[0][0:spc.shape[1]]
943 944 xVel =AbbsisaRange[2][0:spc.shape[1]]
944 945 Vv=numpy.empty(spc.shape[2])*0
945 946 SPCav = numpy.average(spc, axis=0)-numpy.average(noise) #spc[0]-noise[0]#
946 947
947 948 SPCmoments = self.Moments(SPCav[:,Height], xVel )
948 949 CSPCmoments = []
949 950 cspcNoise = numpy.empty(3)
950 951
951 952 '''Getting Eij and Nij'''
952 953
953 954 Xi01=ChanDist[0][0]
954 955 Eta01=ChanDist[0][1]
955 956
956 957 Xi02=ChanDist[1][0]
957 958 Eta02=ChanDist[1][1]
958 959
959 960 Xi12=ChanDist[2][0]
960 961 Eta12=ChanDist[2][1]
961 962
962 963 z = spc.copy()
963 964 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
964 965
965 966 for i in range(spc.shape[0]):
966 967
967 968 '''****** Line of Data SPC ******'''
968 969 zline=z[i,:,Height].copy() - noise[i] # Noise is subtracted
969 970
970 971 '''****** SPC is normalized ******'''
971 972 SmoothSPC =self.moving_average(zline.copy(),N=1) # The spectrum is smoothed
972 973 FactNorm = SmoothSPC/numpy.nansum(SmoothSPC) # Normalized and smoothed SPC
973 974
974 975 xSamples = xFrec # The frequency range is taken
975 976 ySamples[i] = FactNorm # The normalized SPC values are taken
976 977
977 978 for i in range(spc.shape[0]):
978 979
979 980 '''****** Line of Data CSPC ******'''
980 981 cspcLine = ( cspc[i,:,Height].copy())# - noise[i] ) # no! noise is subtracted
981 982 SmoothCSPC =self.moving_average(cspcLine,N=1) # The cross-spectrum is smoothed
982 983 cspcNorm = SmoothCSPC/numpy.nansum(SmoothCSPC) # Normalized and smoothed CSPC
983 984
984 985 '''****** CSPC is normalized with respect to Briggs and Vincent ******'''
985 986 chan_index0 = pairsList[i][0]
986 987 chan_index1 = pairsList[i][1]
987 988
988 989 CSPCFactor= numpy.abs(numpy.nansum(ySamples[chan_index0]))**2 * numpy.abs(numpy.nansum(ySamples[chan_index1]))**2
989 990 CSPCNorm = cspcNorm / numpy.sqrt(CSPCFactor)
990 991
991 992 CSPCSamples[i] = CSPCNorm
992 993
993 994 coherence[i] = numpy.abs(CSPCSamples[i]) / numpy.sqrt(CSPCFactor)
994 995
995 996 #coherence[i]= self.moving_average(coherence[i],N=1)
996 997
997 998 phase[i] = self.moving_average( numpy.arctan2(CSPCSamples[i].imag, CSPCSamples[i].real),N=1)#*180/numpy.pi
998 999
999 1000 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPCSamples[0]), xSamples),
1000 1001 self.Moments(numpy.abs(CSPCSamples[1]), xSamples),
1001 1002 self.Moments(numpy.abs(CSPCSamples[2]), xSamples)])
1002 1003
1003 1004
1004 1005 popt=[1e-10,0,1e-10]
1005 1006 popt01, popt02, popt12 = [1e-10,1e-10,1e-10], [1e-10,1e-10,1e-10] ,[1e-10,1e-10,1e-10]
1006 1007 FitGauss01, FitGauss02, FitGauss12 = numpy.empty(len(xSamples))*0, numpy.empty(len(xSamples))*0, numpy.empty(len(xSamples))*0
1007 1008
1008 1009 CSPCMask01 = numpy.abs(CSPCSamples[0])
1009 1010 CSPCMask02 = numpy.abs(CSPCSamples[1])
1010 1011 CSPCMask12 = numpy.abs(CSPCSamples[2])
1011 1012
1012 1013 mask01 = ~numpy.isnan(CSPCMask01)
1013 1014 mask02 = ~numpy.isnan(CSPCMask02)
1014 1015 mask12 = ~numpy.isnan(CSPCMask12)
1015 1016
1016 1017 #mask = ~numpy.isnan(CSPCMask01)
1017 1018 CSPCMask01 = CSPCMask01[mask01]
1018 1019 CSPCMask02 = CSPCMask02[mask02]
1019 1020 CSPCMask12 = CSPCMask12[mask12]
1020 1021 #CSPCMask01 = numpy.ma.masked_invalid(CSPCMask01)
1021 1022
1022 1023
1023 1024
1024 1025 '''***Fit Gauss CSPC01***'''
1025 1026 if dbSNR > SNRlimit and numpy.abs(SPCmoments[1])<3 :
1026 1027 try:
1027 1028 popt01,pcov = curve_fit(self.gaus,xSamples[mask01],numpy.abs(CSPCMask01),p0=CSPCmoments[0])
1028 1029 popt02,pcov = curve_fit(self.gaus,xSamples[mask02],numpy.abs(CSPCMask02),p0=CSPCmoments[1])
1029 1030 popt12,pcov = curve_fit(self.gaus,xSamples[mask12],numpy.abs(CSPCMask12),p0=CSPCmoments[2])
1030 1031 FitGauss01 = self.gaus(xSamples,*popt01)
1031 1032 FitGauss02 = self.gaus(xSamples,*popt02)
1032 1033 FitGauss12 = self.gaus(xSamples,*popt12)
1033 1034 except:
1034 1035 FitGauss01=numpy.ones(len(xSamples))*numpy.mean(numpy.abs(CSPCSamples[0]))
1035 1036 FitGauss02=numpy.ones(len(xSamples))*numpy.mean(numpy.abs(CSPCSamples[1]))
1036 1037 FitGauss12=numpy.ones(len(xSamples))*numpy.mean(numpy.abs(CSPCSamples[2]))
1037 1038
1038 1039
1039 1040 CSPCopt = numpy.vstack([popt01,popt02,popt12])
1040 1041
1041 1042 '''****** Getting fij width ******'''
1042 1043
1043 1044 yMean = numpy.average(ySamples, axis=0) # ySamples[0]
1044 1045
1045 1046 '''******* Getting fitting Gaussian *******'''
1046 1047 meanGauss = sum(xSamples*yMean) / len(xSamples) # Mu, radial velocity (frequency)
1047 1048 sigma2 = sum(yMean*(xSamples-meanGauss)**2) / len(xSamples) # Variance, spectral width (frequency)
1048 1049
1049 1050 yMoments = self.Moments(yMean, xSamples)
1050 1051
1051 1052 if dbSNR > SNRlimit and numpy.abs(SPCmoments[1])<3: # and abs(meanGauss/sigma2) > 0.00001:
1052 1053 try:
1053 1054 popt,pcov = curve_fit(self.gaus,xSamples,yMean,p0=yMoments)
1054 1055 FitGauss=self.gaus(xSamples,*popt)
1055 1056
1056 1057 except :#RuntimeError:
1057 1058 FitGauss=numpy.ones(len(xSamples))*numpy.mean(yMean)
1058 1059
1059 1060
1060 1061 else:
1061 1062 FitGauss=numpy.ones(len(xSamples))*numpy.mean(yMean)
1062 1063
1063 1064
1064 1065
1065 1066 '''****** Getting Fij ******'''
1066 1067 Fijcspc = CSPCopt[:,2]/2*3
1067 1068
1068 1069
1069 1070 GaussCenter = popt[1] #xFrec[GCpos]
1070 1071 #Point on the Gaussian X axis where the center is located
1071 1072 ClosestCenter = xSamples[numpy.abs(xSamples-GaussCenter).argmin()]
1072 1073 PointGauCenter = numpy.where(xSamples==ClosestCenter)[0][0]
1073 1074
1074 1075 #e^-1 point located on the Gaussian
1075 1076 PeMinus1 = numpy.max(FitGauss)* numpy.exp(-1)
1076 1077 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The point closest to "PeMinus1" within "FitGauss"
1077 1078 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
1078 1079
1079 1080 if xSamples[PointFij] > xSamples[PointGauCenter]:
1080 1081 Fij = xSamples[PointFij] - xSamples[PointGauCenter]
1081 1082
1082 1083 else:
1083 1084 Fij = xSamples[PointGauCenter] - xSamples[PointFij]
1084 1085
1085 1086
1086 1087 '''****** Taking frequency ranges from SPCs ******'''
1087 1088
1088 1089
1089 1090 #GaussCenter = popt[1] #First moment 01
1090 1091 GauWidth = popt[2] *3/2 #Bandwidth of Gau01
1091 1092 Range = numpy.empty(2)
1092 1093 Range[0] = GaussCenter - GauWidth
1093 1094 Range[1] = GaussCenter + GauWidth
1094 1095 #Points on the Gaussian X axis where the bandwidth lies (min:max)
1095 1096 ClosRangeMin = xSamples[numpy.abs(xSamples-Range[0]).argmin()]
1096 1097 ClosRangeMax = xSamples[numpy.abs(xSamples-Range[1]).argmin()]
1097 1098
1098 1099 PointRangeMin = numpy.where(xSamples==ClosRangeMin)[0][0]
1099 1100 PointRangeMax = numpy.where(xSamples==ClosRangeMax)[0][0]
1100 1101
1101 1102 Range=numpy.array([ PointRangeMin, PointRangeMax ])
1102 1103
1103 1104 FrecRange = xFrec[ Range[0] : Range[1] ]
1104 1105 VelRange = xVel[ Range[0] : Range[1] ]
1105 1106
1106 1107
1107 1108 '''****** Getting CSPC Slope ******'''
1108 1109
1109 1110 for i in range(spc.shape[0]):
1110 1111
1111 1112 if len(FrecRange)>5 and len(FrecRange)<spc.shape[1]*0.3:
1112 1113 PhaseRange=self.moving_average(phase[i,Range[0]:Range[1]],N=3)
1113 1114
1114 1115 '''***********************VelRange******************'''
1115 1116
1116 1117 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
1117 1118
1118 1119 if len(FrecRange) == len(PhaseRange):
1119 1120 try:
1120 1121 slope, intercept, r_value, p_value, std_err = stats.linregress(FrecRange[mask], PhaseRange[mask])
1121 1122 PhaseSlope[i]=slope
1122 1123 PhaseInter[i]=intercept
1123 1124 except:
1124 1125 PhaseSlope[i]=0
1125 1126 PhaseInter[i]=0
1126 1127 else:
1127 1128 PhaseSlope[i]=0
1128 1129 PhaseInter[i]=0
1129 1130 else:
1130 1131 PhaseSlope[i]=0
1131 1132 PhaseInter[i]=0
1132 1133
1133 1134
1134 1135 '''Getting constant C'''
1135 1136 cC=(Fij*numpy.pi)**2
1136 1137
1137 1138 '''****** Getting constants F and G ******'''
1138 1139 MijEijNij=numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
1139 1140 MijResult0=(-PhaseSlope[1]*cC) / (2*numpy.pi)
1140 1141 MijResult1=(-PhaseSlope[2]*cC) / (2*numpy.pi)
1141 1142 MijResults=numpy.array([MijResult0,MijResult1])
1142 1143 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
1143 1144
1144 1145 '''****** Getting constants A, B and H ******'''
1145 1146 W01=numpy.nanmax( FitGauss01 ) #numpy.abs(CSPCSamples[0]))
1146 1147 W02=numpy.nanmax( FitGauss02 ) #numpy.abs(CSPCSamples[1]))
1147 1148 W12=numpy.nanmax( FitGauss12 ) #numpy.abs(CSPCSamples[2]))
1148 1149
1149 1150 WijResult0=((cF*Xi01+cG*Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi/cC))
1150 1151 WijResult1=((cF*Xi02+cG*Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi/cC))
1151 1152 WijResult2=((cF*Xi12+cG*Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi/cC))
1152 1153
1153 1154 WijResults=numpy.array([WijResult0, WijResult1, WijResult2])
1154 1155
1155 1156 WijEijNij=numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
1156 1157 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
1157 1158
1158 1159 VxVy=numpy.array([[cA,cH],[cH,cB]])
1159 1160 VxVyResults=numpy.array([-cF,-cG])
1160 1161 (Vx,Vy) = numpy.linalg.solve(VxVy, VxVyResults)
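            # The horizontal components come from the symmetric 2x2 system
            # [[A, H], [H, B]] . [Vx, Vy] = [-F, -G], with A, B, H, F and G obtained above
            # from the fitted cross-spectral Gaussians and the phase slopes.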
1161 1162
1162 1163 Vzon = Vy
1163 1164 Vmer = Vx
1164 1165 Vmag=numpy.sqrt(Vzon**2+Vmer**2)
1165 1166 Vang=numpy.arctan2(Vmer,Vzon)
1166 1167 if numpy.abs( popt[1] ) < 3.5 and len(FrecRange)>4:
1167 1168 Vver=popt[1]
1168 1169 else:
1169 1170 Vver=numpy.NaN
1170 1171 FitGaussCSPC = numpy.array([FitGauss01,FitGauss02,FitGauss12])
1171 1172
1172 1173
1173 1174 return Vzon, Vmer, Vver, GaussCenter, PhaseSlope, FitGaussCSPC
1174 1175
1175 1176 class SpectralMoments(Operation):
1176 1177
1177 1178 '''
1178 1179 Function SpectralMoments()
1179 1180
1180 1181 Calculates moments (power, mean, standard deviation) and SNR of the signal
1181 1182
1182 1183 Type of dataIn: Spectra
1183 1184
1184 1185 Configuration Parameters:
1185 1186
1186 1187 dirCosx : Direction cosine along the X axis
1187 1188 dirCosy : Direction cosine along the Y axis
1188 1189
1189 1190 elevation :
1190 1191 azimuth :
1191 1192
1192 1193 Input:
1193 1194 channelList : simple channel list to select e.g. [2,3,7]
1194 1195 self.dataOut.data_pre : Spectral data
1195 1196 self.dataOut.abscissaList : List of frequencies
1196 1197 self.dataOut.noise : Noise level per channel
1197 1198
1198 1199 Affected:
1199 1200 self.dataOut.moments : Parameters per channel
1200 1201 self.dataOut.data_SNR : SNR per channel
1201 1202
1202 1203 '''
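    # Illustrative usage sketch (not part of the original flow):
    #
    #     dataOut = SpectralMoments().run(dataOut)
    #
    # which fills dataOut.data_SNR, data_DOP, data_MEAN and data_STD from the self-spectra
    # in dataOut.data_pre[0], using dataOut.abscissaList and dataOut.noise.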
1203 1204
1204 1205 def run(self, dataOut):
1205 1206
1206 1207 #dataOut.data_pre = dataOut.data_pre[0]
1207 1208 data = dataOut.data_pre[0]
1208 1209 absc = dataOut.abscissaList[:-1]
1209 1210 noise = dataOut.noise
1210 1211 nChannel = data.shape[0]
1211 1212 data_param = numpy.zeros((nChannel, 4, data.shape[2]))
1212 1213
1213 1214 for ind in range(nChannel):
1214 1215 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )
1215 1216
1216 1217 dataOut.moments = data_param[:,1:,:]
1217 1218 dataOut.data_SNR = data_param[:,0]
1218 1219 dataOut.data_DOP = data_param[:,1]
1219 1220 dataOut.data_MEAN = data_param[:,2]
1220 1221 dataOut.data_STD = data_param[:,3]
1221 1222 return dataOut
1222 1223
1223 1224 def __calculateMoments(self, oldspec, oldfreq, n0,
1224 1225 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
1225 1226
1226 1227 if (nicoh is None): nicoh = 1
1227 1228 if (graph is None): graph = 0
1228 1229 if (smooth is None): smooth = 0
1229 1230 elif (smooth < 3): smooth = 0
1230 1231
1231 1232 if (type1 is None): type1 = 0
1232 1233 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
1233 1234 if (snrth is None): snrth = -3
1234 1235 if (dc is None): dc = 0
1235 1236 if (aliasing is None): aliasing = 0
1236 1237 if (oldfd is None): oldfd = 0
1237 1238 if (wwauto is None): wwauto = 0
1238 1239
1239 1240 if (n0 < 1.e-20): n0 = 1.e-20
1240 1241
1241 1242 freq = oldfreq
1242 1243 vec_power = numpy.zeros(oldspec.shape[1])
1243 1244 vec_fd = numpy.zeros(oldspec.shape[1])
1244 1245 vec_w = numpy.zeros(oldspec.shape[1])
1245 1246 vec_snr = numpy.zeros(oldspec.shape[1])
1246 1247
1247 1248 oldspec = numpy.ma.masked_invalid(oldspec)
1248 1249
1249 1250 for ind in range(oldspec.shape[1]):
1250 1251
1251 1252 spec = oldspec[:,ind]
1252 1253 aux = spec*fwindow
1253 1254 max_spec = aux.max()
1254 1255 m = list(aux).index(max_spec)
1255 1256
1256 1257 #Smooth
1257 1258 if (smooth == 0): spec2 = spec
1258 1259 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
1259 1260
1260 1261 # Moments calculation
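            # Starting at the peak bin m, bb marks the first bin above the peak and ss the
            # last bin below it where the (smoothed) spectrum drops under the noise level n0;
            # the noise-subtracted moments are then integrated only over that valid window.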
1261 1262 bb = spec2[list(range(m,spec2.size))]
1262 1263 bb = (bb<n0).nonzero()
1263 1264 bb = bb[0]
1264 1265
1265 1266 ss = spec2[list(range(0,m + 1))]
1266 1267 ss = (ss<n0).nonzero()
1267 1268 ss = ss[0]
1268 1269
1269 1270 if (bb.size == 0):
1270 1271 bb0 = spec.size - 1 - m
1271 1272 else:
1272 1273 bb0 = bb[0] - 1
1273 1274 if (bb0 < 0):
1274 1275 bb0 = 0
1275 1276
1276 1277 if (ss.size == 0): ss1 = 1
1277 1278 else: ss1 = max(ss) + 1
1278 1279
1279 1280 if (ss1 > m): ss1 = m
1280 1281
1281 1282 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
1282 1283 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
1283 1284 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
1284 1285 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
1285 1286 snr = (spec2.mean()-n0)/n0
1286 1287
1287 1288 if (snr < 1.e-20) :
1288 1289 snr = 1.e-20
1289 1290
1290 1291 vec_power[ind] = power
1291 1292 vec_fd[ind] = fd
1292 1293 vec_w[ind] = w
1293 1294 vec_snr[ind] = snr
1294 1295
1295 1296 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
1296 1297 return moments
1297 1298
1298 1299 #------------------ Get SA Parameters --------------------------
1299 1300
1300 1301 def GetSAParameters(self):
1301 1302 #SA in frequency
1302 1303 pairslist = self.dataOut.groupList
1303 1304 num_pairs = len(pairslist)
1304 1305
1305 1306 vel = self.dataOut.abscissaList
1306 1307 spectra = self.dataOut.data_pre
1307 1308 cspectra = self.dataIn.data_cspc
1308 1309 delta_v = vel[1] - vel[0]
1309 1310
1310 1311 #Calculating the power spectrum
1311 1312 spc_pow = numpy.sum(spectra, 3)*delta_v
1312 1313 #Normalizing Spectra
1313 1314 norm_spectra = spectra/spc_pow
1314 1315 #Calculating the norm_spectra at peak
1315 1316 max_spectra = numpy.max(norm_spectra, 3)
1316 1317
1317 1318 #Normalizing Cross Spectra
1318 1319 norm_cspectra = numpy.zeros(cspectra.shape)
1319 1320
1320 1321 for i in range(num_pairs):
1321 1322 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
1322 1323
1323 1324 max_cspectra = numpy.max(norm_cspectra,2)
1324 1325 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
1325 1326
cspc_par = numpy.zeros((num_pairs, 4, cspectra.shape[2])) # buffer for the cross-spectral moments (was undefined in the original)
1326 1327 for i in range(num_pairs):
1327 1328 cspc_par[i,:,:] = self.__calculateMoments(numpy.abs(norm_cspectra[i,:,:]), vel, 0) # hedged repair: bind the private method and pass per-pair magnitudes; n0=0 is an assumed noise floor
1328 1329 #------------------- Get Lags ----------------------------------
1329 1330
1330 1331 class SALags(Operation):
1331 1332 '''
1332 1333 Function SALags()
1333 1334
1334 1335 Input:
1335 1336 self.dataOut.data_pre
1336 1337 self.dataOut.abscissaList
1337 1338 self.dataOut.noise
1338 1339 self.dataOut.normFactor
1339 1340 self.dataOut.data_SNR
1340 1341 self.dataOut.groupList
1341 1342 self.dataOut.nChannels
1342 1343
1343 1344 Affected:
1344 1345 self.dataOut.data_param
1345 1346
1346 1347 '''
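    # Illustrative usage sketch (not part of the original flow):
    #
    #     SALags().run(dataOut)
    #
    # dataOut.data_param then stacks the cross- and auto-correlation lag times followed by
    # the lag-1 phase of the mean ACF, which WindProfiler.techniqueSA later consumes as 'tau'.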
1347 1348 def run(self, dataOut):
1348 1349 data_acf = dataOut.data_pre[0]
1349 1350 data_ccf = dataOut.data_pre[1]
1350 1351 normFactor_acf = dataOut.normFactor[0]
1351 1352 normFactor_ccf = dataOut.normFactor[1]
1352 1353 pairs_acf = dataOut.groupList[0]
1353 1354 pairs_ccf = dataOut.groupList[1]
1354 1355
1355 1356 nHeights = dataOut.nHeights
1356 1357 absc = dataOut.abscissaList
1357 1358 noise = dataOut.noise
1358 1359 SNR = dataOut.data_SNR
1359 1360 nChannels = dataOut.nChannels
1360 1361 # pairsList = dataOut.groupList
1361 1362 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
1362 1363
1363 1364 for l in range(len(pairs_acf)):
1364 1365 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
1365 1366
1366 1367 for l in range(len(pairs_ccf)):
1367 1368 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
1368 1369
1369 1370 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
1370 1371 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
1371 1372 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
1372 1373 return
1373 1374
1374 1375 # def __getPairsAutoCorr(self, pairsList, nChannels):
1375 1376 #
1376 1377 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1377 1378 #
1378 1379 # for l in range(len(pairsList)):
1379 1380 # firstChannel = pairsList[l][0]
1380 1381 # secondChannel = pairsList[l][1]
1381 1382 #
1382 1383 # #Obtaining autocorrelation pairs
1383 1384 # if firstChannel == secondChannel:
1384 1385 # pairsAutoCorr[firstChannel] = int(l)
1385 1386 #
1386 1387 # pairsAutoCorr = pairsAutoCorr.astype(int)
1387 1388 #
1388 1389 # pairsCrossCorr = range(len(pairsList))
1389 1390 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1390 1391 #
1391 1392 # return pairsAutoCorr, pairsCrossCorr
1392 1393
1393 1394 def __calculateTaus(self, data_acf, data_ccf, lagRange):
1394 1395
1395 1396 lag0 = data_acf.shape[1]//2
1396 1397 #Autocorrelation function
1397 1398 mean_acf = stats.nanmean(data_acf, axis = 0)
1398 1399
1399 1400 #Obtaining the TauCross index
1400 1401 ind_ccf = data_ccf.argmax(axis = 1)
1401 1402 #Obtaining the TauAuto index
1402 1403 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
1403 1404 ccf_lag0 = data_ccf[:,lag0,:]
1404 1405
1405 1406 for i in range(ccf_lag0.shape[0]):
1406 1407 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
1407 1408
1408 1409 #Obtaining TauCross and TauAuto
1409 1410 tau_ccf = lagRange[ind_ccf]
1410 1411 tau_acf = lagRange[ind_acf]
1411 1412
1412 1413 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
1413 1414
1414 1415 tau_ccf[Nan1,Nan2] = numpy.nan
1415 1416 tau_acf[Nan1,Nan2] = numpy.nan
1416 1417 tau = numpy.vstack((tau_ccf,tau_acf))
1417 1418
1418 1419 return tau
1419 1420
1420 1421 def __calculateLag1Phase(self, data, lagTRange):
1421 1422 data1 = stats.nanmean(data, axis = 0)
1422 1423 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
1423 1424
1424 1425 phase = numpy.angle(data1[lag1,:])
1425 1426
1426 1427 return phase
1427 1428
1428 1429 class SpectralFitting(Operation):
1429 1430 '''
1430 1431 Function SpectralFitting()
1431 1432
1432 1433 Input:
1433 1434 Output:
1434 1435 Variables modified:
1435 1436 '''
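    # Illustrative usage sketch (not part of the original flow); the path and 'my_fit_model'
    # are hypothetical names, but the module must expose setConstants, initialValuesFunction,
    # modelFunction and errorFunction as called below:
    #
    #     SpectralFitting().run(dataOut, getSNR=True, path='/path/to/models',
    #                           file='my_fit_model', groupList=[[0,1],[2,3]])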
1436 1437
1437 1438 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
1438 1439
1439 1440
1440 1441 if path != None:
1441 1442 sys.path.append(path)
1442 1443 self.dataOut.library = importlib.import_module(file)
1443 1444
1444 1445 #To be inserted as a parameter
1445 1446 groupArray = numpy.array(groupList)
1446 1447 # groupArray = numpy.array([[0,1],[2,3]])
1447 1448 self.dataOut.groupList = groupArray
1448 1449
1449 1450 nGroups = groupArray.shape[0]
1450 1451 nChannels = self.dataIn.nChannels
1451 1452 nHeights=self.dataIn.heightList.size
1452 1453
1453 1454 #Parameters Array
1454 1455 self.dataOut.data_param = None
1455 1456
1456 1457 #Set constants
1457 1458 constants = self.dataOut.library.setConstants(self.dataIn)
1458 1459 self.dataOut.constants = constants
1459 1460 M = self.dataIn.normFactor
1460 1461 N = self.dataIn.nFFTPoints
1461 1462 ippSeconds = self.dataIn.ippSeconds
1462 1463 K = self.dataIn.nIncohInt
1463 1464 pairsArray = numpy.array(self.dataIn.pairsList)
1464 1465
1465 1466 #List of possible combinations
1466 1467 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialize so the pairs can be iterated more than once
1467 1468 indCross = numpy.zeros(len(listComb), dtype = 'int')
1468 1469
1469 1470 if getSNR:
1470 1471 listChannels = groupArray.reshape((groupArray.size))
1471 1472 listChannels.sort()
1472 1473 noise = self.dataIn.getNoise()
1473 1474 self.dataOut.data_SNR = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
1474 1475
1475 1476 for i in range(nGroups):
1476 1477 coord = groupArray[i,:]
1477 1478
1478 1479 #Input data array
1479 1480 data = self.dataIn.data_spc[coord,:,:]/(M*N)
1480 1481 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
1481 1482
1482 1483 #Cross Spectra data array for Covariance Matrixes
1483 1484 ind = 0
1484 1485 for pairs in listComb:
1485 1486 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
1486 1487 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
1487 1488 ind += 1
1488 1489 dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
1489 1490 dataCross = dataCross**2/K
1490 1491
1491 1492 for h in range(nHeights):
1492 1493
1493 1494 #Input
1494 1495 d = data[:,h]
1495 1496
1496 1497 #Covariance Matrix
1497 1498 D = numpy.diag(d**2/K)
1498 1499 ind = 0
1499 1500 for pairs in listComb:
1500 1501 #Coordinates in Covariance Matrix
1501 1502 x = pairs[0]
1502 1503 y = pairs[1]
1503 1504 #Channel Index
1504 1505 S12 = dataCross[ind,:,h]
1505 1506 D12 = numpy.diag(S12)
1506 1507 #Completing Covariance Matrix with Cross Spectras
1507 1508 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
1508 1509 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
1509 1510 ind += 1
1510 1511 Dinv=numpy.linalg.inv(D)
1511 1512 L=numpy.linalg.cholesky(Dinv)
1512 1513 LT=L.T
1513 1514
1514 1515 dp = numpy.dot(LT,d)
1515 1516
1516 1517 #Initial values
1517 1518 data_spc = self.dataIn.data_spc[coord,:,h]
1518 1519
1519 1520 if (h>0)and(error1[3]<5):
1520 1521 p0 = self.dataOut.data_param[i,:,h-1]
1521 1522 else:
1522 1523 p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
1523 1524
1524 1525 try:
1525 1526 #Least Squares
1526 1527 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
1527 1528 # minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
1528 1529 #Chi square error
1529 1530 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
1530 1531 #Error with Jacobian
1531 1532 error1 = self.dataOut.library.errorFunction(minp,constants,LT)
1532 1533 except:
1533 1534 minp = p0*numpy.nan
1534 1535 error0 = numpy.nan
1535 1536 error1 = p0*numpy.nan
1536 1537
1537 1538 #Save
1538 1539 if self.dataOut.data_param is None:
1539 1540 self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
1540 1541 self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
1541 1542
1542 1543 self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
1543 1544 self.dataOut.data_param[i,:,h] = minp
1544 1545 return
1545 1546
1546 1547 def __residFunction(self, p, dp, LT, constants):
1547 1548
1548 1549 fm = self.dataOut.library.modelFunction(p, constants)
1549 1550 fmp=numpy.dot(LT,fm)
1550 1551
1551 1552 return dp-fmp
1552 1553
1553 1554 def __getSNR(self, z, noise):
1554 1555
1555 1556 avg = numpy.average(z, axis=1)
1556 1557 SNR = (avg.T-noise)/noise
1557 1558 SNR = SNR.T
1558 1559 return SNR
1559 1560
1560 1561 def __chisq(p,chindex,hindex):
1561 1562 #similar to Resid but calculates CHI**2
1562 1563 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
1563 1564 dp=numpy.dot(LT,d)
1564 1565 fmp=numpy.dot(LT,fm)
1565 1566 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
1566 1567 return chisq
1567 1568
1568 1569 class WindProfiler(Operation):
1569 1570
1570 1571 __isConfig = False
1571 1572
1572 1573 __initime = None
1573 1574 __lastdatatime = None
1574 1575 __integrationtime = None
1575 1576
1576 1577 __buffer = None
1577 1578
1578 1579 __dataReady = False
1579 1580
1580 1581 __firstdata = None
1581 1582
1582 1583 n = None
1583 1584
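    # Illustrative usage sketch (not part of the original flow); the pointing values are
    # hypothetical and technique may be 'DBS', 'SA', 'Meteors' or 'Meteors1':
    #
    #     WindProfiler().run(dataOut, technique='DBS', correctAzimuth=0,
    #                        elevation=[45., 45., 90.], azimuth=[0., 90., 0.],
    #                        horizontalOnly=False, correctFactor=1)
    #
    # After the call, dataOut.data_output holds the estimated wind components.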
1584 1585 def __init__(self):
1585 1586 Operation.__init__(self)
1586 1587
1587 1588 def __calculateCosDir(self, elev, azim):
1588 1589 zen = (90 - elev)*numpy.pi/180
1589 1590 azim = azim*numpy.pi/180
1590 1591 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
1591 1592 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
1592 1593
1593 1594 signX = numpy.sign(numpy.cos(azim))
1594 1595 signY = numpy.sign(numpy.sin(azim))
1595 1596
1596 1597 cosDirX = numpy.copysign(cosDirX, signX)
1597 1598 cosDirY = numpy.copysign(cosDirY, signY)
1598 1599 return cosDirX, cosDirY
1599 1600
1600 1601 def __calculateAngles(self, theta_x, theta_y, azimuth):
1601 1602
1602 1603 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
1603 1604 zenith_arr = numpy.arccos(dir_cosw)
1604 1605 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
1605 1606
1606 1607 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
1607 1608 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
1608 1609
1609 1610 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
1610 1611
1611 1612 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
1612 1613
1613 1614 #
1614 1615 if horOnly:
1615 1616 A = numpy.c_[dir_cosu,dir_cosv]
1616 1617 else:
1617 1618 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
1618 1619 A = numpy.asmatrix(A)
1619 1620 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
1620 1621
1621 1622 return A1
1622 1623
1623 1624 def __correctValues(self, heiRang, phi, velRadial, SNR):
1624 1625 listPhi = phi.tolist()
1625 1626 maxid = listPhi.index(max(listPhi))
1626 1627 minid = listPhi.index(min(listPhi))
1627 1628
1628 1629 rango = list(range(len(phi)))
1629 1630 # rango = numpy.delete(rango,maxid)
1630 1631
1631 1632 heiRang1 = heiRang*math.cos(phi[maxid])
1632 1633 heiRangAux = heiRang*math.cos(phi[minid])
1633 1634 indOut = (heiRang1 < heiRangAux[0]).nonzero()
1634 1635 heiRang1 = numpy.delete(heiRang1,indOut)
1635 1636
1636 1637 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
1637 1638 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
1638 1639
1639 1640 for i in rango:
1640 1641 x = heiRang*math.cos(phi[i])
1641 1642 y1 = velRadial[i,:]
1642 1643 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
1643 1644
1644 1645 x1 = heiRang1
1645 1646 y11 = f1(x1)
1646 1647
1647 1648 y2 = SNR[i,:]
1648 1649 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
1649 1650 y21 = f2(x1)
1650 1651
1651 1652 velRadial1[i,:] = y11
1652 1653 SNR1[i,:] = y21
1653 1654
1654 1655 return heiRang1, velRadial1, SNR1
1655 1656
1656 1657 def __calculateVelUVW(self, A, velRadial):
1657 1658
1658 1659 #Matrix operation
1659 1660 # velUVW = numpy.zeros((velRadial.shape[1],3))
1660 1661 # for ind in range(velRadial.shape[1]):
1661 1662 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
1662 1663 # velUVW = velUVW.transpose()
1663 1664 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
1664 1665 velUVW[:,:] = numpy.dot(A,velRadial)
1665 1666
1666 1667
1667 1668 return velUVW
1668 1669
1669 1670 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
1670 1671
1671 1672 def techniqueDBS(self, kwargs):
1672 1673 """
1673 1674 Function that implements the Doppler Beam Swinging (DBS) technique.
1674 1675
1675 1676 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
1676 1677 Direction correction (if necessary), Ranges and SNR
1677 1678
1678 1679 Output: Winds estimation (Zonal, Meridional and Vertical)
1679 1680
1680 1681 Parameters affected: Winds, height range, SNR
1681 1682 """
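        # Expected kwargs (taken from the code below): 'velRadial', 'heightList', 'SNR',
        # either ('dirCosx', 'dirCosy') or ('elevation', 'azimuth'), 'correctAzimuth', and
        # optionally 'horizontalOnly', 'correctFactor' and 'channelList'.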
1682 1683 velRadial0 = kwargs['velRadial']
1683 1684 heiRang = kwargs['heightList']
1684 1685 SNR0 = kwargs['SNR']
1685 1686
1686 1687 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
1687 1688 theta_x = numpy.array(kwargs['dirCosx'])
1688 1689 theta_y = numpy.array(kwargs['dirCosy'])
1689 1690 else:
1690 1691 elev = numpy.array(kwargs['elevation'])
1691 1692 azim = numpy.array(kwargs['azimuth'])
1692 1693 theta_x, theta_y = self.__calculateCosDir(elev, azim)
1693 1694 azimuth = kwargs['correctAzimuth']
1694 1695 if 'horizontalOnly' in kwargs:
1695 1696 horizontalOnly = kwargs['horizontalOnly']
1696 1697 else: horizontalOnly = False
1697 1698 if 'correctFactor' in kwargs:
1698 1699 correctFactor = kwargs['correctFactor']
1699 1700 else: correctFactor = 1
1700 1701 if 'channelList' in kwargs:
1701 1702 channelList = kwargs['channelList']
1702 1703 if len(channelList) == 2:
1703 1704 horizontalOnly = True
1704 1705 arrayChannel = numpy.array(channelList)
1705 1706 velRadial0 = velRadial0[arrayChannel,:] # select the requested channels (the original referenced an undefined 'param')
1706 1707 theta_x = theta_x[arrayChannel]
1707 1708 theta_y = theta_y[arrayChannel]
1708 1709
1709 1710 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
1710 1711 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
1711 1712 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
1712 1713
1713 1714 #Computing the velocity components with DBS
1714 1715 winds = self.__calculateVelUVW(A,velRadial1)
1715 1716
1716 1717 return winds, heiRang1, SNR1
1717 1718
1718 1719 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
1719 1720
1720 1721 nPairs = len(pairs_ccf)
1721 1722 posx = numpy.asarray(posx)
1722 1723 posy = numpy.asarray(posy)
1723 1724
1724 1725 #Inverse rotation to align with the azimuth
1725 1726 if azimuth is not None:
1726 1727 azimuth = azimuth*math.pi/180
1727 1728 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
1728 1729 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
1729 1730 else:
1730 1731 posx1 = posx
1731 1732 posy1 = posy
1732 1733
1733 1734 #Distance calculation
1734 1735 distx = numpy.zeros(nPairs)
1735 1736 disty = numpy.zeros(nPairs)
1736 1737 dist = numpy.zeros(nPairs)
1737 1738 ang = numpy.zeros(nPairs)
1738 1739
1739 1740 for i in range(nPairs):
1740 1741 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
1741 1742 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
1742 1743 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
1743 1744 ang[i] = numpy.arctan2(disty[i],distx[i])
1744 1745
1745 1746 return distx, disty, dist, ang
1746 1747 #Matrix calculation
1747 1748 # nPairs = len(pairs)
1748 1749 # ang1 = numpy.zeros((nPairs, 2, 1))
1749 1750 # dist1 = numpy.zeros((nPairs, 2, 1))
1750 1751 #
1751 1752 # for j in range(nPairs):
1752 1753 # dist1[j,0,0] = dist[pairs[j][0]]
1753 1754 # dist1[j,1,0] = dist[pairs[j][1]]
1754 1755 # ang1[j,0,0] = ang[pairs[j][0]]
1755 1756 # ang1[j,1,0] = ang[pairs[j][1]]
1756 1757 #
1757 1758 # return distx,disty, dist1,ang1
1758 1759
1759 1760
1760 1761 def __calculateVelVer(self, phase, lagTRange, _lambda):
1761 1762
1762 1763 Ts = lagTRange[1] - lagTRange[0]
1763 1764 velW = -_lambda*phase/(4*math.pi*Ts)
1764 1765
1765 1766 return velW
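    # velW = -lambda * phase / (4 * pi * Ts), with Ts = lagTRange[1] - lagTRange[0].
    # Worked example with assumed values: lambda = 6 m (50 MHz), phase = 0.1 rad and
    # Ts = 0.01 s give velW ~ -4.8 m/s.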
1766 1767
1767 1768 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
1768 1769 nPairs = tau1.shape[0]
1769 1770 nHeights = tau1.shape[1]
1770 1771 vel = numpy.zeros((nPairs,3,nHeights))
1771 1772 dist1 = numpy.reshape(dist, (dist.size,1))
1772 1773
1773 1774 angCos = numpy.cos(ang)
1774 1775 angSin = numpy.sin(ang)
1775 1776
1776 1777 vel0 = dist1*tau1/(2*tau2**2)
1777 1778 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
1778 1779 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
1779 1780
1780 1781 ind = numpy.where(numpy.isinf(vel))
1781 1782 vel[ind] = numpy.nan
1782 1783
1783 1784 return vel
1784 1785
1785 1786 # def __getPairsAutoCorr(self, pairsList, nChannels):
1786 1787 #
1787 1788 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1788 1789 #
1789 1790 # for l in range(len(pairsList)):
1790 1791 # firstChannel = pairsList[l][0]
1791 1792 # secondChannel = pairsList[l][1]
1792 1793 #
1793 1794 # #Obtaining autocorrelation pairs
1794 1795 # if firstChannel == secondChannel:
1795 1796 # pairsAutoCorr[firstChannel] = int(l)
1796 1797 #
1797 1798 # pairsAutoCorr = pairsAutoCorr.astype(int)
1798 1799 #
1799 1800 # pairsCrossCorr = range(len(pairsList))
1800 1801 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1801 1802 #
1802 1803 # return pairsAutoCorr, pairsCrossCorr
1803 1804
1804 1805 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
1805 1806 def techniqueSA(self, kwargs):
1806 1807
1807 1808 """
1808 1809 Function that implements the Spaced Antenna (SA) technique.
1809 1810
1810 1811 Input: Cross-correlation lag times and lag-1 phase (tau), receiver positions (x and y),
1811 1812 Antenna azimuth and correction factor (if necessary)
1812 1813
1813 1814 Output: Winds estimation (Zonal, Meridional and Vertical)
1814 1815
1815 1816 Parameters affected: Winds
1816 1817 """
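        # Expected kwargs (taken from the code below): 'positionX', 'positionY', 'azimuth',
        # 'groupList' (the CCF pairs are groupList[1]), 'tau' (dataOut.data_param from SALags),
        # '_lambda', 'lagTRange' and optionally 'correctFactor'.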
1817 1818 position_x = kwargs['positionX']
1818 1819 position_y = kwargs['positionY']
1819 1820 azimuth = kwargs['azimuth']
1820 1821
1821 1822 if 'correctFactor' in kwargs:
1822 1823 correctFactor = kwargs['correctFactor']
1823 1824 else:
1824 1825 correctFactor = 1
1825 1826
1826 1827 groupList = kwargs['groupList']
1827 1828 pairs_ccf = groupList[1]
1828 1829 tau = kwargs['tau']
1829 1830 _lambda = kwargs['_lambda']
lagTRange = kwargs['lagTRange'] # lag-time axis (from dataOut.abscissaList); needed below by __calculateVelVer, where it was previously undefined
1830 1831
1831 1832 #Cross Correlation pairs obtained
1832 1833 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
1833 1834 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
1834 1835 # pairsSelArray = numpy.array(pairsSelected)
1835 1836 # pairs = []
1836 1837 #
1837 1838 # #Wind estimation pairs obtained
1838 1839 # for i in range(pairsSelArray.shape[0]/2):
1839 1840 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
1840 1841 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
1841 1842 # pairs.append((ind1,ind2))
1842 1843
1843 1844 indtau = tau.shape[0]//2
1844 1845 tau1 = tau[:indtau,:]
1845 1846 tau2 = tau[indtau:-1,:]
1846 1847 # tau1 = tau1[pairs,:]
1847 1848 # tau2 = tau2[pairs,:]
1848 1849 phase1 = tau[-1,:]
1849 1850
1850 1851 #---------------------------------------------------------------------
1851 1852 #Direct method
1852 1853 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
1853 1854 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
1854 1855 winds = stats.nanmean(winds, axis=0)
1855 1856 #---------------------------------------------------------------------
1856 1857 #General method
1857 1858 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
1858 1859 # #Correlation function coefficients calculation
1859 1860 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
1860 1861 # #Velocity calculation
1861 1862 # winds = self.calculateVelUV(F,G,A,B,H)
1862 1863
1863 1864 #---------------------------------------------------------------------
1864 1865 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
1865 1866 winds = correctFactor*winds
1866 1867 return winds
1867 1868
1868 1869 def __checkTime(self, currentTime, paramInterval, outputInterval):
1869 1870
1870 1871 dataTime = currentTime + paramInterval
1871 1872 deltaTime = dataTime - self.__initime
1872 1873
1873 1874 if deltaTime >= outputInterval or deltaTime < 0:
1874 1875 self.__dataReady = True
1875 1876 return
1876 1877
1877 1878 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
1878 1879 '''
1879 1880 Function that implements the wind estimation technique using detected meteors.
1880 1881
1881 1882 Input: Detected meteors, minimum number of meteors required for wind estimation
1882 1883
1883 1884 Output: Winds estimation (Zonal and Meridional)
1884 1885
1885 1886 Parameters affected: Winds
1886 1887 '''
1887 1888 #Settings
1888 1889 nInt = (heightMax - heightMin)/2
1889 1890 nInt = int(nInt)
1890 1891 winds = numpy.zeros((2,nInt))*numpy.nan
1891 1892
1892 1893 #Filter errors
1893 1894 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
1894 1895 finalMeteor = arrayMeteor[error,:]
1895 1896
1896 1897 #Meteor Histogram
1897 1898 finalHeights = finalMeteor[:,2]
1898 1899 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
1899 1900 nMeteorsPerI = hist[0]
1900 1901 heightPerI = hist[1]
1901 1902
1902 1903 #Sorting meteors by height
1903 1904 indSort = finalHeights.argsort()
1904 1905 finalMeteor2 = finalMeteor[indSort,:]
1905 1906
1906 1907 # Calculating winds
1907 1908 ind1 = 0
1908 1909 ind2 = 0
1909 1910
1910 1911 for i in range(nInt):
1911 1912 nMet = nMeteorsPerI[i]
1912 1913 ind1 = ind2
1913 1914 ind2 = ind1 + nMet
1914 1915
1915 1916 meteorAux = finalMeteor2[ind1:ind2,:]
1916 1917
1917 1918 if meteorAux.shape[0] >= meteorThresh:
1918 1919 vel = meteorAux[:, 6]
1919 1920 zen = meteorAux[:, 4]*numpy.pi/180
1920 1921 azim = meteorAux[:, 3]*numpy.pi/180
1921 1922
1922 1923 n = numpy.cos(zen)
1923 1924 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
1924 1925 # l = m*numpy.tan(azim)
1925 1926 l = numpy.sin(zen)*numpy.sin(azim)
1926 1927 m = numpy.sin(zen)*numpy.cos(azim)
1927 1928
1928 1929 A = numpy.vstack((l, m)).transpose()
1929 1930 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
1930 1931 windsAux = numpy.dot(A1, vel)
1931 1932
1932 1933 winds[0,i] = windsAux[0]
1933 1934 winds[1,i] = windsAux[1]
1934 1935
1935 1936 return winds, heightPerI[:-1]
1936 1937
1937 1938 def techniqueNSM_SA(self, **kwargs):
1938 1939 metArray = kwargs['metArray']
1939 1940 heightList = kwargs['heightList']
1940 1941 timeList = kwargs['timeList']
1941 1942
1942 1943 rx_location = kwargs['rx_location']
1943 1944 groupList = kwargs['groupList']
1944 1945 azimuth = kwargs['azimuth']
1945 1946 dfactor = kwargs['dfactor']
1946 1947 k = kwargs['k']
1947 1948
1948 1949 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
1949 1950 d = dist*dfactor
1950 1951 #Phase calculation
1951 1952 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
1952 1953
1953 1954 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
1954 1955
1955 1956 velEst = numpy.zeros((heightList.size,2))*numpy.nan
1956 1957 azimuth1 = azimuth1*numpy.pi/180
1957 1958
1958 1959 for i in range(heightList.size):
1959 1960 h = heightList[i]
1960 1961 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
1961 1962 metHeight = metArray1[indH,:]
1962 1963 if metHeight.shape[0] >= 2:
1963 1964 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
1964 1965 iazim = metHeight[:,1].astype(int)
1965 1966 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
1966 1967 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
1967 1968 A = numpy.asmatrix(A)
1968 1969 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
1969 1970 velHor = numpy.dot(A1,velAux)
1970 1971
1971 1972 velEst[i,:] = numpy.squeeze(velHor)
1972 1973 return velEst
1973 1974
1974 1975 def __getPhaseSlope(self, metArray, heightList, timeList):
1975 1976 meteorList = []
1976 1977 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
1977 1978 #Putting back together the meteor matrix
1978 1979 utctime = metArray[:,0]
1979 1980 uniqueTime = numpy.unique(utctime)
1980 1981
1981 1982 phaseDerThresh = 0.5
1982 1983 ippSeconds = timeList[1] - timeList[0]
1983 1984 sec = numpy.where(timeList>1)[0][0]
1984 1985 nPairs = metArray.shape[1] - 6
1985 1986 nHeights = len(heightList)
1986 1987
1987 1988 for t in uniqueTime:
1988 1989 metArray1 = metArray[utctime==t,:]
1989 1990 # phaseDerThresh = numpy.pi/4 #reducir Phase thresh
1990 1991 tmet = metArray1[:,1].astype(int)
1991 1992 hmet = metArray1[:,2].astype(int)
1992 1993
1993 1994 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
1994 1995 metPhase[:,:] = numpy.nan
1995 1996 metPhase[:,hmet,tmet] = metArray1[:,6:].T
1996 1997
1997 1998 #Delete short trails
1998 1999 metBool = ~numpy.isnan(metPhase[0,:,:])
1999 2000 heightVect = numpy.sum(metBool, axis = 1)
2000 2001 metBool[heightVect<sec,:] = False
2001 2002 metPhase[:,heightVect<sec,:] = numpy.nan
2002 2003
2003 2004 #Derivative
2004 2005 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
2005 2006 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
2006 2007 metPhase[phDerAux] = numpy.nan
2007 2008
2008 2009 #--------------------------METEOR DETECTION -----------------------------------------
2009 2010 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
2010 2011
2011 2012 for p in numpy.arange(nPairs):
2012 2013 phase = metPhase[p,:,:]
2013 2014 phDer = metDer[p,:,:]
2014 2015
2015 2016 for h in indMet:
2016 2017 height = heightList[h]
2017 2018 phase1 = phase[h,:] #82
2018 2019 phDer1 = phDer[h,:]
2019 2020
2020 2021 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
2021 2022
2022 2023 indValid = numpy.where(~numpy.isnan(phase1))[0]
2023 2024 initMet = indValid[0]
2024 2025 endMet = 0
2025 2026
2026 2027 for i in range(len(indValid)-1):
2027 2028
2028 2029 #Time difference
2029 2030 inow = indValid[i]
2030 2031 inext = indValid[i+1]
2031 2032 idiff = inext - inow
2032 2033 #Phase difference
2033 2034 phDiff = numpy.abs(phase1[inext] - phase1[inow])
2034 2035
2035 2036 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
2036 2037 sizeTrail = inow - initMet + 1
2037 2038 if sizeTrail>3*sec: #Too short meteors
2038 2039 x = numpy.arange(initMet,inow+1)*ippSeconds
2039 2040 y = phase1[initMet:inow+1]
2040 2041 ynnan = ~numpy.isnan(y)
2041 2042 x = x[ynnan]
2042 2043 y = y[ynnan]
2043 2044 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
2044 2045 ylin = x*slope + intercept
2045 2046 rsq = r_value**2
2046 2047 if rsq > 0.5:
2047 2048 vel = slope#*height*1000/(k*d)
2048 2049 estAux = numpy.array([utctime,p,height, vel, rsq])
2049 2050 meteorList.append(estAux)
2050 2051 initMet = inext
2051 2052 metArray2 = numpy.array(meteorList)
2052 2053
2053 2054 return metArray2
2054 2055
2055 2056 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
2056 2057
2057 2058 azimuth1 = numpy.zeros(len(pairslist))
2058 2059 dist = numpy.zeros(len(pairslist))
2059 2060
2060 2061 for i in range(len(rx_location)):
2061 2062 ch0 = pairslist[i][0]
2062 2063 ch1 = pairslist[i][1]
2063 2064
2064 2065 diffX = rx_location[ch0][0] - rx_location[ch1][0]
2065 2066 diffY = rx_location[ch0][1] - rx_location[ch1][1]
2066 2067 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
2067 2068 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
2068 2069
2069 2070 azimuth1 -= azimuth0
2070 2071 return azimuth1, dist
2071 2072
2072 2073 def techniqueNSM_DBS(self, **kwargs):
2073 2074 metArray = kwargs['metArray']
2074 2075 heightList = kwargs['heightList']
2075 2076 timeList = kwargs['timeList']
2076 2077 azimuth = kwargs['azimuth']
2077 2078 theta_x = numpy.array(kwargs['theta_x'])
2078 2079 theta_y = numpy.array(kwargs['theta_y'])
2079 2080
2080 2081 utctime = metArray[:,0]
2081 2082 cmet = metArray[:,1].astype(int)
2082 2083 hmet = metArray[:,3].astype(int)
2083 2084 SNRmet = metArray[:,4]
2084 2085 vmet = metArray[:,5]
2085 2086 spcmet = metArray[:,6]
2086 2087
2087 2088 nChan = numpy.max(cmet) + 1
2088 2089 nHeights = len(heightList)
2089 2090
2090 2091 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
2091 2092 hmet = heightList[hmet]
2092 2093 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
2093 2094
2094 2095 velEst = numpy.zeros((heightList.size,2))*numpy.nan
2095 2096
2096 2097 for i in range(nHeights - 1):
2097 2098 hmin = heightList[i]
2098 2099 hmax = heightList[i + 1]
2099 2100
2100 2101 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
2101 2102 indthisH = numpy.where(thisH)
2102 2103
2103 2104 if numpy.size(indthisH) > 3:
2104 2105
2105 2106 vel_aux = vmet[thisH]
2106 2107 chan_aux = cmet[thisH]
2107 2108 cosu_aux = dir_cosu[chan_aux]
2108 2109 cosv_aux = dir_cosv[chan_aux]
2109 2110 cosw_aux = dir_cosw[chan_aux]
2110 2111
2111 2112 nch = numpy.size(numpy.unique(chan_aux))
2112 2113 if nch > 1:
2113 2114 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
2114 2115 velEst[i,:] = numpy.dot(A,vel_aux)
2115 2116
2116 2117 return velEst
2117 2118
2118 2119 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
2119 2120
2120 2121 param = dataOut.data_param
2121 2122 if dataOut.abscissaList is not None:
2122 2123 absc = dataOut.abscissaList[:-1]
2123 2124 # noise = dataOut.noise
2124 2125 heightList = dataOut.heightList
2125 2126 SNR = dataOut.data_SNR
2126 2127
2127 2128 if technique == 'DBS':
2128 2129
2129 2130 kwargs['velRadial'] = param[:,1,:] #Radial velocity
2130 2131 kwargs['heightList'] = heightList
2131 2132 kwargs['SNR'] = SNR
2132 2133
2133 2134 dataOut.data_output, dataOut.heightList, dataOut.data_SNR = self.techniqueDBS(kwargs) #DBS Function
2134 2135 dataOut.utctimeInit = dataOut.utctime
2135 2136 dataOut.outputInterval = dataOut.paramInterval
2136 2137
2137 2138 elif technique == 'SA':
2138 2139
2139 2140 #Parameters
2140 2141 # position_x = kwargs['positionX']
2141 2142 # position_y = kwargs['positionY']
2142 2143 # azimuth = kwargs['azimuth']
2143 2144 #
2144 2145 # if kwargs.has_key('crosspairsList'):
2145 2146 # pairs = kwargs['crosspairsList']
2146 2147 # else:
2147 2148 # pairs = None
2148 2149 #
2149 2150 # if kwargs.has_key('correctFactor'):
2150 2151 # correctFactor = kwargs['correctFactor']
2151 2152 # else:
2152 2153 # correctFactor = 1
2153 2154
2154 2155 # tau = dataOut.data_param
2155 2156 # _lambda = dataOut.C/dataOut.frequency
2156 2157 # pairsList = dataOut.groupList
2157 2158 # nChannels = dataOut.nChannels
2158 2159
2159 2160 kwargs['groupList'] = dataOut.groupList
2160 2161 kwargs['tau'] = dataOut.data_param
2161 2162 kwargs['_lambda'] = dataOut.C/dataOut.frequency
kwargs['lagTRange'] = absc # lag-time axis for the vertical velocity estimate (see techniqueSA)
2162 2163 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
2163 2164 dataOut.data_output = self.techniqueSA(kwargs)
2164 2165 dataOut.utctimeInit = dataOut.utctime
2165 2166 dataOut.outputInterval = dataOut.timeInterval
2166 2167
2167 2168 elif technique == 'Meteors':
2168 2169 dataOut.flagNoData = True
2169 2170 self.__dataReady = False
2170 2171
2171 2172 if 'nHours' in kwargs:
2172 2173 nHours = kwargs['nHours']
2173 2174 else:
2174 2175 nHours = 1
2175 2176
2176 2177 if 'meteorsPerBin' in kwargs:
2177 2178 meteorThresh = kwargs['meteorsPerBin']
2178 2179 else:
2179 2180 meteorThresh = 6
2180 2181
2181 2182 if 'hmin' in kwargs:
2182 2183 hmin = kwargs['hmin']
2183 2184 else: hmin = 70
2184 2185 if 'hmax' in kwargs:
2185 2186 hmax = kwargs['hmax']
2186 2187 else: hmax = 110
2187 2188
2188 2189 dataOut.outputInterval = nHours*3600
2189 2190
2190 2191 if self.__isConfig == False:
2191 2192 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2192 2193 #Get Initial LTC time
2193 2194 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2194 2195 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2195 2196
2196 2197 self.__isConfig = True
2197 2198
2198 2199 if self.__buffer is None:
2199 2200 self.__buffer = dataOut.data_param
2200 2201 self.__firstdata = copy.copy(dataOut)
2201 2202
2202 2203 else:
2203 2204 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2204 2205
2205 2206 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2206 2207
2207 2208 if self.__dataReady:
2208 2209 dataOut.utctimeInit = self.__initime
2209 2210
2210 2211 self.__initime += dataOut.outputInterval #to erase time offset
2211 2212
2212 2213 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
2213 2214 dataOut.flagNoData = False
2214 2215 self.__buffer = None
2215 2216
2216 2217 elif technique == 'Meteors1':
2217 2218 dataOut.flagNoData = True
2218 2219 self.__dataReady = False
2219 2220
2220 2221 if 'nMins' in kwargs:
2221 2222 nMins = kwargs['nMins']
2222 2223 else: nMins = 20
2223 2224 if 'rx_location' in kwargs:
2224 2225 rx_location = kwargs['rx_location']
2225 2226 else: rx_location = [(0,1),(1,1),(1,0)]
2226 2227 if 'azimuth' in kwargs:
2227 2228 azimuth = kwargs['azimuth']
2228 2229 else: azimuth = 51.06
2229 2230 if 'dfactor' in kwargs:
2230 2231 dfactor = kwargs['dfactor']
2231 2232 if 'mode' in kwargs:
2232 2233 mode = kwargs['mode']
2233 2234 else: mode = 'SA'
2234 2235 if 'theta_x' in kwargs:
2235 2236 theta_x = kwargs['theta_x']
2236 2237 if 'theta_y' in kwargs:
2237 2238 theta_y = kwargs['theta_y']
2238 2239
2239 2240 #TODO: delete this later (temporary hard-coded defaults)
2240 2241 if dataOut.groupList is None:
2241 2242 dataOut.groupList = [(0,1),(0,2),(1,2)]
2242 2243 groupList = dataOut.groupList
2243 2244 C = 3e8
2244 2245 freq = 50e6
2245 2246 lamb = C/freq
2246 2247 k = 2*numpy.pi/lamb
2247 2248
2248 2249 timeList = dataOut.abscissaList
2249 2250 heightList = dataOut.heightList
2250 2251
2251 2252 if self.__isConfig == False:
2252 2253 dataOut.outputInterval = nMins*60
2253 2254 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2254 2255 #Get Initial LTC time
2255 2256 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2256 2257 minuteAux = initime.minute
2257 2258 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
2258 2259 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2259 2260
2260 2261 self.__isConfig = True
2261 2262
2262 2263 if self.__buffer is None:
2263 2264 self.__buffer = dataOut.data_param
2264 2265 self.__firstdata = copy.copy(dataOut)
2265 2266
2266 2267 else:
2267 2268 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2268 2269
2269 2270 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2270 2271
2271 2272 if self.__dataReady:
2272 2273 dataOut.utctimeInit = self.__initime
2273 2274 self.__initime += dataOut.outputInterval #to erase time offset
2274 2275
2275 2276 metArray = self.__buffer
2276 2277 if mode == 'SA':
2277 2278 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
2278 2279 elif mode == 'DBS':
2279 2280 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
2280 2281 dataOut.data_output = dataOut.data_output.T
2281 2282 dataOut.flagNoData = False
2282 2283 self.__buffer = None
2283 2284
2284 2285 return
2285 2286
2286 2287 class EWDriftsEstimation(Operation):
2287 2288
2288 2289 def __init__(self):
2289 2290 Operation.__init__(self)
2290 2291
2291 2292 def __correctValues(self, heiRang, phi, velRadial, SNR):
2292 2293 listPhi = phi.tolist()
2293 2294 maxid = listPhi.index(max(listPhi))
2294 2295 minid = listPhi.index(min(listPhi))
2295 2296
2296 2297 rango = list(range(len(phi)))
2297 2298 # rango = numpy.delete(rango,maxid)
2298 2299
2299 2300 heiRang1 = heiRang*math.cos(phi[maxid])
2300 2301 heiRangAux = heiRang*math.cos(phi[minid])
2301 2302 indOut = (heiRang1 < heiRangAux[0]).nonzero()
2302 2303 heiRang1 = numpy.delete(heiRang1,indOut)
2303 2304
2304 2305 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
2305 2306 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
2306 2307
2307 2308 for i in rango:
2308 2309 x = heiRang*math.cos(phi[i])
2309 2310 y1 = velRadial[i,:]
2310 2311 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
2311 2312
2312 2313 x1 = heiRang1
2313 2314 y11 = f1(x1)
2314 2315
2315 2316 y2 = SNR[i,:]
2316 2317 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
2317 2318 y21 = f2(x1)
2318 2319
2319 2320 velRadial1[i,:] = y11
2320 2321 SNR1[i,:] = y21
2321 2322
2322 2323 return heiRang1, velRadial1, SNR1
2323 2324
2324 2325 def run(self, dataOut, zenith, zenithCorrection):
2325 2326 heiRang = dataOut.heightList
2326 2327 velRadial = dataOut.data_param[:,3,:]
2327 2328 SNR = dataOut.data_SNR
2328 2329
2329 2330 zenith = numpy.array(zenith)
2330 2331 zenith -= zenithCorrection
2331 2332 zenith *= numpy.pi/180
2332 2333
2333 2334 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
2334 2335
2335 2336 alp = zenith[0]
2336 2337 bet = zenith[1]
2337 2338
2338 2339 w_w = velRadial1[0,:]
2339 2340 w_e = velRadial1[1,:]
2340 2341
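#The corrected radial velocities satisfy w_w = w*cos(alp) + u*sin(alp) and
#w_e = w*cos(bet) + u*sin(bet); the expressions below are the Cramer's-rule solution
#of that 2x2 system for the vertical (w) and zonal (u) components.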
2341 2342 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
2342 2343 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
2343 2344
2344 2345 winds = numpy.vstack((u,w))
2345 2346
2346 2347 dataOut.heightList = heiRang1
2347 2348 dataOut.data_output = winds
2348 2349 dataOut.data_SNR = SNR1
2349 2350
2350 2351 dataOut.utctimeInit = dataOut.utctime
2351 2352 dataOut.outputInterval = dataOut.timeInterval
2352 2353 return
2353 2354
2354 2355 #--------------- Non Specular Meteor ----------------
2355 2356
2356 2357 class NonSpecularMeteorDetection(Operation):
2357 2358
2358 2359 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
2359 2360 data_acf = dataOut.data_pre[0]
2360 2361 data_ccf = dataOut.data_pre[1]
2361 2362 pairsList = dataOut.groupList[1]
2362 2363
2363 2364 lamb = dataOut.C/dataOut.frequency
2364 2365 tSamp = dataOut.ippSeconds*dataOut.nCohInt
2365 2366 paramInterval = dataOut.paramInterval
2366 2367
2367 2368 nChannels = data_acf.shape[0]
2368 2369 nLags = data_acf.shape[1]
2369 2370 nProfiles = data_acf.shape[2]
2370 2371 nHeights = dataOut.nHeights
2371 2372 nCohInt = dataOut.nCohInt
2372 2373 sec = numpy.round(nProfiles/dataOut.paramInterval)
2373 2374 heightList = dataOut.heightList
2374 2375 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
2375 2376 utctime = dataOut.utctime
2376 2377
2377 2378 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
2378 2379
2379 2380 #------------------------ SNR --------------------------------------
2380 2381 power = data_acf[:,0,:,:].real
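#Noise per channel is estimated from the zero-lag power with the Hildebrand & Sekhon
#criterion, and SNR = (P - N)/N.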
2381 2382 noise = numpy.zeros(nChannels)
2382 2383 SNR = numpy.zeros(power.shape)
2383 2384 for i in range(nChannels):
2384 2385 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
2385 2386 SNR[i] = (power[i]-noise[i])/noise[i]
2386 2387 SNRm = numpy.nanmean(SNR, axis = 0)
2387 2388 SNRdB = 10*numpy.log10(SNR)
2388 2389
2389 2390 if mode == 'SA':
2390 2391 dataOut.groupList = dataOut.groupList[1]
2391 2392 nPairs = data_ccf.shape[0]
2392 2393 #---------------------- Coherence and Phase --------------------------
2393 2394 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
2394 2395 # phase1 = numpy.copy(phase)
2395 2396 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
2396 2397
2397 2398 for p in range(nPairs):
2398 2399 ch0 = pairsList[p][0]
2399 2400 ch1 = pairsList[p][1]
2400 2401 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
2401 2402 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
2402 2403 # phase1[p,:,:] = numpy.angle(ccf) #median filter
2403 2404 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
2404 2405 # coh1[p,:,:] = numpy.abs(ccf) #median filter
2405 2406 coh = numpy.nanmax(coh1, axis = 0)
2406 2407 # struc = numpy.ones((5,1))
2407 2408 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
2408 2409 #---------------------- Radial Velocity ----------------------------
2409 2410 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
2410 2411 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
2411 2412
2412 2413 if allData:
2413 2414 boolMetFin = ~numpy.isnan(SNRm)
2414 2415 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2415 2416 else:
2416 2417 #------------------------ Meteor mask ---------------------------------
2417 2418 # #SNR mask
2418 2419 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
2419 2420 #
2420 2421 # #Erase small objects
2421 2422 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
2422 2423 #
2423 2424 # auxEEJ = numpy.sum(boolMet1,axis=0)
2424 2425 # indOver = auxEEJ>nProfiles*0.8 #Use this later
2425 2426 # indEEJ = numpy.where(indOver)[0]
2426 2427 # indNEEJ = numpy.where(~indOver)[0]
2427 2428 #
2428 2429 # boolMetFin = boolMet1
2429 2430 #
2430 2431 # if indEEJ.size > 0:
2431 2432 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
2432 2433 #
2433 2434 # boolMet2 = coh > cohThresh
2434 2435 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
2435 2436 #
2436 2437 # #Final Meteor mask
2437 2438 # boolMetFin = boolMet1|boolMet2
2438 2439
2439 2440 #Coherence mask
2440 2441 boolMet1 = coh > 0.75
2441 2442 struc = numpy.ones((30,1))
2442 2443 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
2443 2444
2444 2445 #Derivative mask
2445 2446 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2446 2447 boolMet2 = derPhase < 0.2
2447 2448 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
2448 2449 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
2449 2450 boolMet2 = ndimage.median_filter(boolMet2,size=5)
2450 2451 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
2451 2452 # #Final mask
2452 2453 # boolMetFin = boolMet2
2453 2454 boolMetFin = boolMet1&boolMet2
2454 2455 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
2455 2456 #Creating data_param
2456 2457 coordMet = numpy.where(boolMetFin)
2457 2458
2458 2459 tmet = coordMet[0]
2459 2460 hmet = coordMet[1]
2460 2461
2461 2462 data_param = numpy.zeros((tmet.size, 6 + nPairs))
2462 2463 data_param[:,0] = utctime
2463 2464 data_param[:,1] = tmet
2464 2465 data_param[:,2] = hmet
2465 2466 data_param[:,3] = SNRm[tmet,hmet]
2466 2467 data_param[:,4] = velRad[tmet,hmet]
2467 2468 data_param[:,5] = coh[tmet,hmet]
2468 2469 data_param[:,6:] = phase[:,tmet,hmet].T
2469 2470
2470 2471 elif mode == 'DBS':
2471 2472 dataOut.groupList = numpy.arange(nChannels)
2472 2473
2473 2474 #Radial Velocities
2474 2475 phase = numpy.angle(data_acf[:,1,:,:])
2475 2476 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
2476 2477 velRad = phase*lamb/(4*numpy.pi*tSamp)
2477 2478
2478 2479 #Spectral width
2479 2480 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
2480 2481 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
2481 2482 acf1 = data_acf[:,1,:,:]
2482 2483 acf2 = data_acf[:,2,:,:]
2483 2484
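#Spectral width from the ratio of the lag-1 and lag-2 ACF magnitudes, assuming a
#Gaussian ACF |R(n*tSamp)| = S*exp(-8*pi^2*sigma_v^2*(n*tSamp)^2/lambda^2), which gives
#sigma_v = lambda/(2*sqrt(6)*pi*tSamp)*sqrt(ln(R1/R2)).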
2484 2485 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
2485 2486 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
2486 2487 if allData:
2487 2488 boolMetFin = ~numpy.isnan(SNRdB)
2488 2489 else:
2489 2490 #SNR
2490 2491 boolMet1 = (SNRdB>SNRthresh) #SNR mask
2491 2492 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
2492 2493
2493 2494 #Radial velocity
2494 2495 boolMet2 = numpy.abs(velRad) < 20
2495 2496 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
2496 2497
2497 2498 #Spectral Width
2498 2499 boolMet3 = spcWidth < 30
2499 2500 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
2500 2501 # boolMetFin = self.__erase_small(boolMet1, 10,5)
2501 2502 boolMetFin = boolMet1&boolMet2&boolMet3
2502 2503
2503 2504 #Creating data_param
2504 2505 coordMet = numpy.where(boolMetFin)
2505 2506
2506 2507 cmet = coordMet[0]
2507 2508 tmet = coordMet[1]
2508 2509 hmet = coordMet[2]
2509 2510
2510 2511 data_param = numpy.zeros((tmet.size, 7))
2511 2512 data_param[:,0] = utctime
2512 2513 data_param[:,1] = cmet
2513 2514 data_param[:,2] = tmet
2514 2515 data_param[:,3] = hmet
2515 2516 data_param[:,4] = SNR[cmet,tmet,hmet].T
2516 2517 data_param[:,5] = velRad[cmet,tmet,hmet].T
2517 2518 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
2518 2519
2519 2520 # self.dataOut.data_param = data_int
2520 2521 if len(data_param) == 0:
2521 2522 dataOut.flagNoData = True
2522 2523 else:
2523 2524 dataOut.data_param = data_param
2524 2525
2525 2526 def __erase_small(self, binArray, threshX, threshY):
2526 2527 labarray, numfeat = ndimage.measurements.label(binArray)
2527 2528 binArray1 = numpy.copy(binArray)
2528 2529
2529 2530 for i in range(1,numfeat + 1):
2530 2531 auxBin = (labarray==i)
2531 2532 auxSize = auxBin.sum()
2532 2533
2533 2534 x,y = numpy.where(auxBin)
2534 2535 widthX = x.max() - x.min()
2535 2536 widthY = y.max() - y.min()
2536 2537
2537 2538 #width X: 3 seg -> 12.5*3
2538 2539 #width Y:
2539 2540
2540 2541 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
2541 2542 binArray1[auxBin] = False
2542 2543
2543 2544 return binArray1
2544 2545
2545 2546 #--------------- Specular Meteor ----------------
2546 2547
2547 2548 class SMDetection(Operation):
2548 2549 '''
2549 2550 Function DetectMeteors()
2550 2551 Project developed with paper:
2551 2552 HOLDSWORTH ET AL. 2004
2552 2553
2553 2554 Input:
2554 2555 self.dataOut.data_pre
2555 2556
2556 2557 centerReceiverIndex: Index of the center receiver among the channels
2557 2558
2558 2559 hei_ref: Height reference for the Beacon signal extraction
2559 2560 tauindex:
2560 2561 predefinedPhaseShifts: Predefined phase offset for the voltage signals
2561 2562
2562 2563 cohDetection: Whether to use Coherent detection or not
2563 2564 cohDet_timeStep: Coherent Detection calculation time step
2564 2565 cohDet_thresh: Coherent Detection phase threshold to correct phases
2565 2566
2566 2567 noise_timeStep: Noise calculation time step
2567 2568 noise_multiple: Noise multiple to define signal threshold
2568 2569
2569 2570 multDet_timeLimit: Multiple Detection Removal time limit in seconds
2570 2571 multDet_rangeLimit: Multiple Detection Removal range limit in km
2571 2572
2572 2573 phaseThresh: Maximum phase difference between receivers for an echo to be considered a meteor
2573 2574 SNRThresh: Minimum SNR of the signal for an echo to be considered a meteor
2574 2575
2575 2576 hmin: Minimum meteor height to be used in the further wind estimations
2576 2577 hmax: Maximum meteor height to be used in the further wind estimations
2577 2578 azimuth: Azimuth angle correction
2578 2579
2579 2580 Affected:
2580 2581 self.dataOut.data_param
2581 2582
2582 2583 Rejection Criteria (Errors):
2583 2584 0: No error; analysis OK
2584 2585 1: SNR < SNR threshold
2585 2586 2: angle of arrival (AOA) ambiguously determined
2586 2587 3: AOA estimate not feasible
2587 2588 4: Large difference in AOAs obtained from different antenna baselines
2588 2589 5: echo at start or end of time series
2589 2590 6: echo less than 5 samples long; too short for analysis
2590 2591 7: echo rise exceeds 0.3s
2591 2592 8: echo decay time less than twice rise time
2592 2593 9: large power level before echo
2593 2594 10: large power level after echo
2594 2595 11: poor fit to amplitude for estimation of decay time
2595 2596 12: poor fit to CCF phase variation for estimation of radial drift velocity
2596 2597 13: height unresolvable echo: no valid height within 70 to 110 km
2597 2598 14: height ambiguous echo: more than one possible height within 70 to 110 km
2598 2599 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
2599 2600 16: oscillatory echo, indicating the event is most likely not an underdense echo
2600 2601
2601 2602 17: phase difference in meteor Reestimation
2602 2603
2603 2604 Data Storage:
2604 2605 Meteors for Wind Estimation (8):
2605 2606 Utc Time | Range Height
2606 2607 Azimuth Zenith errorCosDir
2607 2608 VelRad errorVelRad
2608 2609 Phase0 Phase1 Phase2 Phase3
2609 2610 TypeError
2610 2611
2611 2612 '''
2612 2613
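# Minimal usage sketch (illustrative only; the keyword values below are just the
# defaults of run() and not a recommendation):
# sm = SMDetection()
# sm.run(dataOut, cohDetection=False, cohDet_timeStep=1, cohDet_thresh=25,
#        noise_timeStep=4, noise_multiple=4, multDet_timeLimit=1, multDet_rangeLimit=3,
#        phaseThresh=20, SNRThresh=5, hmin=50, hmax=150, azimuth=0)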
2613 2614 def run(self, dataOut, hei_ref = None, tauindex = 0,
2614 2615 phaseOffsets = None,
2615 2616 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
2616 2617 noise_timeStep = 4, noise_multiple = 4,
2617 2618 multDet_timeLimit = 1, multDet_rangeLimit = 3,
2618 2619 phaseThresh = 20, SNRThresh = 5,
2619 2620 hmin = 50, hmax=150, azimuth = 0,
2620 2621 channelPositions = None) :
2621 2622
2622 2623
2623 2624 #Getting Pairslist
2624 2625 if channelPositions is None:
2625 2626 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
2626 2627 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
2627 2628 meteorOps = SMOperations()
2628 2629 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
2629 2630 heiRang = dataOut.getHeiRange()
2630 2631 #Get Beacon signal - No Beacon signal anymore
2631 2632 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
2632 2633 #
2633 2634 # if hei_ref != None:
2634 2635 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
2635 2636 #
2636 2637
2637 2638
2638 2639 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
2639 2640 # see if the user put in pre defined phase shifts
2640 2641 voltsPShift = dataOut.data_pre.copy()
2641 2642
2642 2643 # if predefinedPhaseShifts != None:
2643 2644 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
2644 2645 #
2645 2646 # # elif beaconPhaseShifts:
2646 2647 # # #get hardware phase shifts using beacon signal
2647 2648 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
2648 2649 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
2649 2650 #
2650 2651 # else:
2651 2652 # hardwarePhaseShifts = numpy.zeros(5)
2652 2653 #
2653 2654 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
2654 2655 # for i in range(self.dataOut.data_pre.shape[0]):
2655 2656 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
2656 2657
2657 2658 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
2658 2659
2659 2660 #Remove DC
2660 2661 voltsDC = numpy.mean(voltsPShift,1)
2661 2662 voltsDC = numpy.mean(voltsDC,1)
2662 2663 for i in range(voltsDC.shape[0]):
2663 2664 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
2664 2665
2665 2666 #Don't consider the last heights; they're used to calculate the Hardware Phase Shift
2666 2667 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
2667 2668
2668 2669 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
2669 2670 #Coherent Detection
2670 2671 if cohDetection:
2671 2672 #use coherent detection to get the net power
2672 2673 cohDet_thresh = cohDet_thresh*numpy.pi/180
2673 2674 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
2674 2675
2675 2676 #Non-coherent detection!
2676 2677 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
2677 2678 #********** END OF COH/NON-COH POWER CALCULATION**********************
2678 2679
2679 2680 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
2680 2681 #Get noise
2681 2682 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
2682 2683 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
2683 2684 #Get signal threshold
2684 2685 signalThresh = noise_multiple*noise
2685 2686 #Meteor echoes detection
2686 2687 listMeteors = self.__findMeteors(powerNet, signalThresh)
2687 2688 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
2688 2689
2689 2690 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
2690 2691 #Parameters
2691 2692 heiRange = dataOut.getHeiRange()
2692 2693 rangeInterval = heiRange[1] - heiRange[0]
2693 2694 rangeLimit = multDet_rangeLimit/rangeInterval
2694 2695 timeLimit = multDet_timeLimit/dataOut.timeInterval
2695 2696 #Multiple detection removals
2696 2697 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
2697 2698 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
2698 2699
2699 2700 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
2700 2701 #Parameters
2701 2702 phaseThresh = phaseThresh*numpy.pi/180
2702 2703 thresh = [phaseThresh, noise_multiple, SNRThresh]
2703 2704 #Meteor reestimation (Errors N 1, 6, 12, 17)
2704 2705 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
2705 2706 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
2706 2707 #Estimation of decay times (Errors N 7, 8, 11)
2707 2708 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
2708 2709 #******************* END OF METEOR REESTIMATION *******************
2709 2710
2710 2711 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
2711 2712 #Calculating Radial Velocity (Error N 15)
2712 2713 radialStdThresh = 10
2713 2714 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
2714 2715
2715 2716 if len(listMeteors4) > 0:
2716 2717 #Setting New Array
2717 2718 date = dataOut.utctime
2718 2719 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
2719 2720
2720 2721 #Correcting phase offset
2721 2722 if phaseOffsets != None:
2722 2723 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
2723 2724 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
2724 2725
2725 2726 #Second Pairslist
2726 2727 pairsList = []
2727 2728 pairx = (0,1)
2728 2729 pairy = (2,3)
2729 2730 pairsList.append(pairx)
2730 2731 pairsList.append(pairy)
2731 2732
2732 2733 jph = numpy.array([0,0,0,0])
2733 2734 h = (hmin,hmax)
2734 2735 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
2735 2736
2736 2737 # #Calculate AOA (Error N 3, 4)
2737 2738 # #JONES ET AL. 1998
2738 2739 # error = arrayParameters[:,-1]
2739 2740 # AOAthresh = numpy.pi/8
2740 2741 # phases = -arrayParameters[:,9:13]
2741 2742 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
2742 2743 #
2743 2744 # #Calculate Heights (Error N 13 and 14)
2744 2745 # error = arrayParameters[:,-1]
2745 2746 # Ranges = arrayParameters[:,2]
2746 2747 # zenith = arrayParameters[:,5]
2747 2748 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
2748 2749 # error = arrayParameters[:,-1]
2749 2750 #********************* END OF PARAMETERS CALCULATION **************************
2750 2751
2751 2752 #***************************+ PASS DATA TO NEXT STEP **********************
2752 2753 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
2753 2754 dataOut.data_param = arrayParameters
2754 2755
2755 2756 if arrayParameters is None:
2756 2757 dataOut.flagNoData = True
2757 2758 else:
2758 2759 dataOut.flagNoData = False
2759 2760
2760 2761 return
2761 2762
2762 2763 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
2763 2764
2764 2765 minIndex = min(newheis[0])
2765 2766 maxIndex = max(newheis[0])
2766 2767
2767 2768 voltage = voltage0[:,:,minIndex:maxIndex+1]
2768 2769 nLength = voltage.shape[1]//n
2769 2770 nMin = 0
2770 2771 nMax = 0
2771 2772 phaseOffset = numpy.zeros((len(pairslist),n))
2772 2773
2773 2774 for i in range(n):
2774 2775 nMax += nLength
2775 2776 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
2776 2777 phaseCCF = numpy.mean(phaseCCF, axis = 2)
2777 2778 phaseOffset[:,i] = phaseCCF.transpose()
2778 2779 nMin = nMax
2779 2780 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
2780 2781
2781 2782 #Remove Outliers
2782 2783 factor = 2
2783 2784 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
2784 2785 dw = numpy.std(wt,axis = 1)
2785 2786 dw = dw.reshape((dw.size,1))
2786 2787 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
2787 2788 phaseOffset[ind] = numpy.nan
2788 2789 phaseOffset = stats.nanmean(phaseOffset, axis=1)
2789 2790
2790 2791 return phaseOffset
2791 2792
2792 2793 def __shiftPhase(self, data, phaseShift):
2793 2794 #this will shift the phase of a complex number
2794 2795 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
2795 2796 return dataShifted
2796 2797
2797 2798 def __estimatePhaseDifference(self, array, pairslist):
2798 2799 nChannel = array.shape[0]
2799 2800 nHeights = array.shape[2]
2800 2801 numPairs = len(pairslist)
2801 2802 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
2802 2803 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
2803 2804
2804 2805 #Correct phases
2805 2806 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
2806 2807 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
2807 2808
2808 2809 if indDer[0].shape[0] > 0:
2809 2810 for i in range(indDer[0].shape[0]):
2810 2811 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
2811 2812 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
2812 2813
2813 2814 # for j in range(numSides):
2814 2815 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
2815 2816 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
2816 2817 #
2817 2818 #Linear
2818 2819 phaseInt = numpy.zeros((numPairs,1))
2819 2820 angAllCCF = phaseCCF[:,[0,1,3,4],0]
2820 2821 for j in range(numPairs):
2821 2822 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
2822 2823 phaseInt[j] = fit[1]
2823 2824 #Phase Differences
2824 2825 phaseDiff = phaseInt - phaseCCF[:,2,:]
2825 2826 phaseArrival = phaseInt.reshape(phaseInt.size)
2826 2827
2827 2828 #Dealias
2828 2829 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
2829 2830 # indAlias = numpy.where(phaseArrival > numpy.pi)
2830 2831 # phaseArrival[indAlias] -= 2*numpy.pi
2831 2832 # indAlias = numpy.where(phaseArrival < -numpy.pi)
2832 2833 # phaseArrival[indAlias] += 2*numpy.pi
2833 2834
2834 2835 return phaseDiff, phaseArrival
2835 2836
2836 2837 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
2837 2838 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
2838 2839 #find the phase shifts of each channel over 1 second intervals
2839 2840 #only look at ranges below the beacon signal
2840 2841 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2841 2842 numBlocks = int(volts.shape[1]/numProfPerBlock)
2842 2843 numHeights = volts.shape[2]
2843 2844 nChannel = volts.shape[0]
2844 2845 voltsCohDet = volts.copy()
2845 2846
2846 2847 pairsarray = numpy.array(pairslist)
2847 2848 indSides = pairsarray[:,1]
2848 2849 # indSides = numpy.array(range(nChannel))
2849 2850 # indSides = numpy.delete(indSides, indCenter)
2850 2851 #
2851 2852 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
2852 2853 listBlocks = numpy.array_split(volts, numBlocks, 1)
2853 2854
2854 2855 startInd = 0
2855 2856 endInd = 0
2856 2857
2857 2858 for i in range(numBlocks):
2858 2859 startInd = endInd
2859 2860 endInd = endInd + listBlocks[i].shape[1]
2860 2861
2861 2862 arrayBlock = listBlocks[i]
2862 2863 # arrayBlockCenter = listCenter[i]
2863 2864
2864 2865 #Estimate the Phase Difference
2865 2866 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
2866 2867 #Phase Difference RMS
2867 2868 arrayPhaseRMS = numpy.abs(phaseDiff)
2868 2869 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
2869 2870 indPhase = numpy.where(phaseRMSaux==4)
2870 2871 #Shifting
2871 2872 if indPhase[0].shape[0] > 0:
2872 2873 for j in range(indSides.size):
2873 2874 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
2874 2875 voltsCohDet[:,startInd:endInd,:] = arrayBlock
2875 2876
2876 2877 return voltsCohDet
2877 2878
2878 2879 def __calculateCCF(self, volts, pairslist ,laglist):
2879 2880
2880 2881 nHeights = volts.shape[2]
2881 2882 nPoints = volts.shape[1]
2882 2883 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
2883 2884
2884 2885 for i in range(len(pairslist)):
2885 2886 volts1 = volts[pairslist[i][0]]
2886 2887 volts2 = volts[pairslist[i][1]]
2887 2888
2888 2889 for t in range(len(laglist)):
2889 2890 idxT = laglist[t]
2890 2891 if idxT >= 0:
2891 2892 vStacked = numpy.vstack((volts2[idxT:,:],
2892 2893 numpy.zeros((idxT, nHeights),dtype='complex')))
2893 2894 else:
2894 2895 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
2895 2896 volts2[:(nPoints + idxT),:]))
2896 2897 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
2897 2898
2898 2899 vStacked = None
2899 2900 return voltsCCF
2900 2901
2901 2902 def __getNoise(self, power, timeSegment, timeInterval):
2902 2903 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2903 2904 numBlocks = int(power.shape[0]/numProfPerBlock)
2904 2905 numHeights = power.shape[1]
2905 2906
2906 2907 listPower = numpy.array_split(power, numBlocks, 0)
2907 2908 noise = numpy.zeros((power.shape[0], power.shape[1]))
2908 2909 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
2909 2910
2910 2911 startInd = 0
2911 2912 endInd = 0
2912 2913
2913 2914 for i in range(numBlocks): #per time block
2914 2915 startInd = endInd
2915 2916 endInd = endInd + listPower[i].shape[0]
2916 2917
2917 2918 arrayBlock = listPower[i]
2918 2919 noiseAux = numpy.mean(arrayBlock, 0)
2919 2920 # noiseAux = numpy.median(noiseAux)
2920 2921 # noiseAux = numpy.mean(arrayBlock)
2921 2922 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
2922 2923
2923 2924 noiseAux1 = numpy.mean(arrayBlock)
2924 2925 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
2925 2926
2926 2927 return noise, noise1
2927 2928
2928 2929 def __findMeteors(self, power, thresh):
2929 2930 nProf = power.shape[0]
2930 2931 nHeights = power.shape[1]
2931 2932 listMeteors = []
2932 2933
2933 2934 for i in range(nHeights):
2934 2935 powerAux = power[:,i]
2935 2936 threshAux = thresh[:,i]
2936 2937
2937 2938 indUPthresh = numpy.where(powerAux > threshAux)[0]
2938 2939 indDNthresh = numpy.where(powerAux <= threshAux)[0]
2939 2940
2940 2941 j = 0
2941 2942
2942 2943 while (j < indUPthresh.size - 2):
2943 2944 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
2944 2945 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
2945 2946 indDNthresh = indDNthresh[indDNAux]
2946 2947
2947 2948 if (indDNthresh.size > 0):
2948 2949 indEnd = indDNthresh[0] - 1
2949 2950 indInit = indUPthresh[j]
2950 2951
2951 2952 meteor = powerAux[indInit:indEnd + 1]
2952 2953 indPeak = meteor.argmax() + indInit
2953 2954 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
2954 2955
2955 2956 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #TODO: check this!
2956 2957 j = numpy.where(indUPthresh == indEnd)[0] + 1
2957 2958 else: j+=1
2958 2959 else: j+=1
2959 2960
2960 2961 return listMeteors
2961 2962
2962 2963 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
2963 2964
2964 2965 arrayMeteors = numpy.asarray(listMeteors)
2965 2966 listMeteors1 = []
2966 2967
2967 2968 while arrayMeteors.shape[0] > 0:
2968 2969 FLAs = arrayMeteors[:,4]
2969 2970 maxFLA = FLAs.argmax()
2970 2971 listMeteors1.append(arrayMeteors[maxFLA,:])
2971 2972
2972 2973 MeteorInitTime = arrayMeteors[maxFLA,1]
2973 2974 MeteorEndTime = arrayMeteors[maxFLA,3]
2974 2975 MeteorHeight = arrayMeteors[maxFLA,0]
2975 2976
2976 2977 #Check neighborhood
2977 2978 maxHeightIndex = MeteorHeight + rangeLimit
2978 2979 minHeightIndex = MeteorHeight - rangeLimit
2979 2980 minTimeIndex = MeteorInitTime - timeLimit
2980 2981 maxTimeIndex = MeteorEndTime + timeLimit
2981 2982
2982 2983 #Check Heights
2983 2984 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
2984 2985 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
2985 2986 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
2986 2987
2987 2988 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
2988 2989
2989 2990 return listMeteors1
2990 2991
2991 2992 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
2992 2993 numHeights = volts.shape[2]
2993 2994 nChannel = volts.shape[0]
2994 2995
2995 2996 thresholdPhase = thresh[0]
2996 2997 thresholdNoise = thresh[1]
2997 2998 thresholdDB = float(thresh[2])
2998 2999
2999 3000 thresholdDB1 = 10**(thresholdDB/10)
3000 3001 pairsarray = numpy.array(pairslist)
3001 3002 indSides = pairsarray[:,1]
3002 3003
3003 3004 pairslist1 = list(pairslist)
3004 3005 pairslist1.append((0,1))
3005 3006 pairslist1.append((3,4))
3006 3007
3007 3008 listMeteors1 = []
3008 3009 listPowerSeries = []
3009 3010 listVoltageSeries = []
3010 3011 #volts has the raw data
3011 3012
3012 3013 if frequency == 30e6:
3013 3014 timeLag = 45*10**-3
3014 3015 else:
3015 3016 timeLag = 15*10**-3
3016 3017 lag = int(numpy.ceil(timeLag/timeInterval))
3017 3018
3018 3019 for i in range(len(listMeteors)):
3019 3020
3020 3021 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
3021 3022 meteorAux = numpy.zeros(16)
3022 3023
3023 3024 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
3024 3025 mHeight = listMeteors[i][0]
3025 3026 mStart = listMeteors[i][1]
3026 3027 mPeak = listMeteors[i][2]
3027 3028 mEnd = listMeteors[i][3]
3028 3029
3029 3030 #get the volt data between the start and end times of the meteor
3030 3031 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
3031 3032 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3032 3033
3033 3034 #3.6. Phase Difference estimation
3034 3035 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
3035 3036
3036 3037 #3.7. Phase difference removal & meteor start, peak and end times reestimated
3037 3038 #meteorVolts0.- all Channels, all Profiles
3038 3039 meteorVolts0 = volts[:,:,mHeight]
3039 3040 meteorThresh = noise[:,mHeight]*thresholdNoise
3040 3041 meteorNoise = noise[:,mHeight]
3041 3042 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
3042 3043 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
3043 3044
3044 3045 #Times reestimation
3045 3046 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
3046 3047 if mStart1.size > 0:
3047 3048 mStart1 = mStart1[-1] + 1
3048 3049
3049 3050 else:
3050 3051 mStart1 = mPeak
3051 3052
3052 3053 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
3053 3054 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
3054 3055 if mEndDecayTime1.size == 0:
3055 3056 mEndDecayTime1 = powerNet0.size
3056 3057 else:
3057 3058 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
3058 3059 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
3059 3060
3060 3061 #meteorVolts1.- all Channels, from start to end
3061 3062 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
3062 3063 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
3063 3064 if meteorVolts2.shape[1] == 0:
3064 3065 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
3065 3066 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
3066 3067 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
3067 3068 ##################### END PARAMETERS REESTIMATION #########################
3068 3069
3069 3070 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
3070 3071 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
3071 3072 if meteorVolts2.shape[1] > 0:
3072 3073 #Phase Difference re-estimation
3073 3074 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
3074 3075 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
3075 3076 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
3076 3077 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
3077 3078 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
3078 3079
3079 3080 #Phase Difference RMS
3080 3081 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
3081 3082 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
3082 3083 #Data from Meteor
3083 3084 mPeak1 = powerNet1.argmax() + mStart1
3084 3085 mPeakPower1 = powerNet1.max()
3085 3086 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
3086 3087 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
3087 3088 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
3088 3089 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
3089 3090 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
3090 3091 #Vectorize
3091 3092 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
3092 3093 meteorAux[7:11] = phaseDiffint[0:4]
3093 3094
3094 3095 #Rejection Criterions
3095 3096 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
3096 3097 meteorAux[-1] = 17
3097 3098 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
3098 3099 meteorAux[-1] = 1
3099 3100
3100 3101
3101 3102 else:
3102 3103 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
3103 3104 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
3104 3105 PowerSeries = 0
3105 3106
3106 3107 listMeteors1.append(meteorAux)
3107 3108 listPowerSeries.append(PowerSeries)
3108 3109 listVoltageSeries.append(meteorVolts1)
3109 3110
3110 3111 return listMeteors1, listPowerSeries, listVoltageSeries
3111 3112
3112 3113 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
3113 3114
3114 3115 threshError = 10
3115 3116 #Depending if it is 30 or 50 MHz
3116 3117 if frequency == 30e6:
3117 3118 timeLag = 45*10**-3
3118 3119 else:
3119 3120 timeLag = 15*10**-3
3120 3121 lag = int(numpy.ceil(timeLag/timeInterval))
3121 3122
3122 3123 listMeteors1 = []
3123 3124
3124 3125 for i in range(len(listMeteors)):
3125 3126 meteorPower = listPower[i]
3126 3127 meteorAux = listMeteors[i]
3127 3128
3128 3129 if meteorAux[-1] == 0:
3129 3130
3130 3131 try:
3131 3132 indmax = meteorPower.argmax()
3132 3133 indlag = indmax + lag
3133 3134
3134 3135 y = meteorPower[indlag:]
3135 3136 x = numpy.arange(0, y.size)*timeLag
3136 3137
3137 3138 #first guess
3138 3139 a = y[0]
3139 3140 tau = timeLag
3140 3141 #exponential fit
3141 3142 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
3142 3143 y1 = self.__exponential_function(x, *popt)
3143 3144 #error estimation
3144 3145 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
3145 3146
3146 3147 decayTime = popt[1]
3147 3148 riseTime = indmax*timeInterval
3148 3149 meteorAux[11:13] = [decayTime, error]
3149 3150
3150 3151 #Table items 7, 8 and 11
3151 3152 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
3152 3153 meteorAux[-1] = 7
3153 3154 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice the rise time
3154 3155 meteorAux[-1] = 8
3155 3156 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
3156 3157 meteorAux[-1] = 11
3157 3158
3158 3159
3159 3160 except:
3160 3161 meteorAux[-1] = 11
3161 3162
3162 3163
3163 3164 listMeteors1.append(meteorAux)
3164 3165
3165 3166 return listMeteors1
3166 3167
3167 3168 #Exponential Function
3168 3169
3169 3170 def __exponential_function(self, x, a, tau):
3170 3171 y = a*numpy.exp(-x/tau)
3171 3172 return y
3172 3173
3173 3174 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
3174 3175
3175 3176 pairslist1 = list(pairslist)
3176 3177 pairslist1.append((0,1))
3177 3178 pairslist1.append((3,4))
3178 3179 numPairs = len(pairslist1)
3179 3180 #Time Lag
3180 3181 timeLag = 45*10**-3
3181 3182 c = 3e8
3182 3183 lag = numpy.ceil(timeLag/timeInterval)
3183 3184 freq = 30e6
3184 3185
3185 3186 listMeteors1 = []
3186 3187
3187 3188 for i in range(len(listMeteors)):
3188 3189 meteorAux = listMeteors[i]
3189 3190 if meteorAux[-1] == 0:
3190 3191 mStart = listMeteors[i][1]
3191 3192 mPeak = listMeteors[i][2]
3192 3193 mLag = mPeak - mStart + lag
3193 3194
3194 3195 #get the volt data between the start and end times of the meteor
3195 3196 meteorVolts = listVolts[i]
3196 3197 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3197 3198
3198 3199 #Get CCF
3199 3200 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
3200 3201
3201 3202 #Method 2
3202 3203 slopes = numpy.zeros(numPairs)
3203 3204 time = numpy.array([-2,-1,1,2])*timeInterval
3204 3205 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
3205 3206
3206 3207 #Correct phases
3207 3208 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
3208 3209 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
3209 3210
3210 3211 if indDer[0].shape[0] > 0:
3211 3212 for i in range(indDer[0].shape[0]):
3212 3213 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
3213 3214 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
3214 3215
3215 3216 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
3216 3217 for j in range(numPairs):
3217 3218 fit = stats.linregress(time, angAllCCF[j,:])
3218 3219 slopes[j] = fit[0]
3219 3220
3220 3221 #Remove Outlier
3221 3222 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3222 3223 # slopes = numpy.delete(slopes,indOut)
3223 3224 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3224 3225 # slopes = numpy.delete(slopes,indOut)
3225 3226
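#The mean slope of the CCF phase vs. lag time estimates the Doppler angular frequency;
#it is converted to radial velocity as v = -(dphi/dt)*lambda/(4*pi), since
#(0.25/pi)*(c/freq) = lambda/(4*pi). The spread of the per-pair slopes gives the error.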
3226 3227 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
3227 3228 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
3228 3229 meteorAux[-2] = radialError
3229 3230 meteorAux[-3] = radialVelocity
3230 3231
3231 3232 #Setting Error
3232 3233 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
3233 3234 if numpy.abs(radialVelocity) > 200:
3234 3235 meteorAux[-1] = 15
3235 3236 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
3236 3237 elif radialError > radialStdThresh:
3237 3238 meteorAux[-1] = 12
3238 3239
3239 3240 listMeteors1.append(meteorAux)
3240 3241 return listMeteors1
3241 3242
3242 3243 def __setNewArrays(self, listMeteors, date, heiRang):
3243 3244
3244 3245 #New arrays
3245 3246 arrayMeteors = numpy.array(listMeteors)
3246 3247 arrayParameters = numpy.zeros((len(listMeteors), 13))
3247 3248
3248 3249 #Date inclusion
3249 3250 # date = re.findall(r'\((.*?)\)', date)
3250 3251 # date = date[0].split(',')
3251 3252 # date = map(int, date)
3252 3253 #
3253 3254 # if len(date)<6:
3254 3255 # date.append(0)
3255 3256 #
3256 3257 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
3257 3258 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
3258 3259 arrayDate = numpy.tile(date, (len(listMeteors)))
3259 3260
3260 3261 #Meteor array
3261 3262 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
3262 3263 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
3263 3264
3264 3265 #Parameters Array
3265 3266 arrayParameters[:,0] = arrayDate #Date
3266 3267 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
3267 3268 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
3268 3269 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
3269 3270 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
3270 3271
3271 3272
3272 3273 return arrayParameters
3273 3274
3274 3275 class CorrectSMPhases(Operation):
3275 3276
3276 3277 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
3277 3278
3278 3279 arrayParameters = dataOut.data_param
3279 3280 pairsList = []
3280 3281 pairx = (0,1)
3281 3282 pairy = (2,3)
3282 3283 pairsList.append(pairx)
3283 3284 pairsList.append(pairy)
3284 3285 jph = numpy.zeros(4)
3285 3286
3286 3287 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
3287 3288 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
3288 3289 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
3289 3290
3290 3291 meteorOps = SMOperations()
3291 3292 if channelPositions is None:
3292 3293 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3293 3294 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
3294 3295
3295 3296 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3296 3297 h = (hmin,hmax)
3297 3298
3298 3299 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
3299 3300
3300 3301 dataOut.data_param = arrayParameters
3301 3302 return
3302 3303
3303 3304 class SMPhaseCalibration(Operation):
3304 3305
3305 3306 __buffer = None
3306 3307
3307 3308 __initime = None
3308 3309
3309 3310 __dataReady = False
3310 3311
3311 3312 __isConfig = False
3312 3313
3313 3314 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
3314 3315
3315 3316 dataTime = currentTime + paramInterval
3316 3317 deltaTime = dataTime - initTime
3317 3318
3318 3319 if deltaTime >= outputInterval or deltaTime < 0:
3319 3320 return True
3320 3321
3321 3322 return False
3322 3323
3323 3324 def __getGammas(self, pairs, d, phases):
3324 3325 gammas = numpy.zeros(2)
3325 3326
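#For each pair, -phip2*d3/d2 - phip3 should cluster around the pair's instrumental phase
#offset (gamma); it is estimated as the centre of a Gaussian fitted to the histogram of
#that quantity.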
3326 3327 for i in range(len(pairs)):
3327 3328
3328 3329 pairi = pairs[i]
3329 3330
3330 3331 phip3 = phases[:,pairi[0]]
3331 3332 d3 = d[pairi[0]]
3332 3333 phip2 = phases[:,pairi[1]]
3333 3334 d2 = d[pairi[1]]
3334 3335 #Calculating gamma
3335 3336 # jdcos = alp1/(k*d1)
3336 3337 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
3337 3338 jgamma = -phip2*d3/d2 - phip3
3338 3339 jgamma = numpy.angle(numpy.exp(1j*jgamma))
3339 3340 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
3340 3341 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
3341 3342
3342 3343 #Revised distribution
3343 3344 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
3344 3345
3345 3346 #Histogram
3346 3347 nBins = 64
3347 3348 rmin = -0.5*numpy.pi
3348 3349 rmax = 0.5*numpy.pi
3349 3350 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
3350 3351
3351 3352 meteorsY = phaseHisto[0]
3352 3353 phasesX = phaseHisto[1][:-1]
3353 3354 width = phasesX[1] - phasesX[0]
3354 3355 phasesX += width/2
3355 3356
3356 3357 #Gaussian approximation
3357 3358 bpeak = meteorsY.argmax()
3358 3359 peak = meteorsY.max()
3359 3360 jmin = bpeak - 5
3360 3361 jmax = bpeak + 5 + 1
3361 3362
3362 3363 if jmin<0:
3363 3364 jmin = 0
3364 3365 jmax = 6
3365 3366 elif jmax > meteorsY.size:
3366 3367 jmin = meteorsY.size - 6
3367 3368 jmax = meteorsY.size
3368 3369
3369 3370 x0 = numpy.array([peak,bpeak,50])
3370 3371 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
3371 3372
3372 3373 #Gammas
3373 3374 gammas[i] = coeff[0][1]
3374 3375
3375 3376 return gammas
3376 3377
3377 3378 def __residualFunction(self, coeffs, y, t):
3378 3379
3379 3380 return y - self.__gauss_function(t, coeffs)
3380 3381
3381 3382 def __gauss_function(self, t, coeffs):
3382 3383
3383 3384 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
3384 3385
3385 3386 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
3386 3387 meteorOps = SMOperations()
3387 3388 nchan = 4
3388 3389 pairx = pairsList[0] #x is 0
3389 3390 pairy = pairsList[1] #y is 1
3390 3391 center_xangle = 0
3391 3392 center_yangle = 0
3392 3393 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
3393 3394 ntimes = len(range_angle)
3394 3395
3395 3396 nstepsx = 20
3396 3397 nstepsy = 20
3397 3398
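#Coarse-to-fine grid search: the angular window is narrowed around the best offset of the
#previous scale; penalty counts the meteors left with zero error for each trial offset and
#the maximum is kept.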
3398 3399 for iz in range(ntimes):
3399 3400 min_xangle = -range_angle[iz]/2 + center_xangle
3400 3401 max_xangle = range_angle[iz]/2 + center_xangle
3401 3402 min_yangle = -range_angle[iz]/2 + center_yangle
3402 3403 max_yangle = range_angle[iz]/2 + center_yangle
3403 3404
3404 3405 inc_x = (max_xangle-min_xangle)/nstepsx
3405 3406 inc_y = (max_yangle-min_yangle)/nstepsy
3406 3407
3407 3408 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
3408 3409 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
3409 3410 penalty = numpy.zeros((nstepsx,nstepsy))
3410 3411 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
3411 3412 jph = numpy.zeros(nchan)
3412 3413
3413 3414 # Iterations looking for the offset
3414 3415 for iy in range(int(nstepsy)):
3415 3416 for ix in range(int(nstepsx)):
3416 3417 d3 = d[pairsList[1][0]]
3417 3418 d2 = d[pairsList[1][1]]
3418 3419 d5 = d[pairsList[0][0]]
3419 3420 d4 = d[pairsList[0][1]]
3420 3421
3421 3422 alp2 = alpha_y[iy] #gamma 1
3422 3423 alp4 = alpha_x[ix] #gamma 0
3423 3424
3424 3425 alp3 = -alp2*d3/d2 - gammas[1]
3425 3426 alp5 = -alp4*d5/d4 - gammas[0]
3426 3427 # jph[pairy[1]] = alpha_y[iy]
3427 3428 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
3428 3429
3429 3430 # jph[pairx[1]] = alpha_x[ix]
3430 3431 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
3431 3432 jph[pairsList[0][1]] = alp4
3432 3433 jph[pairsList[0][0]] = alp5
3433 3434 jph[pairsList[1][0]] = alp3
3434 3435 jph[pairsList[1][1]] = alp2
3435 3436 jph_array[:,ix,iy] = jph
3436 3437 # d = [2.0,2.5,2.5,2.0]
3437 3438 #TODO: check that the meteors are read correctly
3438 3439 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
3439 3440 error = meteorsArray1[:,-1]
3440 3441 ind1 = numpy.where(error==0)[0]
3441 3442 penalty[ix,iy] = ind1.size
3442 3443
3443 3444 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
3444 3445 phOffset = jph_array[:,i,j]
3445 3446
3446 3447 center_xangle = phOffset[pairx[1]]
3447 3448 center_yangle = phOffset[pairy[1]]
3448 3449
3449 3450 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
3450 3451 phOffset = phOffset*180/numpy.pi
3451 3452 return phOffset
3452 3453
3453 3454
3454 3455 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
3455 3456
3456 3457 dataOut.flagNoData = True
3457 3458 self.__dataReady = False
3458 3459 dataOut.outputInterval = nHours*3600
3459 3460
3460 3461 if self.__isConfig == False:
3461 3462 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
3462 3463 #Get Initial LTC time
3463 3464 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3464 3465 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3465 3466
3466 3467 self.__isConfig = True
3467 3468
3468 3469 if self.__buffer is None:
3469 3470 self.__buffer = dataOut.data_param.copy()
3470 3471
3471 3472 else:
3472 3473 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3473 3474
3474 3475 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3475 3476
3476 3477 if self.__dataReady:
3477 3478 dataOut.utctimeInit = self.__initime
3478 3479 self.__initime += dataOut.outputInterval #to erase time offset
3479 3480
3480 3481 freq = dataOut.frequency
3481 3482 c = dataOut.C #m/s
3482 3483 lamb = c/freq
3483 3484 k = 2*numpy.pi/lamb
3484 3485 azimuth = 0
3485 3486 h = (hmin, hmax)
3486 3487 # pairs = ((0,1),(2,3)) #Estrella
3487 3488 # pairs = ((1,0),(2,3)) #T
3488 3489
3489 3490 if channelPositions is None:
3490 3491 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3491 3492 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
3492 3493 meteorOps = SMOperations()
3493 3494 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3494 3495
3495 3496 #Checking correct order of pairs
3496 3497 pairs = []
3497 3498 if distances[1] > distances[0]:
3498 3499 pairs.append((1,0))
3499 3500 else:
3500 3501 pairs.append((0,1))
3501 3502
3502 3503 if distances[3] > distances[2]:
3503 3504 pairs.append((3,2))
3504 3505 else:
3505 3506 pairs.append((2,3))
3506 3507 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
3507 3508
3508 3509 meteorsArray = self.__buffer
3509 3510 error = meteorsArray[:,-1]
3510 3511 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
3511 3512 ind1 = numpy.where(boolError)[0]
3512 3513 meteorsArray = meteorsArray[ind1,:]
3513 3514 meteorsArray[:,-1] = 0
3514 3515 phases = meteorsArray[:,8:12]
3515 3516
3516 3517 #Calculate Gammas
3517 3518 gammas = self.__getGammas(pairs, distances, phases)
3518 3519 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
3519 3520 #Calculate Phases
3520 3521 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
3521 3522 phasesOff = phasesOff.reshape((1,phasesOff.size))
3522 3523 dataOut.data_output = -phasesOff
3523 3524 dataOut.flagNoData = False
3524 3525 self.__buffer = None
3525 3526
3526 3527
3527 3528 return
3528 3529
3529 3530 class SMOperations():
3530 3531
3531 3532 def __init__(self):
3532 3533
3533 3534 return
3534 3535
3535 3536 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
3536 3537
3537 3538 arrayParameters = arrayParameters0.copy()
3538 3539 hmin = h[0]
3539 3540 hmax = h[1]
3540 3541
3541 3542 #Calculate AOA (Error N 3, 4)
3542 3543 #JONES ET AL. 1998
3543 3544 AOAthresh = numpy.pi/8
3544 3545 error = arrayParameters[:,-1]
3545 3546 phases = -arrayParameters[:,8:12] + jph
3546 3547 # phases = numpy.unwrap(phases)
3547 3548 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
3548 3549
3549 3550 #Calculate Heights (Error N 13 and 14)
3550 3551 error = arrayParameters[:,-1]
3551 3552 Ranges = arrayParameters[:,1]
3552 3553 zenith = arrayParameters[:,4]
3553 3554 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
3554 3555
3555 3556 #----------------------- Get Final data ------------------------------------
3556 3557 # error = arrayParameters[:,-1]
3557 3558 # ind1 = numpy.where(error==0)[0]
3558 3559 # arrayParameters = arrayParameters[ind1,:]
3559 3560
3560 3561 return arrayParameters
3561 3562
3562 3563 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
3563 3564
3564 3565 arrayAOA = numpy.zeros((phases.shape[0],3))
3565 3566 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
3566 3567
3567 3568 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3568 3569 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3569 3570 arrayAOA[:,2] = cosDirError
3570 3571
3571 3572 azimuthAngle = arrayAOA[:,0]
3572 3573 zenithAngle = arrayAOA[:,1]
3573 3574
3574 3575 #Setting Error
3575 3576 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
3576 3577 error[indError] = 0
3577 3578 #Number 3: AOA estimate not feasible
3578 3579 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3579 3580 error[indInvalid] = 3
3580 3581 #Number 4: Large difference in AOAs obtained from different antenna baselines
3581 3582 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3582 3583 error[indInvalid] = 4
3583 3584 return arrayAOA, error
3584 3585
3585 3586 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
3586 3587
3587 3588 #Initializing some variables
3588 3589 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3589 3590 ang_aux = ang_aux.reshape(1,ang_aux.size)
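#ang_aux holds candidate 2*pi ambiguity offsets: a coarse direction cosine from the phase
#sum of each pair is refined by keeping, among the ambiguous estimates from the phase
#difference, the one closest to the coarse value.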
3590 3591
3591 3592 cosdir = numpy.zeros((arrayPhase.shape[0],2))
3592 3593 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3593 3594
3594 3595
3595 3596 for i in range(2):
3596 3597 ph0 = arrayPhase[:,pairsList[i][0]]
3597 3598 ph1 = arrayPhase[:,pairsList[i][1]]
3598 3599 d0 = distances[pairsList[i][0]]
3599 3600 d1 = distances[pairsList[i][1]]
3600 3601
3601 3602 ph0_aux = ph0 + ph1
3602 3603 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
3603 3604 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
3604 3605 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
3605 3606 #First Estimation
3606 3607 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
3607 3608
3608 3609 #Most-Accurate Second Estimation
3609 3610 phi1_aux = ph0 - ph1
3610 3611 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3611 3612 #Direction Cosine 1
3612 3613 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
3613 3614
3614 3615 #Searching the correct Direction Cosine
3615 3616 cosdir0_aux = cosdir0[:,i]
3616 3617 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3617 3618 #Minimum Distance
3618 3619 cosDiff = (cosdir1 - cosdir0_aux)**2
3619 3620 indcos = cosDiff.argmin(axis = 1)
3620 3621 #Saving Value obtained
3621 3622 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3622 3623
3623 3624 return cosdir0, cosdir
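# Illustrative note (added, not in the original source): the loop above resolves the
# 2*pi ambiguity with two estimates per axis. The summed pair phase varies over the
# short effective baseline (d0 - d1), so cosdir0 is coarse but alias-free; the
# differenced phase varies over the long baseline (d0 + d1), so the cosdir1 candidates
# (one per offset in ang_aux) are precise but wrapped. Keeping the candidate closest to
# cosdir0 combines the precision of the long baseline with the lack of ambiguity of the
# short one. For example, with the usual Jones-type baselines of 2.0 and 2.5 wavelengths,
# the cosdir1 candidates are spaced 1/(d0 + d1) ~ 0.22 apart, which is all the accuracy
# cosdir0 has to provide.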
3624 3625
3625 3626 def __calculateAOA(self, cosdir, azimuth):
3626 3627 cosdirX = cosdir[:,0]
3627 3628 cosdirY = cosdir[:,1]
3628 3629
3629 3630 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3630 3631 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg = north, 90 deg = east
3631 3632 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3632 3633
3633 3634 return angles
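# Illustrative note (added, assumes the two arms are orthogonal as in a Jones cross):
# cosdirX and cosdirY are direction cosines along the two antenna arms, so
# sqrt(1 - cosdirX**2 - cosdirY**2) is the vertical direction cosine and its arccos the
# zenith angle; arctan2(cosdirX, cosdirY) measures azimuth from the Y arm towards the
# X arm, and adding the instrument azimuth rotates the result into the geographic
# convention noted above (0 deg = north, 90 deg = east).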
3634 3635
3635 3636 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3636 3637
3637 3638 Ramb = 375 #Range ambiguity in km, Ramb = c/(2*PRF)
3638 3639 Re = 6371 #Earth radius in km
3639 3640 heights = numpy.zeros(Ranges.shape)
3640 3641
3641 3642 R_aux = numpy.array([0,1,2])*Ramb
3642 3643 R_aux = R_aux.reshape(1,R_aux.size)
3643 3644
3644 3645 Ranges = Ranges.reshape(Ranges.size,1)
3645 3646
3646 3647 Ri = Ranges + R_aux
3647 3648 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3648 3649
3649 3650 #Count candidate heights inside [minHeight, maxHeight] (nominally 70 to 110 km) and keep echoes with exactly one
3650 3651 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3651 3652 ind_h = numpy.where(h_bool == 1)[0]
3652 3653
3653 3654 hCorr = hi[ind_h, :]
3654 3655 #Each row in ind_h has exactly one candidate inside [minHeight, maxHeight]
3655 3656 ind_hCorr = numpy.where(numpy.logical_and(hCorr > minHeight, hCorr < maxHeight))
3656 3657 hCorr = hCorr[ind_hCorr]
3657 3658 heights[ind_h] = hCorr
3658 3659
3659 3660 #Setting Error
3660 3661 #Number 13: Height unresolvable echo: no valid height within 70 to 110 km
3661 3662 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3662 3663 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
3663 3664 error[indError] = 0
3664 3665 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3665 3666 error[indInvalid2] = 14
3666 3667 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3667 3668 error[indInvalid1] = 13
3668 3669
3669 3670 return heights, error
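# Worked example (illustrative figures, not from the original source): with
# Ramb = 375 km, a measured range of 82 km at a zenith angle of 80 deg gives candidate
# ranges of 82, 457 and 832 km, which the curved-Earth expression above maps to heights
# of roughly 15, 95 and 196 km. Only the 457 km candidate falls inside
# [minHeight, maxHeight] (nominally 70-110 km), so 95 km is stored; echoes with no
# candidate in range are flagged 13 and echoes with more than one are flagged 14.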
3670 3671
3671 3672 def getPhasePairs(self, channelPositions):
3672 3673 chanPos = numpy.array(channelPositions)
3673 3674 listOper = list(itertools.combinations(list(range(5)),2))
3674 3675
3675 3676 distances = numpy.zeros(4)
3676 3677 axisX = []
3677 3678 axisY = []
3678 3679 distX = numpy.zeros(3)
3679 3680 distY = numpy.zeros(3)
3680 3681 ix = 0
3681 3682 iy = 0
3682 3683
3683 3684 pairX = numpy.zeros((2,2))
3684 3685 pairY = numpy.zeros((2,2))
3685 3686
3686 3687 for i in range(len(listOper)):
3687 3688 pairi = listOper[i]
3688 3689
3689 3690 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
3690 3691
3691 3692 if posDif[0] == 0:
3692 3693 axisY.append(pairi)
3693 3694 distY[iy] = posDif[1]
3694 3695 iy += 1
3695 3696 elif posDif[1] == 0:
3696 3697 axisX.append(pairi)
3697 3698 distX[ix] = posDif[0]
3698 3699 ix += 1
3699 3700
3700 3701 for i in range(2):
3701 3702 if i==0:
3702 3703 dist0 = distX
3703 3704 axis0 = axisX
3704 3705 else:
3705 3706 dist0 = distY
3706 3707 axis0 = axisY
3707 3708
3708 3709 side = numpy.argsort(dist0)[:-1]
3709 3710 axis0 = numpy.array(axis0)[side,:]
3710 3711 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
3711 3712 axis1 = numpy.unique(numpy.reshape(axis0,4))
3712 3713 side = axis1[axis1 != chanC]
3713 3714 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
3714 3715 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
3715 3716 if diff1<0:
3716 3717 chan2 = side[0]
3717 3718 d2 = numpy.abs(diff1)
3718 3719 chan1 = side[1]
3719 3720 d1 = numpy.abs(diff2)
3720 3721 else:
3721 3722 chan2 = side[1]
3722 3723 d2 = numpy.abs(diff2)
3723 3724 chan1 = side[0]
3724 3725 d1 = numpy.abs(diff1)
3725 3726
3726 3727 if i==0:
3727 3728 chanCX = chanC
3728 3729 chan1X = chan1
3729 3730 chan2X = chan2
3730 3731 distances[0:2] = numpy.array([d1,d2])
3731 3732 else:
3732 3733 chanCY = chanC
3733 3734 chan1Y = chan1
3734 3735 chan2Y = chan2
3735 3736 distances[2:4] = numpy.array([d1,d2])
3736 3737 # axisXsides = numpy.reshape(axisX[ix,:],4)
3737 3738 #
3738 3739 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
3739 3740 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
3740 3741 #
3741 3742 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
3742 3743 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
3743 3744 # channel25X = int(pairX[0,ind25X])
3744 3745 # channel20X = int(pairX[1,ind20X])
3745 3746 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
3746 3747 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
3747 3748 # channel25Y = int(pairY[0,ind25Y])
3748 3749 # channel20Y = int(pairY[1,ind20Y])
3749 3750
3750 3751 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
3751 3752 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
3752 3753
3753 3754 return pairslist, distances
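# Illustrative note (added, assumes the usual Jones-type cross with one outer antenna on
# each side of the centre per arm): the three pairwise separations along an arm are d1,
# d2 and d1 + d2, so argsort(dist0)[:-1] keeps the two pairs that share the centre
# antenna. The sign of (centre - outer) then labels the outer antenna on the negative
# side as chan1 and the one on the positive side as chan2, giving
# pairslist = [(centreX, chan1X), (centreX, chan2X), (centreY, chan1Y), (centreY, chan2Y)]
# and distances = [d1X, d2X, d1Y, d2Y] in the same units as channelPositions.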
3754 3755 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
3755 3756 #
3756 3757 # arrayAOA = numpy.zeros((phases.shape[0],3))
3757 3758 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
3758 3759 #
3759 3760 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3760 3761 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3761 3762 # arrayAOA[:,2] = cosDirError
3762 3763 #
3763 3764 # azimuthAngle = arrayAOA[:,0]
3764 3765 # zenithAngle = arrayAOA[:,1]
3765 3766 #
3766 3767 # #Setting Error
3767 3768 # #Number 3: AOA not feasible
3768 3769 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3769 3770 # error[indInvalid] = 3
3770 3771 # #Number 4: Large difference in AOAs obtained from different antenna baselines
3771 3772 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3772 3773 # error[indInvalid] = 4
3773 3774 # return arrayAOA, error
3774 3775 #
3775 3776 # def __getDirectionCosines(self, arrayPhase, pairsList):
3776 3777 #
3777 3778 # #Initializing some variables
3778 3779 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3779 3780 # ang_aux = ang_aux.reshape(1,ang_aux.size)
3780 3781 #
3781 3782 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
3782 3783 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3783 3784 #
3784 3785 #
3785 3786 # for i in range(2):
3786 3787 # #First Estimation
3787 3788 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
3788 3789 # #Dealias
3789 3790 # indcsi = numpy.where(phi0_aux > numpy.pi)
3790 3791 # phi0_aux[indcsi] -= 2*numpy.pi
3791 3792 # indcsi = numpy.where(phi0_aux < -numpy.pi)
3792 3793 # phi0_aux[indcsi] += 2*numpy.pi
3793 3794 # #Direction Cosine 0
3794 3795 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
3795 3796 #
3796 3797 # #Most-Accurate Second Estimation
3797 3798 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
3798 3799 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3799 3800 # #Direction Cosine 1
3800 3801 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
3801 3802 #
3802 3803 # #Searching the correct Direction Cosine
3803 3804 # cosdir0_aux = cosdir0[:,i]
3804 3805 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3805 3806 # #Minimum Distance
3806 3807 # cosDiff = (cosdir1 - cosdir0_aux)**2
3807 3808 # indcos = cosDiff.argmin(axis = 1)
3808 3809 # #Saving Value obtained
3809 3810 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3810 3811 #
3811 3812 # return cosdir0, cosdir
3812 3813 #
3813 3814 # def __calculateAOA(self, cosdir, azimuth):
3814 3815 # cosdirX = cosdir[:,0]
3815 3816 # cosdirY = cosdir[:,1]
3816 3817 #
3817 3818 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3818 3819 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
3819 3820 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3820 3821 #
3821 3822 # return angles
3822 3823 #
3823 3824 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3824 3825 #
3825 3826 # Ramb = 375 #Ramb = c/(2*PRF)
3826 3827 # Re = 6371 #Earth Radius
3827 3828 # heights = numpy.zeros(Ranges.shape)
3828 3829 #
3829 3830 # R_aux = numpy.array([0,1,2])*Ramb
3830 3831 # R_aux = R_aux.reshape(1,R_aux.size)
3831 3832 #
3832 3833 # Ranges = Ranges.reshape(Ranges.size,1)
3833 3834 #
3834 3835 # Ri = Ranges + R_aux
3835 3836 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3836 3837 #
3837 3838 # #Check if there is a height between 70 and 110 km
3838 3839 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3839 3840 # ind_h = numpy.where(h_bool == 1)[0]
3840 3841 #
3841 3842 # hCorr = hi[ind_h, :]
3842 3843 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3843 3844 #
3844 3845 # hCorr = hi[ind_hCorr]
3845 3846 # heights[ind_h] = hCorr
3846 3847 #
3847 3848 # #Setting Error
3848 3849 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3849 3850 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3850 3851 #
3851 3852 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3852 3853 # error[indInvalid2] = 14
3853 3854 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3854 3855 # error[indInvalid1] = 13
3855 3856 #
3856 3857 # return heights, error
3857 3858 No newline at end of file
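# --- Appended usage sketch (illustrative, not part of the original file) ---
# A minimal, hypothetical example of the public SMOperations API above. It assumes numpy
# and itertools are already imported at module level, and uses made-up Jones-cross
# antenna positions given in wavelengths.
if __name__ == '__main__':
    positions = [(0, 0), (2.5, 0), (-2.0, 0), (0, 2.5), (0, -2.0)]
    ops = SMOperations()
    pairs, dists = ops.getPhasePairs(positions)
    # Expected, roughly: pairs -> [(0, 2), (0, 1), (0, 4), (0, 3)], i.e. the centre
    # channel paired with the negative-side and then the positive-side antenna of each
    # arm, and dists -> [2.0, 2.5, 2.0, 2.5]
    print(pairs)
    print(dists)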