Bug fixed: padding of the decoded data with zeros at the first heights was eliminated.
Miguel Valdez -
r611:cdbd858cadba
@@ -1,90 +1,90
1 1 import numpy
2 2 import copy
3 3
4 4 class Beam:
5 5 def __init__(self):
6 6 self.codeList = []
7 7 self.azimuthList = []
8 8 self.zenithList = []
9 9
10 10
11 11 class AMISR:
12 12 def __init__(self):
13 13 self.flagNoData = True
14 14 self.data = None
15 15 self.utctime = None
16 16 self.type = "AMISR"
17 17
18 18 #properties kept for compatibility with Voltage objects
19 19 self.timeZone = 0#timezone like jroheader, difference in minutes between UTC and localtime
20 20 self.dstFlag = 0#self.dataIn.dstFlag
21 21 self.errorCount = 0#self.dataIn.errorCount
22 22 self.useLocalTime = True#self.dataIn.useLocalTime
23 23
24 24 self.radarControllerHeaderObj = None#self.dataIn.radarControllerHeaderObj.copy()
25 25 self.systemHeaderObj = None#self.dataIn.systemHeaderObj.copy()
26 26 self.channelList = [0]#self.dataIn.channelList; this only applies to the AMISR case
27 27 self.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
28 28
29 29 self.flagDiscontinuousBlock = None#self.dataIn.flagDiscontinuousBlock
30 30 #self.utctime = #self.firstdatatime
31 31 self.flagDecodeData = None#self.dataIn.flagDecodeData #assume the data is already decoded
32 32 self.flagDeflipData = None#self.dataIn.flagDeflipData #assume the data is not flipped
33 33
34 34 self.nCohInt = 1#self.dataIn.nCohInt
35 35 self.nIncohInt = 1
36 36 self.ippSeconds = None#self.dataIn.ippSeconds, taken from the filename/Setup/Tufile
37 37 self.windowOfFilter = None#self.dataIn.windowOfFilter
38 38
39 39 self.timeInterval = None#self.dataIn.timeInterval*self.dataOut.nFFTPoints*self.dataOut.nIncohInt
40 40 self.frequency = None#self.dataIn.frequency
41 41 self.realtime = 0#self.dataIn.realtime
42 42
43 43 #updated when the data is read
44 44 self.heightList = None#self.dataIn.heightList
45 45 self.nProfiles = None#Number of samples or nFFTPoints
46 46 self.nRecords = None
47 47 self.nBeams = None
48 48 self.nBaud = None#self.dataIn.nBaud
49 49 self.nCode = None#self.dataIn.nCode
50 50 self.code = None#self.dataIn.code
51 51
52 52 #Beam-related attributes
53 53 self.beamCodeDict = None
54 54 self.beamRangeDict = None
55 55 self.beamcode = None
56 56 self.azimuth = None
57 57 self.zenith = None
58 58 self.gain = None
59 59
60 60 self.npulseByFrame = None
61 61
62 62 self.profileIndex = None
63 63
64 64 self.beam = Beam()
65 65
66 66 def copy(self, inputObj=None):
67 67
68 if inputObj == None:
68 if inputObj is None:
69 69 return copy.deepcopy(self)
70 70
71 71 for key in inputObj.__dict__.keys():
72 72 self.__dict__[key] = inputObj.__dict__[key]
73 73
74 74 def getNHeights(self):
75 75
76 76 return len(self.heightList)
77 77
78 78
79 79 def isEmpty(self):
80 80
81 81 return self.flagNoData
82 82
83 83 def getTimeInterval(self):
84 84
85 85 timeInterval = self.ippSeconds * self.nCohInt
86 86
87 87 return timeInterval
88 88
89 89 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
90 90 nHeights = property(getNHeights, "I'm the 'nHeights' property.")
\ No newline at end of file
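
A minimal usage sketch of the AMISR container defined above. The field values below are illustrative placeholders (the AMISR reader normally fills these attributes from the HDF5 file); only the class shown in this file is assumed.

    # hedged sketch: filling and querying the AMISR data object by hand
    amisrObj = AMISR()
    amisrObj.heightList = numpy.arange(80, 1000, 3.0)   # km, assumed height grid
    amisrObj.ippSeconds = 0.01                          # assumed IPP in seconds
    amisrObj.nCohInt = 1
    amisrObj.flagNoData = False

    print amisrObj.nHeights       # len(heightList), via the nHeights property
    print amisrObj.timeInterval   # ippSeconds * nCohInt, via the timeInterval property
    copyObj = amisrObj.copy()     # deep copy when no input object is passed
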
@@ -1,1124 +1,1124
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JROData.py 173 2012-11-20 15:06:21Z murco $
5 5 '''
6 6
7 7 import copy
8 8 import numpy
9 9 import datetime
10 10
11 11 from jroheaderIO import SystemHeader, RadarControllerHeader
12 12
13 13 def getNumpyDtype(dataTypeCode):
14 14
15 15 if dataTypeCode == 0:
16 16 numpyDtype = numpy.dtype([('real','<i1'),('imag','<i1')])
17 17 elif dataTypeCode == 1:
18 18 numpyDtype = numpy.dtype([('real','<i2'),('imag','<i2')])
19 19 elif dataTypeCode == 2:
20 20 numpyDtype = numpy.dtype([('real','<i4'),('imag','<i4')])
21 21 elif dataTypeCode == 3:
22 22 numpyDtype = numpy.dtype([('real','<i8'),('imag','<i8')])
23 23 elif dataTypeCode == 4:
24 24 numpyDtype = numpy.dtype([('real','<f4'),('imag','<f4')])
25 25 elif dataTypeCode == 5:
26 26 numpyDtype = numpy.dtype([('real','<f8'),('imag','<f8')])
27 27 else:
28 28 raise ValueError, 'dataTypeCode was not defined'
29 29
30 30 return numpyDtype
31 31
32 32 def getDataTypeCode(numpyDtype):
33 33
34 34 if numpyDtype == numpy.dtype([('real','<i1'),('imag','<i1')]):
35 35 datatype = 0
36 36 elif numpyDtype == numpy.dtype([('real','<i2'),('imag','<i2')]):
37 37 datatype = 1
38 38 elif numpyDtype == numpy.dtype([('real','<i4'),('imag','<i4')]):
39 39 datatype = 2
40 40 elif numpyDtype == numpy.dtype([('real','<i8'),('imag','<i8')]):
41 41 datatype = 3
42 42 elif numpyDtype == numpy.dtype([('real','<f4'),('imag','<f4')]):
43 43 datatype = 4
44 44 elif numpyDtype == numpy.dtype([('real','<f8'),('imag','<f8')]):
45 45 datatype = 5
46 46 else:
47 47 datatype = None
48 48
49 49 return datatype
50 50
51 51 def hildebrand_sekhon(data, navg):
52 52 """
53 53 Objective determination of the noise level in Doppler spectra. The technique relies on
54 54 the fact that, for white Gaussian noise, the standard deviation of the spectral
55 55 densities is equal to the mean spectral density.
56 56
57 57 Inputs:
58 58 data : array of spectral densities (one value per height/frequency bin)
59 59 navg : number of incoherent averages
60 60
61 61 Return:
62 62 -1 : on any error
63 63 anoise : noise level
64 64 """
65 65
66 66 sortdata = numpy.sort(data,axis=None)
67 67 lenOfData = len(sortdata)
68 68 nums_min = lenOfData/10
69 69
70 70 if (lenOfData/10) > 2:
71 71 nums_min = lenOfData/10
72 72 else:
73 73 nums_min = 2
74 74
75 75 sump = 0.
76 76
77 77 sumq = 0.
78 78
79 79 j = 0
80 80
81 81 cont = 1
82 82
83 83 while((cont==1)and(j<lenOfData)):
84 84
85 85 sump += sortdata[j]
86 86
87 87 sumq += sortdata[j]**2
88 88
89 89 if j > nums_min:
90 90 rtest = float(j)/(j-1) + 1.0/navg
91 91 if ((sumq*j) > (rtest*sump**2)):
92 92 j = j - 1
93 93 sump = sump - sortdata[j]
94 94 sumq = sumq - sortdata[j]**2
95 95 cont = 0
96 96
97 97 j += 1
98 98
99 99 lnoise = sump /j
100 100 stdv = numpy.sqrt((sumq - lnoise**2)/(j - 1))
101 101 return lnoise
102 102
103 103 class Beam:
104 104 def __init__(self):
105 105 self.codeList = []
106 106 self.azimuthList = []
107 107 self.zenithList = []
108 108
109 109 class GenericData(object):
110 110
111 111 flagNoData = True
112 112
113 113 def __init__(self):
114 114
115 115 raise ValueError, "This class has not been implemented"
116 116
117 117 def copy(self, inputObj=None):
118 118
119 119 if inputObj == None:
120 120 return copy.deepcopy(self)
121 121
122 122 for key in inputObj.__dict__.keys():
123 123 self.__dict__[key] = inputObj.__dict__[key]
124 124
125 125 def deepcopy(self):
126 126
127 127 return copy.deepcopy(self)
128 128
129 129 def isEmpty(self):
130 130
131 131 return self.flagNoData
132 132
133 133 class JROData(GenericData):
134 134
135 135 # m_BasicHeader = BasicHeader()
136 136 # m_ProcessingHeader = ProcessingHeader()
137 137
138 138 systemHeaderObj = SystemHeader()
139 139
140 140 radarControllerHeaderObj = RadarControllerHeader()
141 141
142 142 # data = None
143 143
144 144 type = None
145 145
146 146 datatype = None #dtype, stored as an integer type code
147 147
148 148 # dtype = None
149 149
150 150 # nChannels = None
151 151
152 152 # nHeights = None
153 153
154 154 nProfiles = None
155 155
156 156 heightList = None
157 157
158 158 channelList = None
159 159
160 160 flagDiscontinuousBlock = False
161 161
162 162 useLocalTime = False
163 163
164 164 utctime = None
165 165
166 166 timeZone = None
167 167
168 168 dstFlag = None
169 169
170 170 errorCount = None
171 171
172 172 blocksize = None
173 173
174 174 # nCode = None
175 175 #
176 176 # nBaud = None
177 177 #
178 178 # code = None
179 179
180 180 flagDecodeData = False #assume the data is not decoded
181 181
182 182 flagDeflipData = False #assume the data has not been deflipped
183 183
184 184 flagShiftFFT = False
185 185
186 186 # ippSeconds = None
187 187
188 188 # timeInterval = None
189 189
190 190 nCohInt = None
191 191
192 192 # noise = None
193 193
194 194 windowOfFilter = 1
195 195
196 196 #Speed of light
197 197 C = 3e8
198 198
199 199 frequency = 49.92e6
200 200
201 201 realtime = False
202 202
203 203 beacon_heiIndexList = None
204 204
205 205 last_block = None
206 206
207 207 blocknow = None
208 208
209 209 azimuth = None
210 210
211 211 zenith = None
212 212
213 213 beam = Beam()
214 214
215 215 profileIndex = None
216 216
217 217 def __init__(self):
218 218
219 219 raise ValueError, "This class has not been implemented"
220 220
221 221 def getNoise(self):
222 222
223 223 raise ValueError, "Not implemented"
224 224
225 225 def getNChannels(self):
226 226
227 227 return len(self.channelList)
228 228
229 229 def getChannelIndexList(self):
230 230
231 231 return range(self.nChannels)
232 232
233 233 def getNHeights(self):
234 234
235 235 return len(self.heightList)
236 236
237 237 def getHeiRange(self, extrapoints=0):
238 238
239 239 heis = self.heightList
240 240 # deltah = self.heightList[1] - self.heightList[0]
241 241 #
242 242 # heis.append(self.heightList[-1])
243 243
244 244 return heis
245 245
246 246 def getltctime(self):
247 247
248 248 if self.useLocalTime:
249 249 return self.utctime - self.timeZone*60
250 250
251 251 return self.utctime
252 252
253 253 def getDatatime(self):
254 254
255 255 datatimeValue = datetime.datetime.utcfromtimestamp(self.ltctime)
256 256 return datatimeValue
257 257
258 258 def getTimeRange(self):
259 259
260 260 datatime = []
261 261
262 262 datatime.append(self.ltctime)
263 263 datatime.append(self.ltctime + self.timeInterval+60)
264 264
265 265 datatime = numpy.array(datatime)
266 266
267 267 return datatime
268 268
269 269 def getFmax(self):
270 270
271 271 PRF = 1./(self.ippSeconds * self.nCohInt)
272 272
273 273 fmax = PRF/2.
274 274
275 275 return fmax
276 276
277 277 def getVmax(self):
278 278
279 279 _lambda = self.C/self.frequency
280 280
281 281 vmax = self.getFmax() * _lambda
282 282
283 283 return vmax
284 284
285 285 def get_ippSeconds(self):
286 286 '''
287 287 '''
288 288 return self.radarControllerHeaderObj.ippSeconds
289 289
290 290 def set_ippSeconds(self, ippSeconds):
291 291 '''
292 292 '''
293 293
294 294 self.radarControllerHeaderObj.ippSeconds = ippSeconds
295 295
296 296 return
297 297
298 298 def get_dtype(self):
299 299 '''
300 300 '''
301 301 return getNumpyDtype(self.datatype)
302 302
303 303 def set_dtype(self, numpyDtype):
304 304 '''
305 305 '''
306 306
307 307 self.datatype = getDataTypeCode(numpyDtype)
308 308
309 309 def get_code(self):
310 310 '''
311 311 '''
312 312 return self.radarControllerHeaderObj.code
313 313
314 314 def set_code(self, code):
315 315 '''
316 316 '''
317 317 self.radarControllerHeaderObj.code = code
318 318
319 319 return
320 320
321 321 def get_ncode(self):
322 322 '''
323 323 '''
324 324 return self.radarControllerHeaderObj.nCode
325 325
326 326 def set_ncode(self, nCode):
327 327 '''
328 328 '''
329 329 self.radarControllerHeaderObj.nCode = nCode
330 330
331 331 return
332 332
333 333 def get_nbaud(self):
334 334 '''
335 335 '''
336 336 return self.radarControllerHeaderObj.nBaud
337 337
338 338 def set_nbaud(self, nBaud):
339 339 '''
340 340 '''
341 341 self.radarControllerHeaderObj.nBaud = nBaud
342 342
343 343 return
344 344 # def getTimeInterval(self):
345 345 #
346 346 # raise IOError, "This method should be implemented inside each Class"
347 347
348 348 nChannels = property(getNChannels, "I'm the 'nChannel' property.")
349 349 channelIndexList = property(getChannelIndexList, "I'm the 'channelIndexList' property.")
350 350 nHeights = property(getNHeights, "I'm the 'nHeights' property.")
351 351 #noise = property(getNoise, "I'm the 'nHeights' property.")
352 352 datatime = property(getDatatime, "I'm the 'datatime' property")
353 353 ltctime = property(getltctime, "I'm the 'ltctime' property")
354 354 ippSeconds = property(get_ippSeconds, set_ippSeconds)
355 355 dtype = property(get_dtype, set_dtype)
356 356 # timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
357 357 code = property(get_code, set_code)
358 358 nCode = property(get_ncode, set_ncode)
359 359 nBaud = property(get_nbaud, set_nbaud)
360 360
361 361 class Voltage(JROData):
362 362
363 363 #data is a 2-dimensional numpy array (channels, heights)
364 364 data = None
365 365
366 366 def __init__(self):
367 367 '''
368 368 Constructor
369 369 '''
370 370
371 371 self.useLocalTime = True
372 372
373 373 self.radarControllerHeaderObj = RadarControllerHeader()
374 374
375 375 self.systemHeaderObj = SystemHeader()
376 376
377 377 self.type = "Voltage"
378 378
379 379 self.data = None
380 380
381 381 # self.dtype = None
382 382
383 383 # self.nChannels = 0
384 384
385 385 # self.nHeights = 0
386 386
387 387 self.nProfiles = None
388 388
389 389 self.heightList = None
390 390
391 391 self.channelList = None
392 392
393 393 # self.channelIndexList = None
394 394
395 395 self.flagNoData = True
396 396
397 397 self.flagDiscontinuousBlock = False
398 398
399 399 self.utctime = None
400 400
401 401 self.timeZone = None
402 402
403 403 self.dstFlag = None
404 404
405 405 self.errorCount = None
406 406
407 407 self.nCohInt = None
408 408
409 409 self.blocksize = None
410 410
411 411 self.flagDecodeData = False #assume the data is not decoded
412 412
413 413 self.flagDeflipData = False #assume the data has not been deflipped
414 414
415 415 self.flagShiftFFT = False
416 416
417 417 self.flagDataAsBlock = False #Assume the data is read profile by profile
418 418
419 419 self.profileIndex = 0
420 420
421 421 def getNoisebyHildebrand(self, channel = None):
422 422 """
423 423 Determine the noise level using the Hildebrand-Sekhon method
424 424
425 425 Return:
426 426 noiselevel
427 427 """
428 428
429 429 if channel != None:
430 430 data = self.data[channel]
431 431 nChannels = 1
432 432 else:
433 433 data = self.data
434 434 nChannels = self.nChannels
435 435
436 436 noise = numpy.zeros(nChannels)
437 437 power = data * numpy.conjugate(data)
438 438
439 439 for thisChannel in range(nChannels):
440 440 if nChannels == 1:
441 441 daux = power[:].real
442 442 else:
443 443 daux = power[thisChannel,:].real
444 444 noise[thisChannel] = hildebrand_sekhon(daux, self.nCohInt)
445 445
446 446 return noise
447 447
448 448 def getNoise(self, type = 1, channel = None):
449 449
450 450 if type == 1:
451 451 noise = self.getNoisebyHildebrand(channel)
452 452
453 453 return 10*numpy.log10(noise)
454 454
455 455 def getPower(self, channel = None):
456 456
457 457 if channel != None:
458 458 data = self.data[channel]
459 459 else:
460 460 data = self.data
461 461
462 462 power = data * numpy.conjugate(data)
463 463
464 464 return 10*numpy.log10(power.real)
465 465
466 466 def getTimeInterval(self):
467 467
468 468 timeInterval = self.ippSeconds * self.nCohInt
469 469
470 470 return timeInterval
471 471
472 472 noise = property(getNoise, "I'm the 'nHeights' property.")
473 473 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
474 474
475 475 class Spectra(JROData):
476 476
477 477 #data_spc is a 3-dimensional numpy array (channels, profiles, heights)
478 478 data_spc = None
479 479
480 480 #data_cspc is a 3-dimensional numpy array (channels, pairs, heights)
481 481 data_cspc = None
482 482
483 483 #data_dc is a 2-dimensional numpy array (channels, heights)
484 484 data_dc = None
485 485
486 486 nFFTPoints = None
487 487
488 488 # nPairs = None
489 489
490 490 pairsList = None
491 491
492 492 nIncohInt = None
493 493
494 494 wavelength = None #Needed to compute the velocity range from the frequency
495 495
496 496 nCohInt = None #Required to determine the value of timeInterval
497 497
498 498 ippFactor = None
499 499
500 500 profileIndex = 0
501 501
502 502 def __init__(self):
503 503 '''
504 504 Constructor
505 505 '''
506 506
507 507 self.useLocalTime = True
508 508
509 509 self.radarControllerHeaderObj = RadarControllerHeader()
510 510
511 511 self.systemHeaderObj = SystemHeader()
512 512
513 513 self.type = "Spectra"
514 514
515 515 # self.data = None
516 516
517 517 # self.dtype = None
518 518
519 519 # self.nChannels = 0
520 520
521 521 # self.nHeights = 0
522 522
523 523 self.nProfiles = None
524 524
525 525 self.heightList = None
526 526
527 527 self.channelList = None
528 528
529 529 # self.channelIndexList = None
530 530
531 531 self.pairsList = None
532 532
533 533 self.flagNoData = True
534 534
535 535 self.flagDiscontinuousBlock = False
536 536
537 537 self.utctime = None
538 538
539 539 self.nCohInt = None
540 540
541 541 self.nIncohInt = None
542 542
543 543 self.blocksize = None
544 544
545 545 self.nFFTPoints = None
546 546
547 547 self.wavelength = None
548 548
549 549 self.flagDecodeData = False #assume the data is not decoded
550 550
551 551 self.flagDeflipData = False #assume the data has not been deflipped
552 552
553 553 self.flagShiftFFT = False
554 554
555 555 self.ippFactor = 1
556 556
557 557 #self.noise = None
558 558
559 559 self.beacon_heiIndexList = []
560 560
561 561 self.noise_estimation = None
562 562
563 563
564 564 def getNoisebyHildebrand(self, xmin_index=None, xmax_index=None, ymin_index=None, ymax_index=None):
565 565 """
566 566 Determine the noise level using the Hildebrand-Sekhon method
567 567
568 568 Return:
569 569 noiselevel
570 570 """
571 571
572 572 noise = numpy.zeros(self.nChannels)
573 573
574 574 for channel in range(self.nChannels):
575 575 daux = self.data_spc[channel,xmin_index:xmax_index,ymin_index:ymax_index]
576 576 noise[channel] = hildebrand_sekhon(daux, self.nIncohInt)
577 577
578 578 return noise
579 579
580 580 def getNoise(self, xmin_index=None, xmax_index=None, ymin_index=None, ymax_index=None):
581 581
582 582 if self.noise_estimation != None:
583 583 return self.noise_estimation #this was estimated by getNoise Operation defined in jroproc_spectra.py
584 584 else:
585 585 noise = self.getNoisebyHildebrand(xmin_index, xmax_index, ymin_index, ymax_index)
586 586 return noise
587 587
588 588
589 589 def getFreqRange(self, extrapoints=0):
590 590
591 591 deltafreq = self.getFmax() / (self.nFFTPoints*self.ippFactor)
592 592 freqrange = deltafreq*(numpy.arange(self.nFFTPoints+extrapoints)-self.nFFTPoints/2.) - deltafreq/2
593 593
594 594 return freqrange
595 595
596 596 def getVelRange(self, extrapoints=0):
597 597
598 598 deltav = self.getVmax() / (self.nFFTPoints*self.ippFactor)
599 599 velrange = deltav*(numpy.arange(self.nFFTPoints+extrapoints)-self.nFFTPoints/2.) - deltav/2
600 600
601 601 return velrange
602 602
603 603 def getNPairs(self):
604 604
605 605 return len(self.pairsList)
606 606
607 607 def getPairsIndexList(self):
608 608
609 609 return range(self.nPairs)
610 610
611 611 def getNormFactor(self):
612 612 pwcode = 1
613 613 if self.flagDecodeData:
614 614 pwcode = numpy.sum(self.code[0]**2)
615 615 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
616 616 normFactor = self.nProfiles*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
617 617
618 618 return normFactor
619 619
620 620 def getFlagCspc(self):
621 621
622 if self.data_cspc == None:
622 if self.data_cspc is None:
623 623 return True
624 624
625 625 return False
626 626
627 627 def getFlagDc(self):
628 628
629 if self.data_dc == None:
629 if self.data_dc is None:
630 630 return True
631 631
632 632 return False
633 633
634 634 def getTimeInterval(self):
635 635
636 636 timeInterval = self.ippSeconds * self.nCohInt * self.nIncohInt * self.nProfiles
637 637
638 638 return timeInterval
639 639
640 640 nPairs = property(getNPairs, "I'm the 'nPairs' property.")
641 641 pairsIndexList = property(getPairsIndexList, "I'm the 'pairsIndexList' property.")
642 642 normFactor = property(getNormFactor, "I'm the 'getNormFactor' property.")
643 643 flag_cspc = property(getFlagCspc)
644 644 flag_dc = property(getFlagDc)
645 645 noise = property(getNoise, "I'm the 'nHeights' property.")
646 646 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
647 647
648 648 class SpectraHeis(Spectra):
649 649
650 650 data_spc = None
651 651
652 652 data_cspc = None
653 653
654 654 data_dc = None
655 655
656 656 nFFTPoints = None
657 657
658 658 # nPairs = None
659 659
660 660 pairsList = None
661 661
662 662 nCohInt = None
663 663
664 664 nIncohInt = None
665 665
666 666 def __init__(self):
667 667
668 668 self.radarControllerHeaderObj = RadarControllerHeader()
669 669
670 670 self.systemHeaderObj = SystemHeader()
671 671
672 672 self.type = "SpectraHeis"
673 673
674 674 # self.dtype = None
675 675
676 676 # self.nChannels = 0
677 677
678 678 # self.nHeights = 0
679 679
680 680 self.nProfiles = None
681 681
682 682 self.heightList = None
683 683
684 684 self.channelList = None
685 685
686 686 # self.channelIndexList = None
687 687
688 688 self.flagNoData = True
689 689
690 690 self.flagDiscontinuousBlock = False
691 691
692 692 # self.nPairs = 0
693 693
694 694 self.utctime = None
695 695
696 696 self.blocksize = None
697 697
698 698 self.profileIndex = 0
699 699
700 700 self.nCohInt = 1
701 701
702 702 self.nIncohInt = 1
703 703
704 704 def getNormFactor(self):
705 705 pwcode = 1
706 706 if self.flagDecodeData:
707 707 pwcode = numpy.sum(self.code[0]**2)
708 708
709 709 normFactor = self.nIncohInt*self.nCohInt*pwcode
710 710
711 711 return normFactor
712 712
713 713 def getTimeInterval(self):
714 714
715 715 timeInterval = self.ippSeconds * self.nCohInt * self.nIncohInt
716 716
717 717 return timeInterval
718 718
719 719 normFactor = property(getNormFactor, "I'm the 'getNormFactor' property.")
720 720 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
721 721
722 722 class Fits(JROData):
723 723
724 724 heightList = None
725 725
726 726 channelList = None
727 727
728 728 flagNoData = True
729 729
730 730 flagDiscontinuousBlock = False
731 731
732 732 useLocalTime = False
733 733
734 734 utctime = None
735 735
736 736 timeZone = None
737 737
738 738 # ippSeconds = None
739 739
740 740 # timeInterval = None
741 741
742 742 nCohInt = None
743 743
744 744 nIncohInt = None
745 745
746 746 noise = None
747 747
748 748 windowOfFilter = 1
749 749
750 750 #Speed of light
751 751 C = 3e8
752 752
753 753 frequency = 49.92e6
754 754
755 755 realtime = False
756 756
757 757
758 758 def __init__(self):
759 759
760 760 self.type = "Fits"
761 761
762 762 self.nProfiles = None
763 763
764 764 self.heightList = None
765 765
766 766 self.channelList = None
767 767
768 768 # self.channelIndexList = None
769 769
770 770 self.flagNoData = True
771 771
772 772 self.utctime = None
773 773
774 774 self.nCohInt = 1
775 775
776 776 self.nIncohInt = 1
777 777
778 778 self.useLocalTime = True
779 779
780 780 self.profileIndex = 0
781 781
782 782 # self.utctime = None
783 783 # self.timeZone = None
784 784 # self.ltctime = None
785 785 # self.timeInterval = None
786 786 # self.header = None
787 787 # self.data_header = None
788 788 # self.data = None
789 789 # self.datatime = None
790 790 # self.flagNoData = False
791 791 # self.expName = ''
792 792 # self.nChannels = None
793 793 # self.nSamples = None
794 794 # self.dataBlocksPerFile = None
795 795 # self.comments = ''
796 796 #
797 797
798 798
799 799 def getltctime(self):
800 800
801 801 if self.useLocalTime:
802 802 return self.utctime - self.timeZone*60
803 803
804 804 return self.utctime
805 805
806 806 def getDatatime(self):
807 807
808 808 datatime = datetime.datetime.utcfromtimestamp(self.ltctime)
809 809 return datatime
810 810
811 811 def getTimeRange(self):
812 812
813 813 datatime = []
814 814
815 815 datatime.append(self.ltctime)
816 816 datatime.append(self.ltctime + self.timeInterval)
817 817
818 818 datatime = numpy.array(datatime)
819 819
820 820 return datatime
821 821
822 822 def getHeiRange(self):
823 823
824 824 heis = self.heightList
825 825
826 826 return heis
827 827
828 828 def isEmpty(self):
829 829
830 830 return self.flagNoData
831 831
832 832 def getNHeights(self):
833 833
834 834 return len(self.heightList)
835 835
836 836 def getNChannels(self):
837 837
838 838 return len(self.channelList)
839 839
840 840 def getChannelIndexList(self):
841 841
842 842 return range(self.nChannels)
843 843
844 844 def getNoise(self, type = 1):
845 845
846 846 #noise = numpy.zeros(self.nChannels)
847 847
848 848 if type == 1:
849 849 noise = self.getNoisebyHildebrand()
850 850
851 851 if type == 2:
852 852 noise = self.getNoisebySort()
853 853
854 854 if type == 3:
855 855 noise = self.getNoisebyWindow()
856 856
857 857 return noise
858 858
859 859 def getTimeInterval(self):
860 860
861 861 timeInterval = self.ippSeconds * self.nCohInt * self.nIncohInt
862 862
863 863 return timeInterval
864 864
865 865 datatime = property(getDatatime, "I'm the 'datatime' property")
866 866 nHeights = property(getNHeights, "I'm the 'nHeights' property.")
867 867 nChannels = property(getNChannels, "I'm the 'nChannel' property.")
868 868 channelIndexList = property(getChannelIndexList, "I'm the 'channelIndexList' property.")
869 869 noise = property(getNoise, "I'm the 'nHeights' property.")
870 870 datatime = property(getDatatime, "I'm the 'datatime' property")
871 871 ltctime = property(getltctime, "I'm the 'ltctime' property")
872 872 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
873 873
874 874 class Correlation(JROData):
875 875
876 876 noise = None
877 877
878 878 SNR = None
879 879
880 880 pairsAutoCorr = None #Pairs of Autocorrelation
881 881
882 882 #--------------------------------------------------
883 883
884 884 data_corr = None
885 885
886 886 data_volt = None
887 887
888 888 lagT = None # each element value is a profileIndex
889 889
890 890 lagR = None # each element value is in km
891 891
892 892 pairsList = None
893 893
894 894 calculateVelocity = None
895 895
896 896 nPoints = None
897 897
898 898 nAvg = None
899 899
900 900 bufferSize = None
901 901
902 902 def __init__(self):
903 903 '''
904 904 Constructor
905 905 '''
906 906 self.radarControllerHeaderObj = RadarControllerHeader()
907 907
908 908 self.systemHeaderObj = SystemHeader()
909 909
910 910 self.type = "Correlation"
911 911
912 912 self.data = None
913 913
914 914 self.dtype = None
915 915
916 916 self.nProfiles = None
917 917
918 918 self.heightList = None
919 919
920 920 self.channelList = None
921 921
922 922 self.flagNoData = True
923 923
924 924 self.flagDiscontinuousBlock = False
925 925
926 926 self.utctime = None
927 927
928 928 self.timeZone = None
929 929
930 930 self.dstFlag = None
931 931
932 932 self.errorCount = None
933 933
934 934 self.blocksize = None
935 935
936 936 self.flagDecodeData = False #assume the data is not decoded
937 937
938 938 self.flagDeflipData = False #assume the data has not been deflipped
939 939
940 940 self.pairsList = None
941 941
942 942 self.nPoints = None
943 943
944 944 def getLagTRange(self, extrapoints=0):
945 945
946 946 lagTRange = self.lagT
947 947 diff = lagTRange[1] - lagTRange[0]
948 948 extra = numpy.arange(1,extrapoints + 1)*diff + lagTRange[-1]
949 949 lagTRange = numpy.hstack((lagTRange, extra))
950 950
951 951 return lagTRange
952 952
953 953 def getLagRRange(self, extrapoints=0):
954 954
955 955 return self.lagR
956 956
957 957 def getPairsList(self):
958 958
959 959 return self.pairsList
960 960
961 961 def getCalculateVelocity(self):
962 962
963 963 return self.calculateVelocity
964 964
965 965 def getNPoints(self):
966 966
967 967 return self.nPoints
968 968
969 969 def getNAvg(self):
970 970
971 971 return self.nAvg
972 972
973 973 def getBufferSize(self):
974 974
975 975 return self.bufferSize
976 976
977 977 def getPairsAutoCorr(self):
978 978 pairsList = self.pairsList
979 979 pairsAutoCorr = numpy.zeros(self.nChannels, dtype = 'int')*numpy.nan
980 980
981 981 for l in range(len(pairsList)):
982 982 firstChannel = pairsList[l][0]
983 983 secondChannel = pairsList[l][1]
984 984
985 985 #Getting the autocorrelation pairs
986 986 if firstChannel == secondChannel:
987 987 pairsAutoCorr[firstChannel] = int(l)
988 988
989 989 pairsAutoCorr = pairsAutoCorr.astype(int)
990 990
991 991 return pairsAutoCorr
992 992
993 993 def getNoise(self, mode = 2):
994 994
995 995 indR = numpy.where(self.lagR == 0)[0][0]
996 996 indT = numpy.where(self.lagT == 0)[0][0]
997 997
998 998 jspectra0 = self.data_corr[:,:,indR,:]
999 999 jspectra = copy.copy(jspectra0)
1000 1000
1001 1001 num_chan = jspectra.shape[0]
1002 1002 num_hei = jspectra.shape[2]
1003 1003
1004 1004 freq_dc = jspectra.shape[1]/2
1005 1005 ind_vel = numpy.array([-2,-1,1,2]) + freq_dc
1006 1006
1007 1007 if ind_vel[0]<0:
1008 1008 ind_vel[range(0,1)] = ind_vel[range(0,1)] + self.num_prof
1009 1009
1010 1010 if mode == 1:
1011 1011 jspectra[:,freq_dc,:] = (jspectra[:,ind_vel[1],:] + jspectra[:,ind_vel[2],:])/2 #CORRECCION
1012 1012
1013 1013 if mode == 2:
1014 1014
1015 1015 vel = numpy.array([-2,-1,1,2])
1016 1016 xx = numpy.zeros([4,4])
1017 1017
1018 1018 for fil in range(4):
1019 1019 xx[fil,:] = vel[fil]**numpy.asarray(range(4))
1020 1020
1021 1021 xx_inv = numpy.linalg.inv(xx)
1022 1022 xx_aux = xx_inv[0,:]
1023 1023
1024 1024 for ich in range(num_chan):
1025 1025 yy = jspectra[ich,ind_vel,:]
1026 1026 jspectra[ich,freq_dc,:] = numpy.dot(xx_aux,yy)
1027 1027
1028 1028 junkid = jspectra[ich,freq_dc,:]<=0
1029 1029 cjunkid = sum(junkid)
1030 1030
1031 1031 if cjunkid.any():
1032 1032 jspectra[ich,freq_dc,junkid.nonzero()] = (jspectra[ich,ind_vel[1],junkid] + jspectra[ich,ind_vel[2],junkid])/2
1033 1033
1034 1034 noise = jspectra0[:,freq_dc,:] - jspectra[:,freq_dc,:]
1035 1035
1036 1036 return noise
1037 1037
1038 1038 def getTimeInterval(self):
1039 1039
1040 1040 timeInterval = self.ippSeconds * self.nCohInt * self.nPoints
1041 1041
1042 1042 return timeInterval
1043 1043
1044 1044 timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property")
1045 1045 # pairsList = property(getPairsList, "I'm the 'pairsList' property.")
1046 1046 # nPoints = property(getNPoints, "I'm the 'nPoints' property.")
1047 1047 calculateVelocity = property(getCalculateVelocity, "I'm the 'calculateVelocity' property.")
1048 1048 nAvg = property(getNAvg, "I'm the 'nAvg' property.")
1049 1049 bufferSize = property(getBufferSize, "I'm the 'bufferSize' property.")
1050 1050
1051 1051
1052 1052 class Parameters(JROData):
1053 1053
1054 1054 #Information from previous data
1055 1055
1056 1056 inputUnit = None #Type of data to be processed
1057 1057
1058 1058 operation = None #Type of operation to parametrize
1059 1059
1060 1060 normFactor = None #Normalization Factor
1061 1061
1062 1062 groupList = None #List of Pairs, Groups, etc
1063 1063
1064 1064 #Parameters
1065 1065
1066 1066 data_param = None #Parameters obtained
1067 1067
1068 1068 data_pre = None #Data Pre Parametrization
1069 1069
1070 1070 data_SNR = None #Signal to Noise Ratio
1071 1071
1072 1072 # heightRange = None #Heights
1073 1073
1074 1074 abscissaList = None #Abscissa, can be velocities, lags or time
1075 1075
1076 1076 noise = None #Noise power
1077 1077
1078 1078 utctimeInit = None #Initial UTC time
1079 1079
1080 1080 paramInterval = None #Time interval to calculate Parameters in seconds
1081 1081
1082 1082 #Fitting
1083 1083
1084 1084 data_error = None #Error of the estimation
1085 1085
1086 1086 constants = None
1087 1087
1088 1088 library = None
1089 1089
1090 1090 #Output signal
1091 1091
1092 1092 outputInterval = None #Time interval to calculate output signal in seconds
1093 1093
1094 1094 data_output = None #Out signal
1095 1095
1096 1096
1097 1097
1098 1098 def __init__(self):
1099 1099 '''
1100 1100 Constructor
1101 1101 '''
1102 1102 self.radarControllerHeaderObj = RadarControllerHeader()
1103 1103
1104 1104 self.systemHeaderObj = SystemHeader()
1105 1105
1106 1106 self.type = "Parameters"
1107 1107
1108 1108 def getTimeRange1(self):
1109 1109
1110 1110 datatime = []
1111 1111
1112 1112 if self.useLocalTime:
1113 1113 time1 = self.utctimeInit - self.timeZone*60
1114 1114 else:
1115 1115 time1 = self.utctimeInit
1116 1116
1117 1117 # datatime.append(self.utctimeInit)
1118 1118 # datatime.append(self.utctimeInit + self.outputInterval)
1119 1119 datatime.append(time1)
1120 1120 datatime.append(time1 + self.outputInterval)
1121 1121
1122 1122 datatime = numpy.array(datatime)
1123 1123
1124 1124 return datatime
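
As a rough sanity check of the hildebrand_sekhon function defined in this file, the sketch below feeds it synthetic noise-like spectral densities; the seed, the number of averages and the noise level are illustrative assumptions, and the call is expected to return a value close to the true mean level.

    # hedged sketch (Python 2, run in the same namespace as this module)
    import numpy

    numpy.random.seed(0)                                     # illustrative seed
    navg = 10                                                # assumed number of incoherent averages
    true_level = 1.0                                         # assumed mean noise power
    data = numpy.random.gamma(navg, true_level/navg, 1000)   # navg-averaged white-noise spectra

    noise = hildebrand_sekhon(data, navg)
    print 'estimated noise level: %f' % noise                # expected to be near true_level
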
@@ -1,692 +1,692
1 1 '''
2 2 @author: Daniel Suarez
3 3 '''
4 4
5 5 import os
6 6 import sys
7 7 import glob
8 8 import fnmatch
9 9 import datetime
10 10 import time
11 11 import re
12 12 import h5py
13 13 import numpy
14 14
15 15 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation
16 16 from schainpy.model.data.jroamisr import AMISR
17 17
18 18 try:
19 19 from gevent import sleep
20 20 except:
21 21 from time import sleep
22 22
23 23 class RadacHeader():
24 24 def __init__(self, fp):
25 25 header = 'Raw11/Data/RadacHeader'
26 26 self.beamCodeByPulse = fp.get(header+'/BeamCode')
27 27 self.beamCode = fp.get('Raw11/Data/Beamcodes')
28 28 self.code = fp.get(header+'/Code')
29 29 self.frameCount = fp.get(header+'/FrameCount')
30 30 self.modeGroup = fp.get(header+'/ModeGroup')
31 31 self.nsamplesPulse = fp.get(header+'/NSamplesPulse')
32 32 self.pulseCount = fp.get(header+'/PulseCount')
33 33 self.radacTime = fp.get(header+'/RadacTime')
34 34 self.timeCount = fp.get(header+'/TimeCount')
35 35 self.timeStatus = fp.get(header+'/TimeStatus')
36 36
37 37 self.nrecords = self.pulseCount.shape[0] #nblocks
38 38 self.npulses = self.pulseCount.shape[1] #nprofile
39 39 self.nsamples = self.nsamplesPulse[0,0] #ngates
40 40 self.nbeams = self.beamCode.shape[1]
41 41
42 42
43 43 def getIndexRangeToPulse(self, idrecord=0):
44 44 #indexToZero = numpy.where(self.pulseCount.value[idrecord,:]==0)
45 45 #startPulseCountId = indexToZero[0][0]
46 46 #endPulseCountId = startPulseCountId - 1
47 47 #range1 = numpy.arange(startPulseCountId,self.npulses,1)
48 48 #range2 = numpy.arange(0,startPulseCountId,1)
49 49 #return range1, range2
50 50 zero = 0
51 51 npulse = max(self.pulseCount[0,:]+1)-1
52 52 looking_index = numpy.where(self.pulseCount.value[idrecord,:]==npulse)[0]
53 53 getLastIndex = looking_index[-1]
54 54 index_data = numpy.arange(0,getLastIndex+1,1)
55 55 index_buffer = numpy.arange(getLastIndex+1,self.npulses,1)
56 56 return index_data, index_buffer
57 57
58 58 class AMISRReader(ProcessingUnit):
59 59
60 60 path = None
61 61 startDate = None
62 62 endDate = None
63 63 startTime = None
64 64 endTime = None
65 65 walk = None
66 66 isConfig = False
67 67
68 68 def __init__(self):
69 69 self.set = None
70 70 self.subset = None
71 71 self.extension_file = '.h5'
72 72 self.dtc_str = 'dtc'
73 73 self.dtc_id = 0
74 74 self.status = True
75 75 self.isConfig = False
76 76 self.dirnameList = []
77 77 self.filenameList = []
78 78 self.fileIndex = None
79 79 self.flagNoMoreFiles = False
80 80 self.flagIsNewFile = 0
81 81 self.filename = ''
82 82 self.amisrFilePointer = None
83 83 self.radacHeaderObj = None
84 84 self.dataOut = self.__createObjByDefault()
85 85 self.datablock = None
86 86 self.rest_datablock = None
87 87 self.range = None
88 88 self.idrecord_count = 0
89 89 self.profileIndex = 0
90 90 self.index_amisr_sample = None
91 91 self.index_amisr_buffer = None
92 92 self.beamCodeByFrame = None
93 93 self.radacTimeByFrame = None
94 94 #original attributes exactly as stored in the data file
95 95 self.beamCodesFromFile = None
96 96 self.radacTimeFromFile = None
97 97 self.rangeFromFile = None
98 98 self.dataByFrame = None
99 99 self.dataset = None
100 100
101 101 self.beamCodeDict = {}
102 102 self.beamRangeDict = {}
103 103
104 104 #experiment cfg file
105 105 self.npulsesint_fromfile = None
106 106 self.recordsperfile_fromfile = None
107 107 self.nbeamcodes_fromfile = None
108 108 self.ngates_fromfile = None
109 109 self.ippSeconds_fromfile = None
110 110 self.frequency_h5file = None
111 111
112 112
113 113 self.__firstFile = True
114 114 self.buffer_radactime = None
115 115
116 116 self.index4_schain_datablock = None
117 117 self.index4_buffer = None
118 118 self.schain_datablock = None
119 119 self.buffer = None
120 120 self.linear_pulseCount = None
121 121 self.npulseByFrame = None
122 122 self.profileIndex_offset = None
123 123 self.timezone = 'ut'
124 124
125 125 self.__waitForNewFile = 20
126 126 self.__filename_online = None
127 127
128 128 def __createObjByDefault(self):
129 129
130 130 dataObj = AMISR()
131 131
132 132 return dataObj
133 133
134 134 def __setParameters(self,path='', startDate='',endDate='',startTime='', endTime='', walk=''):
135 135 self.path = path
136 136 self.startDate = startDate
137 137 self.endDate = endDate
138 138 self.startTime = startTime
139 139 self.endTime = endTime
140 140 self.walk = walk
141 141
142 142 def __checkPath(self):
143 143 if os.path.exists(self.path):
144 144 self.status = 1
145 145 else:
146 146 self.status = 0
147 147 print 'Path:%s does not exist'%self.path
148 148
149 149 return
150 150
151 151 def __selDates(self, amisr_dirname_format):
152 152 try:
153 153 year = int(amisr_dirname_format[0:4])
154 154 month = int(amisr_dirname_format[4:6])
155 155 dom = int(amisr_dirname_format[6:8])
156 156 thisDate = datetime.date(year,month,dom)
157 157
158 158 if (thisDate>=self.startDate and thisDate <= self.endDate):
159 159 return amisr_dirname_format
160 160 except:
161 161 return None
162 162
163 163 def __findDataForDates(self,online=False):
164 164
165 165
166 166
167 167 if not(self.status):
168 168 return None
169 169
170 170 pat = '\d+.\d+'
171 171 dirnameList = [re.search(pat,x) for x in os.listdir(self.path)]
172 172 dirnameList = filter(lambda x:x!=None,dirnameList)
173 173 dirnameList = [x.string for x in dirnameList]
174 174 if not(online):
175 175 dirnameList = [self.__selDates(x) for x in dirnameList]
176 176 dirnameList = filter(lambda x:x!=None,dirnameList)
177 177 if len(dirnameList)>0:
178 178 self.status = 1
179 179 self.dirnameList = dirnameList
180 180 self.dirnameList.sort()
181 181 else:
182 182 self.status = 0
183 183 return None
184 184
185 185 def __getTimeFromData(self):
186 186 startDateTime_Reader = datetime.datetime.combine(self.startDate,self.startTime)
187 187 endDateTime_Reader = datetime.datetime.combine(self.endDate,self.endTime)
188 188
189 189 print 'Filtering Files from %s to %s'%(startDateTime_Reader, endDateTime_Reader)
190 190 print '........................................'
191 191 filter_filenameList = []
192 192 self.filenameList.sort()
193 193 for i in range(len(self.filenameList)-1):
194 194 filename = self.filenameList[i]
195 195 fp = h5py.File(filename,'r')
196 196 time_str = fp.get('Time/RadacTimeString')
197 197
198 198 startDateTimeStr_File = time_str[0][0].split('.')[0]
199 199 junk = time.strptime(startDateTimeStr_File, '%Y-%m-%d %H:%M:%S')
200 200 startDateTime_File = datetime.datetime(junk.tm_year,junk.tm_mon,junk.tm_mday,junk.tm_hour, junk.tm_min, junk.tm_sec)
201 201
202 202 endDateTimeStr_File = time_str[-1][-1].split('.')[0]
203 203 junk = time.strptime(endDateTimeStr_File, '%Y-%m-%d %H:%M:%S')
204 204 endDateTime_File = datetime.datetime(junk.tm_year,junk.tm_mon,junk.tm_mday,junk.tm_hour, junk.tm_min, junk.tm_sec)
205 205
206 206 fp.close()
207 207
208 208 if self.timezone == 'lt':
209 209 startDateTime_File = startDateTime_File - datetime.timedelta(minutes = 300)
210 210 endDateTime_File = endDateTime_File - datetime.timedelta(minutes = 300)
211 211
212 212 if (endDateTime_File>=startDateTime_Reader and endDateTime_File<endDateTime_Reader):
213 213 #self.filenameList.remove(filename)
214 214 filter_filenameList.append(filename)
215 215
216 216 filter_filenameList.sort()
217 217 self.filenameList = filter_filenameList
218 218 return 1
219 219
220 220 def __filterByGlob1(self, dirName):
221 221 filter_files = glob.glob1(dirName, '*.*%s'%self.extension_file)
222 222 filterDict = {}
223 223 filterDict.setdefault(dirName)
224 224 filterDict[dirName] = filter_files
225 225 return filterDict
226 226
227 227 def __getFilenameList(self, fileListInKeys, dirList):
228 228 for value in fileListInKeys:
229 229 dirName = value.keys()[0]
230 230 for file in value[dirName]:
231 231 filename = os.path.join(dirName, file)
232 232 self.filenameList.append(filename)
233 233
234 234
235 235 def __selectDataForTimes(self, online=False):
236 236 #the time filter has not been implemented yet
237 237 if not(self.status):
238 238 return None
239 239
240 240 dirList = [os.path.join(self.path,x) for x in self.dirnameList]
241 241
242 242 fileListInKeys = [self.__filterByGlob1(x) for x in dirList]
243 243
244 244 self.__getFilenameList(fileListInKeys, dirList)
245 245 if not(online):
246 246 #filter by time
247 247 if not(self.all):
248 248 self.__getTimeFromData()
249 249
250 250 if len(self.filenameList)>0:
251 251 self.status = 1
252 252 self.filenameList.sort()
253 253 else:
254 254 self.status = 0
255 255 return None
256 256
257 257 else:
258 258 #get the last file - 1
259 259 self.filenameList = [self.filenameList[-2]]
260 260
261 261 new_dirnameList = []
262 262 for dirname in self.dirnameList:
263 263 junk = numpy.array([dirname in x for x in self.filenameList])
264 264 junk_sum = junk.sum()
265 265 if junk_sum > 0:
266 266 new_dirnameList.append(dirname)
267 267 self.dirnameList = new_dirnameList
268 268 return 1
269 269
270 270 def __searchFilesOnline(self,
271 271 path,
272 272 walk=True):
273 273
274 274 startDate = datetime.datetime.utcnow().date()
275 275 endDate = datetime.datetime.utcnow().date()
276 276
277 277 self.__setParameters(path=path, startDate=startDate, endDate=endDate, walk=walk)
278 278
279 279 self.__checkPath()
280 280
281 281 self.__findDataForDates(online=True)
282 282
283 283 self.dirnameList = [self.dirnameList[-1]]
284 284
285 285 self.__selectDataForTimes(online=True)
286 286
287 287 return
288 288
289 289
290 290 def __searchFilesOffline(self,
291 291 path,
292 292 startDate,
293 293 endDate,
294 294 startTime=datetime.time(0,0,0),
295 295 endTime=datetime.time(23,59,59),
296 296 walk=True):
297 297
298 298 self.__setParameters(path, startDate, endDate, startTime, endTime, walk)
299 299
300 300 self.__checkPath()
301 301
302 302 self.__findDataForDates()
303 303
304 304 self.__selectDataForTimes()
305 305
306 306 for i in range(len(self.filenameList)):
307 307 print "%s" %(self.filenameList[i])
308 308
309 309 return
310 310
311 311 def __setNextFileOffline(self):
312 312 idFile = self.fileIndex
313 313
314 314 while (True):
315 315 idFile += 1
316 316 if not(idFile < len(self.filenameList)):
317 317 self.flagNoMoreFiles = 1
318 318 print "No more Files"
319 319 return 0
320 320
321 321 filename = self.filenameList[idFile]
322 322
323 323 amisrFilePointer = h5py.File(filename,'r')
324 324
325 325 break
326 326
327 327 self.flagIsNewFile = 1
328 328 self.fileIndex = idFile
329 329 self.filename = filename
330 330
331 331 self.amisrFilePointer = amisrFilePointer
332 332
333 333 print "Setting the file: %s"%self.filename
334 334
335 335 return 1
336 336
337 337
338 338 def __setNextFileOnline(self):
339 339 filename = self.filenameList[0]
340 340 if self.__filename_online != None:
341 341 self.__selectDataForTimes(online=True)
342 342 filename = self.filenameList[0]
343 343 while self.__filename_online == filename:
344 344 print 'waiting %d seconds to get a new file...'%(self.__waitForNewFile)
345 345 sleep(self.__waitForNewFile)
346 346 self.__selectDataForTimes(online=True)
347 347 filename = self.filenameList[0]
348 348
349 349 self.__filename_online = filename
350 350
351 351 self.amisrFilePointer = h5py.File(filename,'r')
352 352 self.flagIsNewFile = 1
353 353 self.filename = filename
354 354 print "Setting the file: %s"%self.filename
355 355 return 1
356 356
357 357
358 358 def __readHeader(self):
359 359 self.radacHeaderObj = RadacHeader(self.amisrFilePointer)
360 360
361 361 #update values from experiment cfg file
362 362 if self.radacHeaderObj.nrecords == self.recordsperfile_fromfile:
363 363 self.radacHeaderObj.nrecords = self.recordsperfile_fromfile
364 364 self.radacHeaderObj.nbeams = self.nbeamcodes_fromfile
365 365 self.radacHeaderObj.npulses = self.npulsesint_fromfile
366 366 self.radacHeaderObj.nsamples = self.ngates_fromfile
367 367
368 368 #looking index list for data
369 369 start_index = self.radacHeaderObj.pulseCount[0,:][0]
370 370 end_index = self.radacHeaderObj.npulses
371 371 range4data = range(start_index, end_index)
372 372 self.index4_schain_datablock = numpy.array(range4data)
373 373
374 374 buffer_start_index = 0
375 375 buffer_end_index = self.radacHeaderObj.pulseCount[0,:][0]
376 376 range4buffer = range(buffer_start_index, buffer_end_index)
377 377 self.index4_buffer = numpy.array(range4buffer)
378 378
379 379 self.linear_pulseCount = numpy.array(range4data + range4buffer)
380 380 self.npulseByFrame = max(self.radacHeaderObj.pulseCount[0,:]+1)
381 381
382 382 #get tuning frequency
383 383 frequency_h5file_dataset = self.amisrFilePointer.get('Rx'+'/TuningFrequency')
384 384 self.frequency_h5file = frequency_h5file_dataset[0,0]
385 385
386 386 self.flagIsNewFile = 1
387 387
388 388 def __getBeamCode(self):
389 389 self.beamCodeDict = {}
390 390 self.beamRangeDict = {}
391 391
392 392 beamCodeMap = self.amisrFilePointer.get('Setup/BeamcodeMap')
393 393
394 394 for i in range(len(self.radacHeaderObj.beamCode[0,:])):
395 395 self.beamCodeDict.setdefault(i)
396 396 self.beamRangeDict.setdefault(i)
397 397 beamcodeValue = self.radacHeaderObj.beamCode[0,i]
398 398 beamcodeIndex = numpy.where(beamCodeMap[:,0] == beamcodeValue)[0][0]
399 399 x = beamCodeMap[beamcodeIndex][1]
400 400 y = beamCodeMap[beamcodeIndex][2]
401 401 z = beamCodeMap[beamcodeIndex][3]
402 402 self.beamCodeDict[i] = [beamcodeValue, x, y, z]
403 403
404 404 just4record0 = self.radacHeaderObj.beamCodeByPulse[0,:]
405 405
406 406 for i in range(len(self.beamCodeDict.values())):
407 407 xx = numpy.where(just4record0==self.beamCodeDict.values()[i][0])
408 408 indexPulseByBeam = self.linear_pulseCount[xx[0]]
409 409 self.beamRangeDict[i] = indexPulseByBeam
410 410
411 411 def __getExpParameters(self):
412 412 if not(self.status):
413 413 return None
414 414
415 415 experimentCfgPath = os.path.join(self.path, self.dirnameList[0], 'Setup')
416 416
417 417 expFinder = glob.glob1(experimentCfgPath,'*.exp')
418 418 if len(expFinder)== 0:
419 419 self.status = 0
420 420 return None
421 421
422 422 experimentFilename = os.path.join(experimentCfgPath,expFinder[0])
423 423
424 424 f = open(experimentFilename)
425 425 lines = f.readlines()
426 426 f.close()
427 427
428 428 parmsList = ['npulsesint*','recordsperfile*','nbeamcodes*','ngates*']
429 429 filterList = [fnmatch.filter(lines, x) for x in parmsList]
430 430
431 431
432 432 values = [re.sub(r'\D',"",x[0]) for x in filterList]
433 433
434 434 self.npulsesint_fromfile = int(values[0])
435 435 self.recordsperfile_fromfile = int(values[1])
436 436 self.nbeamcodes_fromfile = int(values[2])
437 437 self.ngates_fromfile = int(values[3])
438 438
439 439 tufileFinder = fnmatch.filter(lines, 'tufile=*')
440 440 tufile = tufileFinder[0].split('=')[1].split('\n')[0]
441 441 tufile = tufile.split('\r')[0]
442 442 tufilename = os.path.join(experimentCfgPath,tufile)
443 443
444 444 f = open(tufilename)
445 445 lines = f.readlines()
446 446 f.close()
447 447 self.ippSeconds_fromfile = float(lines[1].split()[2])/1E6
448 448
449 449
450 450 self.status = 1
451 451
452 452 def __setIdsAndArrays(self):
453 453 self.dataByFrame = self.__setDataByFrame()
454 454 self.beamCodeByFrame = self.amisrFilePointer.get('Raw11/Data/RadacHeader/BeamCode').value[0, :]
455 455 self.readRanges()
456 456 self.index_amisr_sample, self.index_amisr_buffer = self.radacHeaderObj.getIndexRangeToPulse(0)
457 457 self.radacTimeByFrame = numpy.zeros(self.radacHeaderObj.npulses)
458 458 if len(self.index_amisr_buffer) > 0:
459 459 self.buffer_radactime = numpy.zeros_like(self.radacTimeByFrame)
460 460
461 461
462 462 def __setNextFile(self,online=False):
463 463
464 464 if not(online):
465 465 newFile = self.__setNextFileOffline()
466 466 else:
467 467 newFile = self.__setNextFileOnline()
468 468
469 469 if not(newFile):
470 470 return 0
471 471
472 472 self.__readHeader()
473 473
474 474 if self.__firstFile:
475 475 self.__setIdsAndArrays()
476 476 self.__firstFile = False
477 477
478 478 self.__getBeamCode()
479 479 self.readDataBlock()
480 480
481 481
482 482 def setup(self,path=None,
483 483 startDate=None,
484 484 endDate=None,
485 485 startTime=datetime.time(0,0,0),
486 486 endTime=datetime.time(23,59,59),
487 487 walk=True,
488 488 timezone='ut',
489 489 all=0,
490 490 online=False):
491 491
492 492 self.timezone = timezone
493 493 self.all = all
494 494 self.online = online
495 495 if not(online):
496 496 #Offline file search
497 497 self.__searchFilesOffline(path, startDate, endDate, startTime, endTime, walk)
498 498 else:
499 499 self.__searchFilesOnline(path, walk)
500 500
501 501 if not(self.filenameList):
502 502 print "There is no files into the folder: %s"%(path)
503 503
504 504 sys.exit(-1)
505 505
506 506 self.__getExpParameters()
507 507
508 508 self.fileIndex = -1
509 509
510 510 self.__setNextFile(online)
511 511
512 512 # first_beamcode = self.radacHeaderObj.beamCodeByPulse[0,0]
513 513 # index = numpy.where(self.radacHeaderObj.beamCodeByPulse[0,:]!=first_beamcode)[0][0]
514 514 self.profileIndex_offset = self.radacHeaderObj.pulseCount[0,:][0]
515 515 self.profileIndex = self.profileIndex_offset
516 516
517 517 def readRanges(self):
518 518 dataset = self.amisrFilePointer.get('Raw11/Data/Samples/Range')
519 519
520 520 self.rangeFromFile = numpy.reshape(dataset.value,(-1))
521 521 return self.rangeFromFile
522 522
523 523
524 524 def readRadacTime(self,idrecord, range1, range2):
525 525 self.radacTimeFromFile = self.radacHeaderObj.radacTime.value
526 526
527 527 radacTimeByFrame = numpy.zeros((self.radacHeaderObj.npulses))
528 528 #radacTimeByFrame = dataset[idrecord - 1,range1]
529 529 #radacTimeByFrame = dataset[idrecord,range2]
530 530
531 531 return radacTimeByFrame
532 532
533 533 def readBeamCode(self, idrecord, range1, range2):
534 534 dataset = self.amisrFilePointer.get('Raw11/Data/RadacHeader/BeamCode')
535 535 beamcodeByFrame = numpy.zeros((self.radacHeaderObj.npulses))
536 536 self.beamCodesFromFile = dataset.value
537 537
538 538 #beamcodeByFrame[range1] = dataset[idrecord - 1, range1]
539 539 #beamcodeByFrame[range2] = dataset[idrecord, range2]
540 540 beamcodeByFrame[range1] = dataset[idrecord, range1]
541 541 beamcodeByFrame[range2] = dataset[idrecord, range2]
542 542
543 543 return beamcodeByFrame
544 544
545 545
546 546 def __setDataByFrame(self):
547 547 ndata = 2 # because the samples are complex
548 548 dataByFrame = numpy.zeros((self.radacHeaderObj.npulses, self.radacHeaderObj.nsamples, ndata))
549 549 return dataByFrame
550 550
551 551 def __readDataSet(self):
552 552 dataset = self.amisrFilePointer.get('Raw11/Data/Samples/Data')
553 553 return dataset
554 554
555 555 def __setDataBlock(self,):
556 556 real = self.dataByFrame[:,:,0] #assume index 0 is the real part
557 557 imag = self.dataByFrame[:,:,1] #assume index 1 is the imaginary part
558 558 datablock = real + imag*1j #build the complex array
559 559 return datablock
560 560
561 561 def readSamples_version1(self,idrecord):
562 562 #these first three lines should be executed only once
563 563 if self.flagIsNewFile:
564 564 #reading dataset
565 565 self.dataset = self.__readDataSet()
566 566 self.flagIsNewFile = 0
567 567
568 568 if idrecord == 0:
569 569 self.dataByFrame[self.index4_schain_datablock, : ,:] = self.dataset[0, self.index_amisr_sample,:,:]
570 570 self.radacTimeByFrame[self.index4_schain_datablock] = self.radacHeaderObj.radacTime[0, self.index_amisr_sample]
571 571 datablock = self.__setDataBlock()
572 572 if len(self.index_amisr_buffer) > 0:
573 573 self.buffer = self.dataset[0, self.index_amisr_buffer,:,:]
574 574 self.buffer_radactime = self.radacHeaderObj.radacTime[0, self.index_amisr_buffer]
575 575
576 576 return datablock
577 577 if len(self.index_amisr_buffer) > 0:
578 578 self.dataByFrame[self.index4_buffer,:,:] = self.buffer.copy()
579 579 self.radacTimeByFrame[self.index4_buffer] = self.buffer_radactime.copy()
580 580 self.dataByFrame[self.index4_schain_datablock,:,:] = self.dataset[idrecord, self.index_amisr_sample,:,:]
581 581 self.radacTimeByFrame[self.index4_schain_datablock] = self.radacHeaderObj.radacTime[idrecord, self.index_amisr_sample]
582 582 datablock = self.__setDataBlock()
583 583 if len(self.index_amisr_buffer) > 0:
584 584 self.buffer = self.dataset[idrecord, self.index_amisr_buffer, :, :]
585 585 self.buffer_radactime = self.radacHeaderObj.radacTime[idrecord, self.index_amisr_buffer]
586 586
587 587 return datablock
588 588
589 589
590 590 def readSamples(self,idrecord):
591 591 if self.flagIsNewFile:
592 592 self.dataByFrame = self.__setDataByFrame()
593 593 self.beamCodeByFrame = self.amisrFilePointer.get('Raw11/Data/RadacHeader/BeamCode').value[idrecord, :]
594 594
595 595 #reading ranges
596 596 self.readRanges()
597 597 #reading dataset
598 598 self.dataset = self.__readDataSet()
599 599
600 600 self.flagIsNewFile = 0
601 601 self.radacTimeByFrame = self.radacHeaderObj.radacTime.value[idrecord, :]
602 602 self.dataByFrame = self.dataset[idrecord, :, :, :]
603 603 datablock = self.__setDataBlock()
604 604 return datablock
605 605
606 606
607 607 def readDataBlock(self):
608 608
609 609 self.datablock = self.readSamples_version1(self.idrecord_count)
610 610 #self.datablock = self.readSamples(self.idrecord_count)
611 611 #print 'record:', self.idrecord_count
612 612
613 613 self.idrecord_count += 1
614 614 self.profileIndex = 0
615 615
616 616 if self.idrecord_count >= self.radacHeaderObj.nrecords:
617 617 self.idrecord_count = 0
618 618 self.flagIsNewFile = 1
619 619
620 620 def readNextBlock(self):
621 621
622 622 self.readDataBlock()
623 623
624 624 if self.flagIsNewFile:
625 625 self.__setNextFile(self.online)
626 626 pass
627 627
628 628 def __hasNotDataInBuffer(self):
629 629 #self.radacHeaderObj.npulses should be a separate variable to account for the number of pulses to take in the first and last records
630 630 if self.profileIndex >= self.radacHeaderObj.npulses:
631 631 return 1
632 632 return 0
633 633
634 634 def printUTC(self):
635 635 print self.dataOut.utctime
636 636 print ''
637 637
638 638 def setObjProperties(self):
639 639
640 640 self.dataOut.heightList = self.rangeFromFile/1000.0 #km
641 641 self.dataOut.nProfiles = self.radacHeaderObj.npulses
642 642 self.dataOut.nRecords = self.radacHeaderObj.nrecords
643 643 self.dataOut.nBeams = self.radacHeaderObj.nbeams
644 644 self.dataOut.ippSeconds = self.ippSeconds_fromfile
645 645 # self.dataOut.timeInterval = self.dataOut.ippSeconds * self.dataOut.nCohInt
646 646 self.dataOut.frequency = self.frequency_h5file
647 647 self.dataOut.npulseByFrame = self.npulseByFrame
648 648 self.dataOut.nBaud = None
649 649 self.dataOut.nCode = None
650 650 self.dataOut.code = None
651 651
652 652 self.dataOut.beamCodeDict = self.beamCodeDict
653 653 self.dataOut.beamRangeDict = self.beamRangeDict
654 654
655 655 if self.timezone == 'lt':
656 656 self.dataOut.timeZone = time.timezone / 60. #get the timezone in minutes
657 657 else:
658 658 self.dataOut.timeZone = 0 #by default time is UTC
659 659
660 660 def getData(self):
661 661
662 662 if self.flagNoMoreFiles:
663 663 self.dataOut.flagNoData = True
664 664 print 'Process finished'
665 665 return 0
666 666
667 667 if self.__hasNotDataInBuffer():
668 668 self.readNextBlock()
669 669
670 670
671 if self.datablock == None: # set this condition when there is no data left to read
671 if self.datablock is None: # set this condition when there is no data left to read
672 672 self.dataOut.flagNoData = True
673 673 return 0
674 674
675 675 self.dataOut.data = numpy.reshape(self.datablock[self.profileIndex,:],(1,-1))
676 676
677 677 self.dataOut.utctime = self.radacTimeByFrame[self.profileIndex]
678 678 self.dataOut.profileIndex = self.profileIndex
679 679 self.dataOut.flagNoData = False
680 680
681 681 self.profileIndex += 1
682 682
683 683 return self.dataOut.data
684 684
685 685
686 686 def run(self, **kwargs):
687 687 if not(self.isConfig):
688 688 self.setup(**kwargs)
689 689 self.setObjProperties()
690 690 self.isConfig = True
691 691
692 692 self.getData()
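
A hedged sketch of driving the AMISRReader above by hand with the setup()/setObjProperties()/getData() methods shown in this file (this mirrors what run() does); the path and the dates are placeholders, and in a typical Signal Chain workflow the unit would be attached through the project controller instead of being called directly.

    # hedged sketch: standalone use of AMISRReader; '/data/amisr' and the dates are illustrative
    import datetime

    reader = AMISRReader()
    reader.setup(path='/data/amisr',
                 startDate=datetime.date(2014, 1, 1),
                 endDate=datetime.date(2014, 1, 2),
                 startTime=datetime.time(0, 0, 0),
                 endTime=datetime.time(23, 59, 59),
                 walk=True,
                 timezone='ut',
                 online=False)
    reader.setObjProperties()

    while not reader.flagNoMoreFiles:
        reader.getData()                      # fills reader.dataOut one profile at a time
        if reader.dataOut.flagNoData:
            continue
        print reader.dataOut.utctime, reader.dataOut.profileIndex
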
@@ -1,652 +1,652
1 1 '''
2 2 Created on Jul 2, 2014
3 3
4 4 @author: roj-idl71
5 5 '''
6 6
7 7 import numpy
8 8
9 9 from jroIO_base import LOCALTIME, JRODataReader, JRODataWriter
10 10 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation
11 11 from schainpy.model.data.jroheaderIO import PROCFLAG, BasicHeader, SystemHeader, RadarControllerHeader, ProcessingHeader
12 12 from schainpy.model.data.jrodata import Voltage
13 13
14 14 class VoltageReader(JRODataReader, ProcessingUnit):
15 15 """
16 16 This class reads voltage data from files in rawdata format (.r). Data is always read
17 17 in blocks. The data read (a 3-dimensional array:
18 18 profiles*heights*channels) is stored in the "buffer" variable.
19 19
20 20 profiles * heights * channels
21 21
22 22 This class holds instances (objects) of the BasicHeader, SystemHeader,
23 23 RadarControllerHeader and Voltage classes. The first three are used to store the data
24 24 header information (metadata), and the fourth (Voltage) to obtain and store one profile of
25 25 data from the "buffer" each time the "getData" method is called.
26 26
27 27 Example:
28 28
29 29 dpath = "/home/myuser/data"
30 30
31 31 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
32 32
33 33 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
34 34
35 35 readerObj = VoltageReader()
36 36
37 37 readerObj.setup(dpath, startTime, endTime)
38 38
39 39 while(True):
40 40
41 41 #to get one profile
42 42 profile = readerObj.getData()
43 43
44 44 #print the profile
45 45 print profile
46 46
47 47 #If you want to see all datablock
48 48 print readerObj.datablock
49 49
50 50 if readerObj.flagNoMoreFiles:
51 51 break
52 52
53 53 """
54 54
55 55 ext = ".r"
56 56
57 57 optchar = "D"
58 58 dataOut = None
59 59
60 60
61 61 def __init__(self):
62 62 """
63 63         Initializer of the VoltageReader class for reading voltage data.
64 64 
65 65         Input:
66 66             dataOut : Object of the Voltage class. This object is used to store one
67 67                       profile of data every time a request is made
68 68                       (getData). The profile is taken from the data buffer; if
69 69                       the buffer is empty, a new block of data is read from the
70 70                       file.
71 71                       If this parameter is not passed, one is created internally.
72 72 
73 73         Affected variables:
74 74 self.dataOut
75 75
76 76 Return:
77 77 None
78 78 """
79 79
80 80 ProcessingUnit.__init__(self)
81 81
82 82 self.isConfig = False
83 83
84 84 self.datablock = None
85 85
86 86 self.utc = 0
87 87
88 88 self.ext = ".r"
89 89
90 90 self.optchar = "D"
91 91
92 92 self.basicHeaderObj = BasicHeader(LOCALTIME)
93 93
94 94 self.systemHeaderObj = SystemHeader()
95 95
96 96 self.radarControllerHeaderObj = RadarControllerHeader()
97 97
98 98 self.processingHeaderObj = ProcessingHeader()
99 99
100 100 self.online = 0
101 101
102 102 self.fp = None
103 103
104 104 self.idFile = None
105 105
106 106 self.dtype = None
107 107
108 108 self.fileSizeByHeader = None
109 109
110 110 self.filenameList = []
111 111
112 112 self.filename = None
113 113
114 114 self.fileSize = None
115 115
116 116 self.firstHeaderSize = 0
117 117
118 118 self.basicHeaderSize = 24
119 119
120 120 self.pathList = []
121 121
122 122 self.filenameList = []
123 123
124 124 self.lastUTTime = 0
125 125
126 126 self.maxTimeStep = 30
127 127
128 128 self.flagNoMoreFiles = 0
129 129
130 130 self.set = 0
131 131
132 132 self.path = None
133 133
134 134 self.profileIndex = 2**32-1
135 135
136 136 self.delay = 3 #seconds
137 137
138 138         self.nTries = 3 #number of retries
139 139
140 140 self.nFiles = 3 #number of files for searching
141 141
142 142 self.nReadBlocks = 0
143 143
144 144 self.flagIsNewFile = 1
145 145
146 146 self.__isFirstTimeOnline = 1
147 147
148 148 # self.ippSeconds = 0
149 149
150 150 self.flagDiscontinuousBlock = 0
151 151
152 152 self.flagIsNewBlock = 0
153 153
154 154 self.nTotalBlocks = 0
155 155
156 156 self.blocksize = 0
157 157
158 158 self.dataOut = self.createObjByDefault()
159 159
160 160 self.nTxs = 1
161 161
162 162 self.txIndex = 0
163 163
164 164 def createObjByDefault(self):
165 165
166 166 dataObj = Voltage()
167 167
168 168 return dataObj
169 169
170 170 def __hasNotDataInBuffer(self):
171 171
172 172 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
173 173 return 1
174 174
175 175 return 0
176 176
177 177
178 178 def getBlockDimension(self):
179 179 """
180 180         Computes the number of points to read for each data block
181 181
182 182 Affected:
183 183 self.blocksize
184 184
185 185 Return:
186 186 None
187 187 """
188 188 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
189 189 self.blocksize = pts2read
190 190
191 191
192 192 def readBlock(self):
193 193 """
194 194         readBlock reads a block of data starting at the current position of the file
195 195         pointer (self.fp) and updates every parameter related to that data block
196 196         (metadata + data). The data that is read is stored in the buffer and the buffer
197 197         counter is reset to 0.
198 198
199 199 Inputs:
200 200 None
201 201
202 202 Return:
203 203 None
204 204
205 205 Affected:
206 206 self.profileIndex
207 207 self.datablock
208 208 self.flagIsNewFile
209 209 self.flagIsNewBlock
210 210 self.nTotalBlocks
211 211
212 212 Exceptions:
213 213             If a block that was read is not a valid block
214 214 """
215 215 current_pointer_location = self.fp.tell()
216 216 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
217 217
218 218 try:
219 219 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
220 220 except:
221 221 #print "The read block (%3d) has not enough data" %self.nReadBlocks
222 222
223 223 if self.waitDataBlock(pointer_location=current_pointer_location):
224 224 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
225 225 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
226 226 # return 0
227 227
228 228 junk = numpy.transpose(junk, (2,0,1))
229 229 self.datablock = junk['real'] + junk['imag']*1j
230 230
231 231 self.profileIndex = 0
232 232
233 233 self.flagIsNewFile = 0
234 234 self.flagIsNewBlock = 1
235 235
236 236 self.nTotalBlocks += 1
237 237 self.nReadBlocks += 1
238 238
239 239 return 1
240 240
241 241 def getFirstHeader(self):
242 242
243 243 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
244 244
245 245 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
246 246
247 247 if self.nTxs > 1:
248 248 self.dataOut.radarControllerHeaderObj.ippSeconds = self.radarControllerHeaderObj.ippSeconds/self.nTxs
249 249
250 250 # self.dataOut.timeInterval = self.radarControllerHeaderObj.ippSeconds * self.processingHeaderObj.nCohInt
251 251 #
252 252 # if self.radarControllerHeaderObj.code != None:
253 253 #
254 254 # self.dataOut.nCode = self.radarControllerHeaderObj.nCode
255 255 #
256 256 # self.dataOut.nBaud = self.radarControllerHeaderObj.nBaud
257 257 #
258 258 # self.dataOut.code = self.radarControllerHeaderObj.code
259 259
260 260 self.dataOut.dtype = self.dtype
261 261
262 262 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock*self.nTxs
263 263
264 264 if self.processingHeaderObj.nHeights % self.nTxs != 0:
265 265             raise ValueError, "nHeights (%d) should be a multiple of nTxs (%d)" %(self.processingHeaderObj.nHeights, self.nTxs)
266 266
267 267 xf = self.processingHeaderObj.firstHeight + int(self.processingHeaderObj.nHeights/self.nTxs)*self.processingHeaderObj.deltaHeight
268 268
269 269 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
270 270
271 271 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
272 272
273 273 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
274 274
275 275 self.dataOut.flagShiftFFT = False
276 276
277 277         self.dataOut.flagDecodeData = False #assume the data is not decoded
278 278 
279 279         self.dataOut.flagDeflipData = False #assume the data has not been deflipped
280 280
281 281 self.dataOut.flagShiftFFT = False
282 282
283 283 def getData(self):
284 284 """
285 285         getData takes one data unit (a profile) from the read buffer and copies it to the
286 286         self.dataOut object of type "Voltage" together with every parameter associated with
287 287         it (metadata). When there is no data left in the read buffer, a new block of data
288 288         is read using "readNextBlock".
289 289 
290 290         It also increases the buffer counter "self.profileIndex" by 1.
291 291
292 292 Return:
293 293
294 294             If the flag self.getByBlock has been set, the whole block is copied to self.dataOut and self.profileIndex
295 295             equals the total number of profiles read from the file.
296 296 
297 297             If self.getByBlock == False:
298 298
299 299 self.dataOut.data = buffer[:, thisProfile, :]
300 300
301 301 shape = [nChannels, nHeis]
302 302
303 303             If self.getByBlock == True:
304 304
305 305 self.dataOut.data = buffer[:, :, :]
306 306
307 307 shape = [nChannels, nProfiles, nHeis]
308 308
309 309         Affected variables:
310 310 self.dataOut
311 311 self.profileIndex
312 312
313 313 Affected:
314 314 self.dataOut
315 315 self.profileIndex
316 316 self.flagDiscontinuousBlock
317 317 self.flagIsNewBlock
318 318 """
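        # Shape sketch of the two return modes described above (names as used in this
        # class; getByBlock is normally configured through setup() and is set directly
        # here only to illustrate the resulting shapes):
        #
        #     readerObj.getByBlock = False
        #     profile = readerObj.getData()   # profile.shape == (nChannels, nHeights)
        #
        #     readerObj.getByBlock = True
        #     block = readerObj.getData()     # block.shape == (nChannels, nProfiles, nHeights)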
319 319
320 320 if self.flagNoMoreFiles:
321 321 self.dataOut.flagNoData = True
322 322 print 'Process finished'
323 323 return 0
324 324
325 325 self.flagDiscontinuousBlock = 0
326 326 self.flagIsNewBlock = 0
327 327
328 328 if self.__hasNotDataInBuffer():
329 329
330 330 if not( self.readNextBlock() ):
331 331 return 0
332 332
333 333 self.getFirstHeader()
334 334
335 if self.datablock == None:
335 if self.datablock is None:
336 336 self.dataOut.flagNoData = True
337 337 return 0
338 338
339 339 if not self.getByBlock:
340 340
341 341 """
342 342 Return profile by profile
343 343
344 344 If nTxs > 1 then one profile is divided by nTxs and number of total
345 345 blocks is increased by nTxs (nProfiles *= nTxs)
346 346 """
347 347 self.dataOut.flagDataAsBlock = False
348 348
349 349 if self.nTxs == 1:
350 350 self.dataOut.data = self.datablock[:,self.profileIndex,:]
351 351 self.dataOut.profileIndex = self.profileIndex
352 352
353 353 self.profileIndex += 1
354 354
355 355 else:
356 356 iniHei_ForThisTx = (self.txIndex)*int(self.processingHeaderObj.nHeights/self.nTxs)
357 357 endHei_ForThisTx = (self.txIndex+1)*int(self.processingHeaderObj.nHeights/self.nTxs)
358 358
359 359 # print iniHei_ForThisTx, endHei_ForThisTx
360 360
361 361 self.dataOut.data = self.datablock[:, self.profileIndex, iniHei_ForThisTx:endHei_ForThisTx]
362 362 self.dataOut.profileIndex = self.profileIndex*self.nTxs + self.txIndex
363 363
364 364 self.txIndex += 1
365 365
366 366 if self.txIndex == self.nTxs:
367 367 self.txIndex = 0
368 368 self.profileIndex += 1
369 369
370 370 else:
371 371 """
372 372 Return all block
373 373 """
374 374 self.dataOut.flagDataAsBlock = True
375 375 self.dataOut.data = self.datablock
376 376 self.dataOut.profileIndex = self.processingHeaderObj.profilesPerBlock
377 377
378 378 self.profileIndex = self.processingHeaderObj.profilesPerBlock
379 379
380 380 self.dataOut.flagNoData = False
381 381
382 382 self.getBasicHeader()
383 383
384 384 self.dataOut.realtime = self.online
385 385
386 386 return self.dataOut.data
387 387
388 388 class VoltageWriter(JRODataWriter, Operation):
389 389 """
390 390     This class writes voltage data to processed files (.r). Data is always
391 391     written in blocks.
392 392 """
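    # Write-flow sketch (descriptive, based on the methods below): putData() copies one
    # profile per call into self.datablock; once profilesPerBlock profiles have been
    # buffered (hasAllDataInBuffer), writeNextBlock() flushes the block to the current
    # output file, and setNextFile() opens a new file when a discontinuous block arrives.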
393 393
394 394 ext = ".r"
395 395
396 396 optchar = "D"
397 397
398 398 shapeBuffer = None
399 399
400 400
401 401 def __init__(self):
402 402 """
403 403         Initializer of the VoltageWriter class for writing voltage data.
404 404
405 405 Affected:
406 406 self.dataOut
407 407
408 408 Return: None
409 409 """
410 410 Operation.__init__(self)
411 411
412 412 self.nTotalBlocks = 0
413 413
414 414 self.profileIndex = 0
415 415
416 416 self.isConfig = False
417 417
418 418 self.fp = None
419 419
420 420 self.flagIsNewFile = 1
421 421
422 422 self.nTotalBlocks = 0
423 423
424 424 self.flagIsNewBlock = 0
425 425
426 426 self.setFile = None
427 427
428 428 self.dtype = None
429 429
430 430 self.path = None
431 431
432 432 self.filename = None
433 433
434 434 self.basicHeaderObj = BasicHeader(LOCALTIME)
435 435
436 436 self.systemHeaderObj = SystemHeader()
437 437
438 438 self.radarControllerHeaderObj = RadarControllerHeader()
439 439
440 440 self.processingHeaderObj = ProcessingHeader()
441 441
442 442 def hasAllDataInBuffer(self):
443 443 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
444 444 return 1
445 445 return 0
446 446
447 447
448 448 def setBlockDimension(self):
449 449 """
450 450         Obtains the dimensional shapes of the data sub-blocks that make up a block
451 451
452 452 Affected:
453 453 self.shape_spc_Buffer
454 454 self.shape_cspc_Buffer
455 455 self.shape_dc_Buffer
456 456
457 457 Return: None
458 458 """
459 459 self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
460 460 self.processingHeaderObj.nHeights,
461 461 self.systemHeaderObj.nChannels)
462 462
463 463 self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
464 464 self.processingHeaderObj.profilesPerBlock,
465 465 self.processingHeaderObj.nHeights),
466 466 dtype=numpy.dtype('complex64'))
467 467
468 468 def writeBlock(self):
469 469 """
470 470         Writes the buffer to the designated file
471 471
472 472 Affected:
473 473 self.profileIndex
474 474 self.flagIsNewFile
475 475 self.flagIsNewBlock
476 476 self.nTotalBlocks
477 477 self.blockIndex
478 478
479 479 Return: None
480 480 """
481 481 data = numpy.zeros( self.shapeBuffer, self.dtype )
482 482
483 483 junk = numpy.transpose(self.datablock, (1,2,0))
484 484
485 485 data['real'] = junk.real
486 486 data['imag'] = junk.imag
487 487
488 488 data = data.reshape( (-1) )
489 489
490 490 data.tofile( self.fp )
491 491
492 492 self.datablock.fill(0)
493 493
494 494 self.profileIndex = 0
495 495 self.flagIsNewFile = 0
496 496 self.flagIsNewBlock = 1
497 497
498 498 self.blockIndex += 1
499 499 self.nTotalBlocks += 1
500 500
501 501 # print "[Writing] Block = %04d" %self.blockIndex
502 502
503 503 def putData(self):
504 504 """
505 505         Fills a block of data and then writes it to a file
506 506
507 507 Affected:
508 508 self.flagIsNewBlock
509 509 self.profileIndex
510 510
511 511 Return:
512 512             0 : If there is no data or there are no more files that can be written
513 513             1 : If the data of one block was written to a file
514 514 """
515 515 if self.dataOut.flagNoData:
516 516 return 0
517 517
518 518 self.flagIsNewBlock = 0
519 519
520 520 if self.dataOut.flagDiscontinuousBlock:
521 521 self.datablock.fill(0)
522 522 self.profileIndex = 0
523 523 self.setNextFile()
524 524
525 525 if self.profileIndex == 0:
526 526 self.setBasicHeader()
527 527
528 528 self.datablock[:,self.profileIndex,:] = self.dataOut.data
529 529
530 530 self.profileIndex += 1
531 531
532 532 if self.hasAllDataInBuffer():
533 533 #if self.flagIsNewFile:
534 534 self.writeNextBlock()
535 535 # self.setFirstHeader()
536 536
537 537 return 1
538 538
539 539 def __getProcessFlags(self):
540 540
541 541 processFlags = 0
542 542
543 543 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
544 544 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
545 545 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
546 546 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
547 547 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
548 548 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
549 549
550 550 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
551 551
552 552
553 553
554 554 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
555 555 PROCFLAG.DATATYPE_SHORT,
556 556 PROCFLAG.DATATYPE_LONG,
557 557 PROCFLAG.DATATYPE_INT64,
558 558 PROCFLAG.DATATYPE_FLOAT,
559 559 PROCFLAG.DATATYPE_DOUBLE]
560 560
561 561
562 562 for index in range(len(dtypeList)):
563 563 if self.dataOut.dtype == dtypeList[index]:
564 564 dtypeValue = datatypeValueList[index]
565 565 break
566 566
567 567 processFlags += dtypeValue
568 568
569 569 if self.dataOut.flagDecodeData:
570 570 processFlags += PROCFLAG.DECODE_DATA
571 571
572 572 if self.dataOut.flagDeflipData:
573 573 processFlags += PROCFLAG.DEFLIP_DATA
574 574
575 575 if self.dataOut.code != None:
576 576 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
577 577
578 578 if self.dataOut.nCohInt > 1:
579 579 processFlags += PROCFLAG.COHERENT_INTEGRATION
580 580
581 581 return processFlags
582 582
583 583
584 584 def __getBlockSize(self):
585 585 '''
586 586         This method determines the number of bytes in a block of Voltage-type data
587 587 '''
588 588
589 589 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
590 590 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
591 591 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
592 592 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
593 593 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
594 594 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
595 595
596 596 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
597 597 datatypeValueList = [1,2,4,8,4,8]
598 598 for index in range(len(dtypeList)):
599 599 if self.dataOut.dtype == dtypeList[index]:
600 600 datatypeValue = datatypeValueList[index]
601 601 break
602 602
603 603 blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.profilesPerBlock * datatypeValue * 2)
604 604
605 605 return blocksize
606 606
607 607 def setFirstHeader(self):
608 608
609 609 """
610 610         Obtains a copy of the First Header
611 611
612 612 Affected:
613 613 self.systemHeaderObj
614 614 self.radarControllerHeaderObj
615 615 self.dtype
616 616
617 617 Return:
618 618 None
619 619 """
620 620
621 621 self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
622 622 self.systemHeaderObj.nChannels = self.dataOut.nChannels
623 623 self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()
624 624
625 625 self.setBasicHeader()
626 626
627 627 processingHeaderSize = 40 # bytes
628 628 self.processingHeaderObj.dtype = 0 # Voltage
629 629 self.processingHeaderObj.blockSize = self.__getBlockSize()
630 630 self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
631 631 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
632 632         self.processingHeaderObj.nWindows = 1 #could be 1 or self.dataOut.processingHeaderObj.nWindows
633 633 self.processingHeaderObj.processFlags = self.__getProcessFlags()
634 634 self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
635 635         self.processingHeaderObj.nIncohInt = 1 # When the source data is of type Voltage
636 636         self.processingHeaderObj.totalSpectra = 0 # When the source data is of type Voltage
637 637
638 638 # if self.dataOut.code != None:
639 639 # self.processingHeaderObj.code = self.dataOut.code
640 640 # self.processingHeaderObj.nCode = self.dataOut.nCode
641 641 # self.processingHeaderObj.nBaud = self.dataOut.nBaud
642 642 # codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
643 643 # processingHeaderSize += codesize
644 644
645 645 if self.processingHeaderObj.nWindows != 0:
646 646 self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
647 647 self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
648 648 self.processingHeaderObj.nHeights = self.dataOut.nHeights
649 649 self.processingHeaderObj.samplesWin = self.dataOut.nHeights
650 650 processingHeaderSize += 12
651 651
652 652 self.processingHeaderObj.size = processingHeaderSize No newline at end of file
@@ -1,286 +1,286
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: jroproc_base.py 1 2012-11-12 18:56:07Z murco $
5 5 '''
6 6
7 7 class ProcessingUnit(object):
8 8
9 9 """
10 10     This is the base class for data processing.
11 11 
12 12     It provides the "call" method to invoke operations. Operations can be:
13 13     - Internal methods (callMethod)
14 14     - Objects of type Operation (callObject). Before being called, these objects
15 15       have to be added with the "add" method.
16 16
17 17 """
18 18     # input data object (Voltage, Spectra or Correlation)
19 19 dataIn = None
20 20 dataInList = []
21 21
22 22     # output data object (Voltage, Spectra or Correlation)
23 23 dataOut = None
24 24
25 25 operations2RunDict = None
26 26
27 27 isConfig = False
28 28
29 29
30 30 def __init__(self):
31 31
32 32 self.dataIn = None
33 33 self.dataInList = []
34 34
35 35 self.dataOut = None
36 36
37 37 self.operations2RunDict = {}
38 38
39 39 self.isConfig = False
40 40
41 41 def addOperation(self, opObj, objId):
42 42
43 43 """
44 44         Adds an object of type "Operation" (opObj) to the "self.operations2RunDict" dictionary and returns the
45 45         identifier associated with this object.
46 46 
47 47         Input:
48 48 
49 49             object : object of the "Operation" class
50 50 
51 51         Return:
52 52 
53 53             objId : identifier of the object, needed to execute the operation
54 54 """
55 55
56 56 self.operations2RunDict[objId] = opObj
57 57
58 58 return objId
59 59
60 60 def getOperationObj(self, objId):
61 61
62 62 if objId not in self.operations2RunDict.keys():
63 63 return None
64 64
65 65 return self.operations2RunDict[objId]
66 66
67 67 def operation(self, **kwargs):
68 68
69 69 """
70 70         Direct operation on the data (dataOut.data). The attribute values of the dataOut
71 71         object need to be updated.
72 72 
73 73         Input:
74 74 
75 75         **kwargs : Dictionary of arguments for the function to execute
76 76 """
77 77
78 78         raise ValueError, "Not implemented"
79 79
80 80 def callMethod(self, name, **kwargs):
81 81
82 82 """
83 83         Executes the method named "name" of this class with the **kwargs arguments.
84 84 
85 85         Input:
86 86             name : name of the method to execute
87 87 
88 88             **kwargs : dictionary with the argument names and values of the function to execute.
89 89
90 90 """
91 91
92 92 #Checking the inputs
93 93 if name == 'run':
94 94
95 95 if not self.checkInputs():
96 96 self.dataOut.flagNoData = True
97 97 return False
98 98 else:
99 99             #If it is not a RUN method, the input is the same (internal) dataOut
100 100 if self.dataOut.isEmpty():
101 101 return False
102 102
103 103 #Getting the pointer to method
104 104 methodToCall = getattr(self, name)
105 105
106 106 #Executing the self method
107 107 methodToCall(**kwargs)
108 108
109 109         #Checking the outputs
110 110
111 111 # if name == 'run':
112 112 # pass
113 113 # else:
114 114 # pass
115 115 #
116 116 # if name != 'run':
117 117 # return True
118 118
119 if self.dataOut == None:
119 if self.dataOut is None:
120 120 return False
121 121
122 122 if self.dataOut.isEmpty():
123 123 return False
124 124
125 125 return True
126 126
127 127 def callObject(self, objId, **kwargs):
128 128
129 129 """
130 130         Executes the operation associated with the object identifier "objId"
131 131 
132 132         Input:
133 133 
134 134             objId : identifier of the object to execute
135 135 
136 136             **kwargs : dictionary with the argument names and values of the function to execute.
137 137
138 138 Return:
139 139
140 140 None
141 141 """
142 142
143 143 if self.dataOut.isEmpty():
144 144 return False
145 145
146 146 externalProcObj = self.operations2RunDict[objId]
147 147
148 148 externalProcObj.run(self.dataOut, **kwargs)
149 149
150 150 return True
151 151
152 152 def call(self, opType, opName=None, opId=None, **kwargs):
153 153
154 154 """
155 155         Returns True if it executes the internal operation named "opName" or the external operation
156 156         identified by the id "opId", with the arguments "**kwargs".
157 157 
158 158         Returns False if the operation was not executed.
159 159 
160 160         Input:
161 161 
162 162             opType : Can be "self" or "external"
163 163 
164 164                 The operation can be of two kinds (callMethod or callObject):
165 165 
166 166                 1. A method of this class itself:
167 167 
168 168                     opType = "self"
169 169 
170 170                 2. The "run" method of an object of type Operation or of a class derived from it:
171 171 
172 172                     opType = "other" or "external".
173 173 
174 174             opName : If the operation is internal (opType = 'self'), then "opName" is
175 175                      used to call an internal method of the Processing class
176 176 
177 177             opId : If the operation is external (opType = 'other'), then "opId" is
178 178                    used to call the "run" method of the Operation class registered with that id
179 179 
180 180         Exception:
181 181             The Operation object must have been added beforehand with the
182 182             "addOperation" method and identified with "opId" = the id of the operation.
183 183             Otherwise an IOError-type error is raised.
184 184
185 185 """
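        # A usage sketch of the two dispatch paths (the Operation object and its id are
        # placeholders; any object previously registered with addOperation() can be
        # invoked by its id):
        #
        #     procUnit.addOperation(opObj, objId=1)
        #     procUnit.call(opType='self', opName='run')        # internal method
        #     procUnit.call(opType='external', opId=1, n=10)    # opObj.run(dataOut, n=10)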
186 186
187 187 if opType == 'self':
188 188
189 189 if not opName:
190 190 raise IOError, "opName parameter should be defined"
191 191
192 192 sts = self.callMethod(opName, **kwargs)
193 193
194 194 if opType == 'other' or opType == 'external':
195 195
196 196 if not opId:
197 197 raise IOError, "opId parameter should be defined"
198 198
199 199 if opId not in self.operations2RunDict.keys():
200 200                 raise IOError, "This operation id has not been registered"
201 201
202 202 sts = self.callObject(opId, **kwargs)
203 203
204 204 return sts
205 205
206 206 def setInput(self, dataIn):
207 207
208 208 self.dataIn = dataIn
209 209 self.dataInList.append(dataIn)
210 210
211 211 def getOutputObj(self):
212 212
213 213 return self.dataOut
214 214
215 215 def checkInputs(self):
216 216
217 217 for thisDataIn in self.dataInList:
218 218
219 219 if thisDataIn.isEmpty():
220 220 return False
221 221
222 222 return True
223 223
224 224 def setup(self):
225 225
226 226 raise ValueError, "Not implemented"
227 227
228 228 def run(self):
229 229
230 230 raise ValueError, "Not implemented"
231 231
232 232 def close(self):
233 233         #Close every thread, queue or any other object here if it is necessary.
234 234 return
235 235
236 236 class Operation(object):
237 237
238 238 """
239 239     Base class used to define the additional operations that can be added to the ProcessingUnit class
240 240     and that need to accumulate prior information about the data to process. Preferably use an
241 241     accumulation buffer inside this class.
242 242 
243 243     Example: coherent integration, which needs the information from the previous n profiles (buffer)
244 244
245 245 """
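    # A minimal subclass sketch (illustrative only, not part of this module): setup() is
    # expected to be called once from run() through the isConfig flag, and the
    # accumulation buffer lives in the subclass:
    #
    #     class IntegrateN(Operation):
    #         def setup(self, n=2):
    #             self.n = n
    #             self.buffer = []
    #             self.isConfig = True
    #         def run(self, dataIn, **kwargs):
    #             if not self.isConfig:
    #                 self.setup(**kwargs)
    #             self.buffer.append(dataIn.data.copy())
    #             if len(self.buffer) == self.n:
    #                 dataIn.data = sum(self.buffer) / self.n
    #                 self.buffer = []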
246 246
247 247 __buffer = None
248 248 isConfig = False
249 249
250 250 def __init__(self):
251 251
252 252 self.__buffer = None
253 253 self.isConfig = False
254 254
255 255 def setup(self):
256 256
257 257 self.isConfig = True
258 258
259 259 raise ValueError, "Not implemented"
260 260
261 261 def run(self, dataIn, **kwargs):
262 262
263 263 """
264 264         Performs the required operations on dataIn.data and updates the
265 265         attributes of the dataIn object.
266 266 
267 267         Input:
268 268 
269 269             dataIn : object of type JROData
270 270 
271 271         Return:
272 272 
273 273             None
274 274 
275 275         Affected:
276 276             __buffer : data reception buffer.
277 277
278 278 """
279 279 if not self.isConfig:
280 280 self.setup(**kwargs)
281 281
282 282         raise ValueError, "Not implemented"
283 283
284 284 def close(self):
285 285
286 286 pass No newline at end of file
@@ -1,246 +1,246
1 1 import numpy
2 2
3 3 from jroproc_base import ProcessingUnit, Operation
4 4 from schainpy.model.data.jrodata import Correlation
5 5
6 6 class CorrelationProc(ProcessingUnit):
7 7
8 8 def __init__(self):
9 9
10 10 ProcessingUnit.__init__(self)
11 11
12 12 self.objectDict = {}
13 13 self.buffer = None
14 14 self.firstdatatime = None
15 15 self.profIndex = 0
16 16 self.dataOut = Correlation()
17 17
18 18 def __updateObjFromVoltage(self):
19 19
20 20 self.dataOut.timeZone = self.dataIn.timeZone
21 21 self.dataOut.dstFlag = self.dataIn.dstFlag
22 22 self.dataOut.errorCount = self.dataIn.errorCount
23 23 self.dataOut.useLocalTime = self.dataIn.useLocalTime
24 24
25 25 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
26 26 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
27 27 self.dataOut.channelList = self.dataIn.channelList
28 28 self.dataOut.heightList = self.dataIn.heightList
29 29 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
30 30 # self.dataOut.nHeights = self.dataIn.nHeights
31 31 # self.dataOut.nChannels = self.dataIn.nChannels
32 32 self.dataOut.nBaud = self.dataIn.nBaud
33 33 self.dataOut.nCode = self.dataIn.nCode
34 34 self.dataOut.code = self.dataIn.code
35 35 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
36 36 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
37 37 self.dataOut.utctime = self.firstdatatime
38 38         self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is decoded
39 39         self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data is not flipped
40 40 # self.dataOut.nCohInt = self.dataIn.nCohInt
41 41 # self.dataOut.nIncohInt = 1
42 42 self.dataOut.ippSeconds = self.dataIn.ippSeconds
43 43 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
44 44
45 45 # self.dataOut.timeInterval = self.dataIn.timeInterval*self.dataOut.nPoints
46 46
47 47
48 48 def removeDC(self, jspectra):
49 49
50 50 nChannel = jspectra.shape[0]
51 51
52 52 for i in range(nChannel):
53 53 jspectra_tmp = jspectra[i,:,:]
54 54 jspectra_DC = numpy.mean(jspectra_tmp,axis = 0)
55 55
56 56 jspectra_tmp = jspectra_tmp - jspectra_DC
57 57 jspectra[i,:,:] = jspectra_tmp
58 58
59 59 return jspectra
60 60
61 61
62 62 def removeNoise(self, mode = 2):
63 63 indR = numpy.where(self.dataOut.lagR == 0)[0][0]
64 64 indT = numpy.where(self.dataOut.lagT == 0)[0][0]
65 65
66 66 jspectra = self.dataOut.data_corr[:,:,indR,:]
67 67
68 68 num_chan = jspectra.shape[0]
69 69 num_hei = jspectra.shape[2]
70 70
71 71 freq_dc = indT
72 72 ind_vel = numpy.array([-2,-1,1,2]) + freq_dc
73 73
74 74 NPot = self.dataOut.getNoise(mode)
75 75 jspectra[:,freq_dc,:] = jspectra[:,freq_dc,:] - NPot
76 76 SPot = jspectra[:,freq_dc,:]
77 77 pairsAutoCorr = self.dataOut.getPairsAutoCorr()
78 78 # self.dataOut.signalPotency = SPot
79 79 self.dataOut.noise = NPot
80 80 self.dataOut.SNR = (SPot/NPot)[pairsAutoCorr]
81 81 self.dataOut.data_corr[:,:,indR,:] = jspectra
82 82
83 83 return 1
84 84
85 85
86 86 def calculateNormFactor(self):
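        # Descriptive note: for each pair (i, j) the normalization factor per height is the
        # geometric mean of the peak autocorrelation magnitudes of channels i and j at zero
        # range lag, sqrt(max|ACF_i| * max|ACF_j|); when no autocorrelation is available for
        # the pair, the peak of the pair's own cross-correlation magnitude is used instead.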
87 87
88 88 pairsList = self.dataOut.pairsList
89 89 pairsAutoCorr = self.dataOut.pairsAutoCorr
90 90 nHeights = self.dataOut.nHeights
91 91 nPairs = len(pairsList)
92 92 normFactor = numpy.zeros((nPairs,nHeights))
93 93
94 94 indR = numpy.where(self.dataOut.lagR == 0)[0][0]
95 95 indT = numpy.where(self.dataOut.lagT == 0)[0][0]
96 96
97 97 for l in range(len(pairsList)):
98 98 firstChannel = pairsList[l][0]
99 99 secondChannel = pairsList[l][1]
100 100
101 101 AC1 = pairsAutoCorr[firstChannel]
102 102 AC2 = pairsAutoCorr[secondChannel]
103 103
104 104 if (AC1 >= 0 and AC2 >= 0):
105 105
106 106 data1 = numpy.abs(self.dataOut.data_corr[AC1,:,indR,:])
107 107 data2 = numpy.abs(self.dataOut.data_corr[AC2,:,indR,:])
108 108 maxim1 = data1.max(axis = 0)
109 109                 maxim2 = data2.max(axis = 0)
110 110 maxim = numpy.sqrt(maxim1*maxim2)
111 111 else:
112 112 #In case there is no autocorrelation for the pair
113 113 data = numpy.abs(self.dataOut.data_corr[l,:,indR,:])
114 114 maxim = numpy.max(data, axis = 0)
115 115
116 116 normFactor[l,:] = maxim
117 117
118 118 self.dataOut.normFactor = normFactor
119 119
120 120 return 1
121 121
122 122 def run(self, lagT=None, lagR=None, pairsList=None,
123 123 nPoints=None, nAvg=None, bufferSize=None,
124 124 fullT = False, fullR = False, removeDC = False):
125 125
126 126 self.dataOut.flagNoData = True
127 127
128 128 if self.dataIn.type == "Correlation":
129 129
130 130 self.dataOut.copy(self.dataIn)
131 131
132 132 return
133 133
134 134 if self.dataIn.type == "Voltage":
135 135
136 136 if pairsList == None:
137 137 pairsList = [numpy.array([0,0])]
138 138
139 139 if nPoints == None:
140 140 nPoints = 128
141 141 #------------------------------------------------------------
142 142             #Conditionals to compute correlations in time and range
143 143 if fullT:
144 144 lagT = numpy.arange(nPoints*2 - 1) - nPoints + 1
145 145 elif lagT == None:
146 146 lagT = numpy.array([0])
147 147 else:
148 148 lagT = numpy.array(lagT)
149 149
150 150 if fullR:
151 151 lagR = numpy.arange(self.dataOut.nHeights)
152 152 elif lagR == None:
153 153 lagR = numpy.array([0])
154 154 #-------------------------------------------------------------
155 155
156 156 if nAvg == None:
157 157 nAvg = 1
158 158
159 159 if bufferSize == None:
160 160 bufferSize = 0
161 161
162 162 deltaH = self.dataIn.heightList[1] - self.dataIn.heightList[0]
163 163 self.dataOut.lagR = numpy.round(numpy.array(lagR)/deltaH)
164 164 self.dataOut.pairsList = pairsList
165 165 self.dataOut.nPoints = nPoints
166 166 # channels = numpy.sort(list(set(list(itertools.chain.from_iterable(pairsList)))))
167 167
168 if self.buffer == None:
168 if self.buffer is None:
169 169
170 170 self.buffer = numpy.zeros((self.dataIn.nChannels,self.dataIn.nProfiles,self.dataIn.nHeights),dtype='complex')
171 171
172 172
173 173 self.buffer[:,self.profIndex,:] = self.dataIn.data.copy()[:,:]
174 174
175 175 self.profIndex += 1
176 176
177 177 if self.firstdatatime == None:
178 178
179 179 self.firstdatatime = self.dataIn.utctime
180 180
181 181 if self.profIndex == nPoints:
182 182
183 183 tmp = self.buffer[:,0:nPoints,:]
184 184 self.buffer = None
185 185 self.buffer = tmp
186 186
187 187 #--------------- Remover DC ------------
188 188 if removeDC:
189 189 self.buffer = self.removeDC(self.buffer)
190 190 #---------------------------------------------
191 191 self.dataOut.data_volts = self.buffer
192 192 self.__updateObjFromVoltage()
193 193 self.dataOut.data_corr = numpy.zeros((len(pairsList),
194 194 len(lagT),len(lagR),
195 195 self.dataIn.nHeights),
196 196 dtype='complex')
197 197
198 198 for l in range(len(pairsList)):
199 199
200 200 firstChannel = pairsList[l][0]
201 201 secondChannel = pairsList[l][1]
202 202
203 203 tmp = None
204 204 tmp = numpy.zeros((len(lagT),len(lagR),self.dataIn.nHeights),dtype='complex')
205 205
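                # Descriptive note: each (t, r) lag is formed by shifting the second channel
                # by idxT profiles in time and idxR samples in range, zero-padding so the
                # shape stays (nPoints, nHeights), and summing conj(ch1) * shifted(ch2) over
                # the nPoints profiles.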
206 206 for t in range(len(lagT)):
207 207
208 208 for r in range(len(lagR)):
209 209
210 210 idxT = lagT[t]
211 211 idxR = lagR[r]
212 212
213 213 if idxT >= 0:
214 214 vStacked = numpy.vstack((self.buffer[secondChannel,idxT:,:],
215 215 numpy.zeros((idxT,self.dataIn.nHeights),dtype='complex')))
216 216 else:
217 217 vStacked = numpy.vstack((numpy.zeros((-idxT,self.dataIn.nHeights),dtype='complex'),
218 218 self.buffer[secondChannel,:(nPoints + idxT),:]))
219 219
220 220 if idxR >= 0:
221 221 hStacked = numpy.hstack((vStacked[:,idxR:],numpy.zeros((nPoints,idxR),dtype='complex')))
222 222 else:
223 223                                 hStacked = numpy.hstack((numpy.zeros((nPoints,-idxR),dtype='complex'),vStacked[:,:(self.dataOut.nHeights + idxR)]))
224 224
225 225
226 226 tmp[t,r,:] = numpy.sum((numpy.conjugate(self.buffer[firstChannel,:,:])*hStacked),axis=0)
227 227
228 228
229 229 hStacked = None
230 230 vStacked = None
231 231
232 232 self.dataOut.data_corr[l,:,:,:] = tmp[:,:,:]
233 233
234 234             #Compute the normalization factors
235 235 self.dataOut.pairsAutoCorr = self.dataOut.getPairsAutoCorr()
236 236 self.dataOut.lagT = lagT*self.dataIn.ippSeconds*self.dataIn.nCohInt
237 237 self.dataOut.lagR = lagR
238 238
239 239 self.calculateNormFactor()
240 240
241 241 self.dataOut.flagNoData = False
242 242 self.buffer = None
243 243 self.firstdatatime = None
244 244 self.profIndex = 0
245 245
246 246 return No newline at end of file
@@ -1,343 +1,343
1 1 import numpy
2 2
3 3 from jroproc_base import ProcessingUnit, Operation
4 4 from schainpy.model.data.jrodata import SpectraHeis
5 5
6 6 class SpectraHeisProc(ProcessingUnit):
7 7
8 8 def __init__(self):
9 9
10 10 ProcessingUnit.__init__(self)
11 11
12 12 # self.buffer = None
13 13 # self.firstdatatime = None
14 14 # self.profIndex = 0
15 15 self.dataOut = SpectraHeis()
16 16
17 17 def __updateObjFromVoltage(self):
18 18
19 19 self.dataOut.timeZone = self.dataIn.timeZone
20 20 self.dataOut.dstFlag = self.dataIn.dstFlag
21 21 self.dataOut.errorCount = self.dataIn.errorCount
22 22 self.dataOut.useLocalTime = self.dataIn.useLocalTime
23 23
24 24 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()#
25 25 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()#
26 26 self.dataOut.channelList = self.dataIn.channelList
27 27 self.dataOut.heightList = self.dataIn.heightList
28 28 # self.dataOut.dtype = self.dataIn.dtype
29 29 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
30 30 # self.dataOut.nHeights = self.dataIn.nHeights
31 31 # self.dataOut.nChannels = self.dataIn.nChannels
32 32 self.dataOut.nBaud = self.dataIn.nBaud
33 33 self.dataOut.nCode = self.dataIn.nCode
34 34 self.dataOut.code = self.dataIn.code
35 35 # self.dataOut.nProfiles = 1
36 36 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
37 37 self.dataOut.nFFTPoints = self.dataIn.nHeights
38 38 # self.dataOut.channelIndexList = self.dataIn.channelIndexList
39 39 # self.dataOut.flagNoData = self.dataIn.flagNoData
40 40 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
41 41 self.dataOut.utctime = self.dataIn.utctime
42 42 # self.dataOut.utctime = self.firstdatatime
43 43         self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is decoded
44 44         self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data is not flipped
45 45 # self.dataOut.flagShiftFFT = self.dataIn.flagShiftFFT
46 46 self.dataOut.nCohInt = self.dataIn.nCohInt
47 47 self.dataOut.nIncohInt = 1
48 48 # self.dataOut.ippSeconds= self.dataIn.ippSeconds
49 49 self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
50 50
51 51 # self.dataOut.timeInterval = self.dataIn.timeInterval*self.dataOut.nIncohInt
52 52 # self.dataOut.set=self.dataIn.set
53 53 # self.dataOut.deltaHeight=self.dataIn.deltaHeight
54 54
55 55
56 56 def __updateObjFromFits(self):
57 57
58 58 self.dataOut.utctime = self.dataIn.utctime
59 59 self.dataOut.channelIndexList = self.dataIn.channelIndexList
60 60
61 61 self.dataOut.channelList = self.dataIn.channelList
62 62 self.dataOut.heightList = self.dataIn.heightList
63 63 self.dataOut.data_spc = self.dataIn.data
64 64 self.dataOut.ippSeconds = self.dataIn.ippSeconds
65 65 self.dataOut.nCohInt = self.dataIn.nCohInt
66 66 self.dataOut.nIncohInt = self.dataIn.nIncohInt
67 67 # self.dataOut.timeInterval = self.dataIn.timeInterval
68 68 self.dataOut.timeZone = self.dataIn.timeZone
69 69 self.dataOut.useLocalTime = True
70 70 # self.dataOut.
71 71 # self.dataOut.
72 72
73 73 def __getFft(self):
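        # Descriptive note: computes the power spectrum of each channel by taking the FFT
        # along the sample (height) axis, applying fftshift so zero frequency is centered,
        # and normalizing |FFT|^2 by nFFTPoints.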
74 74
75 75 fft_volt = numpy.fft.fft(self.dataIn.data, axis=1)
76 76 fft_volt = numpy.fft.fftshift(fft_volt,axes=(1,))
77 77 spc = numpy.abs(fft_volt * numpy.conjugate(fft_volt))/(self.dataOut.nFFTPoints)
78 78 self.dataOut.data_spc = spc
79 79
80 80 def run(self):
81 81
82 82 self.dataOut.flagNoData = True
83 83
84 84 if self.dataIn.type == "Fits":
85 85 self.__updateObjFromFits()
86 86 self.dataOut.flagNoData = False
87 87 return
88 88
89 89 if self.dataIn.type == "SpectraHeis":
90 90 self.dataOut.copy(self.dataIn)
91 91 return
92 92
93 93 if self.dataIn.type == "Voltage":
94 94 self.__updateObjFromVoltage()
95 95 self.__getFft()
96 96 self.dataOut.flagNoData = False
97 97
98 98 return
99 99
100 100         raise ValueError, "The object type %s is not valid"%(self.dataIn.type)
101 101
102 102
103 103 def selectChannels(self, channelList):
104 104
105 105 channelIndexList = []
106 106
107 107 for channel in channelList:
108 108 index = self.dataOut.channelList.index(channel)
109 109 channelIndexList.append(index)
110 110
111 111 self.selectChannelsByIndex(channelIndexList)
112 112
113 113 def selectChannelsByIndex(self, channelIndexList):
114 114 """
115 115         Selects a block of data by channel according to channelIndexList
116 116 
117 117         Input:
118 118             channelIndexList : simple list of channel indexes to select, e.g. [2,3,7]
119 119
120 120 Affected:
121 121 self.dataOut.data
122 122 self.dataOut.channelIndexList
123 123 self.dataOut.nChannels
124 124 self.dataOut.m_ProcessingHeader.totalSpectra
125 125 self.dataOut.systemHeaderObj.numChannels
126 126 self.dataOut.m_ProcessingHeader.blockSize
127 127
128 128 Return:
129 129 None
130 130 """
131 131
132 132 for channelIndex in channelIndexList:
133 133 if channelIndex not in self.dataOut.channelIndexList:
134 134 print channelIndexList
135 135 raise ValueError, "The value %d in channelIndexList is not valid" %channelIndex
136 136
137 137 # nChannels = len(channelIndexList)
138 138
139 139 data_spc = self.dataOut.data_spc[channelIndexList,:]
140 140
141 141 self.dataOut.data_spc = data_spc
142 142 self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
143 143
144 144 return 1
145 145
146 146 class IncohInt4SpectraHeis(Operation):
147 147
148 148 isConfig = False
149 149
150 150 __profIndex = 0
151 151 __withOverapping = False
152 152
153 153 __byTime = False
154 154 __initime = None
155 155 __lastdatatime = None
156 156 __integrationtime = None
157 157
158 158 __buffer = None
159 159
160 160 __dataReady = False
161 161
162 162 n = None
163 163
164 164
165 165 def __init__(self):
166 166
167 167 Operation.__init__(self)
168 168 # self.isConfig = False
169 169
170 170 def setup(self, n=None, timeInterval=None, overlapping=False):
171 171 """
172 172 Set the parameters of the integration class.
173 173
174 174 Inputs:
175 175
176 176             n : Number of incoherent integrations
177 177 timeInterval : Time of integration. If the parameter "n" is selected this one does not work
178 178 overlapping :
179 179
180 180 """
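        # Configuration sketch (illustrative values); "op" stands for an IncohInt4SpectraHeis
        # instance, and setup() is normally invoked from run() on the first call:
        #
        #     op.setup(n=10)              # integrate every 10 spectra
        #     op.setup(timeInterval=60)   # or integrate until 60 seconds have elapsed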
181 181
182 182 self.__initime = None
183 183 self.__lastdatatime = 0
184 184 self.__buffer = None
185 185 self.__dataReady = False
186 186
187 187
188 188 if n == None and timeInterval == None:
189 189 raise ValueError, "n or timeInterval should be specified ..."
190 190
191 191 if n != None:
192 192 self.n = n
193 193 self.__byTime = False
194 194 else:
195 195 self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
196 196 self.n = 9999
197 197 self.__byTime = True
198 198
199 199 if overlapping:
200 200 self.__withOverapping = True
201 201 self.__buffer = None
202 202 else:
203 203 self.__withOverapping = False
204 204 self.__buffer = 0
205 205
206 206 self.__profIndex = 0
207 207
208 208 def putData(self, data):
209 209
210 210 """
211 211         Add a profile to the __buffer and increase the __profIndex by one
212 212
213 213 """
214 214
215 215 if not self.__withOverapping:
216 216 self.__buffer += data.copy()
217 217 self.__profIndex += 1
218 218 return
219 219
220 220 #Overlapping data
221 221 nChannels, nHeis = data.shape
222 222 data = numpy.reshape(data, (1, nChannels, nHeis))
223 223
224 224 #If the buffer is empty then it takes the data value
225 if self.__buffer == None:
225 if self.__buffer is None:
226 226 self.__buffer = data
227 227 self.__profIndex += 1
228 228 return
229 229
230 230         #If the buffer length is lower than n then stack the data value
231 231 if self.__profIndex < self.n:
232 232 self.__buffer = numpy.vstack((self.__buffer, data))
233 233 self.__profIndex += 1
234 234 return
235 235
236 236 #If the buffer length is equal to n then replacing the last buffer value with the data value
237 237 self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
238 238 self.__buffer[self.n-1] = data
239 239 self.__profIndex = self.n
240 240 return
241 241
242 242
243 243 def pushData(self):
244 244 """
245 245 Return the sum of the last profiles and the profiles used in the sum.
246 246
247 247 Affected:
248 248
249 249 self.__profileIndex
250 250
251 251 """
252 252
253 253 if not self.__withOverapping:
254 254 data = self.__buffer
255 255 n = self.__profIndex
256 256
257 257 self.__buffer = 0
258 258 self.__profIndex = 0
259 259
260 260 return data, n
261 261
262 262 #Integration with Overlapping
263 263 data = numpy.sum(self.__buffer, axis=0)
264 264 n = self.__profIndex
265 265
266 266 return data, n
267 267
268 268 def byProfiles(self, data):
269 269
270 270 self.__dataReady = False
271 271 avgdata = None
272 272 # n = None
273 273
274 274 self.putData(data)
275 275
276 276 if self.__profIndex == self.n:
277 277
278 278 avgdata, n = self.pushData()
279 279 self.__dataReady = True
280 280
281 281 return avgdata
282 282
283 283 def byTime(self, data, datatime):
284 284
285 285 self.__dataReady = False
286 286 avgdata = None
287 287 n = None
288 288
289 289 self.putData(data)
290 290
291 291 if (datatime - self.__initime) >= self.__integrationtime:
292 292 avgdata, n = self.pushData()
293 293 self.n = n
294 294 self.__dataReady = True
295 295
296 296 return avgdata
297 297
298 298 def integrate(self, data, datatime=None):
299 299
300 300 if self.__initime == None:
301 301 self.__initime = datatime
302 302
303 303 if self.__byTime:
304 304 avgdata = self.byTime(data, datatime)
305 305 else:
306 306 avgdata = self.byProfiles(data)
307 307
308 308
309 309 self.__lastdatatime = datatime
310 310
311 if avgdata == None:
311 if avgdata is None:
312 312 return None, None
313 313
314 314 avgdatatime = self.__initime
315 315
316 316 deltatime = datatime -self.__lastdatatime
317 317
318 318 if not self.__withOverapping:
319 319 self.__initime = datatime
320 320 else:
321 321 self.__initime += deltatime
322 322
323 323 return avgdata, avgdatatime
324 324
325 325 def run(self, dataOut, **kwargs):
326 326
327 327 if not self.isConfig:
328 328 self.setup(**kwargs)
329 329 self.isConfig = True
330 330
331 331 avgdata, avgdatatime = self.integrate(dataOut.data_spc, dataOut.utctime)
332 332
333 333 # dataOut.timeInterval *= n
334 334 dataOut.flagNoData = True
335 335
336 336 if self.__dataReady:
337 337 dataOut.data_spc = avgdata
338 338 dataOut.nIncohInt *= self.n
339 339 # dataOut.nCohInt *= self.n
340 340 dataOut.utctime = avgdatatime
341 341 # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nIncohInt
342 342 # dataOut.timeInterval = self.__timeInterval*self.n
343 343 dataOut.flagNoData = False No newline at end of file
@@ -1,2144 +1,2144
1 1 import numpy
2 2 import math
3 3 from scipy import optimize
4 4 from scipy import interpolate
5 5 from scipy import signal
6 6 from scipy import stats
7 7 import re
8 8 import datetime
9 9 import copy
10 10 import sys
11 11 import importlib
12 12 import itertools
13 13
14 14 from jroproc_base import ProcessingUnit, Operation
15 15 from schainpy.model.data.jrodata import Parameters
16 16
17 17
18 18 class ParametersProc(ProcessingUnit):
19 19
20 20 nSeconds = None
21 21
22 22 def __init__(self):
23 23 ProcessingUnit.__init__(self)
24 24
25 25 # self.objectDict = {}
26 26 self.buffer = None
27 27 self.firstdatatime = None
28 28 self.profIndex = 0
29 29 self.dataOut = Parameters()
30 30
31 31 def __updateObjFromInput(self):
32 32
33 33 self.dataOut.inputUnit = self.dataIn.type
34 34
35 35 self.dataOut.timeZone = self.dataIn.timeZone
36 36 self.dataOut.dstFlag = self.dataIn.dstFlag
37 37 self.dataOut.errorCount = self.dataIn.errorCount
38 38 self.dataOut.useLocalTime = self.dataIn.useLocalTime
39 39
40 40 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
41 41 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
42 42 self.dataOut.channelList = self.dataIn.channelList
43 43 self.dataOut.heightList = self.dataIn.heightList
44 44 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
45 45 # self.dataOut.nHeights = self.dataIn.nHeights
46 46 # self.dataOut.nChannels = self.dataIn.nChannels
47 47 self.dataOut.nBaud = self.dataIn.nBaud
48 48 self.dataOut.nCode = self.dataIn.nCode
49 49 self.dataOut.code = self.dataIn.code
50 50 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
51 51 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
52 52 self.dataOut.utctime = self.firstdatatime
53 53         self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is decoded
54 54         self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data is not flipped
55 55 # self.dataOut.nCohInt = self.dataIn.nCohInt
56 56 # self.dataOut.nIncohInt = 1
57 57 self.dataOut.ippSeconds = self.dataIn.ippSeconds
58 58 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
59 59 self.dataOut.timeInterval = self.dataIn.timeInterval
60 60 self.dataOut.heightList = self.dataIn.getHeiRange()
61 61 self.dataOut.frequency = self.dataIn.frequency
62 62
63 63 def run(self, nSeconds = None, nProfiles = None):
64 64
65 65
66 66
67 67 if self.firstdatatime == None:
68 68 self.firstdatatime = self.dataIn.utctime
69 69
70 70 #---------------------- Voltage Data ---------------------------
71 71
72 72 if self.dataIn.type == "Voltage":
73 73 self.dataOut.flagNoData = True
74 74 if nSeconds != None:
75 75 self.nSeconds = nSeconds
76 76 self.nProfiles= int(numpy.floor(nSeconds/(self.dataIn.ippSeconds*self.dataIn.nCohInt)))
77 77
78 if self.buffer == None:
78 if self.buffer is None:
79 79 self.buffer = numpy.zeros((self.dataIn.nChannels,
80 80 self.nProfiles,
81 81 self.dataIn.nHeights),
82 82 dtype='complex')
83 83
84 84 self.buffer[:,self.profIndex,:] = self.dataIn.data.copy()
85 85 self.profIndex += 1
86 86
87 87 if self.profIndex == self.nProfiles:
88 88
89 89 self.__updateObjFromInput()
90 90 self.dataOut.data_pre = self.buffer.copy()
91 91 self.dataOut.paramInterval = nSeconds
92 92 self.dataOut.flagNoData = False
93 93
94 94 self.buffer = None
95 95 self.firstdatatime = None
96 96 self.profIndex = 0
97 97 return
98 98
99 99 #---------------------- Spectra Data ---------------------------
100 100
101 101 if self.dataIn.type == "Spectra":
102 102 self.dataOut.data_pre = self.dataIn.data_spc.copy()
103 103 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
104 104 self.dataOut.noise = self.dataIn.getNoise()
105 105 self.dataOut.normFactor = self.dataIn.normFactor
106 106 self.dataOut.groupList = self.dataIn.pairsList
107 107 self.dataOut.flagNoData = False
108 108
109 109 #---------------------- Correlation Data ---------------------------
110 110
111 111 if self.dataIn.type == "Correlation":
112 112 lagRRange = self.dataIn.lagR
113 113 indR = numpy.where(lagRRange == 0)[0][0]
114 114
115 115 self.dataOut.data_pre = self.dataIn.data_corr.copy()[:,:,indR,:]
116 116 self.dataOut.abscissaList = self.dataIn.getLagTRange(1)
117 117 self.dataOut.noise = self.dataIn.noise
118 118 self.dataOut.normFactor = self.dataIn.normFactor
119 119 self.dataOut.data_SNR = self.dataIn.SNR
120 120 self.dataOut.groupList = self.dataIn.pairsList
121 121 self.dataOut.flagNoData = False
122 122
123 123 #---------------------- Correlation Data ---------------------------
124 124
125 125 if self.dataIn.type == "Parameters":
126 126 self.dataOut.copy(self.dataIn)
127 127 self.dataOut.flagNoData = False
128 128
129 129 return True
130 130
131 131 self.__updateObjFromInput()
132 132 self.firstdatatime = None
133 133 self.dataOut.utctimeInit = self.dataIn.utctime
134 134 self.dataOut.outputInterval = self.dataIn.timeInterval
135 135
136 136 #------------------- Get Moments ----------------------------------
137 137 def GetMoments(self, channelList = None):
138 138 '''
139 139 Function GetMoments()
140 140
141 141 Input:
142 142 channelList : simple channel list to select e.g. [2,3,7]
143 143 self.dataOut.data_pre
144 144 self.dataOut.abscissaList
145 145 self.dataOut.noise
146 146
147 147 Affected:
148 148 self.dataOut.data_param
149 149 self.dataOut.data_SNR
150 150
151 151 '''
152 152 data = self.dataOut.data_pre
153 153 absc = self.dataOut.abscissaList[:-1]
154 154 noise = self.dataOut.noise
155 155
156 156 data_param = numpy.zeros((data.shape[0], 4, data.shape[2]))
157 157
158 158 if channelList== None:
159 159 channelList = self.dataIn.channelList
160 160 self.dataOut.channelList = channelList
161 161
162 162 for ind in channelList:
163 163 data_param[ind,:,:] = self.__calculateMoments(data[ind,:,:], absc, noise[ind])
164 164
165 165 self.dataOut.data_param = data_param[:,1:,:]
166 166 self.dataOut.data_SNR = data_param[:,0]
167 167 return
168 168
169 169 def __calculateMoments(self, oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
170 170
171 171 if (nicoh == None): nicoh = 1
172 172 if (graph == None): graph = 0
173 173 if (smooth == None): smooth = 0
174 174         elif (smooth < 3): smooth = 0
175 175
176 176 if (type1 == None): type1 = 0
177 if (fwindow == None): fwindow = numpy.zeros(oldfreq.size) + 1
177 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
178 178 if (snrth == None): snrth = -3
179 179 if (dc == None): dc = 0
180 180 if (aliasing == None): aliasing = 0
181 181 if (oldfd == None): oldfd = 0
182 182 if (wwauto == None): wwauto = 0
183 183
184 184 if (n0 < 1.e-20): n0 = 1.e-20
185 185
186 186 freq = oldfreq
187 187 vec_power = numpy.zeros(oldspec.shape[1])
188 188 vec_fd = numpy.zeros(oldspec.shape[1])
189 189 vec_w = numpy.zeros(oldspec.shape[1])
190 190 vec_snr = numpy.zeros(oldspec.shape[1])
191 191
192 192 for ind in range(oldspec.shape[1]):
193 193
194 194 spec = oldspec[:,ind]
195 195 aux = spec*fwindow
196 196 max_spec = aux.max()
197 197 m = list(aux).index(max_spec)
198 198
199 199 #Smooth
200 200 if (smooth == 0): spec2 = spec
201 201 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
202 202
203 203             # Moment calculation
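            # The estimators below over the valid window (a descriptive sketch, with n0 the
            # noise level, S_k the smoothed spectrum and w_k = fwindow[k]):
            #   power = sum_k (S_k - n0) * w_k                                  (zeroth moment)
            #   fd    = sum_k (S_k - n0) * f_k * w_k / power                    (Doppler shift)
            #   w     = sqrt( sum_k (S_k - n0) * w_k * (f_k - fd)**2 / power )  (spectral width)
            #   snr   = (mean(S) - n0) / n0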
204 204 bb = spec2[range(m,spec2.size)]
205 205 bb = (bb<n0).nonzero()
206 206 bb = bb[0]
207 207
208 208 ss = spec2[range(0,m + 1)]
209 209 ss = (ss<n0).nonzero()
210 210 ss = ss[0]
211 211
212 212 if (bb.size == 0):
213 213 bb0 = spec.size - 1 - m
214 214 else:
215 215 bb0 = bb[0] - 1
216 216 if (bb0 < 0):
217 217 bb0 = 0
218 218
219 219 if (ss.size == 0): ss1 = 1
220 220 else: ss1 = max(ss) + 1
221 221
222 222 if (ss1 > m): ss1 = m
223 223
224 224 valid = numpy.asarray(range(int(m + bb0 - ss1 + 1))) + ss1
225 225 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
226 226 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
227 227 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
228 228 snr = (spec2.mean()-n0)/n0
229 229
230 230 if (snr < 1.e-20) :
231 231 snr = 1.e-20
232 232
233 233 vec_power[ind] = power
234 234 vec_fd[ind] = fd
235 235 vec_w[ind] = w
236 236 vec_snr[ind] = snr
237 237
238 238 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
239 239 return moments
240 240
241 241 #------------------ Get SA Parameters --------------------------
242 242
243 243 def GetSAParameters(self):
244 244 pairslist = self.dataOut.groupList
245 245 num_pairs = len(pairslist)
246 246
247 247 vel = self.dataOut.abscissaList
248 248 spectra = self.dataOut.data_pre
249 249 cspectra = self.dataIn.data_cspc
250 250 delta_v = vel[1] - vel[0]
251 251
252 252 #Calculating the power spectrum
253 253 spc_pow = numpy.sum(spectra, 3)*delta_v
254 254 #Normalizing Spectra
255 255 norm_spectra = spectra/spc_pow
256 256 #Calculating the norm_spectra at peak
257 257 max_spectra = numpy.max(norm_spectra, 3)
258 258
259 259 #Normalizing Cross Spectra
260 260 norm_cspectra = numpy.zeros(cspectra.shape)
261 261
262 262 for i in range(num_chan):
263 263 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
264 264
265 265 max_cspectra = numpy.max(norm_cspectra,2)
266 266 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
267 267
268 268 for i in range(num_pairs):
269 269 cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
270 270 #------------------- Get Lags ----------------------------------
271 271
272 272 def GetLags(self):
273 273 '''
274 274         Function GetLags()
275 275
276 276 Input:
277 277 self.dataOut.data_pre
278 278 self.dataOut.abscissaList
279 279 self.dataOut.noise
280 280 self.dataOut.normFactor
281 281 self.dataOut.data_SNR
282 282 self.dataOut.groupList
283 283 self.dataOut.nChannels
284 284
285 285 Affected:
286 286 self.dataOut.data_param
287 287
288 288 '''
289 289
290 290 data = self.dataOut.data_pre
291 291 normFactor = self.dataOut.normFactor
292 292 nHeights = self.dataOut.nHeights
293 293 absc = self.dataOut.abscissaList[:-1]
294 294 noise = self.dataOut.noise
295 295 SNR = self.dataOut.data_SNR
296 296 pairsList = self.dataOut.groupList
297 297 nChannels = self.dataOut.nChannels
298 298 pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
299 299 self.dataOut.data_param = numpy.zeros((len(pairsCrossCorr)*2 + 1, nHeights))
300 300
301 301 dataNorm = numpy.abs(data)
302 302 for l in range(len(pairsList)):
303 303 dataNorm[l,:,:] = dataNorm[l,:,:]/normFactor[l,:]
304 304
305 305 self.dataOut.data_param[:-1,:] = self.__calculateTaus(dataNorm, pairsCrossCorr, pairsAutoCorr, absc)
306 306 self.dataOut.data_param[-1,:] = self.__calculateLag1Phase(data, pairsAutoCorr, absc)
307 307 return
308 308
309 309 def __getPairsAutoCorr(self, pairsList, nChannels):
310 310
311 311 pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
312 312
313 313 for l in range(len(pairsList)):
314 314 firstChannel = pairsList[l][0]
315 315 secondChannel = pairsList[l][1]
316 316
317 317 #Getting the autocorrelation pairs
318 318 if firstChannel == secondChannel:
319 319 pairsAutoCorr[firstChannel] = int(l)
320 320
321 321 pairsAutoCorr = pairsAutoCorr.astype(int)
322 322
323 323 pairsCrossCorr = range(len(pairsList))
324 324 pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
325 325
326 326 return pairsAutoCorr, pairsCrossCorr
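# Illustrative example (hypothetical input): for pairsList = [(0,0), (1,1), (0,1)]
# and nChannels = 2 the method returns
#   pairsAutoCorr  = array([0, 1])   # indices of the self-pairs
#   pairsCrossCorr = array([2])      # indices of the remaining cross pairs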
327 327
328 328 def __calculateTaus(self, data, pairsCrossCorr, pairsAutoCorr, lagTRange):
329 329
330 330 Pt0 = data.shape[1]/2
331 331 #Autocorrelation function
332 332 dataAutoCorr = stats.nanmean(data[pairsAutoCorr,:,:], axis = 0)
333 333
334 334 #Getting the TauCross index
335 335 indCross = data[pairsCrossCorr,:,:].argmax(axis = 1)
336 336 #Getting the TauAuto index
337 337 indAuto = numpy.zeros(indCross.shape,dtype = 'int')
338 338 CCValue = data[pairsCrossCorr,Pt0,:]
339 339 for i in range(pairsCrossCorr.size):
340 340 indAuto[i,:] = numpy.abs(dataAutoCorr - CCValue[i,:]).argmin(axis = 0)
341 341
342 342 #Getting TauCross and TauAuto
343 343 tauCross = lagTRange[indCross]
344 344 tauAuto = lagTRange[indAuto]
345 345
346 346 Nan1, Nan2 = numpy.where(tauCross == lagTRange[0])
347 347
348 348 tauCross[Nan1,Nan2] = numpy.nan
349 349 tauAuto[Nan1,Nan2] = numpy.nan
350 350 tau = numpy.vstack((tauCross,tauAuto))
351 351
352 352 return tau
353 353
354 354 def __calculateLag1Phase(self, data, pairs, lagTRange):
355 355 data1 = stats.nanmean(data[pairs,:,:], axis = 0)
356 356 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
357 357
358 358 phase = numpy.angle(data1[lag1,:])
359 359
360 360 return phase
361 361 #------------------- Detect Meteors ------------------------------
362 362
363 363 def MeteorDetection(self, hei_ref = None, tauindex = 0,
364 364 predefinedPhaseShifts = None, beaconPhaseShifts = False, centerReceiverIndex = 2, saveAll = False, #beaconPhaseShifts: assumed boolean flag, used below to enable the beacon-based phase estimation
365 365 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
366 366 noise_timeStep = 4, noise_multiple = 4,
367 367 multDet_timeLimit = 1, multDet_rangeLimit = 3,
368 368 phaseThresh = 20, SNRThresh = 8,
369 369 hmin = 70, hmax=110, azimuth = 0) :
370 370
371 371 '''
372 372 Function MeteorDetection()
373 373 Technique developed following the paper:
374 374 HOLDSWORTH ET AL. 2004
375 375
376 376 Input:
377 377 self.dataOut.data_pre
378 378
379 379 centerReceiverIndex: From the channels, which is the center receiver
380 380
381 381 hei_ref: Height reference for the Beacon signal extraction
382 382 tauindex:
383 383 predefinedPhaseShifts: Predefined phase offsets for the voltage signals
384 384
385 385 cohDetection: Whether or not to use coherent detection
386 386 cohDet_timeStep: Coherent Detection calculation time step
387 387 cohDet_thresh: Coherent Detection phase threshold to correct phases
388 388
389 389 noise_timeStep: Noise calculation time step
390 390 noise_multiple: Noise multiple to define signal threshold
391 391
392 392 multDet_timeLimit: Multiple Detection Removal time limit in seconds
393 393 multDet_rangeLimit: Multiple Detection Removal range limit in km
394 394
395 395 phaseThresh: Maximum phase difference between receivers for an echo to be considered a meteor
396 396 SNRThresh: Minimum SNR of the meteor signal for it to be considered a meteor
397 397
398 398 hmin: Minimum Height of the meteor to use it in the further wind estimations
399 399 hmax: Maximum Height of the meteor to use it in the further wind estimations
400 400 azimuth: Azimuth angle correction
401 401
402 402 Affected:
403 403 self.dataOut.data_param
404 404
405 405 Rejection Criteria (Errors):
406 406 0: No error; analysis OK
407 407 1: SNR < SNR threshold
408 408 2: angle of arrival (AOA) ambiguously determined
409 409 3: AOA estimate not feasible
410 410 4: Large difference in AOAs obtained from different antenna baselines
411 411 5: echo at start or end of time series
412 412 6: echo less than 5 samples long; too short for analysis
413 413 7: echo rise exceeds 0.3s
414 414 8: echo decay time less than twice rise time
415 415 9: large power level before echo
416 416 10: large power level after echo
417 417 11: poor fit to amplitude for estimation of decay time
418 418 12: poor fit to CCF phase variation for estimation of radial drift velocity
419 419 13: height unresolvable echo: no valid height within 70 to 110 km
420 420 14: height ambiguous echo: more than one possible height within 70 to 110 km
421 421 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
422 422 16: oscillatory echo, indicating event most likely not an underdense echo
423 423
424 424 17: phase difference in meteor Reestimation
425 425
426 426 Data Storage:
427 427 Meteors for Wind Estimation (8):
428 428 Day Hour | Range Height
429 429 Azimuth Zenith errorCosDir
430 430 VelRad errorVelRad
431 431 TypeError
432 432
433 433 '''
434 434 #Get Beacon signal
435 435 newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
436 436
437 437 if hei_ref is not None:
438 438 newheis = numpy.where(self.dataOut.heightList>hei_ref)
439 439
440 440 heiRang = self.dataOut.getHeiRange()
441 441 #Pairs List
442 442 pairslist = []
443 443 nChannel = self.dataOut.nChannels
444 444 for i in range(nChannel):
445 445 if i != centerReceiverIndex:
446 446 pairslist.append((centerReceiverIndex,i))
447 447
448 448 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
449 449 # see if the user put in pre defined phase shifts
450 450 voltsPShift = self.dataOut.data_pre.copy()
451 451
452 452 if predefinedPhaseShifts is not None:
453 453 hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
454 454
455 455 elif beaconPhaseShifts:
456 456 #get hardware phase shifts using beacon signal
457 457 hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
458 458 hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
459 459
460 460 else:
461 461 hardwarePhaseShifts = numpy.zeros(5)
462 462
463 463
464 464 voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
465 465 for i in range(self.dataOut.data_pre.shape[0]):
466 466 voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
467 467
468 468
469 469 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
470 470
471 471 #Remove DC
472 472 voltsDC = numpy.mean(voltsPShift,1)
473 473 voltsDC = numpy.mean(voltsDC,1)
474 474 for i in range(voltsDC.shape[0]):
475 475 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
476 476
477 477 #Don't consider the last heights; they're used to calculate the hardware phase shift
478 478 voltsPShift = voltsPShift[:,:,:newheis[0][0]]
479 479
480 480 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
481 481 #Coherent Detection
482 482 if cohDetection:
483 483 #use coherent detection to get the net power
484 484 cohDet_thresh = cohDet_thresh*numpy.pi/180
485 485 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, self.dataOut.timeInterval, pairslist, cohDet_thresh)
486 486
487 487 #Non-coherent detection!
488 488 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
489 489 #********** END OF COH/NON-COH POWER CALCULATION**********************
490 490
491 491 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
492 492 #Get noise
493 493 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, self.dataOut.timeInterval)
494 494 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
495 495 #Get signal threshold
496 496 signalThresh = noise_multiple*noise
497 497 #Meteor echoes detection
498 498 listMeteors = self.__findMeteors(powerNet, signalThresh)
499 499 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
500 500
501 501 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
502 502 #Parameters
503 503 heiRange = self.dataOut.getHeiRange()
504 504 rangeInterval = heiRange[1] - heiRange[0]
505 505 rangeLimit = multDet_rangeLimit/rangeInterval
506 506 timeLimit = multDet_timeLimit/self.dataOut.timeInterval
507 507 #Multiple detection removals
508 508 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
509 509 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
510 510
511 511 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
512 512 #Parameters
513 513 phaseThresh = phaseThresh*numpy.pi/180
514 514 thresh = [phaseThresh, noise_multiple, SNRThresh]
515 515 #Meteor reestimation (Errors N 1, 6, 12, 17)
516 516 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist, thresh, noise, self.dataOut.timeInterval, self.dataOut.frequency)
517 517 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
518 518 #Estimation of decay times (Errors N 7, 8, 11)
519 519 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, self.dataOut.timeInterval, self.dataOut.frequency)
520 520 #******************* END OF METEOR REESTIMATION *******************
521 521
522 522 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
523 523 #Calculating Radial Velocity (Error N 15)
524 524 radialStdThresh = 10
525 525 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist, self.dataOut.timeInterval)
526 526
527 527 if len(listMeteors4) > 0:
528 528
529 529 pairsList = []
530 530 pairx = (0,3)
531 531 pairy = (1,2)
532 532 pairsList.append(pairx)
533 533 pairsList.append(pairy)
534 534
535 535 #Setting New Array
536 536 date = repr(self.dataOut.datatime)
537 537 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
538 538
539 539 meteorOps = MeteorOperations()
540 540 jph = numpy.array([0,0,0,0])
541 541 h = (hmin,hmax)
542 542 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, jph)
543 543
544 544 # #Calculate AOA (Error N 3, 4)
545 545 # #JONES ET AL. 1998
546 546 # error = arrayParameters[:,-1]
547 547 # AOAthresh = numpy.pi/8
548 548 # phases = -arrayParameters[:,9:13]
549 549 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
550 550 #
551 551 # #Calculate Heights (Error N 13 and 14)
552 552 # error = arrayParameters[:,-1]
553 553 # Ranges = arrayParameters[:,2]
554 554 # zenith = arrayParameters[:,5]
555 555 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
556 556 # error = arrayParameters[:,-1]
557 557 #********************* END OF PARAMETERS CALCULATION **************************
558 558
559 559 #***************************+ PASS DATA TO NEXT STEP **********************
560 560 arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
561 561 self.dataOut.data_param = arrayFinal
562 562
563 563 return
564 564
565 565 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
566 566
567 567 minIndex = min(newheis[0])
568 568 maxIndex = max(newheis[0])
569 569
570 570 voltage = voltage0[:,:,minIndex:maxIndex+1]
571 571 nLength = voltage.shape[1]/n
572 572 nMin = 0
573 573 nMax = 0
574 574 phaseOffset = numpy.zeros((len(pairslist),n))
575 575
576 576 for i in range(n):
577 577 nMax += nLength
578 578 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
579 579 phaseCCF = numpy.mean(phaseCCF, axis = 2)
580 580 phaseOffset[:,i] = phaseCCF.transpose()
581 581 nMin = nMax
582 582 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
583 583
584 584 #Remove Outliers
585 585 factor = 2
586 586 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
587 587 dw = numpy.std(wt,axis = 1)
588 588 dw = dw.reshape((dw.size,1))
589 589 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
590 590 phaseOffset[ind] = numpy.nan
591 591 phaseOffset = stats.nanmean(phaseOffset, axis=1)
592 592
593 593 return phaseOffset
594 594
595 595 def __shiftPhase(self, data, phaseShift):
596 596 #this will shift the phase of a complex number
597 597 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
598 598 return dataShifted
599 599
600 600 def __estimatePhaseDifference(self, array, pairslist):
601 601 nChannel = array.shape[0]
602 602 nHeights = array.shape[2]
603 603 numPairs = len(pairslist)
604 604 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
605 605 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
606 606
607 607 #Correct phases
608 608 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
609 609 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
610 610
611 611 if indDer[0].shape[0] > 0:
612 612 for i in range(indDer[0].shape[0]):
613 613 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
614 614 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
615 615
616 616 # for j in range(numSides):
617 617 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
618 618 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
619 619 #
620 620 #Linear
621 621 phaseInt = numpy.zeros((numPairs,1))
622 622 angAllCCF = phaseCCF[:,[0,1,3,4],0]
623 623 for j in range(numPairs):
624 624 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
625 625 phaseInt[j] = fit[1]
626 626 #Phase Differences
627 627 phaseDiff = phaseInt - phaseCCF[:,2,:]
628 628 phaseArrival = phaseInt.reshape(phaseInt.size)
629 629
630 630 #Dealias
631 631 indAlias = numpy.where(phaseArrival > numpy.pi)
632 632 phaseArrival[indAlias] -= 2*numpy.pi
633 633 indAlias = numpy.where(phaseArrival < -numpy.pi)
634 634 phaseArrival[indAlias] += 2*numpy.pi
635 635
636 636 return phaseDiff, phaseArrival
637 637
638 638 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
639 639 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
640 640 #find the phase shifts of each channel over 1 second intervals
641 641 #only look at ranges below the beacon signal
642 642 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
643 643 numBlocks = int(volts.shape[1]/numProfPerBlock)
644 644 numHeights = volts.shape[2]
645 645 nChannel = volts.shape[0]
646 646 voltsCohDet = volts.copy()
647 647
648 648 pairsarray = numpy.array(pairslist)
649 649 indSides = pairsarray[:,1]
650 650 # indSides = numpy.array(range(nChannel))
651 651 # indSides = numpy.delete(indSides, indCenter)
652 652 #
653 653 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
654 654 listBlocks = numpy.array_split(volts, numBlocks, 1)
655 655
656 656 startInd = 0
657 657 endInd = 0
658 658
659 659 for i in range(numBlocks):
660 660 startInd = endInd
661 661 endInd = endInd + listBlocks[i].shape[1]
662 662
663 663 arrayBlock = listBlocks[i]
664 664 # arrayBlockCenter = listCenter[i]
665 665
666 666 #Estimate the Phase Difference
667 667 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
668 668 #Phase Difference RMS
669 669 arrayPhaseRMS = numpy.abs(phaseDiff)
670 670 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
671 671 indPhase = numpy.where(phaseRMSaux==4)
672 672 #Shifting
673 673 if indPhase[0].shape[0] > 0:
674 674 for j in range(indSides.size):
675 675 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
676 676 voltsCohDet[:,startInd:endInd,:] = arrayBlock
677 677
678 678 return voltsCohDet
679 679
680 680 def __calculateCCF(self, volts, pairslist ,laglist):
681 681
682 682 nHeights = volts.shape[2]
683 683 nPoints = volts.shape[1]
684 684 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
685 685
686 686 for i in range(len(pairslist)):
687 687 volts1 = volts[pairslist[i][0]]
688 688 volts2 = volts[pairslist[i][1]]
689 689
690 690 for t in range(len(laglist)):
691 691 idxT = laglist[t]
692 692 if idxT >= 0:
693 693 vStacked = numpy.vstack((volts2[idxT:,:],
694 694 numpy.zeros((idxT, nHeights),dtype='complex')))
695 695 else:
696 696 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
697 697 volts2[:(nPoints + idxT),:]))
698 698 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
699 699
700 700 vStacked = None
701 701 return voltsCCF
702 702
703 703 def __getNoise(self, power, timeSegment, timeInterval):
704 704 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
705 705 numBlocks = int(power.shape[0]/numProfPerBlock)
706 706 numHeights = power.shape[1]
707 707
708 708 listPower = numpy.array_split(power, numBlocks, 0)
709 709 noise = numpy.zeros((power.shape[0], power.shape[1]))
710 710 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
711 711
712 712 startInd = 0
713 713 endInd = 0
714 714
715 715 for i in range(numBlocks): #split per channel
716 716 startInd = endInd
717 717 endInd = endInd + listPower[i].shape[0]
718 718
719 719 arrayBlock = listPower[i]
720 720 noiseAux = numpy.mean(arrayBlock, 0)
721 721 # noiseAux = numpy.median(noiseAux)
722 722 # noiseAux = numpy.mean(arrayBlock)
723 723 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
724 724
725 725 noiseAux1 = numpy.mean(arrayBlock)
726 726 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
727 727
728 728 return noise, noise1
729 729
730 730 def __findMeteors(self, power, thresh):
731 731 nProf = power.shape[0]
732 732 nHeights = power.shape[1]
733 733 listMeteors = []
734 734
735 735 for i in range(nHeights):
736 736 powerAux = power[:,i]
737 737 threshAux = thresh[:,i]
738 738
739 739 indUPthresh = numpy.where(powerAux > threshAux)[0]
740 740 indDNthresh = numpy.where(powerAux <= threshAux)[0]
741 741
742 742 j = 0
743 743
744 744 while (j < indUPthresh.size - 2):
745 745 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
746 746 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
747 747 indDNthresh = indDNthresh[indDNAux]
748 748
749 749 if (indDNthresh.size > 0):
750 750 indEnd = indDNthresh[0] - 1
751 751 indInit = indUPthresh[j]
752 752
753 753 meteor = powerAux[indInit:indEnd + 1]
754 754 indPeak = meteor.argmax() + indInit
755 755 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
756 756
757 757 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
758 758 j = numpy.where(indUPthresh == indEnd)[0] + 1
759 759 else: j+=1
760 760 else: j+=1
761 761
762 762 return listMeteors
763 763
764 764 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
765 765
766 766 arrayMeteors = numpy.asarray(listMeteors)
767 767 listMeteors1 = []
768 768
769 769 while arrayMeteors.shape[0] > 0:
770 770 FLAs = arrayMeteors[:,4]
771 771 maxFLA = FLAs.argmax()
772 772 listMeteors1.append(arrayMeteors[maxFLA,:])
773 773
774 774 MeteorInitTime = arrayMeteors[maxFLA,1]
775 775 MeteorEndTime = arrayMeteors[maxFLA,3]
776 776 MeteorHeight = arrayMeteors[maxFLA,0]
777 777
778 778 #Check neighborhood
779 779 maxHeightIndex = MeteorHeight + rangeLimit
780 780 minHeightIndex = MeteorHeight - rangeLimit
781 781 minTimeIndex = MeteorInitTime - timeLimit
782 782 maxTimeIndex = MeteorEndTime + timeLimit
783 783
784 784 #Check Heights
785 785 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
786 786 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
787 787 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
788 788
789 789 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
790 790
791 791 return listMeteors1
792 792
793 793 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
794 794 numHeights = volts.shape[2]
795 795 nChannel = volts.shape[0]
796 796
797 797 thresholdPhase = thresh[0]
798 798 thresholdNoise = thresh[1]
799 799 thresholdDB = float(thresh[2])
800 800
801 801 thresholdDB1 = 10**(thresholdDB/10)
802 802 pairsarray = numpy.array(pairslist)
803 803 indSides = pairsarray[:,1]
804 804
805 805 pairslist1 = list(pairslist)
806 806 pairslist1.append((0,1))
807 807 pairslist1.append((3,4))
808 808
809 809 listMeteors1 = []
810 810 listPowerSeries = []
811 811 listVoltageSeries = []
812 812 #volts holds the raw voltage data
813 813
814 814 if frequency == 30e6:
815 815 timeLag = 45*10**-3
816 816 else:
817 817 timeLag = 15*10**-3
818 818 lag = numpy.ceil(timeLag/timeInterval)
819 819
820 820 for i in range(len(listMeteors)):
821 821
822 822 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
823 823 meteorAux = numpy.zeros(16)
824 824
825 825 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
826 826 mHeight = listMeteors[i][0]
827 827 mStart = listMeteors[i][1]
828 828 mPeak = listMeteors[i][2]
829 829 mEnd = listMeteors[i][3]
830 830
831 831 #get the volt data between the start and end times of the meteor
832 832 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
833 833 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
834 834
835 835 #3.6. Phase Difference estimation
836 836 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
837 837
838 838 #3.7. Phase difference removal & meteor start, peak and end times reestimated
839 839 #meteorVolts0.- all Channels, all Profiles
840 840 meteorVolts0 = volts[:,:,mHeight]
841 841 meteorThresh = noise[:,mHeight]*thresholdNoise
842 842 meteorNoise = noise[:,mHeight]
843 843 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
844 844 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
845 845
846 846 #Times reestimation
847 847 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
848 848 if mStart1.size > 0:
849 849 mStart1 = mStart1[-1] + 1
850 850
851 851 else:
852 852 mStart1 = mPeak
853 853
854 854 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
855 855 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
856 856 if mEndDecayTime1.size == 0:
857 857 mEndDecayTime1 = powerNet0.size
858 858 else:
859 859 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
860 860 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
861 861
862 862 #meteorVolts1.- all Channels, from start to end
863 863 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
864 864 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
865 865 if meteorVolts2.shape[1] == 0:
866 866 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
867 867 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
868 868 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
869 869 ##################### END PARAMETERS REESTIMATION #########################
870 870
871 871 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
872 872 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
873 873 if meteorVolts2.shape[1] > 0:
874 874 #Phase Difference re-estimation
875 875 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
876 876 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
877 877 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
878 878 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
879 879 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
880 880
881 881 #Phase Difference RMS
882 882 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
883 883 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
884 884 #Data from Meteor
885 885 mPeak1 = powerNet1.argmax() + mStart1
886 886 mPeakPower1 = powerNet1.max()
887 887 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
888 888 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
889 889 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
890 890 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
891 891 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
892 892 #Vectorize
893 893 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
894 894 meteorAux[7:11] = phaseDiffint[0:4]
895 895
896 896 #Rejection Criterions
897 897 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
898 898 meteorAux[-1] = 17
899 899 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
900 900 meteorAux[-1] = 1
901 901
902 902
903 903 else:
904 904 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
905 905 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
906 906 PowerSeries = 0
907 907
908 908 listMeteors1.append(meteorAux)
909 909 listPowerSeries.append(PowerSeries)
910 910 listVoltageSeries.append(meteorVolts1)
911 911
912 912 return listMeteors1, listPowerSeries, listVoltageSeries
913 913
914 914 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
915 915
916 916 threshError = 10
917 917 #Depending if it is 30 or 50 MHz
918 918 if frequency == 30e6:
919 919 timeLag = 45*10**-3
920 920 else:
921 921 timeLag = 15*10**-3
922 922 lag = numpy.ceil(timeLag/timeInterval)
923 923
924 924 listMeteors1 = []
925 925
926 926 for i in range(len(listMeteors)):
927 927 meteorPower = listPower[i]
928 928 meteorAux = listMeteors[i]
929 929
930 930 if meteorAux[-1] == 0:
931 931
932 932 try:
933 933 indmax = meteorPower.argmax()
934 934 indlag = indmax + lag
935 935
936 936 y = meteorPower[indlag:]
937 937 x = numpy.arange(0, y.size)*timeLag
938 938
939 939 #first guess
940 940 a = y[0]
941 941 tau = timeLag
942 942 #exponential fit
943 943 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
944 944 y1 = self.__exponential_function(x, *popt)
945 945 #error estimation
946 946 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
947 947
948 948 decayTime = popt[1]
949 949 riseTime = indmax*timeInterval
950 950 meteorAux[11:13] = [decayTime, error]
951 951
952 952 #Table items 7, 8 and 11
953 953 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
954 954 meteorAux[-1] = 7
955 955 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice the rise time
956 956 meteorAux[-1] = 8
957 957 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
958 958 meteorAux[-1] = 11
959 959
960 960
961 961 except:
962 962 meteorAux[-1] = 11
963 963
964 964
965 965 listMeteors1.append(meteorAux)
966 966
967 967 return listMeteors1
968 968
969 969 #Exponential Function
970 970
971 971 def __exponential_function(self, x, a, tau):
972 972 y = a*numpy.exp(-x/tau)
973 973 return y
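# Minimal sketch of how this model recovers a decay time with
# scipy.optimize.curve_fit (illustrative only; synthetic values, not data
# from this module):
#
#   import numpy
#   from scipy import optimize
#   t = numpy.arange(0, 0.5, 0.01)                        # seconds
#   y = 5.0*numpy.exp(-t/0.08)                            # a = 5, tau = 80 ms
#   popt, pcov = optimize.curve_fit(lambda x, a, tau: a*numpy.exp(-x/tau),
#                                   t, y, p0=[y[0], 0.05])
#   decayTime = popt[1]                                   # ~0.08 for this input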
974 974
975 975 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
976 976
977 977 pairslist1 = list(pairslist)
978 978 pairslist1.append((0,1))
979 979 pairslist1.append((3,4))
980 980 numPairs = len(pairslist1)
981 981 #Time Lag
982 982 timeLag = 45*10**-3
983 983 c = 3e8
984 984 lag = numpy.ceil(timeLag/timeInterval)
985 985 freq = 30e6
986 986
987 987 listMeteors1 = []
988 988
989 989 for i in range(len(listMeteors)):
990 990 meteorAux = listMeteors[i]
991 991 if meteorAux[-1] == 0:
992 992 mStart = listMeteors[i][1]
993 993 mPeak = listMeteors[i][2]
994 994 mLag = mPeak - mStart + lag
995 995
996 996 #get the volt data between the start and end times of the meteor
997 997 meteorVolts = listVolts[i]
998 998 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
999 999
1000 1000 #Get CCF
1001 1001 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
1002 1002
1003 1003 #Method 2
1004 1004 slopes = numpy.zeros(numPairs)
1005 1005 time = numpy.array([-2,-1,1,2])*timeInterval
1006 1006 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
1007 1007
1008 1008 #Correct phases
1009 1009 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
1010 1010 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
1011 1011
1012 1012 if indDer[0].shape[0] > 0:
1013 1013 for k in range(indDer[0].shape[0]): #k avoids shadowing the meteor index i
1014 1014 signo = -numpy.sign(derPhaseCCF[indDer[0][k],indDer[1][k]])
1015 1015 angAllCCF[indDer[0][k],indDer[1][k]+1:] += signo*2*numpy.pi
1016 1016
1017 1017 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
1018 1018 for j in range(numPairs):
1019 1019 fit = stats.linregress(time, angAllCCF[j,:])
1020 1020 slopes[j] = fit[0]
1021 1021
1022 1022 #Remove Outlier
1023 1023 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
1024 1024 # slopes = numpy.delete(slopes,indOut)
1025 1025 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
1026 1026 # slopes = numpy.delete(slopes,indOut)
1027 1027
1028 1028 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
1029 1029 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
1030 1030 meteorAux[-2] = radialError
1031 1031 meteorAux[-3] = radialVelocity
1032 1032
1033 1033 #Setting Error
1034 1034 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
1035 1035 if numpy.abs(radialVelocity) > 200:
1036 1036 meteorAux[-1] = 15
1037 1037 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
1038 1038 elif radialError > radialStdThresh:
1039 1039 meteorAux[-1] = 12
1040 1040
1041 1041 listMeteors1.append(meteorAux)
1042 1042 return listMeteors1
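# Note on the estimate above (reference summary): the radial velocity follows
# from the slope of the CCF phase versus lag,
#   v_r = -(dphi/dt)*lambda/(4*pi),  lambda = c/freq,
# which is what -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq) evaluates.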
1043 1043
1044 1044 def __setNewArrays(self, listMeteors, date, heiRang):
1045 1045
1046 1046 #New arrays
1047 1047 arrayMeteors = numpy.array(listMeteors)
1048 1048 arrayParameters = numpy.zeros((len(listMeteors), 14))
1049 1049
1050 1050 #Date inclusion
1051 1051 date = re.findall(r'\((.*?)\)', date)
1052 1052 date = date[0].split(',')
1053 1053 date = map(int, date)
1054 1054 date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
1055 1055 arrayDate = numpy.tile(date, (len(listMeteors), 1))
1056 1056
1057 1057 #Meteor array
1058 1058 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
1059 1059 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
1060 1060
1061 1061 #Parameters Array
1062 1062 arrayParameters[:,:2] = arrayDate #Date
1063 1063 arrayParameters[:,2] = heiRang[arrayMeteors[:,0].astype(int)] #Range
1064 1064 arrayParameters[:,7:9] = arrayMeteors[:,-3:-1] #Radial velocity and its error
1065 1065 arrayParameters[:,9:13] = arrayMeteors[:,7:11] #Phases
1066 1066 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
1067 1067
1068 1068
1069 1069 return arrayParameters
1070 1070
1071 1071 def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
1072 1072
1073 1073 arrayAOA = numpy.zeros((phases.shape[0],3))
1074 1074 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
1075 1075
1076 1076 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
1077 1077 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
1078 1078 arrayAOA[:,2] = cosDirError
1079 1079
1080 1080 azimuthAngle = arrayAOA[:,0]
1081 1081 zenithAngle = arrayAOA[:,1]
1082 1082
1083 1083 #Setting Error
1084 1084 #Number 3: AOA not feasible
1085 1085 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
1086 1086 error[indInvalid] = 3
1087 1087 #Number 4: Large difference in AOAs obtained from different antenna baselines
1088 1088 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
1089 1089 error[indInvalid] = 4
1090 1090 return arrayAOA, error
1091 1091
1092 1092 def __getDirectionCosines(self, arrayPhase, pairsList):
1093 1093
1094 1094 #Initializing some variables
1095 1095 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
1096 1096 ang_aux = ang_aux.reshape(1,ang_aux.size)
1097 1097
1098 1098 cosdir = numpy.zeros((arrayPhase.shape[0],2))
1099 1099 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
1100 1100
1101 1101
1102 1102 for i in range(2):
1103 1103 #First Estimation
1104 1104 phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
1105 1105 #Dealias
1106 1106 indcsi = numpy.where(phi0_aux > numpy.pi)
1107 1107 phi0_aux[indcsi] -= 2*numpy.pi
1108 1108 indcsi = numpy.where(phi0_aux < -numpy.pi)
1109 1109 phi0_aux[indcsi] += 2*numpy.pi
1110 1110 #Direction Cosine 0
1111 1111 cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
1112 1112
1113 1113 #Most-Accurate Second Estimation
1114 1114 phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
1115 1115 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
1116 1116 #Direction Cosine 1
1117 1117 cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
1118 1118
1119 1119 #Searching the correct Direction Cosine
1120 1120 cosdir0_aux = cosdir0[:,i]
1121 1121 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
1122 1122 #Minimum Distance
1123 1123 cosDiff = (cosdir1 - cosdir0_aux)**2
1124 1124 indcos = cosDiff.argmin(axis = 1)
1125 1125 #Saving Value obtained
1126 1126 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
1127 1127
1128 1128 return cosdir0, cosdir
1129 1129
1130 1130 def __calculateAOA(self, cosdir, azimuth):
1131 1131 cosdirX = cosdir[:,0]
1132 1132 cosdirY = cosdir[:,1]
1133 1133
1134 1134 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
1135 1135 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
1136 1136 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
1137 1137
1138 1138 return angles
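# Worked example for the conversion above (illustrative values): with
# cosdirX = 0.5, cosdirY = 0.0 and azimuth = 0,
#   zenithAngle  = arccos(sqrt(1 - 0.25)) = 30 deg
#   azimuthAngle = arctan2(0.5, 0.0)      = 90 deg   (due east)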
1139 1139
1140 1140 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
1141 1141
1142 1142 Ramb = 375 #Ramb = c/(2*PRF)
1143 1143 Re = 6371 #Earth Radius
1144 1144 heights = numpy.zeros(Ranges.shape)
1145 1145
1146 1146 R_aux = numpy.array([0,1,2])*Ramb
1147 1147 R_aux = R_aux.reshape(1,R_aux.size)
1148 1148
1149 1149 Ranges = Ranges.reshape(Ranges.size,1)
1150 1150
1151 1151 Ri = Ranges + R_aux
1152 1152 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
1153 1153
1154 1154 #Check if there is a height between 70 and 110 km
1155 1155 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
1156 1156 ind_h = numpy.where(h_bool == 1)[0]
1157 1157
1158 1158 hCorr = hi[ind_h, :]
1159 1159 ind_hCorr = numpy.where(numpy.logical_and(hCorr > minHeight, hCorr < maxHeight)) #search only the unambiguous rows selected above
1160 1160 
1161 1161 hCorr = hCorr[ind_hCorr]
1162 1162 heights[ind_h] = hCorr
1163 1163
1164 1164 #Setting Error
1165 1165 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
1166 1166 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
1167 1167
1168 1168 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
1169 1169 error[indInvalid2] = 14
1170 1170 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
1171 1171 error[indInvalid1] = 13
1172 1172
1173 1173 return heights, error
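# Worked example of the ambiguity resolution above (illustrative numbers):
# with Ramb = 375 km, a measured range of 135 km and zenith = 50 deg, the
# candidate ranges are 135, 510 and 885 km; only R = 135 km gives
# h = sqrt(Re**2 + R**2 + 2*Re*R*cos(50 deg)) - Re ~ 88 km inside 70-110 km,
# so the height is resolved and no error flag is set.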
1174 1174
1175 1175 def SpectralFitting(self, getSNR = True, path=None, file=None, groupList=None):
1176 1176
1177 1177 '''
1178 1178 Function SpectralFitting()
1179 1179
1180 1180 Input:
1181 1181 Output:
1182 1182 Variables modified:
1183 1183 '''
1184 1184 if path is not None:
1185 1185 sys.path.append(path)
1186 1186 self.dataOut.library = importlib.import_module(file)
1187 1187
1188 1188 #To be inserted as a parameter
1189 1189 groupArray = numpy.array(groupList)
1190 1190 # groupArray = numpy.array([[0,1],[2,3]])
1191 1191 self.dataOut.groupList = groupArray
1192 1192
1193 1193 nGroups = groupArray.shape[0]
1194 1194 nChannels = self.dataIn.nChannels
1195 1195 nHeights=self.dataIn.heightList.size
1196 1196
1197 1197 #Parameters Array
1198 1198 self.dataOut.data_param = None
1199 1199
1200 1200 #Set constants
1201 1201 constants = self.dataOut.library.setConstants(self.dataIn)
1202 1202 self.dataOut.constants = constants
1203 1203 M = self.dataIn.normFactor
1204 1204 N = self.dataIn.nFFTPoints
1205 1205 ippSeconds = self.dataIn.ippSeconds
1206 1206 K = self.dataIn.nIncohInt
1207 1207 pairsArray = numpy.array(self.dataIn.pairsList)
1208 1208
1209 1209 #List of possible combinations
1210 1210 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) #materialized as a list so it can be iterated again below
1211 1211 indCross = numpy.zeros(len(listComb), dtype = 'int')
1212 1212
1213 1213 if getSNR:
1214 1214 listChannels = groupArray.reshape((groupArray.size))
1215 1215 listChannels.sort()
1216 1216 noise = self.dataIn.getNoise()
1217 1217 self.dataOut.data_SNR = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
1218 1218
1219 1219 for i in range(nGroups):
1220 1220 coord = groupArray[i,:]
1221 1221
1222 1222 #Input data array
1223 1223 data = self.dataIn.data_spc[coord,:,:]/(M*N)
1224 1224 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
1225 1225
1226 1226 #Cross Spectra data array for Covariance Matrixes
1227 1227 ind = 0
1228 1228 for pairs in listComb:
1229 1229 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
1230 1230 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
1231 1231 ind += 1
1232 1232 dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
1233 1233 dataCross = dataCross**2/K
1234 1234
1235 1235 for h in range(nHeights):
1236 1236 # print self.dataOut.heightList[h]
1237 1237
1238 1238 #Input
1239 1239 d = data[:,h]
1240 1240
1241 1241 #Covariance Matrix
1242 1242 D = numpy.diag(d**2/K)
1243 1243 ind = 0
1244 1244 for pairs in listComb:
1245 1245 #Coordinates in Covariance Matrix
1246 1246 x = pairs[0]
1247 1247 y = pairs[1]
1248 1248 #Channel Index
1249 1249 S12 = dataCross[ind,:,h]
1250 1250 D12 = numpy.diag(S12)
1251 1251 #Completing Covariance Matrix with Cross Spectras
1252 1252 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
1253 1253 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
1254 1254 ind += 1
1255 1255 Dinv=numpy.linalg.inv(D)
1256 1256 L=numpy.linalg.cholesky(Dinv)
1257 1257 LT=L.T
1258 1258
1259 1259 dp = numpy.dot(LT,d)
1260 1260
1261 1261 #Initial values
1262 1262 data_spc = self.dataIn.data_spc[coord,:,h]
1263 1263
1264 1264 if (h>0)and(error1[3]<5):
1265 1265 p0 = self.dataOut.data_param[i,:,h-1]
1266 1266 else:
1267 1267 p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
1268 1268
1269 1269 try:
1270 1270 #Least Squares
1271 1271 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
1272 1272 # minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
1273 1273 #Chi square error
1274 1274 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
1275 1275 #Error with Jacobian
1276 1276 error1 = self.dataOut.library.errorFunction(minp,constants,LT)
1277 1277 except:
1278 1278 minp = p0*numpy.nan
1279 1279 error0 = numpy.nan
1280 1280 error1 = p0*numpy.nan
1281 1281
1282 1282 #Save
1283 if self.dataOut.data_param == None:
1283 if self.dataOut.data_param is None:
1284 1284 self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
1285 1285 self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
1286 1286
1287 1287 self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
1288 1288 self.dataOut.data_param[i,:,h] = minp
1289 1289 return
1290 1290
1291 1291 def __residFunction(self, p, dp, LT, constants):
1292 1292
1293 1293 fm = self.dataOut.library.modelFunction(p, constants)
1294 1294 fmp=numpy.dot(LT,fm)
1295 1295
1296 1296 return dp-fmp
1297 1297
1298 1298 def __getSNR(self, z, noise):
1299 1299
1300 1300 avg = numpy.average(z, axis=1)
1301 1301 SNR = (avg.T-noise)/noise
1302 1302 SNR = SNR.T
1303 1303 return SNR
1304 1304
1305 1305 def __chisq(self, p, chindex, hindex): #setupLTdfm below is assumed to be provided elsewhere
1306 1306 #similar to __residFunction but calculates CHI**2
1307 1307 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
1308 1308 dp=numpy.dot(LT,d)
1309 1309 fmp=numpy.dot(LT,fm)
1310 1310 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
1311 1311 return chisq
1312 1312
1313 1313
1314 1314
1315 1315 class WindProfiler(Operation):
1316 1316
1317 1317 __isConfig = False
1318 1318
1319 1319 __initime = None
1320 1320 __lastdatatime = None
1321 1321 __integrationtime = None
1322 1322
1323 1323 __buffer = None
1324 1324
1325 1325 __dataReady = False
1326 1326
1327 1327 __firstdata = None
1328 1328
1329 1329 n = None
1330 1330
1331 1331 def __init__(self):
1332 1332 Operation.__init__(self)
1333 1333
1334 1334 def __calculateCosDir(self, elev, azim):
1335 1335 zen = (90 - elev)*numpy.pi/180
1336 1336 azim = azim*numpy.pi/180
1337 1337 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
1338 1338 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
1339 1339
1340 1340 signX = numpy.sign(numpy.cos(azim))
1341 1341 signY = numpy.sign(numpy.sin(azim))
1342 1342
1343 1343 cosDirX = numpy.copysign(cosDirX, signX)
1344 1344 cosDirY = numpy.copysign(cosDirY, signY)
1345 1345 return cosDirX, cosDirY
1346 1346
1347 1347 def __calculateAngles(self, theta_x, theta_y, azimuth):
1348 1348
1349 1349 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
1350 1350 zenith_arr = numpy.arccos(dir_cosw)
1351 1351 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
1352 1352
1353 1353 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
1354 1354 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
1355 1355
1356 1356 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
1357 1357
1358 1358 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
1359 1359
1360 1360 #
1361 1361 if horOnly:
1362 1362 A = numpy.c_[dir_cosu,dir_cosv]
1363 1363 else:
1364 1364 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
1365 1365 A = numpy.asmatrix(A)
1366 1366 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
1367 1367
1368 1368 return A1
1369 1369
1370 1370 def __correctValues(self, heiRang, phi, velRadial, SNR):
1371 1371 listPhi = phi.tolist()
1372 1372 maxid = listPhi.index(max(listPhi))
1373 1373 minid = listPhi.index(min(listPhi))
1374 1374
1375 1375 rango = range(len(phi))
1376 1376 # rango = numpy.delete(rango,maxid)
1377 1377
1378 1378 heiRang1 = heiRang*math.cos(phi[maxid])
1379 1379 heiRangAux = heiRang*math.cos(phi[minid])
1380 1380 indOut = (heiRang1 < heiRangAux[0]).nonzero()
1381 1381 heiRang1 = numpy.delete(heiRang1,indOut)
1382 1382
1383 1383 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
1384 1384 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
1385 1385
1386 1386 for i in rango:
1387 1387 x = heiRang*math.cos(phi[i])
1388 1388 y1 = velRadial[i,:]
1389 1389 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
1390 1390
1391 1391 x1 = heiRang1
1392 1392 y11 = f1(x1)
1393 1393
1394 1394 y2 = SNR[i,:]
1395 1395 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
1396 1396 y21 = f2(x1)
1397 1397
1398 1398 velRadial1[i,:] = y11
1399 1399 SNR1[i,:] = y21
1400 1400
1401 1401 return heiRang1, velRadial1, SNR1
1402 1402
1403 1403 def __calculateVelUVW(self, A, velRadial):
1404 1404
1405 1405 #Matrix operation
1406 1406 # velUVW = numpy.zeros((velRadial.shape[1],3))
1407 1407 # for ind in range(velRadial.shape[1]):
1408 1408 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
1409 1409 # velUVW = velUVW.transpose()
1410 1410 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
1411 1411 velUVW[:,:] = numpy.dot(A,velRadial)
1412 1412
1413 1413
1414 1414 return velUVW
1415 1415
1416 1416 def techniqueDBS(self, velRadial0, dirCosx, dirCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
1417 1417 """
1418 1418 Function that implements Doppler Beam Swinging (DBS) technique.
1419 1419
1420 1420 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
1421 1421 Direction correction (if necessary), Ranges and SNR
1422 1422
1423 1423 Output: Winds estimation (Zonal, Meridional and Vertical)
1424 1424
1425 1425 Parameters affected: Winds, height range, SNR
1426 1426 """
1427 1427 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(dirCosx, dirCosy, azimuth)
1428 1428 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correct*velRadial0, SNR0)
1429 1429 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
1430 1430
1431 1431 #Velocity components computed with DBS
1432 1432 winds = self.__calculateVelUVW(A,velRadial1)
1433 1433
1434 1434 return winds, heiRang1, SNR1
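# Reference summary of the inversion above: per height the DBS model is the
# overdetermined linear system
#   vRadial_i = u*cosu_i + v*cosv_i + w*cosw_i     (one equation per beam i)
# solved with the pseudo-inverse A1 = (A.T*A)^-1*A.T built in __calculateMatA;
# when horizontalOnly is True the w column is dropped from A.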
1435 1435
1436 1436 def __calculateDistance(self, posx, posy, pairsCrossCorr, pairsList, pairs, azimuth = None):
1437 1437
1438 1438 posx = numpy.asarray(posx)
1439 1439 posy = numpy.asarray(posy)
1440 1440
1441 1441 #Inverse rotation to align with the azimuth
1442 1442 if azimuth is not None:
1443 1443 azimuth = azimuth*math.pi/180
1444 1444 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
1445 1445 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
1446 1446 else:
1447 1447 posx1 = posx
1448 1448 posy1 = posy
1449 1449
1450 1450 #Distance calculation
1451 1451 distx = numpy.zeros(pairsCrossCorr.size)
1452 1452 disty = numpy.zeros(pairsCrossCorr.size)
1453 1453 dist = numpy.zeros(pairsCrossCorr.size)
1454 1454 ang = numpy.zeros(pairsCrossCorr.size)
1455 1455
1456 1456 for i in range(pairsCrossCorr.size):
1457 1457 distx[i] = posx1[pairsList[pairsCrossCorr[i]][1]] - posx1[pairsList[pairsCrossCorr[i]][0]]
1458 1458 disty[i] = posy1[pairsList[pairsCrossCorr[i]][1]] - posy1[pairsList[pairsCrossCorr[i]][0]]
1459 1459 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
1460 1460 ang[i] = numpy.arctan2(disty[i],distx[i])
1461 1461 #Matrix calculation
1462 1462 nPairs = len(pairs)
1463 1463 ang1 = numpy.zeros((nPairs, 2, 1))
1464 1464 dist1 = numpy.zeros((nPairs, 2, 1))
1465 1465
1466 1466 for j in range(nPairs):
1467 1467 dist1[j,0,0] = dist[pairs[j][0]]
1468 1468 dist1[j,1,0] = dist[pairs[j][1]]
1469 1469 ang1[j,0,0] = ang[pairs[j][0]]
1470 1470 ang1[j,1,0] = ang[pairs[j][1]]
1471 1471
1472 1472 return distx,disty, dist1,ang1
1473 1473
1474 1474 def __calculateVelVer(self, phase, lagTRange, _lambda):
1475 1475
1476 1476 Ts = lagTRange[1] - lagTRange[0]
1477 1477 velW = -_lambda*phase/(4*math.pi*Ts)
1478 1478
1479 1479 return velW
1480 1480
1481 1481 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
1482 1482 nPairs = tau1.shape[0]
1483 1483 vel = numpy.zeros((nPairs,3,tau1.shape[2]))
1484 1484
1485 1485 angCos = numpy.cos(ang)
1486 1486 angSin = numpy.sin(ang)
1487 1487
1488 1488 vel0 = dist*tau1/(2*tau2**2)
1489 1489 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
1490 1490 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
1491 1491
1492 1492 ind = numpy.where(numpy.isinf(vel))
1493 1493 vel[ind] = numpy.nan
1494 1494
1495 1495 return vel
1496 1496
1497 1497 def __getPairsAutoCorr(self, pairsList, nChannels):
1498 1498
1499 1499 pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1500 1500
1501 1501 for l in range(len(pairsList)):
1502 1502 firstChannel = pairsList[l][0]
1503 1503 secondChannel = pairsList[l][1]
1504 1504
1505 1505 #Getting the autocorrelation pairs
1506 1506 if firstChannel == secondChannel:
1507 1507 pairsAutoCorr[firstChannel] = int(l)
1508 1508
1509 1509 pairsAutoCorr = pairsAutoCorr.astype(int)
1510 1510
1511 1511 pairsCrossCorr = range(len(pairsList))
1512 1512 pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1513 1513
1514 1514 return pairsAutoCorr, pairsCrossCorr
1515 1515
1516 1516 def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
1517 1517 """
1518 1518 Function that implements Spaced Antenna (SA) technique.
1519 1519
1520 1520 Input: Selected cross-correlation pairs, full pairs list, number of channels, lag estimates (tau),
1521 1521 antenna azimuth, radar wavelength, antenna positions, lag-time range and correction factor
1522 1522
1523 1523 Output: Winds estimation (Zonal, Meridional and Vertical)
1524 1524
1525 1525 Parameters affected: Winds
1526 1526 """
1527 1527 #Cross Correlation pairs obtained
1528 1528 pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
1529 1529 pairsArray = numpy.array(pairsList)[pairsCrossCorr]
1530 1530 pairsSelArray = numpy.array(pairsSelected)
1531 1531 pairs = []
1532 1532
1533 1533 #Wind estimation pairs obtained
1534 1534 for i in range(pairsSelArray.shape[0]/2):
1535 1535 ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
1536 1536 ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
1537 1537 pairs.append((ind1,ind2))
1538 1538
1539 1539 indtau = tau.shape[0]/2
1540 1540 tau1 = tau[:indtau,:]
1541 1541 tau2 = tau[indtau:-1,:]
1542 1542 tau1 = tau1[pairs,:]
1543 1543 tau2 = tau2[pairs,:]
1544 1544 phase1 = tau[-1,:]
1545 1545
1546 1546 #---------------------------------------------------------------------
1547 1547 #Direct method
1548 1548 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairsCrossCorr, pairsList, pairs,azimuth)
1549 1549 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
1550 1550 winds = stats.nanmean(winds, axis=0)
1551 1551 #---------------------------------------------------------------------
1552 1552 #General method
1553 1553 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
1554 1554 # #Calculo Coeficientes de Funcion de Correlacion
1555 1555 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
1556 1556 # #Calculo de Velocidades
1557 1557 # winds = self.calculateVelUV(F,G,A,B,H)
1558 1558
1559 1559 #---------------------------------------------------------------------
1560 1560 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
1561 1561 winds = correctFactor*winds
1562 1562 return winds
1563 1563
1564 1564 def __checkTime(self, currentTime, paramInterval, outputInterval):
1565 1565
1566 1566 dataTime = currentTime + paramInterval
1567 1567 deltaTime = dataTime - self.__initime
1568 1568
1569 1569 if deltaTime >= outputInterval or deltaTime < 0:
1570 1570 self.__dataReady = True
1571 1571 return
1572 1572
1573 1573 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
1574 1574 '''
1575 1575 Function that implements winds estimation technique with detected meteors.
1576 1576
1577 1577 Input: Detected meteors, minimum number of meteors per height bin required for the wind estimation
1578 1578
1579 1579 Output: Winds estimation (Zonal and Meridional)
1580 1580
1581 1581 Parameters affected: Winds
1582 1582 '''
1583 1583 print arrayMeteor.shape
1584 1584 #Settings
1585 1585 nInt = (heightMax - heightMin)/2
1586 1586 winds = numpy.zeros((2,nInt))*numpy.nan
1587 1587
1588 1588 #Filter errors
1589 1589 error = numpy.where(arrayMeteor[0,:,-1] == 0)[0]
1590 1590 finalMeteor = arrayMeteor[0,error,:]
1591 1591
1592 1592 #Meteor Histogram
1593 1593 finalHeights = finalMeteor[:,3]
1594 1594 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
1595 1595 nMeteorsPerI = hist[0]
1596 1596 heightPerI = hist[1]
1597 1597
1598 1598 #Sort of meteors
1599 1599 indSort = finalHeights.argsort()
1600 1600 finalMeteor2 = finalMeteor[indSort,:]
1601 1601
1602 1602 # Calculating winds
1603 1603 ind1 = 0
1604 1604 ind2 = 0
1605 1605
1606 1606 for i in range(nInt):
1607 1607 nMet = nMeteorsPerI[i]
1608 1608 ind1 = ind2
1609 1609 ind2 = ind1 + nMet
1610 1610
1611 1611 meteorAux = finalMeteor2[ind1:ind2,:]
1612 1612
1613 1613 if meteorAux.shape[0] >= meteorThresh:
1614 1614 vel = meteorAux[:, 7]
1615 1615 zen = meteorAux[:, 5]*numpy.pi/180
1616 1616 azim = meteorAux[:, 4]*numpy.pi/180
1617 1617
1618 1618 n = numpy.cos(zen)
1619 1619 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
1620 1620 # l = m*numpy.tan(azim)
1621 1621 l = numpy.sin(zen)*numpy.sin(azim)
1622 1622 m = numpy.sin(zen)*numpy.cos(azim)
1623 1623
1624 1624 A = numpy.vstack((l, m)).transpose()
1625 1625 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
1626 1626 windsAux = numpy.dot(A1, vel)
1627 1627
1628 1628 winds[0,i] = windsAux[0]
1629 1629 winds[1,i] = windsAux[1]
1630 1630
1631 1631 return winds, heightPerI[:-1]
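# Reference summary: within each height bin the zonal/meridional winds come
# from the least-squares fit of the meteor radial velocities
#   vRad_k = u*l_k + v*m_k,  l = sin(zen)*sin(azim),  m = sin(zen)*cos(azim),
# solved with the pseudo-inverse (A.T*A)^-1*A.T, A = [l m].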
1632 1632
1633 1633 def run(self, dataOut, technique, **kwargs):
1634 1634
1635 1635 param = dataOut.data_param
1636 1636 if dataOut.abscissaList is not None:
1637 1637 absc = dataOut.abscissaList[:-1] #absc is required by the SA technique below
1638 1638 noise = dataOut.noise
1639 1639 heightList = dataOut.heightList
1640 1640 SNR = dataOut.data_SNR
1641 1641
1642 1642 if technique == 'DBS':
1643 1643
1644 1644 if kwargs.has_key('dirCosx') and kwargs.has_key('dirCosy'):
1645 1645 theta_x = numpy.array(kwargs['dirCosx'])
1646 1646 theta_y = numpy.array(kwargs['dirCosy'])
1647 1647 else:
1648 1648 elev = numpy.array(kwargs['elevation'])
1649 1649 azim = numpy.array(kwargs['azimuth'])
1650 1650 theta_x, theta_y = self.__calculateCosDir(elev, azim)
1651 1651 azimuth = kwargs['correctAzimuth']
1652 1652 if kwargs.has_key('horizontalOnly'):
1653 1653 horizontalOnly = kwargs['horizontalOnly']
1654 1654 else: horizontalOnly = False
1655 1655 if kwargs.has_key('correctFactor'):
1656 1656 correctFactor = kwargs['correctFactor']
1657 1657 else: correctFactor = 1
1658 1658 if kwargs.has_key('channelList'):
1659 1659 channelList = kwargs['channelList']
1660 1660 if len(channelList) == 2:
1661 1661 horizontalOnly = True
1662 1662 arrayChannel = numpy.array(channelList)
1663 1663 param = param[arrayChannel,:,:]
1664 1664 theta_x = theta_x[arrayChannel]
1665 1665 theta_y = theta_y[arrayChannel]
1666 1666
1667 1667 velRadial0 = param[:,1,:] #Radial velocity
1668 1668 dataOut.data_output, dataOut.heightList, dataOut.data_SNR = self.techniqueDBS(velRadial0, theta_x, theta_y, azimuth, correctFactor, horizontalOnly, heightList, SNR) #DBS Function
1669 1669 dataOut.utctimeInit = dataOut.utctime
1670 1670 dataOut.outputInterval = dataOut.timeInterval
1671 1671
1672 1672 elif technique == 'SA':
1673 1673
1674 1674 #Parameters
1675 1675 position_x = kwargs['positionX']
1676 1676 position_y = kwargs['positionY']
1677 1677 azimuth = kwargs['azimuth']
1678 1678
1679 1679 if kwargs.has_key('crosspairsList'):
1680 1680 pairs = kwargs['crosspairsList']
1681 1681 else:
1682 1682 pairs = None
1683 1683
1684 1684 if kwargs.has_key('correctFactor'):
1685 1685 correctFactor = kwargs['correctFactor']
1686 1686 else:
1687 1687 correctFactor = 1
1688 1688
1689 1689 tau = dataOut.data_param
1690 1690 _lambda = dataOut.C/dataOut.frequency
1691 1691 pairsList = dataOut.groupList
1692 1692 nChannels = dataOut.nChannels
1693 1693
1694 1694 dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
1695 1695 dataOut.utctimeInit = dataOut.utctime
1696 1696 dataOut.outputInterval = dataOut.timeInterval
1697 1697
1698 1698 elif technique == 'Meteors':
1699 1699 dataOut.flagNoData = True
1700 1700 self.__dataReady = False
1701 1701
1702 1702 if kwargs.has_key('nHours'):
1703 1703 nHours = kwargs['nHours']
1704 1704 else:
1705 1705 nHours = 1
1706 1706
1707 1707 if kwargs.has_key('meteorsPerBin'):
1708 1708 meteorThresh = kwargs['meteorsPerBin']
1709 1709 else:
1710 1710 meteorThresh = 6
1711 1711
1712 1712 if kwargs.has_key('hmin'):
1713 1713 hmin = kwargs['hmin']
1714 1714 else: hmin = 70
1715 1715 if kwargs.has_key('hmax'):
1716 1716 hmax = kwargs['hmax']
1717 1717 else: hmax = 110
1718 1718
1719 1719 dataOut.outputInterval = nHours*3600
1720 1720
1721 1721 if self.__isConfig == False:
1722 1722 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
1723 1723 #Get Initial LTC time
1724 1724 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
1725 1725 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
1726 1726
1727 1727 self.__isConfig = True
1728 1728
1729 if self.__buffer == None:
1729 if self.__buffer is None:
1730 1730 self.__buffer = dataOut.data_param
1731 1731 self.__firstdata = copy.copy(dataOut)
1732 1732
1733 1733 else:
1734 1734 self.__buffer = numpy.hstack((self.__buffer, dataOut.data_param))
1735 1735
1736 1736 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
1737 1737
1738 1738 if self.__dataReady:
1739 1739 dataOut.utctimeInit = self.__initime
1740 1740
1741 1741 self.__initime += dataOut.outputInterval #to erase time offset
1742 1742
1743 1743 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
1744 1744 dataOut.flagNoData = False
1745 1745 self.__buffer = None
1746 1746
1747 1747 return
1748 1748
1749 1749 class EWDriftsEstimation(Operation):
1750 1750
1751 1751
1752 1752 def __init__(self):
1753 1753 Operation.__init__(self)
1754 1754
1755 1755 def __correctValues(self, heiRang, phi, velRadial, SNR):
1756 1756 listPhi = phi.tolist()
1757 1757 maxid = listPhi.index(max(listPhi))
1758 1758 minid = listPhi.index(min(listPhi))
1759 1759
1760 1760 rango = range(len(phi))
1761 1761 # rango = numpy.delete(rango,maxid)
1762 1762
1763 1763 heiRang1 = heiRang*math.cos(phi[maxid])
1764 1764 heiRangAux = heiRang*math.cos(phi[minid])
1765 1765 indOut = (heiRang1 < heiRangAux[0]).nonzero()
1766 1766 heiRang1 = numpy.delete(heiRang1,indOut)
1767 1767
1768 1768 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
1769 1769 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
1770 1770
1771 1771 for i in rango:
1772 1772 x = heiRang*math.cos(phi[i])
1773 1773 y1 = velRadial[i,:]
1774 1774 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
1775 1775
1776 1776 x1 = heiRang1
1777 1777 y11 = f1(x1)
1778 1778
1779 1779 y2 = SNR[i,:]
1780 1780 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
1781 1781 y21 = f2(x1)
1782 1782
1783 1783 velRadial1[i,:] = y11
1784 1784 SNR1[i,:] = y21
1785 1785
1786 1786 return heiRang1, velRadial1, SNR1
1787 1787
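Editor's note (not part of the original file): a minimal standalone sketch of the resampling idea used in __correctValues above. Each oblique beam's slant heights are projected onto the vertical with cos(zenith) and the radial velocity is then interpolated onto the common (shortest) grid with scipy's cubic interp1d, which this module already imports; the zenith angles, height grid and velocities below are assumed values for illustration only.

    import numpy, math
    from scipy import interpolate
    phi = numpy.array([3.0, 6.0])*numpy.pi/180                 # assumed zenith angles [rad]
    heiRang = numpy.arange(90.0, 121.0, 1.5)                   # assumed slant heights [km]
    velRadial = numpy.random.randn(2, heiRang.size)            # fake radial velocities
    heiRang1 = heiRang*math.cos(phi.max())                     # common vertical grid (shortest projection)
    heiRang1 = heiRang1[heiRang1 >= heiRang[0]*math.cos(phi.min())]  # drop heights below the other beam
    f = interpolate.interp1d(heiRang*math.cos(phi[0]), velRadial[0,:], kind='cubic')
    vel_on_grid = f(heiRang1)                                  # beam 0 resampled onto the common grid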
1788 1788 def run(self, dataOut, zenith, zenithCorrection):
1789 1789 heiRang = dataOut.heightList
1790 1790 velRadial = dataOut.data_param[:,3,:]
1791 1791 SNR = dataOut.data_SNR
1792 1792
1793 1793 zenith = numpy.array(zenith)
1794 1794 zenith -= zenithCorrection
1795 1795 zenith *= numpy.pi/180
1796 1796
1797 1797 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
1798 1798
1799 1799 alp = zenith[0]
1800 1800 bet = zenith[1]
1801 1801
1802 1802 w_w = velRadial1[0,:]
1803 1803 w_e = velRadial1[1,:]
1804 1804
1805 1805 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
1806 1806 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
1807 1807
1808 1808 winds = numpy.vstack((u,w))
1809 1809
1810 1810 dataOut.heightList = heiRang1
1811 1811 dataOut.data_output = winds
1812 1812 dataOut.data_SNR = SNR1
1813 1813
1814 1814 dataOut.utctimeInit = dataOut.utctime
1815 1815 dataOut.outputInterval = dataOut.timeInterval
1816 1816 return
1817 1817
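Editor's note (not part of the original file): the u and w expressions in run() above invert the two-beam forward model v_r = w*cos(zenith) + u*sin(zenith) for the west and east beams. A quick self-contained check, with assumed signed zenith angles and winds:

    import numpy
    alp, bet = numpy.array([-2.5, 2.5])*numpy.pi/180           # assumed signed zenith angles [rad]
    u_true, w_true = 35.0, 0.4                                  # assumed zonal and vertical winds [m/s]
    w_w = w_true*numpy.cos(alp) + u_true*numpy.sin(alp)         # west-beam radial velocity
    w_e = w_true*numpy.cos(bet) + u_true*numpy.sin(bet)         # east-beam radial velocity
    w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
    u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
    assert abs(u - u_true) < 1e-9 and abs(w - w_true) < 1e-9    # the formulas recover the winds exactly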
1818 1818 class PhaseCalibration(Operation):
1819 1819
1820 1820 __buffer = None
1821 1821
1822 1822 __initime = None
1823 1823
1824 1824 __dataReady = False
1825 1825
1826 1826 __isConfig = False
1827 1827
1828 1828 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
1829 1829
1830 1830 dataTime = currentTime + paramInterval
1831 1831 deltaTime = dataTime - initTime
1832 1832
1833 1833 if deltaTime >= outputInterval or deltaTime < 0:
1834 1834 return True
1835 1835
1836 1836 return False
1837 1837
1838 1838 def __getGammas(self, pairs, k, d, phases):
1839 1839 gammas = numpy.zeros(2)
1840 1840
1841 1841 for i in range(len(pairs)):
1842 1842
1843 1843 pairi = pairs[i]
1844 1844
1845 1845 #Calculating gamma
1846 1846 jdcos = phases[:,pairi[1]]/(k*d[pairi[1]])
1847 1847 jgamma = numpy.angle(numpy.exp(1j*(k*d[pairi[0]]*jdcos - phases[:,pairi[0]])))
1848 1848
1849 1849 #Revised distribution
1850 1850 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
1851 1851
1852 1852 #Histogram
1853 1853 nBins = 64.0
1854 1854 rmin = -0.5*numpy.pi
1855 1855 rmax = 0.5*numpy.pi
1856 1856 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
1857 1857
1858 1858 meteorsY = phaseHisto[0]
1859 1859 phasesX = phaseHisto[1][:-1]
1860 1860 width = phasesX[1] - phasesX[0]
1861 1861 phasesX += width/2
1862 1862
1863 1863 #Gaussian approximation
1864 1864 bpeak = meteorsY.argmax()
1865 1865 peak = meteorsY.max()
1866 1866 jmin = bpeak - 5
1867 1867 jmax = bpeak + 5 + 1
1868 1868
1869 1869 if jmin<0:
1870 1870 jmin = 0
1871 1871 jmax = 6
1872 1872 elif jmax > meteorsY.size:
1873 1873 jmin = meteorsY.size - 6
1874 1874 jmax = meteorsY.size
1875 1875
1876 1876 x0 = numpy.array([peak,bpeak,50])
1877 1877 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
1878 1878
1879 1879 #Gammas
1880 1880 gammas[i] = coeff[0][1]
1881 1881 # gammas[i] = bpeak
1882 1882
1883 1883 return gammas
1884 1884
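Editor's note (not part of the original file): __getGammas locates the peak of the phase histogram by least-squares fitting a Gaussian around the highest bin, using scipy's optimize.leastsq as above. A minimal standalone version of that fit on synthetic phase values (all numbers assumed):

    import numpy
    from scipy import optimize
    gauss = lambda t, c: c[0]*numpy.exp(-0.5*((t - c[1])/c[2])**2)
    residual = lambda c, y, t: y - gauss(t, c)
    jgamma = numpy.random.normal(loc=0.3, scale=0.15, size=2000)       # synthetic gamma estimates
    counts, edges = numpy.histogram(jgamma, bins=64, range=(-0.5*numpy.pi, 0.5*numpy.pi))
    centers = edges[:-1] + 0.5*(edges[1] - edges[0])                   # bin centers
    x0 = numpy.array([counts.max(), centers[counts.argmax()], 0.1])    # peak, location, width guess
    coeff = optimize.leastsq(residual, x0, args=(counts, centers))
    gamma_hat = coeff[0][1]                                            # fitted peak location, ~0.3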
1885 1885 def __residualFunction(self, coeffs, y, t):
1886 1886
1887 1887 return y - self.__gauss_function(t, coeffs)
1888 1888
1889 1889 def __gauss_function(self, t, coeffs):
1890 1890
1891 1891 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
1892 1892
1893 1893 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
1894 1894 meteorOps = MeteorOperations()
1895 1895 nchan = 4
1896 1896 pairx = pairsList[0]
1897 1897 pairy = pairsList[1]
1898 1898 center_xangle = 0
1899 1899 center_yangle = 0
1900 1900 range_angle = numpy.array([8*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
1901 1901 ntimes = len(range_angle)
1902 1902
1903 1903 nstepsx = 20.0
1904 1904 nstepsy = 20.0
1905 1905
1906 1906 for iz in range(ntimes):
1907 1907 min_xangle = -range_angle[iz]/2 + center_xangle
1908 1908 max_xangle = range_angle[iz]/2 + center_xangle
1909 1909 min_yangle = -range_angle[iz]/2 + center_yangle
1910 1910 max_yangle = range_angle[iz]/2 + center_yangle
1911 1911
1912 1912 inc_x = (max_xangle-min_xangle)/nstepsx
1913 1913 inc_y = (max_yangle-min_yangle)/nstepsy
1914 1914
1915 1915 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
1916 1916 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
1917 1917 penalty = numpy.zeros((nstepsx,nstepsy))
1918 1918 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
1919 1919 jph = numpy.zeros(nchan)
1920 1920
1921 1921 # Iterations looking for the offset
1922 1922 for iy in range(int(nstepsy)):
1923 1923 for ix in range(int(nstepsx)):
1924 1924 jph[pairy[1]] = alpha_y[iy]
1925 1925 jph[pairy[0]] = -gammas[1] + alpha_y[iy]*d[pairy[0]]/d[pairy[1]]
1926 1926
1927 1927 jph[pairx[1]] = alpha_x[ix]
1928 1928 jph[pairx[0]] = -gammas[0] + alpha_x[ix]*d[pairx[0]]/d[pairx[1]]
1929 1929
1930 1930 jph_array[:,ix,iy] = jph
1931 1931
1932 1932 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, jph)
1933 1933 error = meteorsArray1[:,-1]
1934 1934 ind1 = numpy.where(error==0)[0]
1935 1935 penalty[ix,iy] = ind1.size
1936 1936
1937 1937 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
1938 1938 phOffset = jph_array[:,i,j]
1939 1939
1940 1940 center_xangle = phOffset[pairx[1]]
1941 1941 center_yangle = phOffset[pairy[1]]
1942 1942
1943 1943 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
1944 1944 phOffset = phOffset*180/numpy.pi
1945 1945 return phOffset
1946 1946
1947 1947
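Editor's note (not part of the original file): __getPhases runs a shrinking grid search, re-centering a 20x20 grid on the best cell and narrowing the angular span on each pass; in the real code each cell is scored by how many meteors pass the AOA and height checks. The same pattern on a toy score function (assumed objective, illustration only):

    import numpy
    score = lambda x, y: -((x - 0.7)**2 + (y + 0.3)**2)         # toy objective
    cx, cy = 0.0, 0.0
    for span in (8*numpy.pi, numpy.pi, numpy.pi/2, numpy.pi/4):
        xs = cx + numpy.linspace(-span/2, span/2, 20)
        ys = cy + numpy.linspace(-span/2, span/2, 20)
        penalty = numpy.array([[score(x, y) for y in ys] for x in xs])
        i, j = numpy.unravel_index(penalty.argmax(), penalty.shape)
        cx, cy = xs[i], ys[j]                                   # re-center on the best cell
    # (cx, cy) now lies close to the true maximiser (0.7, -0.3)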
1948 1948 def run(self, dataOut, pairs, distances, hmin, hmax, nHours = None):
1949 1949
1950 1950 dataOut.flagNoData = True
1951 1951 self.__dataReady = False
1952 1952
1953 1953 if nHours == None:
1954 1954 nHours = 1
1955 1955
1956 1956 dataOut.outputInterval = nHours*3600
1957 1957
1958 1958 if self.__isConfig == False:
1959 1959 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
1960 1960 #Get Initial LTC time
1961 1961 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
1962 1962 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
1963 1963
1964 1964 self.__isConfig = True
1965 1965
1966 if self.__buffer == None:
1966 if self.__buffer is None:
1967 1967 self.__buffer = dataOut.data_param.copy()
1968 1968
1969 1969 else:
1970 1970 self.__buffer = numpy.hstack((self.__buffer, dataOut.data_param))
1971 1971
1972 1972 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
1973 1973
1974 1974 if self.__dataReady:
1975 1975 dataOut.utctimeInit = self.__initime
1976 1976 self.__initime += dataOut.outputInterval #to erase time offset
1977 1977
1978 1978 freq = dataOut.frequency
1979 1979 c = dataOut.C #m/s
1980 1980 lamb = c/freq
1981 1981 k = 2*numpy.pi/lamb
1982 1982 azimuth = 0
1983 1983 h = (hmin, hmax)
1984 1984 pairsList = ((0,3),(1,2))
1985 1985
1986 1986 meteorsArray = self.__buffer[0,:,:]
1987 1987 error = meteorsArray[:,-1]
1988 1988 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
1989 1989 ind1 = numpy.where(boolError)[0]
1990 1990 meteorsArray = meteorsArray[ind1,:]
1991 1991 meteorsArray[:,-1] = 0
1992 1992 phases = meteorsArray[:,9:13]
1993 1993
1994 1994 #Calculate Gammas
1995 1995 gammas = self.__getGammas(pairs, k, distances, phases)
1996 1996 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
1997 1997 #Calculate Phases
1998 1998 phasesOff = self.__getPhases(azimuth, h, pairsList, distances, gammas, meteorsArray)
1999 1999 phasesOff = phasesOff.reshape((1,phasesOff.size))
2000 2000 dataOut.data_output = -phasesOff
2001 2001 dataOut.flagNoData = False
2002 2002 self.__buffer = None
2003 2003
2004 2004
2005 2005 return
2006 2006
2007 2007 class MeteorOperations():
2008 2008
2009 2009 def __init__(self):
2010 2010
2011 2011 return
2012 2012
2013 2013 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, jph):
2014 2014
2015 2015 arrayParameters = arrayParameters0.copy()
2016 2016 hmin = h[0]
2017 2017 hmax = h[1]
2018 2018
2019 2019 #Calculate AOA (Error N 3, 4)
2020 2020 #JONES ET AL. 1998
2021 2021 AOAthresh = numpy.pi/8
2022 2022 error = arrayParameters[:,-1]
2023 2023 phases = -arrayParameters[:,9:13] + jph
2024 2024 arrayParameters[:,4:7], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, error, AOAthresh, azimuth)
2025 2025
2026 2026 #Calculate Heights (Error N 13 and 14)
2027 2027 error = arrayParameters[:,-1]
2028 2028 Ranges = arrayParameters[:,2]
2029 2029 zenith = arrayParameters[:,5]
2030 2030 arrayParameters[:,3], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
2031 2031
2032 2032 #----------------------- Get Final data ------------------------------------
2033 2033 # error = arrayParameters[:,-1]
2034 2034 # ind1 = numpy.where(error==0)[0]
2035 2035 # arrayParameters = arrayParameters[ind1,:]
2036 2036
2037 2037 return arrayParameters
2038 2038
2039 2039 def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
2040 2040
2041 2041 arrayAOA = numpy.zeros((phases.shape[0],3))
2042 2042 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
2043 2043
2044 2044 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
2045 2045 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
2046 2046 arrayAOA[:,2] = cosDirError
2047 2047
2048 2048 azimuthAngle = arrayAOA[:,0]
2049 2049 zenithAngle = arrayAOA[:,1]
2050 2050
2051 2051 #Setting Error
2052 2052 #Number 3: AOA not feasible
2053 2053 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
2054 2054 error[indInvalid] = 3
2055 2055 #Number 4: Large difference in AOAs obtained from different antenna baselines
2056 2056 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
2057 2057 error[indInvalid] = 4
2058 2058 return arrayAOA, error
2059 2059
2060 2060 def __getDirectionCosines(self, arrayPhase, pairsList):
2061 2061
2062 2062 #Initializing some variables
2063 2063 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
2064 2064 ang_aux = ang_aux.reshape(1,ang_aux.size)
2065 2065
2066 2066 cosdir = numpy.zeros((arrayPhase.shape[0],2))
2067 2067 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
2068 2068
2069 2069
2070 2070 for i in range(2):
2071 2071 #First Estimation
2072 2072 phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
2073 2073 #Dealias
2074 2074 indcsi = numpy.where(phi0_aux > numpy.pi)
2075 2075 phi0_aux[indcsi] -= 2*numpy.pi
2076 2076 indcsi = numpy.where(phi0_aux < -numpy.pi)
2077 2077 phi0_aux[indcsi] += 2*numpy.pi
2078 2078 #Direction Cosine 0
2079 2079 cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
2080 2080
2081 2081 #Most-Accurate Second Estimation
2082 2082 phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
2083 2083 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
2084 2084 #Direction Cosine 1
2085 2085 cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
2086 2086
2087 2087 #Searching the correct Direction Cosine
2088 2088 cosdir0_aux = cosdir0[:,i]
2089 2089 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
2090 2090 #Minimum Distance
2091 2091 cosDiff = (cosdir1 - cosdir0_aux)**2
2092 2092 indcos = cosDiff.argmin(axis = 1)
2093 2093 #Saving Value obtained
2094 2094 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
2095 2095
2096 2096 return cosdir0, cosdir
2097 2097
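Editor's note (not part of the original file): __getDirectionCosines combines a short antenna pair (0.5 wavelengths, unambiguous but coarse) with a long pair (4.5 wavelengths, precise but phase-wrapped), keeping the 2*pi branch of the long-baseline estimate closest to the coarse one. A sketch that ignores the sign conventions of the real code, with an assumed direction cosine:

    import numpy
    cosdir_true = 0.12                                          # assumed direction cosine
    phi_short = 2*numpy.pi*0.5*cosdir_true                      # short baseline: unambiguous phase
    phi_long = numpy.angle(numpy.exp(1j*2*numpy.pi*4.5*cosdir_true))   # long baseline: wrapped phase
    branches = (phi_long + 2*numpy.pi*numpy.arange(-8, 9))/(2*numpy.pi*4.5)
    coarse = phi_short/(2*numpy.pi*0.5)
    best = branches[numpy.argmin((branches - coarse)**2)]       # recovers cosdir_true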
2098 2098 def __calculateAOA(self, cosdir, azimuth):
2099 2099 cosdirX = cosdir[:,0]
2100 2100 cosdirY = cosdir[:,1]
2101 2101
2102 2102 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
2103 2103 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
2104 2104 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
2105 2105
2106 2106 return angles
2107 2107
2108 2108 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
2109 2109
2110 2110 Ramb = 375 #Ramb = c/(2*PRF)
2111 2111 Re = 6371 #Earth Radius
2112 2112 heights = numpy.zeros(Ranges.shape)
2113 2113
2114 2114 R_aux = numpy.array([0,1,2])*Ramb
2115 2115 R_aux = R_aux.reshape(1,R_aux.size)
2116 2116
2117 2117 Ranges = Ranges.reshape(Ranges.size,1)
2118 2118
2119 2119 Ri = Ranges + R_aux
2120 2120 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
2121 2121
2122 2122 #Check if there is a height between 70 and 110 km
2123 2123 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
2124 2124 ind_h = numpy.where(h_bool == 1)[0]
2125 2125
2126 2126 hCorr = hi[ind_h, :]
2127 2127 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
2128 2128
2129 2129 hCorr = hi[ind_hCorr]
2130 2130 heights[ind_h] = hCorr
2131 2131
2132 2132 #Setting Error
2133 2133 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
2134 2134 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
2135 2135
2136 2136 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
2137 2137 error[indInvalid2] = 14
2138 2138 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
2139 2139 error[indInvalid1] = 13
2140 2140
2141 2141 return heights, error
2142 2142
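Editor's note (not part of the original file): __getHeights resolves range aliasing by testing the candidate ranges R, R + Ramb and R + 2*Ramb (Ramb = c/(2*PRF) = 375 km) and keeping the candidate whose law-of-cosines height falls between hmin and hmax. A worked example with assumed numbers:

    import numpy
    Re, Ramb = 6371.0, 375.0                        # Earth radius and ambiguity range [km]
    zen = 35.0*numpy.pi/180                         # assumed zenith angle
    R = 120.0                                       # assumed measured (possibly aliased) range [km]
    Ri = R + numpy.array([0, 1, 2])*Ramb            # candidate unaliased ranges
    hi = numpy.sqrt(Re**2 + Ri**2 + 2*Re*Ri*numpy.cos(zen)) - Re
    valid = hi[(hi > 70) & (hi < 110)]              # exactly one candidate -> unambiguous height (~99 km)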
2143 2143
2144 2144 No newline at end of file
@@ -1,978 +1,978
1 1 import numpy
2 2 import math
3 3
4 4 from jroproc_base import ProcessingUnit, Operation
5 5 from schainpy.model.data.jrodata import Spectra
6 6 from schainpy.model.data.jrodata import hildebrand_sekhon
7 7
8 8 class SpectraProc(ProcessingUnit):
9 9
10 10 def __init__(self):
11 11
12 12 ProcessingUnit.__init__(self)
13 13
14 14 self.buffer = None
15 15 self.firstdatatime = None
16 16 self.profIndex = 0
17 17 self.dataOut = Spectra()
18 18 self.id_min = None
19 19 self.id_max = None
20 20
21 21 def __updateObjFromInput(self):
22 22
23 23 self.dataOut.timeZone = self.dataIn.timeZone
24 24 self.dataOut.dstFlag = self.dataIn.dstFlag
25 25 self.dataOut.errorCount = self.dataIn.errorCount
26 26 self.dataOut.useLocalTime = self.dataIn.useLocalTime
27 27
28 28 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
29 29 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
30 30 self.dataOut.channelList = self.dataIn.channelList
31 31 self.dataOut.heightList = self.dataIn.heightList
32 32 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
33 33 # self.dataOut.nHeights = self.dataIn.nHeights
34 34 # self.dataOut.nChannels = self.dataIn.nChannels
35 35 self.dataOut.nBaud = self.dataIn.nBaud
36 36 self.dataOut.nCode = self.dataIn.nCode
37 37 self.dataOut.code = self.dataIn.code
38 38 self.dataOut.nProfiles = self.dataOut.nFFTPoints
39 39 # self.dataOut.channelIndexList = self.dataIn.channelIndexList
40 40 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
41 41 self.dataOut.utctime = self.firstdatatime
42 42 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #asumo q la data esta decodificada
43 43 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #asumo q la data esta sin flip
44 44 # self.dataOut.flagShiftFFT = self.dataIn.flagShiftFFT
45 45 self.dataOut.nCohInt = self.dataIn.nCohInt
46 46 self.dataOut.nIncohInt = 1
47 47 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
48 48 self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
49 49
50 50 # self.dataOut.timeInterval = self.dataIn.timeInterval*self.dataOut.nFFTPoints*self.dataOut.nIncohInt
51 51 self.dataOut.frequency = self.dataIn.frequency
52 52 self.dataOut.realtime = self.dataIn.realtime
53 53
54 54 self.dataOut.azimuth = self.dataIn.azimuth
55 55 self.dataOut.zenith = self.dataIn.zenith
56 56
57 57 self.dataOut.beam.codeList = self.dataIn.beam.codeList
58 58 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
59 59 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
60 60
61 61 def __getFft(self):
62 62 """
63 63 Converts Voltage values into Spectra
64 64
65 65 Affected:
66 66 self.dataOut.data_spc
67 67 self.dataOut.data_cspc
68 68 self.dataOut.data_dc
69 69 self.dataOut.heightList
70 70 self.profIndex
71 71 self.buffer
72 72 self.dataOut.flagNoData
73 73 """
74 74 fft_volt = numpy.fft.fft(self.buffer,n=self.dataOut.nFFTPoints,axis=1)
75 75 fft_volt = fft_volt.astype(numpy.dtype('complex'))
76 76 dc = fft_volt[:,0,:]
77 77
78 78 #self-spectra computation
79 79 fft_volt = numpy.fft.fftshift(fft_volt,axes=(1,))
80 80 spc = fft_volt * numpy.conjugate(fft_volt)
81 81 spc = spc.real
82 82
83 83 blocksize = 0
84 84 blocksize += dc.size
85 85 blocksize += spc.size
86 86
87 87 cspc = None
88 88 pairIndex = 0
89 89 if self.dataOut.pairsList != None:
90 90 #cross-spectra computation
91 91 cspc = numpy.zeros((self.dataOut.nPairs, self.dataOut.nFFTPoints, self.dataOut.nHeights), dtype='complex')
92 92 for pair in self.dataOut.pairsList:
93 93 if pair[0] not in self.dataOut.channelList:
94 94 raise ValueError, "Error getting CrossSpectra: pair 0 of %s is not in channelList = %s" %(str(pair), str(self.dataOut.channelList))
95 95 if pair[1] not in self.dataOut.channelList:
96 96 raise ValueError, "Error getting CrossSpectra: pair 1 of %s is not in channelList = %s" %(str(pair), str(self.dataOut.channelList))
97 97
98 98 cspc[pairIndex,:,:] = fft_volt[pair[0],:,:] * numpy.conjugate(fft_volt[pair[1],:,:])
99 99 pairIndex += 1
100 100 blocksize += cspc.size
101 101
102 102 self.dataOut.data_spc = spc
103 103 self.dataOut.data_cspc = cspc
104 104 self.dataOut.data_dc = dc
105 105 self.dataOut.blockSize = blocksize
106 106 self.dataOut.flagShiftFFT = False
107 107
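Editor's note (not part of the original file): the core of __getFft is an FFT along the profile axis followed by |X|^2 for the self-spectra and X_i * conj(X_j) for each cross-spectrum pair. A minimal standalone version on random voltages (shapes assumed, DC handling omitted):

    import numpy
    nCh, nProf, nHei = 2, 64, 10
    volts = numpy.random.randn(nCh, nProf, nHei) + 1j*numpy.random.randn(nCh, nProf, nHei)
    fft_volt = numpy.fft.fftshift(numpy.fft.fft(volts, axis=1), axes=(1,))
    spc = (fft_volt*numpy.conjugate(fft_volt)).real            # self-spectra, one per channel
    cspc = fft_volt[0]*numpy.conjugate(fft_volt[1])            # cross-spectrum of the pair (0,1)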
108 108 def run(self, nProfiles=None, nFFTPoints=None, pairsList=[], ippFactor=None):
109 109
110 110 self.dataOut.flagNoData = True
111 111
112 112 if self.dataIn.type == "Spectra":
113 113 self.dataOut.copy(self.dataIn)
114 114 return True
115 115
116 116 if self.dataIn.type == "Voltage":
117 117
118 118 if nFFTPoints == None:
119 119 raise ValueError, "This SpectraProc.run() need nFFTPoints input variable"
120 120
121 121 if nProfiles == None:
122 122 nProfiles = nFFTPoints
123 123 # raise ValueError, "This SpectraProc.run() need nProfiles input variable"
124 124
125 125
126 126 if ippFactor == None:
127 127 ippFactor = 1
128 128 self.dataOut.ippFactor = ippFactor
129 129
130 130 self.dataOut.nFFTPoints = nFFTPoints
131 131 self.dataOut.pairsList = pairsList
132 132
133 if self.buffer == None:
133 if self.buffer is None:
134 134 self.buffer = numpy.zeros((self.dataIn.nChannels,
135 135 nProfiles,
136 136 self.dataIn.nHeights),
137 137 dtype='complex')
138 138 self.id_min = 0
139 139 self.id_max = self.dataIn.data.shape[1]
140 140
141 141 if len(self.dataIn.data.shape) == 2:
142 142 self.buffer[:,self.profIndex,:] = self.dataIn.data.copy()
143 143 self.profIndex += 1
144 144 else:
145 145 if self.dataIn.data.shape[1] == nProfiles:
146 146 self.buffer = self.dataIn.data.copy()
147 147 self.profIndex = nProfiles
148 148 elif self.dataIn.data.shape[1] < nProfiles:
149 149 self.buffer[:,self.id_min:self.id_max,:] = self.dataIn.data
150 150 self.profIndex += self.dataIn.data.shape[1]
151 151 self.id_min += self.dataIn.data.shape[1]
152 152 self.id_max += self.dataIn.data.shape[1]
153 153 else:
154 154 raise ValueError, "The type object %s has %d profiles, it should be equal to %d profiles"%(self.dataIn.type,self.dataIn.data.shape[1],nProfiles)
155 155 self.dataOut.flagNoData = True
156 156 return 0
157 157
158 158
159 159 if self.firstdatatime == None:
160 160 self.firstdatatime = self.dataIn.utctime
161 161
162 162 if self.profIndex == nProfiles:
163 163 self.__updateObjFromInput()
164 164 self.__getFft()
165 165
166 166 self.dataOut.flagNoData = False
167 167
168 168 self.buffer = None
169 169 self.firstdatatime = None
170 170 self.profIndex = 0
171 171
172 172 return True
173 173
174 174 raise ValueError, "The type of input object '%s' is not valid"%(self.dataIn.type)
175 175
176 176 def __selectPairs(self, channelList=None):
177 177
178 178 if channelList == None:
179 179 return
180 180
181 181 pairsIndexListSelected = []
182 182 for pairIndex in self.dataOut.pairsIndexList:
183 183 #First pair
184 184 if self.dataOut.pairsList[pairIndex][0] not in channelList:
185 185 continue
186 186 #Second pair
187 187 if self.dataOut.pairsList[pairIndex][1] not in channelList:
188 188 continue
189 189
190 190 pairsIndexListSelected.append(pairIndex)
191 191
192 192 if not pairsIndexListSelected:
193 193 self.dataOut.data_cspc = None
194 194 self.dataOut.pairsList = []
195 195 return
196 196
197 197 self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected]
198 198 self.dataOut.pairsList = self.dataOut.pairsList[pairsIndexListSelected]
199 199
200 200 return
201 201
202 202 def selectChannels(self, channelList):
203 203
204 204 channelIndexList = []
205 205
206 206 for channel in channelList:
207 207 if channel not in self.dataOut.channelList:
208 208 raise ValueError, "Error selecting channels: The value %d in channelList is not valid.\nAvailable channels = %s" %(channel, str(self.dataOut.channelList))
209 209
210 210 index = self.dataOut.channelList.index(channel)
211 211 channelIndexList.append(index)
212 212
213 213 self.selectChannelsByIndex(channelIndexList)
214 214
215 215 def selectChannelsByIndex(self, channelIndexList):
216 216 """
217 217 Selects a block of data by channels according to channelIndexList
218 218
219 219 Input:
220 220 channelIndexList : plain list of channels to select, e.g. [2,3,7]
221 221
222 222 Affected:
223 223 self.dataOut.data_spc
224 224 self.dataOut.channelIndexList
225 225 self.dataOut.nChannels
226 226
227 227 Return:
228 228 None
229 229 """
230 230
231 231 for channelIndex in channelIndexList:
232 232 if channelIndex not in self.dataOut.channelIndexList:
233 233 raise ValueError, "Error selecting channels: The value %d in channelIndexList is not valid.\nAvailable channel indexes = %s" %(channelIndex, str(self.dataOut.channelIndexList))
234 234
235 235 # nChannels = len(channelIndexList)
236 236
237 237 data_spc = self.dataOut.data_spc[channelIndexList,:]
238 238 data_dc = self.dataOut.data_dc[channelIndexList,:]
239 239
240 240 self.dataOut.data_spc = data_spc
241 241 self.dataOut.data_dc = data_dc
242 242
243 243 self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
244 244 # self.dataOut.nChannels = nChannels
245 245
246 246 self.__selectPairs(self.dataOut.channelList)
247 247
248 248 return 1
249 249
250 250 def selectHeights(self, minHei, maxHei):
251 251 """
252 252 Selects a block of data for the height values within the range
253 253 minHei <= height <= maxHei
254 254
255 255 Input:
256 256 minHei : minimum height to consider
257 257 maxHei : maximum height to consider
258 258
259 259 Affected:
260 260 Several values are changed indirectly through the selectHeightsByIndex method
261 261
262 262 Return:
263 263 1 if the method ran successfully, otherwise 0
264 264 """
265 265
266 266 if (minHei > maxHei):
267 267 raise ValueError, "Error selecting heights: Height range (%d,%d) is not valid" % (minHei, maxHei)
268 268
269 269 if (minHei < self.dataOut.heightList[0]):
270 270 minHei = self.dataOut.heightList[0]
271 271
272 272 if (maxHei > self.dataOut.heightList[-1]):
273 273 maxHei = self.dataOut.heightList[-1]
274 274 # raise ValueError, "some value in (%d,%d) is not valid" % (minHei, maxHei)
275 275
276 276 minIndex = 0
277 277 maxIndex = 0
278 278 heights = self.dataOut.heightList
279 279
280 280 inda = numpy.where(heights >= minHei)
281 281 indb = numpy.where(heights <= maxHei)
282 282
283 283 try:
284 284 minIndex = inda[0][0]
285 285 except:
286 286 minIndex = 0
287 287
288 288 try:
289 289 maxIndex = indb[0][-1]
290 290 except:
291 291 maxIndex = len(heights)
292 292
293 293 self.selectHeightsByIndex(minIndex, maxIndex)
294 294
295 295 return 1
296 296
297 297 def getBeaconSignal(self, tauindex = 0, channelindex = 0, hei_ref=None):
298 298 newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
299 299
300 300 if hei_ref != None:
301 301 newheis = numpy.where(self.dataOut.heightList>hei_ref)
302 302
303 303 minIndex = min(newheis[0])
304 304 maxIndex = max(newheis[0])
305 305 data_spc = self.dataOut.data_spc[:,:,minIndex:maxIndex+1]
306 306 heightList = self.dataOut.heightList[minIndex:maxIndex+1]
307 307
308 308 # determine indexes
309 309 nheis = int(self.dataOut.radarControllerHeaderObj.txB/(self.dataOut.heightList[1]-self.dataOut.heightList[0]))
310 310 avg_dB = 10*numpy.log10(numpy.sum(data_spc[channelindex,:,:],axis=0))
311 311 beacon_dB = numpy.sort(avg_dB)[-nheis:]
312 312 beacon_heiIndexList = []
313 313 for val in avg_dB.tolist():
314 314 if val >= beacon_dB[0]:
315 315 beacon_heiIndexList.append(avg_dB.tolist().index(val))
316 316
317 317 #data_spc = data_spc[:,:,beacon_heiIndexList]
318 318 data_cspc = None
319 319 if self.dataOut.data_cspc != None:
320 320 data_cspc = self.dataOut.data_cspc[:,:,minIndex:maxIndex+1]
321 321 #data_cspc = data_cspc[:,:,beacon_heiIndexList]
322 322
323 323 data_dc = None
324 324 if self.dataOut.data_dc != None:
325 325 data_dc = self.dataOut.data_dc[:,minIndex:maxIndex+1]
326 326 #data_dc = data_dc[:,beacon_heiIndexList]
327 327
328 328 self.dataOut.data_spc = data_spc
329 329 self.dataOut.data_cspc = data_cspc
330 330 self.dataOut.data_dc = data_dc
331 331 self.dataOut.heightList = heightList
332 332 self.dataOut.beacon_heiIndexList = beacon_heiIndexList
333 333
334 334 return 1
335 335
336 336
337 337 def selectHeightsByIndex(self, minIndex, maxIndex):
338 338 """
339 339 Selects a block of data for the height indexes within the range
340 340 minIndex <= index <= maxIndex
341 341
342 342 Input:
343 343 minIndex : minimum height index to consider
344 344 maxIndex : maximum height index to consider
345 345
346 346 Affected:
347 347 self.dataOut.data_spc
348 348 self.dataOut.data_cspc
349 349 self.dataOut.data_dc
350 350 self.dataOut.heightList
351 351
352 352 Return:
353 353 1 if the method ran successfully, otherwise 0
354 354 """
355 355
356 356 if (minIndex < 0) or (minIndex > maxIndex):
357 357 raise ValueError, "Error selecting heights by index: Index range in (%d,%d) is not valid" % (minIndex, maxIndex)
358 358
359 359 if (maxIndex >= self.dataOut.nHeights):
360 360 maxIndex = self.dataOut.nHeights-1
361 361 # raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)
362 362
363 363 # nHeights = maxIndex - minIndex + 1
364 364
365 365 #Spectra
366 366 data_spc = self.dataOut.data_spc[:,:,minIndex:maxIndex+1]
367 367
368 368 data_cspc = None
369 369 if self.dataOut.data_cspc != None:
370 370 data_cspc = self.dataOut.data_cspc[:,:,minIndex:maxIndex+1]
371 371
372 372 data_dc = None
373 373 if self.dataOut.data_dc != None:
374 374 data_dc = self.dataOut.data_dc[:,minIndex:maxIndex+1]
375 375
376 376 self.dataOut.data_spc = data_spc
377 377 self.dataOut.data_cspc = data_cspc
378 378 self.dataOut.data_dc = data_dc
379 379
380 380 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex+1]
381 381
382 382 return 1
383 383
384 384 def removeDC(self, mode = 2):
385 385 jspectra = self.dataOut.data_spc
386 386 jcspectra = self.dataOut.data_cspc
387 387
388 388
389 389 num_chan = jspectra.shape[0]
390 390 num_hei = jspectra.shape[2]
391 391
392 392 if jcspectra != None:
393 393 jcspectraExist = True
394 394 num_pairs = jcspectra.shape[0]
395 395 else: jcspectraExist = False
396 396
397 397 freq_dc = jspectra.shape[1]/2
398 398 ind_vel = numpy.array([-2,-1,1,2]) + freq_dc
399 399
400 400 if ind_vel[0]<0:
401 401 ind_vel[range(0,1)] = ind_vel[range(0,1)] + jspectra.shape[1]
402 402
403 403 if mode == 1:
404 404 jspectra[:,freq_dc,:] = (jspectra[:,ind_vel[1],:] + jspectra[:,ind_vel[2],:])/2 #CORRECCION
405 405
406 406 if jcspectraExist:
407 407 jcspectra[:,freq_dc,:] = (jcspectra[:,ind_vel[1],:] + jcspectra[:,ind_vel[2],:])/2
408 408
409 409 if mode == 2:
410 410
411 411 vel = numpy.array([-2,-1,1,2])
412 412 xx = numpy.zeros([4,4])
413 413
414 414 for fil in range(4):
415 415 xx[fil,:] = vel[fil]**numpy.asarray(range(4))
416 416
417 417 xx_inv = numpy.linalg.inv(xx)
418 418 xx_aux = xx_inv[0,:]
419 419
420 420 for ich in range(num_chan):
421 421 yy = jspectra[ich,ind_vel,:]
422 422 jspectra[ich,freq_dc,:] = numpy.dot(xx_aux,yy)
423 423
424 424 junkid = jspectra[ich,freq_dc,:]<=0
425 425 cjunkid = sum(junkid)
426 426
427 427 if cjunkid.any():
428 428 jspectra[ich,freq_dc,junkid.nonzero()] = (jspectra[ich,ind_vel[1],junkid] + jspectra[ich,ind_vel[2],junkid])/2
429 429
430 430 if jcspectraExist:
431 431 for ip in range(num_pairs):
432 432 yy = jcspectra[ip,ind_vel,:]
433 433 jcspectra[ip,freq_dc,:] = numpy.dot(xx_aux,yy)
434 434
435 435
436 436 self.dataOut.data_spc = jspectra
437 437 self.dataOut.data_cspc = jcspectra
438 438
439 439 return 1
440 440
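Editor's note (not part of the original file): in mode 2, removeDC rebuilds the DC bin from the four neighbouring velocity bins with a cubic fit; the first row of the inverse Vandermonde matrix gives the weights that evaluate that fit at velocity 0. A worked check with an assumed quadratic spectrum shape:

    import numpy
    vel = numpy.array([-2.0, -1.0, 1.0, 2.0])                  # bins flanking the DC line
    xx = numpy.vstack([vel**p for p in range(4)]).T            # rows [1, v, v**2, v**3]
    w = numpy.linalg.inv(xx)[0, :]                             # weights evaluating the fit at v = 0
    y = 3.0 + 0.5*vel + 0.1*vel**2                             # assumed spectrum values at those bins
    dc_estimate = numpy.dot(w, y)                              # == 3.0, the interpolated DC value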
441 441 def removeInterference(self, interf = 2,hei_interf = None, nhei_interf = None, offhei_interf = None):
442 442
443 443 jspectra = self.dataOut.data_spc
444 444 jcspectra = self.dataOut.data_cspc
445 445 jnoise = self.dataOut.getNoise()
446 446 num_incoh = self.dataOut.nIncohInt
447 447
448 448 num_channel = jspectra.shape[0]
449 449 num_prof = jspectra.shape[1]
450 450 num_hei = jspectra.shape[2]
451 451
452 452 #hei_interf
453 453 if hei_interf == None:
454 454 count_hei = num_hei/2 #Integer division, the odd remainder does not matter
455 455 hei_interf = numpy.asmatrix(range(count_hei)) + num_hei - count_hei
456 456 hei_interf = numpy.asarray(hei_interf)[0]
457 457 #nhei_interf
458 458 if (nhei_interf == None):
459 459 nhei_interf = 5
460 460 if (nhei_interf < 1):
461 461 nhei_interf = 1
462 462 if (nhei_interf > count_hei):
463 463 nhei_interf = count_hei
464 464 if (offhei_interf == None):
465 465 offhei_interf = 0
466 466
467 467 ind_hei = range(num_hei)
468 468 # mask_prof = numpy.asarray(range(num_prof - 2)) + 1
469 469 # mask_prof[range(num_prof/2 - 1,len(mask_prof))] += 1
470 470 mask_prof = numpy.asarray(range(num_prof))
471 471 num_mask_prof = mask_prof.size
472 472 comp_mask_prof = [0, num_prof/2]
473 473
474 474
475 475 #noise_exist: determines whether the jnoise variable has been defined and holds the noise information for every channel
476 476 if (jnoise.size < num_channel or numpy.isnan(jnoise).any()):
477 477 jnoise = numpy.nan*numpy.ones(num_channel)
478 478 noise_exist = jnoise[0] < numpy.Inf
479 479
480 480 #Interference removal subroutine
481 481 for ich in range(num_channel):
482 482 #Sort the spectra by power (lowest to highest)
483 483 power = jspectra[ich,mask_prof,:]
484 484 power = power[:,hei_interf]
485 485 power = power.sum(axis = 0)
486 486 psort = power.ravel().argsort()
487 487
488 488 #Estimate the average interference in the power spectra using the lowest-power heights
489 489 junkspc_interf = jspectra[ich,:,hei_interf[psort[range(offhei_interf, nhei_interf + offhei_interf)]]]
490 490
491 491 if noise_exist:
492 492 # tmp_noise = jnoise[ich] / num_prof
493 493 tmp_noise = jnoise[ich]
494 494 junkspc_interf = junkspc_interf - tmp_noise
495 495 #junkspc_interf[:,comp_mask_prof] = 0
496 496
497 497 jspc_interf = junkspc_interf.sum(axis = 0) / nhei_interf
498 498 jspc_interf = jspc_interf.transpose()
499 499 #Compute the average interference spectrum
500 500 noiseid = numpy.where(jspc_interf <= tmp_noise/ math.sqrt(num_incoh))
501 501 noiseid = noiseid[0]
502 502 cnoiseid = noiseid.size
503 503 interfid = numpy.where(jspc_interf > tmp_noise/ math.sqrt(num_incoh))
504 504 interfid = interfid[0]
505 505 cinterfid = interfid.size
506 506
507 507 if (cnoiseid > 0): jspc_interf[noiseid] = 0
508 508
509 509 #Expand the set of profiles to clean
510 510 if (cinterfid > 0):
511 511 new_interfid = (numpy.r_[interfid - 1, interfid, interfid + 1] + num_prof)%num_prof
512 512 new_interfid = numpy.asarray(new_interfid)
513 513 new_interfid = {x for x in new_interfid}
514 514 new_interfid = numpy.array(list(new_interfid))
515 515 new_cinterfid = new_interfid.size
516 516 else: new_cinterfid = 0
517 517
518 518 for ip in range(new_cinterfid):
519 519 ind = junkspc_interf[:,new_interfid[ip]].ravel().argsort()
520 520 jspc_interf[new_interfid[ip]] = junkspc_interf[ind[nhei_interf/2],new_interfid[ip]]
521 521
522 522
523 523 jspectra[ich,:,ind_hei] = jspectra[ich,:,ind_hei] - jspc_interf #Fix indices
524 524
525 525 #Remove the interference at the point of strongest interference
526 526 ListAux = jspc_interf[mask_prof].tolist()
527 527 maxid = ListAux.index(max(ListAux))
528 528
529 529
530 530 if cinterfid > 0:
531 531 for ip in range(cinterfid*(interf == 2) - 1):
532 532 ind = (jspectra[ich,interfid[ip],:] < tmp_noise*(1 + 1/math.sqrt(num_incoh))).nonzero()
533 533 cind = len(ind)
534 534
535 535 if (cind > 0):
536 536 jspectra[ich,interfid[ip],ind] = tmp_noise*(1 + (numpy.random.uniform(cind) - 0.5)/math.sqrt(num_incoh))
537 537
538 538 ind = numpy.array([-2,-1,1,2])
539 539 xx = numpy.zeros([4,4])
540 540
541 541 for id1 in range(4):
542 542 xx[:,id1] = ind[id1]**numpy.asarray(range(4))
543 543
544 544 xx_inv = numpy.linalg.inv(xx)
545 545 xx = xx_inv[:,0]
546 546 ind = (ind + maxid + num_mask_prof)%num_mask_prof
547 547 yy = jspectra[ich,mask_prof[ind],:]
548 548 jspectra[ich,mask_prof[maxid],:] = numpy.dot(yy.transpose(),xx)
549 549
550 550
551 551 indAux = (jspectra[ich,:,:] < tmp_noise*(1-1/math.sqrt(num_incoh))).nonzero()
552 552 jspectra[ich,indAux[0],indAux[1]] = tmp_noise * (1 - 1/math.sqrt(num_incoh))
553 553
554 554 #Interference removal in the cross-spectra
555 555 if jcspectra == None: return jspectra, jcspectra
556 556 num_pairs = jcspectra.size/(num_prof*num_hei)
557 557 jcspectra = jcspectra.reshape(num_pairs, num_prof, num_hei)
558 558
559 559 for ip in range(num_pairs):
560 560
561 561 #-------------------------------------------
562 562
563 563 cspower = numpy.abs(jcspectra[ip,mask_prof,:])
564 564 cspower = cspower[:,hei_interf]
565 565 cspower = cspower.sum(axis = 0)
566 566
567 567 cspsort = cspower.ravel().argsort()
568 568 junkcspc_interf = jcspectra[ip,:,hei_interf[cspsort[range(offhei_interf, nhei_interf + offhei_interf)]]]
569 569 junkcspc_interf = junkcspc_interf.transpose()
570 570 jcspc_interf = junkcspc_interf.sum(axis = 1)/nhei_interf
571 571
572 572 ind = numpy.abs(jcspc_interf[mask_prof]).ravel().argsort()
573 573
574 574 median_real = numpy.median(numpy.real(junkcspc_interf[mask_prof[ind[range(3*num_prof/4)]],:]))
575 575 median_imag = numpy.median(numpy.imag(junkcspc_interf[mask_prof[ind[range(3*num_prof/4)]],:]))
576 576 junkcspc_interf[comp_mask_prof,:] = numpy.complex(median_real, median_imag)
577 577
578 578 for iprof in range(num_prof):
579 579 ind = numpy.abs(junkcspc_interf[iprof,:]).ravel().argsort()
580 580 jcspc_interf[iprof] = junkcspc_interf[iprof, ind[nhei_interf/2]]
581 581
582 582 #Remove the interference
583 583 jcspectra[ip,:,ind_hei] = jcspectra[ip,:,ind_hei] - jcspc_interf
584 584
585 585 ListAux = numpy.abs(jcspc_interf[mask_prof]).tolist()
586 586 maxid = ListAux.index(max(ListAux))
587 587
588 588 ind = numpy.array([-2,-1,1,2])
589 589 xx = numpy.zeros([4,4])
590 590
591 591 for id1 in range(4):
592 592 xx[:,id1] = ind[id1]**numpy.asarray(range(4))
593 593
594 594 xx_inv = numpy.linalg.inv(xx)
595 595 xx = xx_inv[:,0]
596 596
597 597 ind = (ind + maxid + num_mask_prof)%num_mask_prof
598 598 yy = jcspectra[ip,mask_prof[ind],:]
599 599 jcspectra[ip,mask_prof[maxid],:] = numpy.dot(yy.transpose(),xx)
600 600
601 601 #Save results
602 602 self.dataOut.data_spc = jspectra
603 603 self.dataOut.data_cspc = jcspectra
604 604
605 605 return 1
606 606
607 607 def setRadarFrequency(self, frequency=None):
608 608 if frequency != None:
609 609 self.dataOut.frequency = frequency
610 610
611 611 return 1
612 612
613 613 def getNoise(self, minHei=None, maxHei=None, minVel=None, maxVel=None):
614 614 #height range validation
615 615 if minHei == None:
616 616 minHei = self.dataOut.heightList[0]
617 617
618 618 if maxHei == None:
619 619 maxHei = self.dataOut.heightList[-1]
620 620
621 621 if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei):
622 622 print 'minHei: %.2f is out of the heights range'%(minHei)
623 623 print 'minHei is setting to %.2f'%(self.dataOut.heightList[0])
624 624 minHei = self.dataOut.heightList[0]
625 625
626 626 if (maxHei > self.dataOut.heightList[-1]) or (maxHei < minHei):
627 627 print 'maxHei: %.2f is out of the heights range'%(maxHei)
628 628 print 'maxHei is setting to %.2f'%(self.dataOut.heightList[-1])
629 629 maxHei = self.dataOut.heightList[-1]
630 630
631 631 # velocity range validation
632 632 velrange = self.dataOut.getVelRange(1)
633 633
634 634 if minVel == None:
635 635 minVel = velrange[0]
636 636
637 637 if maxVel == None:
638 638 maxVel = velrange[-1]
639 639
640 640 if (minVel < velrange[0]) or (minVel > maxVel):
641 641 print 'minVel: %.2f is out of the velocity range'%(minVel)
642 642 print 'minVel is setting to %.2f'%(velrange[0])
643 643 minVel = velrange[0]
644 644
645 645 if (maxVel > velrange[-1]) or (maxVel < minVel):
646 646 print 'maxVel: %.2f is out of the velocity range'%(maxVel)
647 647 print 'maxVel is setting to %.2f'%(velrange[-1])
648 648 maxVel = velrange[-1]
649 649
650 650 # index selection for the height range
651 651 minIndex = 0
652 652 maxIndex = 0
653 653 heights = self.dataOut.heightList
654 654
655 655 inda = numpy.where(heights >= minHei)
656 656 indb = numpy.where(heights <= maxHei)
657 657
658 658 try:
659 659 minIndex = inda[0][0]
660 660 except:
661 661 minIndex = 0
662 662
663 663 try:
664 664 maxIndex = indb[0][-1]
665 665 except:
666 666 maxIndex = len(heights)
667 667
668 668 if (minIndex < 0) or (minIndex > maxIndex):
669 669 raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)
670 670
671 671 if (maxIndex >= self.dataOut.nHeights):
672 672 maxIndex = self.dataOut.nHeights-1
673 673
674 674 # index selection for the velocity range
675 675 indminvel = numpy.where(velrange >= minVel)
676 676 indmaxvel = numpy.where(velrange <= maxVel)
677 677 try:
678 678 minIndexVel = indminvel[0][0]
679 679 except:
680 680 minIndexVel = 0
681 681
682 682 try:
683 683 maxIndexVel = indmaxvel[0][-1]
684 684 except:
685 685 maxIndexVel = len(velrange)
686 686
687 687 #spectrum selection
688 688 data_spc = self.dataOut.data_spc[:,minIndexVel:maxIndexVel+1,minIndex:maxIndex+1]
689 689 #noise estimation
690 690 noise = numpy.zeros(self.dataOut.nChannels)
691 691
692 692 for channel in range(self.dataOut.nChannels):
693 693 daux = data_spc[channel,:,:]
694 694 noise[channel] = hildebrand_sekhon(daux, self.dataOut.nIncohInt)
695 695
696 696 self.dataOut.noise_estimation = noise.copy()
697 697
698 698 return 1
699 699
700 700 class IncohInt(Operation):
701 701
702 702
703 703 __profIndex = 0
704 704 __withOverapping = False
705 705
706 706 __byTime = False
707 707 __initime = None
708 708 __lastdatatime = None
709 709 __integrationtime = None
710 710
711 711 __buffer_spc = None
712 712 __buffer_cspc = None
713 713 __buffer_dc = None
714 714
715 715 __dataReady = False
716 716
717 717 __timeInterval = None
718 718
719 719 n = None
720 720
721 721
722 722
723 723 def __init__(self):
724 724
725 725 Operation.__init__(self)
726 726 # self.isConfig = False
727 727
728 728 def setup(self, n=None, timeInterval=None, overlapping=False):
729 729 """
730 730 Set the parameters of the integration class.
731 731
732 732 Inputs:
733 733
734 734 n : Number of incoherent integrations
735 735 timeInterval : Integration time; ignored when the parameter "n" is given
736 736 overlapping : if True, integrate over an overlapping (sliding) window of the last n spectra
737 737
738 738 """
739 739
740 740 self.__initime = None
741 741 self.__lastdatatime = 0
742 742 self.__buffer_spc = None
743 743 self.__buffer_cspc = None
744 744 self.__buffer_dc = None
745 745 self.__dataReady = False
746 746
747 747
748 748 if n == None and timeInterval == None:
749 749 raise ValueError, "n or timeInterval should be specified ..."
750 750
751 751 if n != None:
752 752 self.n = n
753 753 self.__byTime = False
754 754 else:
755 755 self.__integrationtime = timeInterval #if (type(timeInterval)!=integer) -> change this line
756 756 self.n = 9999
757 757 self.__byTime = True
758 758
759 759 if overlapping:
760 760 self.__withOverapping = True
761 761 else:
762 762 self.__withOverapping = False
763 763 self.__buffer_spc = 0
764 764 self.__buffer_cspc = 0
765 765 self.__buffer_dc = 0
766 766
767 767 self.__profIndex = 0
768 768
769 769 def putData(self, data_spc, data_cspc, data_dc):
770 770
771 771 """
772 772 Add a profile to __buffer_spc and increase __profIndex by one
773 773
774 774 """
775 775
776 776 if not self.__withOverapping:
777 777 self.__buffer_spc += data_spc
778 778
779 if data_cspc == None:
779 if data_cspc is None:
780 780 self.__buffer_cspc = None
781 781 else:
782 782 self.__buffer_cspc += data_cspc
783 783
784 if data_dc == None:
784 if data_dc is None:
785 785 self.__buffer_dc = None
786 786 else:
787 787 self.__buffer_dc += data_dc
788 788
789 789 self.__profIndex += 1
790 790 return
791 791
792 792 #Overlapping data
793 793 nChannels, nFFTPoints, nHeis = data_spc.shape
794 794 data_spc = numpy.reshape(data_spc, (1, nChannels, nFFTPoints, nHeis))
795 795 if data_cspc != None:
796 796 data_cspc = numpy.reshape(data_cspc, (1, -1, nFFTPoints, nHeis))
797 797 if data_dc != None:
798 798 data_dc = numpy.reshape(data_dc, (1, -1, nHeis))
799 799
800 800 #If the buffer is empty then it takes the data value
801 if self.__buffer_spc == None:
801 if self.__buffer_spc is None:
802 802 self.__buffer_spc = data_spc
803 803
804 if data_cspc == None:
804 if data_cspc is None:
805 805 self.__buffer_cspc = None
806 806 else:
807 807 self.__buffer_cspc = data_cspc
808 808
809 if data_dc == None:
809 if data_dc is None:
810 810 self.__buffer_dc = None
811 811 else:
812 812 self.__buffer_dc = data_dc
813 813
814 814 self.__profIndex += 1
815 815 return
816 816
817 817 #If the buffer holds fewer than n blocks, stack the new data onto it
818 818 if self.__profIndex < self.n:
819 819 self.__buffer_spc = numpy.vstack((self.__buffer_spc, data_spc))
820 820
821 821 if data_cspc != None:
822 822 self.__buffer_cspc = numpy.vstack((self.__buffer_cspc, data_cspc))
823 823
824 824 if data_dc != None:
825 825 self.__buffer_dc = numpy.vstack((self.__buffer_dc, data_dc))
826 826
827 827 self.__profIndex += 1
828 828 return
829 829
830 830 #If the buffer already holds n blocks, roll it and overwrite the last slot with the new data
831 831 self.__buffer_spc = numpy.roll(self.__buffer_spc, -1, axis=0)
832 832 self.__buffer_spc[self.n-1] = data_spc
833 833
834 834 if data_cspc != None:
835 835 self.__buffer_cspc = numpy.roll(self.__buffer_cspc, -1, axis=0)
836 836 self.__buffer_cspc[self.n-1] = data_cspc
837 837
838 838 if data_dc != None:
839 839 self.__buffer_dc = numpy.roll(self.__buffer_dc, -1, axis=0)
840 840 self.__buffer_dc[self.n-1] = data_dc
841 841
842 842 self.__profIndex = self.n
843 843 return
844 844
845 845
846 846 def pushData(self):
847 847 """
848 848 Return the sum of the last profiles and the profiles used in the sum.
849 849
850 850 Affected:
851 851
852 852 self.__profileIndex
853 853
854 854 """
855 855 data_spc = None
856 856 data_cspc = None
857 857 data_dc = None
858 858
859 859 if not self.__withOverapping:
860 860 data_spc = self.__buffer_spc
861 861 data_cspc = self.__buffer_cspc
862 862 data_dc = self.__buffer_dc
863 863
864 864 n = self.__profIndex
865 865
866 866 self.__buffer_spc = 0
867 867 self.__buffer_cspc = 0
868 868 self.__buffer_dc = 0
869 869 self.__profIndex = 0
870 870
871 871 return data_spc, data_cspc, data_dc, n
872 872
873 873 #Integration with Overlapping
874 874 data_spc = numpy.sum(self.__buffer_spc, axis=0)
875 875
876 876 if self.__buffer_cspc != None:
877 877 data_cspc = numpy.sum(self.__buffer_cspc, axis=0)
878 878
879 879 if self.__buffer_dc != None:
880 880 data_dc = numpy.sum(self.__buffer_dc, axis=0)
881 881
882 882 n = self.__profIndex
883 883
884 884 return data_spc, data_cspc, data_dc, n
885 885
886 886 def byProfiles(self, *args):
887 887
888 888 self.__dataReady = False
889 889 avgdata_spc = None
890 890 avgdata_cspc = None
891 891 avgdata_dc = None
892 892 # n = None
893 893
894 894 self.putData(*args)
895 895
896 896 if self.__profIndex == self.n:
897 897
898 898 avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
899 899 self.__dataReady = True
900 900
901 901 return avgdata_spc, avgdata_cspc, avgdata_dc
902 902
903 903 def byTime(self, datatime, *args):
904 904
905 905 self.__dataReady = False
906 906 avgdata_spc = None
907 907 avgdata_cspc = None
908 908 avgdata_dc = None
909 909 n = None
910 910
911 911 self.putData(*args)
912 912
913 913 if (datatime - self.__initime) >= self.__integrationtime:
914 914 avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
915 915 self.n = n
916 916 self.__dataReady = True
917 917
918 918 return avgdata_spc, avgdata_cspc, avgdata_dc
919 919
920 920 def integrate(self, datatime, *args):
921 921
922 922 if self.__initime == None:
923 923 self.__initime = datatime
924 924
925 925 if self.__byTime:
926 926 avgdata_spc, avgdata_cspc, avgdata_dc = self.byTime(datatime, *args)
927 927 else:
928 928 avgdata_spc, avgdata_cspc, avgdata_dc = self.byProfiles(*args)
929 929
930 930 self.__lastdatatime = datatime
931 931
932 if avgdata_spc == None:
932 if avgdata_spc is None:
933 933 return None, None, None, None
934 934
935 935 avgdatatime = self.__initime
936 936 try:
937 937 self.__timeInterval = (self.__lastdatatime - self.__initime)/(self.n - 1)
938 938 except:
939 939 self.__timeInterval = self.__lastdatatime - self.__initime
940 940
941 941 deltatime = datatime -self.__lastdatatime
942 942
943 943 if not self.__withOverapping:
944 944 self.__initime = datatime
945 945 else:
946 946 self.__initime += deltatime
947 947
948 948 return avgdatatime, avgdata_spc, avgdata_cspc, avgdata_dc
949 949
950 950 def run(self, dataOut, n=None, timeInterval=None, overlapping=False):
951 951
952 952 if n==1:
953 953 dataOut.flagNoData = False
954 954 return
955 955
956 956 if not self.isConfig:
957 957 self.setup(n, timeInterval, overlapping)
958 958 self.isConfig = True
959 959
960 960 avgdatatime, avgdata_spc, avgdata_cspc, avgdata_dc = self.integrate(dataOut.utctime,
961 961 dataOut.data_spc,
962 962 dataOut.data_cspc,
963 963 dataOut.data_dc)
964 964
965 965 # dataOut.timeInterval *= n
966 966 dataOut.flagNoData = True
967 967
968 968 if self.__dataReady:
969 969
970 970 dataOut.data_spc = avgdata_spc
971 971 dataOut.data_cspc = avgdata_cspc
972 972 dataOut.data_dc = avgdata_dc
973 973
974 974 dataOut.nIncohInt *= self.n
975 975 dataOut.utctime = avgdatatime
976 976 #dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt * dataOut.nIncohInt * dataOut.nFFTPoints
977 977 # dataOut.timeInterval = self.__timeInterval*self.n
978 978 dataOut.flagNoData = False
@@ -1,1055 +1,1055
1 1 import numpy
2 2
3 3 from jroproc_base import ProcessingUnit, Operation
4 4 from schainpy.model.data.jrodata import Voltage
5 5
6 6 class VoltageProc(ProcessingUnit):
7 7
8 8
9 9 def __init__(self):
10 10
11 11 ProcessingUnit.__init__(self)
12 12
13 13 # self.objectDict = {}
14 14 self.dataOut = Voltage()
15 15 self.flip = 1
16 16
17 17 def run(self):
18 18 if self.dataIn.type == 'AMISR':
19 19 self.__updateObjFromAmisrInput()
20 20
21 21 if self.dataIn.type == 'Voltage':
22 22 self.dataOut.copy(self.dataIn)
23 23
24 24 # self.dataOut.copy(self.dataIn)
25 25
26 26 def __updateObjFromAmisrInput(self):
27 27
28 28 self.dataOut.timeZone = self.dataIn.timeZone
29 29 self.dataOut.dstFlag = self.dataIn.dstFlag
30 30 self.dataOut.errorCount = self.dataIn.errorCount
31 31 self.dataOut.useLocalTime = self.dataIn.useLocalTime
32 32
33 33 self.dataOut.flagNoData = self.dataIn.flagNoData
34 34 self.dataOut.data = self.dataIn.data
35 35 self.dataOut.utctime = self.dataIn.utctime
36 36 self.dataOut.channelList = self.dataIn.channelList
37 37 # self.dataOut.timeInterval = self.dataIn.timeInterval
38 38 self.dataOut.heightList = self.dataIn.heightList
39 39 self.dataOut.nProfiles = self.dataIn.nProfiles
40 40
41 41 self.dataOut.nCohInt = self.dataIn.nCohInt
42 42 self.dataOut.ippSeconds = self.dataIn.ippSeconds
43 43 self.dataOut.frequency = self.dataIn.frequency
44 44
45 45 self.dataOut.azimuth = self.dataIn.azimuth
46 46 self.dataOut.zenith = self.dataIn.zenith
47 47
48 48 self.dataOut.beam.codeList = self.dataIn.beam.codeList
49 49 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
50 50 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
51 51 #
52 52 # pass#
53 53 #
54 54 # def init(self):
55 55 #
56 56 #
57 57 # if self.dataIn.type == 'AMISR':
58 58 # self.__updateObjFromAmisrInput()
59 59 #
60 60 # if self.dataIn.type == 'Voltage':
61 61 # self.dataOut.copy(self.dataIn)
62 62 # # No necesita copiar en cada init() los atributos de dataIn
63 63 # # la copia deberia hacerse por cada nuevo bloque de datos
64 64
65 65 def selectChannels(self, channelList):
66 66
67 67 channelIndexList = []
68 68
69 69 for channel in channelList:
70 70 if channel not in self.dataOut.channelList:
71 71 raise ValueError, "Channel %d is not in %s" %(channel, str(self.dataOut.channelList))
72 72
73 73 index = self.dataOut.channelList.index(channel)
74 74 channelIndexList.append(index)
75 75
76 76 self.selectChannelsByIndex(channelIndexList)
77 77
78 78 def selectChannelsByIndex(self, channelIndexList):
79 79 """
80 80 Selects a block of data by channels according to channelIndexList
81 81
82 82 Input:
83 83 channelIndexList : plain list of channels to select, e.g. [2,3,7]
84 84
85 85 Affected:
86 86 self.dataOut.data
87 87 self.dataOut.channelIndexList
88 88 self.dataOut.nChannels
89 89 self.dataOut.m_ProcessingHeader.totalSpectra
90 90 self.dataOut.systemHeaderObj.numChannels
91 91 self.dataOut.m_ProcessingHeader.blockSize
92 92
93 93 Return:
94 94 None
95 95 """
96 96
97 97 for channelIndex in channelIndexList:
98 98 if channelIndex not in self.dataOut.channelIndexList:
99 99 print channelIndexList
100 100 raise ValueError, "The value %d in channelIndexList is not valid" %channelIndex
101 101
102 102 # nChannels = len(channelIndexList)
103 103 if self.dataOut.flagDataAsBlock:
104 104 """
105 105 If the data comes in blocks, dimension = [nChannels, nProfiles, nHeis]
106 106 """
107 107 data = self.dataOut.data[channelIndexList,:,:]
108 108 else:
109 109 data = self.dataOut.data[channelIndexList,:]
110 110
111 111 self.dataOut.data = data
112 112 self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
113 113 # self.dataOut.nChannels = nChannels
114 114
115 115 return 1
116 116
117 117 def selectHeights(self, minHei=None, maxHei=None):
118 118 """
119 119 Selects a block of data for the height values within the range
120 120 minHei <= height <= maxHei
121 121
122 122 Input:
123 123 minHei : minimum height to consider
124 124 maxHei : maximum height to consider
125 125
126 126 Affected:
127 127 Several values are changed indirectly through the selectHeightsByIndex method
128 128
129 129 Return:
130 130 1 if the method ran successfully, otherwise 0
131 131 """
132 132
133 133 if minHei == None:
134 134 minHei = self.dataOut.heightList[0]
135 135
136 136 if maxHei == None:
137 137 maxHei = self.dataOut.heightList[-1]
138 138
139 139 if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei):
140 140 raise ValueError, "some value in (%d,%d) is not valid" % (minHei, maxHei)
141 141
142 142
143 143 if (maxHei > self.dataOut.heightList[-1]):
144 144 maxHei = self.dataOut.heightList[-1]
145 145 # raise ValueError, "some value in (%d,%d) is not valid" % (minHei, maxHei)
146 146
147 147 minIndex = 0
148 148 maxIndex = 0
149 149 heights = self.dataOut.heightList
150 150
151 151 inda = numpy.where(heights >= minHei)
152 152 indb = numpy.where(heights <= maxHei)
153 153
154 154 try:
155 155 minIndex = inda[0][0]
156 156 except:
157 157 minIndex = 0
158 158
159 159 try:
160 160 maxIndex = indb[0][-1]
161 161 except:
162 162 maxIndex = len(heights)
163 163
164 164 self.selectHeightsByIndex(minIndex, maxIndex)
165 165
166 166 return 1
167 167
168 168
169 169 def selectHeightsByIndex(self, minIndex, maxIndex):
170 170 """
171 171 Selects a block of data for the height indexes within the range
172 172 minIndex <= index <= maxIndex
173 173
174 174 Input:
175 175 minIndex : minimum height index to consider
176 176 maxIndex : maximum height index to consider
177 177
178 178 Affected:
179 179 self.dataOut.data
180 180 self.dataOut.heightList
181 181
182 182 Return:
183 183 1 if the method ran successfully, otherwise 0
184 184 """
185 185
186 186 if (minIndex < 0) or (minIndex > maxIndex):
187 187 raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)
188 188
189 189 if (maxIndex >= self.dataOut.nHeights):
190 190 maxIndex = self.dataOut.nHeights
191 191 # raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)
192 192
193 193 # nHeights = maxIndex - minIndex + 1
194 194
195 195 #voltage
196 196 if self.dataOut.flagDataAsBlock:
197 197 """
198 198 If the data comes in blocks, dimension = [nChannels, nProfiles, nHeis]
199 199 """
200 200 data = self.dataOut.data[:,minIndex:maxIndex,:]
201 201 else:
202 202 data = self.dataOut.data[:,minIndex:maxIndex]
203 203
204 204 # firstHeight = self.dataOut.heightList[minIndex]
205 205
206 206 self.dataOut.data = data
207 207 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex]
208 208
209 209 if self.dataOut.nHeights <= 1:
210 210 raise ValueError, "selectHeights: Too few heights. Current number of heights is %d" %(self.dataOut.nHeights)
211 211
212 212 return 1
213 213
214 214
215 215 def filterByHeights(self, window):
216 216
217 217 deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
218 218
219 219 if window == None:
220 220 window = (self.dataOut.radarControllerHeaderObj.txA/self.dataOut.radarControllerHeaderObj.nBaud) / deltaHeight
221 221
222 222 newdelta = deltaHeight * window
223 223 r = self.dataOut.nHeights % window
224 224 newheights = (self.dataOut.nHeights-r)/window
225 225
226 226 if newheights <= 1:
227 227 raise ValueError, "filterByHeights: Too few heights. Current number of heights is %d and window is %d" %(self.dataOut.nHeights, window)
228 228
229 229 if self.dataOut.flagDataAsBlock:
230 230 """
231 231 If the data comes in blocks, dimension = [nChannels, nProfiles, nHeis]
232 232 """
233 233 buffer = self.dataOut.data[:, :, 0:self.dataOut.nHeights-r]
234 234 buffer = buffer.reshape(self.dataOut.nChannels,self.dataOut.nProfiles,self.dataOut.nHeights/window,window)
235 235 buffer = numpy.sum(buffer,3)
236 236
237 237 else:
238 238 buffer = self.dataOut.data[:,0:self.dataOut.nHeights-r]
239 239 buffer = buffer.reshape(self.dataOut.nChannels,self.dataOut.nHeights/window,window)
240 240 buffer = numpy.sum(buffer,2)
241 241
242 242 self.dataOut.data = buffer
243 243 self.dataOut.heightList = self.dataOut.heightList[0] + numpy.arange( newheights )*newdelta
244 244 self.dataOut.windowOfFilter = window
245 245
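Editor's note (not part of the original file): filterByHeights sums groups of window consecutive height samples after dropping the remainder, coarsening the height resolution by the same factor. A worked example with assumed sizes:

    import numpy
    nHeights, window = 103, 4
    profile = numpy.arange(nHeights, dtype=float)              # fake single-channel profile
    r = nHeights % window                                      # 3 trailing samples are discarded
    coarse = profile[:nHeights - r].reshape(-1, window).sum(axis=1)
    # 103 height samples -> 25 coarser bins, each the sum of 4 consecutive samples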
246 246 def setH0(self, h0, deltaHeight = None):
247 247
248 248 if not deltaHeight:
249 249 deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
250 250
251 251 nHeights = self.dataOut.nHeights
252 252
253 253 newHeiRange = h0 + numpy.arange(nHeights)*deltaHeight
254 254
255 255 self.dataOut.heightList = newHeiRange
256 256
257 257 def deFlip(self, channelList = []):
258 258
259 259 data = self.dataOut.data.copy()
260 260
261 261 if self.dataOut.flagDataAsBlock:
262 262 flip = self.flip
263 263 profileList = range(self.dataOut.nProfiles)
264 264
265 265 if not channelList:
266 266 for thisProfile in profileList:
267 267 data[:,thisProfile,:] = data[:,thisProfile,:]*flip
268 268 flip *= -1.0
269 269 else:
270 270 for thisChannel in channelList:
271 271 if thisChannel not in self.dataOut.channelList:
272 272 continue
273 273
274 274 for thisProfile in profileList:
275 275 data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
276 276 flip *= -1.0
277 277
278 278 self.flip = flip
279 279
280 280 else:
281 281 if not channelList:
282 282 data[:,:] = data[:,:]*self.flip
283 283 else:
284 284 for thisChannel in channelList:
285 285 if thisChannel not in self.dataOut.channelList:
286 286 continue
287 287
288 288 data[thisChannel,:] = data[thisChannel,:]*self.flip
289 289
290 290 self.flip *= -1.
291 291
292 292 self.dataOut.data = data
293 293
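# Editor's sketch (hypothetical calls): deFlip() multiplies successive profiles by the
# alternating factor self.flip (+1/-1) to undo a transmitter phase flip.
#
#   voltProc.deFlip()                    # apply to every channel
#   voltProc.deFlip(channelList=[0, 2])  # apply only to channels 0 and 2
#
# The running sign is kept in self.flip so that the +1/-1 alternation stays in phase
# when profiles arrive one at a time rather than as a block.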
294 294 def setRadarFrequency(self, frequency=None):
295 295
296 296 if frequency is not None:
297 297 self.dataOut.frequency = frequency
298 298
299 299 return 1
300 300
301 301 class CohInt(Operation):
302 302
303 303 isConfig = False
304 304
305 305 __profIndex = 0
306 306 __withOverapping = False
307 307
308 308 __byTime = False
309 309 __initime = None
310 310 __lastdatatime = None
311 311 __integrationtime = None
312 312
313 313 __buffer = None
314 314
315 315 __dataReady = False
316 316
317 317 n = None
318 318
319 319
320 320 def __init__(self):
321 321
322 322 Operation.__init__(self)
323 323
324 324 # self.isConfig = False
325 325
326 326 def setup(self, n=None, timeInterval=None, overlapping=False, byblock=False):
327 327 """
328 328 Set the parameters of the integration class.
329 329
330 330 Inputs:
331 331
332 332 n : Number of coherent integrations
333 333 timeInterval : Integration time in seconds. Ignored if the parameter "n" is given.
334 334 overlapping : If True, integrate over a sliding (overlapping) buffer of the last n profiles (see the usage sketch after this method).
335 335
336 336 """
337 337
338 338 self.__initime = None
339 339 self.__lastdatatime = 0
340 340 self.__buffer = None
341 341 self.__dataReady = False
342 342 self.byblock = byblock
343 343
344 344 if n is None and timeInterval is None:
345 345 raise ValueError, "n or timeInterval should be specified ..."
346 346
347 347 if n is not None:
348 348 self.n = n
349 349 self.__byTime = False
350 350 else:
351 351 self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
352 352 self.n = 9999
353 353 self.__byTime = True
354 354
355 355 if overlapping:
356 356 self.__withOverapping = True
357 357 self.__buffer = None
358 358 else:
359 359 self.__withOverapping = False
360 360 self.__buffer = 0
361 361
362 362 self.__profIndex = 0
363 363
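# Editor's sketch (hypothetical parameter values): the usual ways of driving this
# setup through CohInt.run(dataOut, **kwargs):
#
#   cohIntOp.run(dataOut, n=4)                    # sum every 4 consecutive profiles
#   cohIntOp.run(dataOut, timeInterval=0.5)       # sum profiles spanning 0.5 s
#   cohIntOp.run(dataOut, n=4, overlapping=True)  # sliding sum of the last 4 profiles
#
# When an integrated profile is ready, run() multiplies dataOut.nCohInt by n and
# replaces dataOut.data with the summed profile.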
364 364 def putData(self, data):
365 365
366 366 """
367 367 Add a profile to the __buffer and increase the __profIndex by one
368 368
369 369 """
370 370
371 371 if not self.__withOverapping:
372 372 self.__buffer += data.copy()
373 373 self.__profIndex += 1
374 374 return
375 375
376 376 #Overlapping data
377 377 nChannels, nHeis = data.shape
378 378 data = numpy.reshape(data, (1, nChannels, nHeis))
379 379
380 380 #If the buffer is empty then it takes the data value
381 if self.__buffer == None:
381 if self.__buffer is None:
382 382 self.__buffer = data
383 383 self.__profIndex += 1
384 384 return
385 385
386 386 #If the buffer length is lower than n then stack the data value
387 387 if self.__profIndex < self.n:
388 388 self.__buffer = numpy.vstack((self.__buffer, data))
389 389 self.__profIndex += 1
390 390 return
391 391
392 392 #If the buffer length is equal to n then replace the oldest buffer value with the new data
393 393 self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
394 394 self.__buffer[self.n-1] = data
395 395 self.__profIndex = self.n
396 396 return
397 397
398 398
399 399 def pushData(self):
400 400 """
401 401 Return the sum of the last profiles and the profiles used in the sum.
402 402
403 403 Affected:
404 404
405 405 self.__profIndex
406 406
407 407 """
408 408
409 409 if not self.__withOverapping:
410 410 data = self.__buffer
411 411 n = self.__profIndex
412 412
413 413 self.__buffer = 0
414 414 self.__profIndex = 0
415 415
416 416 return data, n
417 417
418 418 #Integration with Overlapping
419 419 data = numpy.sum(self.__buffer, axis=0)
420 420 n = self.__profIndex
421 421
422 422 return data, n
423 423
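# Editor's note (illustration only): with overlapping=True and n=3, putData()/pushData()
# behave as a sliding window over the incoming profiles:
#
#   profiles arriving : p0, p1, p2, p3, p4, ...
#   buffer contents   : [p0] -> [p0,p1] -> [p0,p1,p2] -> [p1,p2,p3] -> [p2,p3,p4]
#
# numpy.roll(self.__buffer, -1, axis=0) shifts the oldest profile to the last slot,
# where it is overwritten by the new one, so every pushData() after the warm-up
# returns the sum of the most recent n profiles.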
424 424 def byProfiles(self, data):
425 425
426 426 self.__dataReady = False
427 427 avgdata = None
428 428 # n = None
429 429
430 430 self.putData(data)
431 431
432 432 if self.__profIndex == self.n:
433 433
434 434 avgdata, n = self.pushData()
435 435 self.__dataReady = True
436 436
437 437 return avgdata
438 438
439 439 def byTime(self, data, datatime):
440 440
441 441 self.__dataReady = False
442 442 avgdata = None
443 443 n = None
444 444
445 445 self.putData(data)
446 446
447 447 if (datatime - self.__initime) >= self.__integrationtime:
448 448 avgdata, n = self.pushData()
449 449 self.n = n
450 450 self.__dataReady = True
451 451
452 452 return avgdata
453 453
454 454 def integrate(self, data, datatime=None):
455 455
456 456 if self.__initime is None:
457 457 self.__initime = datatime
458 458
459 459 if self.__byTime:
460 460 avgdata = self.byTime(data, datatime)
461 461 else:
462 462 avgdata = self.byProfiles(data)
463 463
464 464
465 465 deltatime = datatime - self.__lastdatatime
466 466 self.__lastdatatime = datatime
467 if avgdata == None:
467 if avgdata is None:
468 468 return None, None
469 469
470 470 avgdatatime = self.__initime
471 471
473 473
474 474 if not self.__withOverapping:
475 475 self.__initime = datatime
476 476 else:
477 477 self.__initime += deltatime
478 478
479 479 return avgdata, avgdatatime
480 480
481 481 def integrateByBlock(self, dataOut):
482 482
483 483 times = int(dataOut.data.shape[1]/self.n)
484 484 avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex)
485 485
486 486 id_min = 0
487 487 id_max = self.n
488 488
489 489 for i in range(times):
490 490 junk = dataOut.data[:,id_min:id_max,:]
491 491 avgdata[:,i,:] = junk.sum(axis=1)
492 492 id_min += self.n
493 493 id_max += self.n
494 494
495 495 timeInterval = dataOut.ippSeconds*self.n
496 496 avgdatatime = (times - 1) * timeInterval + dataOut.utctime
497 497 self.__dataReady = True
498 498 return avgdata, avgdatatime
499 499
500 500 def run(self, dataOut, **kwargs):
501 501
502 502 if not self.isConfig:
503 503 self.setup(**kwargs)
504 504 self.isConfig = True
505 505
506 506 if dataOut.flagDataAsBlock:
507 507 """
508 508 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
509 509 """
510 510 avgdata, avgdatatime = self.integrateByBlock(dataOut)
511 511 else:
512 512 avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)
513 513
514 514 # dataOut.timeInterval *= n
515 515 dataOut.flagNoData = True
516 516
517 517 if self.__dataReady:
518 518 dataOut.data = avgdata
519 519 dataOut.nCohInt *= self.n
520 520 dataOut.utctime = avgdatatime
521 521 # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
522 522 dataOut.flagNoData = False
523 523
524 524 class Decoder(Operation):
525 525
526 526 isConfig = False
527 527 __profIndex = 0
528 528
529 529 code = None
530 530
531 531 nCode = None
532 532 nBaud = None
533 533
534 534
535 535 def __init__(self):
536 536
537 537 Operation.__init__(self)
538 538
539 539 self.times = None
540 540 self.osamp = None
541 541 # self.__setValues = False
542 542 self.isConfig = False
543 543
544 544 def setup(self, code, osamp, dataOut):
545 545
546 546 self.__profIndex = 0
547 547
548 548 self.code = code
549 549
550 550 self.nCode = len(code)
551 551 self.nBaud = len(code[0])
552 552
553 553 if (osamp is not None) and (osamp > 1):
554 554 self.osamp = osamp
555 555 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
556 556 self.nBaud = self.nBaud*self.osamp
557 557
558 558 self.__nChannels = dataOut.nChannels
559 559 self.__nProfiles = dataOut.nProfiles
560 560 self.__nHeis = dataOut.nHeights
561 561
562 562 if dataOut.flagDataAsBlock:
563 563
564 564 self.ndatadec = self.__nHeis #- self.nBaud + 1
565 565
566 566 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
567 567
568 568 else:
569 569
570 570 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
571 571
572 572 __codeBuffer[:,0:self.nBaud] = self.code
573 573
574 574 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
575 575
576 576 self.ndatadec = self.__nHeis #- self.nBaud + 1
577 577
578 578 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
579 579
580 580 def convolutionInFreq(self, data):
581 581
582 582 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
583 583
584 584 fft_data = numpy.fft.fft(data, axis=1)
585 585
586 586 conv = fft_data*fft_code
587 587
588 588 data = numpy.fft.ifft(conv,axis=1)
589 589
590 590 datadec = data#[:,:]
591 591
592 592 return datadec
593 593
594 594 def convolutionInFreqOpt(self, data):
595 595
596 596 raise NotImplementedError
597 597
598 598 # fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
599 599 #
600 600 # data = cfunctions.decoder(fft_code, data)
601 601 #
602 602 # datadec = data#[:,:]
603 603 #
604 604 # return datadec
605 605
606 606 def convolutionInTime(self, data):
607 607
608 608 code = self.code[self.__profIndex]
609 609
610 610 for i in range(self.__nChannels):
611 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='same')
611 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
612 612
613 613 return self.datadecTime
614 614
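# Editor's note on the change above (worked example with hypothetical sizes):
# numpy.correlate(data, code, mode='same') centers the output, so the first ~nBaud/2
# decoded gates came from only a partial overlap of the code with the start of the
# profile (the zero padding this commit removes). With mode='full' the output has
# nHeis + nBaud - 1 lags, and slicing [self.nBaud-1:] keeps the lags where the code
# fully overlaps from height index 0 onwards, e.g. for nHeis=1000, nBaud=28:
#   len(correlate(x, code, 'full')) = 1000 + 28 - 1 = 1027
#   correlate(x, code, 'full')[27:] -> 1000 gates, the first one fully decoded.
# Note that convolutionByBlockInTime() below still uses mode='same'.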
615 615 def convolutionByBlockInTime(self, data):
616 616
617 617 repetitions = self.__nProfiles / self.nCode
618 618
619 619 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
620 620 junk = junk.flatten()
621 621 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
622 622
623 623 for i in range(self.__nChannels):
624 624 for j in range(self.__nProfiles):
625 625 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='same')
626 626
627 627 return self.datadecTime
628 628
629 629 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
630 630
631 631 if not self.isConfig:
632 632
633 if code == None:
633 if code is None:
634 634 code = dataOut.code
635 635 else:
636 636 code = numpy.array(code).reshape(nCode,nBaud)
637 637
638 638 self.setup(code, osamp, dataOut)
639 639
640 640 self.isConfig = True
641 641
642 642 if dataOut.flagDataAsBlock:
643 643 """
644 644 Decoding when the data has been read as a block
645 645 """
646 646 datadec = self.convolutionByBlockInTime(dataOut.data)
647 647
648 648 else:
649 649 """
650 650 Decoding when the data has been read profile by profile
651 651 """
652 652 if mode == 0:
653 653 datadec = self.convolutionInTime(dataOut.data)
654 654
655 655 if mode == 1:
656 656 datadec = self.convolutionInFreq(dataOut.data)
657 657
658 658 if mode == 2:
659 659 datadec = self.convolutionInFreqOpt(dataOut.data)
660 660
661 661 dataOut.code = self.code
662 662 dataOut.nCode = self.nCode
663 663 dataOut.nBaud = self.nBaud
664 664
665 665 dataOut.data = datadec
666 666
667 667 dataOut.heightList = dataOut.heightList[0:self.ndatadec]
668 668
669 669 dataOut.flagDecodeData = True #assume the data is decoded
670 670
671 671 if self.__profIndex == self.nCode-1:
672 672 self.__profIndex = 0
673 673 return 1
674 674
675 675 self.__profIndex += 1
676 676
677 677 return 1
678 678 # dataOut.flagDeflipData = True #assume the data has not been de-flipped
679 679
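# Editor's sketch (hypothetical 4-baud code, not from the source): typical calls to
# this operation when the code is not already present in dataOut:
#
#   code = [[1, 1, -1, 1],
#           [1, -1, 1, 1]]                                      # nCode=2, nBaud=4
#   decoderOp.run(dataOut, code=code, nCode=2, nBaud=4, mode=0) # time domain
#   decoderOp.run(dataOut, mode=1)                              # frequency domain, code from dataOut.code
#
# mode=0 uses convolutionInTime/convolutionByBlockInTime, mode=1 uses convolutionInFreq
# and mode=2 (convolutionInFreqOpt) is not implemented.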
680 680
681 681 class ProfileConcat(Operation):
682 682
683 683 isConfig = False
684 684 buffer = None
685 685
686 686 def __init__(self):
687 687
688 688 Operation.__init__(self)
689 689 self.profileIndex = 0
690 690
691 691 def reset(self):
692 692 self.buffer = numpy.zeros_like(self.buffer)
693 693 self.start_index = 0
694 694 self.times = 1
695 695
696 696 def setup(self, data, m, n=1):
697 697 self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0]))
698 698 self.nHeights = data.shape[1]
699 699 self.start_index = 0
700 700 self.times = 1
701 701
702 702 def concat(self, data):
703 703
704 704 self.buffer[:,self.start_index:self.start_index + self.nHeights] = data.copy()
705 705 self.start_index = self.start_index + self.nHeights
706 706
707 707 def run(self, dataOut, m):
708 708
709 709 dataOut.flagNoData = True
710 710
711 711 if not self.isConfig:
712 712 self.setup(dataOut.data, m, 1)
713 713 self.isConfig = True
714 714
715 715 if dataOut.flagDataAsBlock:
716 716
717 717 raise ValueError, "ProfileConcat can only be used when voltage has been read profile by profile, getBlock = False"
718 718
719 719 else:
720 720 self.concat(dataOut.data)
721 721 self.times += 1
722 722 if self.times > m:
723 723 dataOut.data = self.buffer
724 724 self.reset()
725 725 dataOut.flagNoData = False
726 726 # more header and dataOut properties should be updated here, e.g. the heights
727 727 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
728 728 xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m
729 729 dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight)
730 730 dataOut.ippSeconds *= m
731 731
732 732 class ProfileSelector(Operation):
733 733
734 734 profileIndex = None
735 735 # Total number of profiles
736 736 nProfiles = None
737 737
738 738 def __init__(self):
739 739
740 740 Operation.__init__(self)
741 741 self.profileIndex = 0
742 742
743 743 def incIndex(self):
744 744
745 745 self.profileIndex += 1
746 746
747 747 if self.profileIndex >= self.nProfiles:
748 748 self.profileIndex = 0
749 749
750 750 def isThisProfileInRange(self, profileIndex, minIndex, maxIndex):
751 751
752 752 if profileIndex < minIndex:
753 753 return False
754 754
755 755 if profileIndex > maxIndex:
756 756 return False
757 757
758 758 return True
759 759
760 760 def isThisProfileInList(self, profileIndex, profileList):
761 761
762 762 if profileIndex not in profileList:
763 763 return False
764 764
765 765 return True
766 766
767 767 def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None):
768 768
769 769 """
770 770 ProfileSelector:
771 771
772 772 Inputs:
773 773 profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8)
774 774
775 775 profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30)
776 776
777 777 rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256))
778 778
779 779 """
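# Editor's sketch (hypothetical values) matching the parameters documented above:
#
#   profSelOp.run(dataOut, profileList=[0, 1, 2, 7, 8])
#   profSelOp.run(dataOut, profileRangeList=(4, 30))
#   profSelOp.run(dataOut, rangeList=((4, 30), (32, 64)))  # profile-by-profile data only
#   profSelOp.run(dataOut, beam=0)                         # AMISR data only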
780 780
781 781 dataOut.flagNoData = True
782 782
783 783 if nProfiles:
784 784 self.nProfiles = nProfiles
785 785 else:
786 786 self.nProfiles = dataOut.nProfiles
787 787
788 788 if dataOut.flagDataAsBlock:
789 789 """
790 790 data dimension = [nChannels, nProfiles, nHeis]
791 791 """
792 792 if profileList is not None:
793 793 dataOut.data = dataOut.data[:,profileList,:]
794 794 dataOut.nProfiles = len(profileList)
795 795 dataOut.profileIndex = dataOut.nProfiles - 1
796 796
797 797 if profileRangeList is not None:
798 798 minIndex = profileRangeList[0]
799 799 maxIndex = profileRangeList[1]
800 800
801 801 dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:]
802 802 dataOut.nProfiles = maxIndex - minIndex + 1
803 803 dataOut.profileIndex = dataOut.nProfiles - 1
804 804
805 805 if rangeList is not None:
806 806 raise ValueError, "Profile Selector: Not implemented for rangeList yet"
807 807
808 808 dataOut.flagNoData = False
809 809
810 810 return True
811 811
812 812 else:
813 813 """
814 814 data dimension = [nChannels, nHeis]
815 815
816 816 """
817 817 if profileList is not None:
818 818
819 819 dataOut.nProfiles = len(profileList)
820 820
821 821 if self.isThisProfileInList(dataOut.profileIndex, profileList):
822 822 dataOut.flagNoData = False
823 823 dataOut.profileIndex = self.profileIndex
824 824
825 825 self.incIndex()
826 826 return True
827 827
828 828
829 829 if profileRangeList is not None:
830 830
831 831 minIndex = profileRangeList[0]
832 832 maxIndex = profileRangeList[1]
833 833
834 834 dataOut.nProfiles = maxIndex - minIndex + 1
835 835
836 836 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
837 837 dataOut.flagNoData = False
838 838 dataOut.profileIndex = self.profileIndex
839 839
840 840 self.incIndex()
841 841 return True
842 842
843 843 if rangeList is not None:
844 844
845 845 nProfiles = 0
846 846
847 847 for thisRange in rangeList:
848 848 minIndex = thisRange[0]
849 849 maxIndex = thisRange[1]
850 850
851 851 nProfiles += maxIndex - minIndex + 1
852 852
853 853 dataOut.nProfiles = nProfiles
854 854
855 855 for thisRange in rangeList:
856 856
857 857 minIndex = thisRange[0]
858 858 maxIndex = thisRange[1]
859 859
860 860 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
861 861
862 862 # print "profileIndex = ", dataOut.profileIndex
863 863
864 864 dataOut.flagNoData = False
865 865 dataOut.profileIndex = self.profileIndex
866 866
867 867 self.incIndex()
868 868 break
869 869 return True
870 870
871 871
872 872 if beam is not None: #beam is only for AMISR data
873 873 if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]):
874 874 dataOut.flagNoData = False
875 875 dataOut.profileIndex = self.profileIndex
876 876
877 877 self.incIndex()
878 878 return 1
879 879
880 880 raise ValueError, "ProfileSelector needs profileList, profileRangeList or rangeList parameter"
881 881
882 882 return 0
883 883
884 884
885 885
886 886 class Reshaper(Operation):
887 887
888 888 def __init__(self):
889 889
890 890 Operation.__init__(self)
891 891 self.updateNewHeights = True
892 892
893 893 def run(self, dataOut, shape):
894 894
895 895 if not dataOut.flagDataAsBlock:
896 896 raise ValueError, "Reshaper can only be used when voltage has been read as a block, getBlock = True"
897 897
898 898 if len(shape) != 3:
899 899 raise ValueError, "shape len should be equal to 3, (nChannels, nProfiles, nHeis)"
900 900
901 901 shape_tuple = tuple(shape)
902 902 dataOut.data = numpy.reshape(dataOut.data, shape_tuple)
903 903 dataOut.flagNoData = False
904 904
905 905 if self.updateNewHeights:
906 906
907 907 old_nheights = dataOut.nHeights
908 908 new_nheights = dataOut.data.shape[2]
909 909 factor = 1.0*new_nheights / old_nheights
910 910
911 911 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
912 912
913 913 xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * factor
914 914
915 915 dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight)
916 916
917 917 dataOut.nProfiles = dataOut.data.shape[1]
918 918
919 919 dataOut.ippSeconds *= factor
920 920
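# Editor's sketch (hypothetical shapes): Reshaper regroups block data, e.g.
#
#   reshaperOp.run(dataOut, shape=(1, 100, 1000))
#
# turns data of shape (1, 200, 500) into (1, 100, 1000). heightList and ippSeconds are
# rescaled by factor = new_nheights / old_nheights (here 2.0) and nProfiles is updated
# to the new second dimension.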
921 921 import collections
922 922 from scipy.stats import mode
923 923
924 924 class Synchronize(Operation):
925 925
926 926 isConfig = False
927 927 __profIndex = 0
928 928
929 929 def __init__(self):
930 930
931 931 Operation.__init__(self)
932 932 # self.isConfig = False
933 933 self.__powBuffer = None
934 934 self.__startIndex = 0
935 935 self.__pulseFound = False
936 936
937 937 def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
938 938
939 939 #Read data
940 940
941 941 powerdB = dataOut.getPower(channel = channel)
942 942 noisedB = dataOut.getNoise(channel = channel)[0]
943 943
944 944 self.__powBuffer.extend(powerdB.flatten())
945 945
946 946 dataArray = numpy.array(self.__powBuffer)
947 947
948 948 filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
949 949
950 950 maxValue = numpy.nanmax(filteredPower)
951 951
952 952 if maxValue < noisedB + 10:
953 953 #No transmission pulse was found
954 954 return None
955 955
956 956 maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
957 957
958 958 if len(maxValuesIndex) < 2:
959 959 #Only a single one-baud transmission pulse was found, waiting for the next TX
960 960 return None
961 961
962 962 phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
963 963
964 964 #Select only values spaced nSamples apart
965 965 pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
966 966
967 967 if len(pulseIndex) < 2:
968 968 #Only one transmission pulse wider than 1 was found
969 969 return None
970 970
971 971 spacing = pulseIndex[1:] - pulseIndex[:-1]
972 972
973 973 #remove signals spaced less than 10 units or samples apart
974 974 #(IPPs shorter than 10 units should not exist)
975 975
976 976 realIndex = numpy.where(spacing > 10 )[0]
977 977
978 978 if len(realIndex) < 2:
979 979 #Only one transmission pulse wider than 1 was found
980 980 return None
981 981
982 982 #Remove wide pulses (keeps only the spacing between IPPs)
983 983 realPulseIndex = pulseIndex[realIndex]
984 984
985 985 period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
986 986
987 987 print "IPP = %d samples" %period
988 988
989 989 self.__newNSamples = dataOut.nHeights #int(period)
990 990 self.__startIndex = int(realPulseIndex[0])
991 991
992 992 return 1
993 993
994 994
995 995 def setup(self, nSamples, nChannels, buffer_size = 4):
996 996
997 997 self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
998 998 maxlen = buffer_size*nSamples)
999 999
1000 1000 bufferList = []
1001 1001
1002 1002 for i in range(nChannels):
1003 1003 bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
1004 1004 maxlen = buffer_size*nSamples)
1005 1005
1006 1006 bufferList.append(bufferByChannel)
1007 1007
1008 1008 self.__nSamples = nSamples
1009 1009 self.__nChannels = nChannels
1010 1010 self.__bufferList = bufferList
1011 1011
1012 1012 def run(self, dataOut, channel = 0):
1013 1013
1014 1014 if not self.isConfig:
1015 1015 nSamples = dataOut.nHeights
1016 1016 nChannels = dataOut.nChannels
1017 1017 self.setup(nSamples, nChannels)
1018 1018 self.isConfig = True
1019 1019
1020 1020 #Append new data to internal buffer
1021 1021 for thisChannel in range(self.__nChannels):
1022 1022 bufferByChannel = self.__bufferList[thisChannel]
1023 1023 bufferByChannel.extend(dataOut.data[thisChannel])
1024 1024
1025 1025 if self.__pulseFound:
1026 1026 self.__startIndex -= self.__nSamples
1027 1027
1028 1028 #Finding Tx Pulse
1029 1029 if not self.__pulseFound:
1030 1030 indexFound = self.__findTxPulse(dataOut, channel)
1031 1031
1032 1032 if indexFound is None:
1033 1033 dataOut.flagNoData = True
1034 1034 return
1035 1035
1036 1036 self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
1037 1037 self.__pulseFound = True
1038 1038 self.__startIndex = indexFound
1039 1039
1040 1040 #If pulse was found ...
1041 1041 for thisChannel in range(self.__nChannels):
1042 1042 bufferByChannel = self.__bufferList[thisChannel]
1043 1043 #print self.__startIndex
1044 1044 x = numpy.array(bufferByChannel)
1045 1045 self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
1046 1046
1047 1047 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1048 1048 dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
1049 1049 # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
1050 1050
1051 1051 dataOut.data = self.__arrayBuffer
1052 1052
1053 1053 self.__startIndex += self.__newNSamples
1054 1054
1055 1055 return No newline at end of file