##// END OF EJS Templates
Se agrega el metodo waitNewBlock para lectura online
Daniel Valdez -
r216:b2a8f33edd68
parent child
Show More
@@ -1,2479 +1,2513
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from jrodata import *
15 15 from jroheaderIO import *
16 16 from jroprocessing import *
17 17
def isNumber(str):
    """
    Check whether a string can be converted to a number.

    Input:
        str : value to test for numeric convertibility

    Return:
        True  : if the value can be converted with float()
        False : otherwise
    """
    try:
        float( str )
        return True
    # only conversion failures mean "not a number"; a bare except would
    # also swallow KeyboardInterrupt/SystemExit
    except (TypeError, ValueError):
        return False
36 36
def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca data file contains data inside the given
    date range.

    Inputs:
        filename       : full path of a data file in Jicamarca format (.r)

        startUTSeconds : start of the selected range, given in seconds
                         since 01/01/1970.
        endUTSeconds   : end of the selected range, given in seconds
                         since 01/01/1970.

    Return:
        Boolean : True (1) if the file header timestamp falls inside
                  [startUTSeconds, endUTSeconds), False (0) otherwise.

    Exceptions:
        IOError if the file does not exist or cannot be opened.
        The header-read failure is reported and treated as "not in range".
    """
    basicHeaderObj = BasicHeader()

    try:
        fp = open(filename,'rb')
    except:
        raise IOError, "The file %s can't be opened" %(filename)

    # sts is falsy when the basic header could not be parsed
    sts = basicHeaderObj.read(fp)
    fp.close()

    if not(sts):
        print "Skipping the file %s because it has not a valid header" %(filename)
        return 0

    # the header utc timestamp must lie inside [startUTSeconds, endUTSeconds)
    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
76 76
def getlastFileFromPath(path, ext):
    """
    Filter the files found in *path*, keeping only those matching the
    Jicamarca naming convention "xYYYYDDDSSS.ext", and return the last one
    in case-insensitive alphabetical order.

    Input:
        path : folder containing the data files
        ext  : extension of the files (case-insensitive match)

    Return:
        The name (without path) of the last valid file, or None when the
        folder holds no valid file.
    """
    validFilelist = []
    fileList = os.listdir(path)

    # expected name layout (character positions):
    # 0 1234 567 89A BCDE
    # H YYYY DDD SSS .ext
    for thisFile in fileList:
        try:
            int(thisFile[1:5])  # year field must be numeric
            int(thisFile[5:8])  # day-of-year field must be numeric
        # narrow except: only a failed int() conversion disqualifies a name
        except ValueError:
            continue

        if os.path.splitext(thisFile)[-1].upper() != ext.upper():
            continue

        validFilelist.append(thisFile)

    if validFilelist:
        validFilelist = sorted(validFilelist, key=str.lower)
        return validFilelist[-1]

    return None
111 111
def checkForRealPath(path, year, doy, set, ext):
    """
    Because Linux is case sensitive, try every upper/lower-case combination
    of the folder prefix and the file prefix to locate an existing data file.

    Example:
        if the real file is .../.../D2009307/P2009307367.ext the function
        tries the combinations
            .../.../x2009307/y2009307367.ext
            .../.../x2009307/Y2009307367.ext
            .../.../X2009307/y2009307367.ext
            .../.../X2009307/Y2009307367.ext
        where the last one matches the wanted file.

    Return:
        (filepath, filename) of the first existing combination;
        (None, filename) when no combination exists, filename being the
        last (upper-case) combination tried;
        (None, None) for an unknown extension.
    """
    filename = None

    if ext.lower() == ".r":         # voltage data
        dirPrefixes = "dD"
        filePrefixes = "dD"
    elif ext.lower() == ".pdata":   # spectra data
        dirPrefixes = "dD"
        filePrefixes = "pP"
    else:
        return None, filename

    # scan both case combinations of the folder and file prefixes
    for dirPrefix in dirPrefixes:
        for filePrefix in filePrefixes:
            doypath = "%s%04d%03d" % (dirPrefix, year, doy)                    # folder xYYYYDDD
            filename = "%s%04d%03d%03d%s" % (filePrefix, year, doy, set, ext)  # file xYYYYDDDSSS.ext
            filepath = os.path.join(path, doypath, filename)
            if os.path.exists(filepath):  # first existing combination wins
                return filepath, filename

    return None, filename
161 161
class JRODataIO:
    """
    Common state shared by the JRO data readers and writers: the four
    header objects, current-file bookkeeping and the output data container.
    """

    # speed of light in m/s, used to convert the radar IPP into seconds
    c = 3E8

    isConfig = False

    basicHeaderObj = BasicHeader()

    systemHeaderObj = SystemHeader()

    radarControllerHeaderObj = RadarControllerHeader()

    processingHeaderObj = ProcessingHeader()

    # 1 when operating in online (real-time) mode, 0 for offline
    online = 0

    # numpy dtype of the raw samples, derived from the processing header
    dtype = None

    pathList = []

    filenameList = []

    filename = None

    ext = None

    # 1 right after a new file has been opened
    flagIsNewFile = 1

    # set to 1 when the time gap between blocks exceeds maxTimeStep
    flagTimeBlock = 0

    flagIsNewBlock = 0

    # file object of the currently open data file
    fp = None

    firstHeaderSize = 0

    # size in bytes of the basic header preceding every data block
    basicHeaderSize = 24

    versionFile = 1103

    fileSize = None

    ippSeconds = None

    # expected file size computed from the processing header fields
    fileSizeByHeader = None

    fileIndex = None

    profileIndex = None

    blockIndex = None

    nTotalBlocks = None

    # maximum allowed gap (seconds) between consecutive blocks
    maxTimeStep = 30

    lastUTTime = None

    datablock = None

    dataOut = None

    blocksize = None

    def __init__(self):

        # abstract: subclasses must provide their own constructor
        raise ValueError, "Not implemented"

    def run(self):

        # abstract: subclasses must implement the processing entry point
        raise ValueError, "Not implemented"

    def getOutput(self):

        # return the current output data object
        return self.dataOut
237 237
class JRODataReader(JRODataIO, ProcessingUnit):
    """
    Base class for the JRO raw-data readers.  It locates the data files
    (offline by date range, or online by polling the newest day folder),
    opens them sequentially and reads them block by block.
    """

    nReadBlocks = 0

    delay = 10 #number of seconds waiting a new file

    nTries = 3 #quantity tries

    nFiles = 3 #number of files for searching

    flagNoMoreFiles = 0

    def __init__(self):

        """
        Abstract constructor: must be implemented by the subclasses.
        """

        raise ValueError, "This method has not been implemented"


    def createObjByDefault(self):
        """
        Abstract method: must be implemented by the subclasses.
        """
        raise ValueError, "This method has not been implemented"

    def getBlockDimension(self):

        # abstract: subclasses derive the block shape from the headers
        raise ValueError, "No implemented"

    def __searchFilesOffLine(self,
                            path,
                            startDate,
                            endDate,
                            startTime=datetime.time(0,0,0),
                            endTime=datetime.time(23,59,59),
                            set=None,
                            expLabel="",
                            ext=".r"):
        """
        Search under *path* the day folders inside [startDate, endDate] and,
        within each of them, the files whose header timestamp falls inside
        [startTime, endTime] of that day.

        Return:
            (pathList, filenameList) with the matching folders and files,
            or (None, None) when nothing is found.  Also sets
            self.filenameList on success.
        """
        dirList = []
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path,thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None

        pathList = []
        dateList = []

        thisDate = startDate

        while(thisDate <= endDate):
            year = thisDate.timetuple().tm_year
            doy = thisDate.timetuple().tm_yday

            # day folders are named xYYYYDDD, with any single-character prefix
            match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
            if len(match) == 0:
                thisDate += datetime.timedelta(1)
                continue

            pathList.append(os.path.join(path,match[0],expLabel))
            dateList.append(thisDate)
            thisDate += datetime.timedelta(1)

        filenameList = []
        for index in range(len(pathList)):

            thisPath = pathList[index]
            fileList = glob.glob1(thisPath, "*%s" %ext)
            fileList.sort()

            # keep only the files with data inside the requested time window
            thisDate = dateList[index]
            startDT = datetime.datetime.combine(thisDate, startTime)
            endDT = datetime.datetime.combine(thisDate, endTime)

            startUtSeconds = time.mktime(startDT.timetuple())
            endUtSeconds = time.mktime(endDT.timetuple())

            for file in fileList:

                filename = os.path.join(thisPath,file)

                if isThisFileinRange(filename, startUtSeconds, endUtSeconds):
                    filenameList.append(filename)

        if not(filenameList):
            return None, None

        self.filenameList = filenameList

        return pathList, filenameList

    def __searchFilesOnLine(self, path, startDate=None, endDate=None, startTime=None, endTime=None, expLabel = "", ext = None):

        """
        Find the last file of the last folder (optionally constrained by
        startDate/endDate) and return it together with its date fields.

        Input:
            path      : folder containing the day sub-folders with data files

            startDate : start date; folders whose day is earlier are rejected
                        (datetime.date object)

            endDate   : end date; folders whose day is later are rejected
                        (datetime.date object)

            startTime : start time (datetime.time object)

            endTime   : end time (datetime.time object)

            expLabel  : sub-experiment name (subfolder)

            ext       : file extension

        Return:
            directory : folder where the file was found
            filename  : last file of that folder
            year      : year of the file
            doy       : day of year of the file
            set       : set number of the file


        """
        dirList = []
        pathList = []
        directory = None

        # keep only the directory entries
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path, thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None, None, None, None

        dirList = sorted( dirList, key=str.lower )

        if startDate:
            startDateTime = datetime.datetime.combine(startDate, startTime)
            thisDateTime = startDateTime
            if endDate == None: endDateTime = startDateTime
            else: endDateTime = datetime.datetime.combine(endDate, endTime)

            while(thisDateTime <= endDateTime):
                year = thisDateTime.timetuple().tm_year
                doy = thisDateTime.timetuple().tm_yday

                match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
                if len(match) == 0:
                    thisDateTime += datetime.timedelta(1)
                    continue

                pathList.append(os.path.join(path,match[0], expLabel))
                thisDateTime += datetime.timedelta(1)

            if not(pathList):
                print "\tNo files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime())
                return None, None, None, None, None

            directory = pathList[0]

        else:
            # no date filter: take the most recent folder
            directory = dirList[-1]
            directory = os.path.join(path,directory)

        filename = getlastFileFromPath(directory, ext)

        if not(filename):
            return None, None, None, None, None

        if not(self.__verifyFile(os.path.join(directory, filename))):
            return None, None, None, None, None

        # decode year/doy/set from the xYYYYDDDSSS.ext file name
        year = int( filename[1:5] )
        doy  = int( filename[5:8] )
        set  = int( filename[8:11] )

        return directory, filename, year, doy, set



    def __setNextFileOffline(self):
        """
        Open the next readable file from self.filenameList.

        Return:
            1 : a file was opened; 0 : no more files (flagNoMoreFiles set).
        """
        idFile = self.fileIndex

        while (True):
            idFile += 1
            if not(idFile < len(self.filenameList)):
                self.flagNoMoreFiles = 1
                print "No more Files"
                return 0

            filename = self.filenameList[idFile]

            # skip truncated/unreadable files
            if not(self.__verifyFile(filename)):
                continue

            fileSize = os.path.getsize(filename)
            fp = open(filename,'rb')
            break

        self.flagIsNewFile = 1
        self.fileIndex = idFile
        self.filename = filename
        self.fileSize = fileSize
        self.fp = fp

        print "Setting the file: %s"%self.filename

        return 1

    def __setNextFileOnline(self):
        """
        Look for the next file with enough data to be read inside the current
        folder; when no valid file is found, wait a fixed delay and retry
        over the next n candidate files.

        Affected:
            self.flagIsNewFile
            self.filename
            self.fileSize
            self.fp
            self.set
            self.flagNoMoreFiles

        Return:
            0 : no valid next file could be found
            1 : the file was opened successfully and is ready to be read

        Exceptions:
            When a given file cannot be opened
        """
        nFiles = 0
        fileOk_flag = False
        firstTime_flag = True

        self.set += 1

        # look for the first available file
        file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
        if file:
            if self.__verifyFile(file, False):
                fileOk_flag = True

        # if no file was found, wait and search again
        if not(fileOk_flag):
            for nFiles in range(self.nFiles+1): # try the next self.nFiles+1 candidate files

                if firstTime_flag: # on the first candidate retry self.nTries times
                    tries = self.nTries
                else:
                    tries = 1 # afterwards only once per candidate

                for nTries in range( tries ):
                    if firstTime_flag:
                        print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
                        time.sleep( self.delay )
                    else:
                        print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)

                    file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
                    if file:
                        if self.__verifyFile(file):
                            fileOk_flag = True
                            break

                if fileOk_flag:
                    break

                firstTime_flag = False

                print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
                self.set += 1

                if nFiles == (self.nFiles-1): # still missing: move on to the next day's folder
                    self.set = 0
                    self.doy += 1

        if fileOk_flag:
            self.fileSize = os.path.getsize( file )
            self.filename = file
            self.flagIsNewFile = 1
            if self.fp != None: self.fp.close()
            self.fp = open(file, 'rb')
            self.flagNoMoreFiles = 0
            print 'Setting the file: %s' % file
        else:
            self.fileSize = 0
            self.filename = None
            self.flagIsNewFile = 0
            self.fp = None
            self.flagNoMoreFiles = 1
            print 'No more Files'

        return fileOk_flag


    def setNextFile(self):
        """
        Close the current file and open the next one (online or offline).

        Return:
            1 on success, 0 when no further file is available.
        """
        if self.fp != None:
            self.fp.close()

        if self.online:
            newFile = self.__setNextFileOnline()
        else:
            newFile = self.__setNextFileOffline()

        if not(newFile):
            return 0

        self.__readFirstHeader()
        self.nReadBlocks = 0
        return 1

    def __waitNewBlock(self):
        """
        Online mode only: while the current file has not yet delivered all
        its blocks, wait (up to self.nTries times, self.delay seconds each)
        for the file on disk to grow enough to hold the next block.

        Return:
            1 : a new block arrived and its basic header was read
            0 : not online, file already complete, or no new data arrived
        """
        if not self.online:
            return 0

        if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):
            return 0

        currentPointer = self.fp.tell()

        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        for nTries in range( self.nTries ):

            # reopen the file to refresh its size, then restore the read position
            self.fp.close()
            self.fp = open( self.filename, 'rb' )
            self.fp.seek( currentPointer )

            self.fileSize = os.path.getsize( self.filename )
            currentSize = self.fileSize - currentPointer

            if ( currentSize >= neededSize ):
                self.__rdBasicHeader()
                return 1

            print "\tWaiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries+1)
            time.sleep( self.delay )


        return 0

    def __setNewBlock(self):
        """
        Position the reader on the next block: same file when enough data
        remains (or arrives while online), otherwise the next file.

        Return:
            1 : a block is ready to be read; 0 : nothing more to read.
        """
        if self.fp == None:
            return 0

        if self.flagIsNewFile:
            return 1

        self.lastUTTime = self.basicHeaderObj.utc
        currentSize = self.fileSize - self.fp.tell()
        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        if (currentSize >= neededSize):
            self.__rdBasicHeader()
            return 1

        # online: the file may still be growing, wait for the next block
        if self.__waitNewBlock():
            return 1

        if not(self.setNextFile()):
            return 0

        deltaTime = self.basicHeaderObj.utc - self.lastUTTime #

        self.flagTimeBlock = 0

        # flag a data gap when the jump between blocks exceeds maxTimeStep
        if deltaTime > self.maxTimeStep:
            self.flagTimeBlock = 1

        return 1


    def readNextBlock(self):
        # advance to the next block and read it; 0 when no block remains
        if not(self.__setNewBlock()):
            return 0

        if not(self.readBlock()):
            return 0

        return 1

    def __rdProcessingHeader(self, fp=None):
        # read the processing header from fp (defaults to the current file)
        if fp == None:
            fp = self.fp

        self.processingHeaderObj.read(fp)

    def __rdRadarControllerHeader(self, fp=None):
        # read the radar-controller header from fp (defaults to the current file)
        if fp == None:
            fp = self.fp

        self.radarControllerHeaderObj.read(fp)

    def __rdSystemHeader(self, fp=None):
        # read the system header from fp (defaults to the current file)
        if fp == None:
            fp = self.fp

        self.systemHeaderObj.read(fp)

    def __rdBasicHeader(self, fp=None):
        # read the basic header from fp (defaults to the current file)
        if fp == None:
            fp = self.fp

        self.basicHeaderObj.read(fp)


    def __readFirstHeader(self):
        """
        Read the four headers at the start of a new file and derive the
        sample dtype, the IPP in seconds and the expected file size.
        """
        self.__rdBasicHeader()
        self.__rdSystemHeader()
        self.__rdRadarControllerHeader()
        self.__rdProcessingHeader()

        self.firstHeaderSize = self.basicHeaderObj.size

        # decode the sample data type from the processing flags bitmask
        datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
        if datatype == 0:
            datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
        elif datatype == 1:
            datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
        elif datatype == 2:
            datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
        elif datatype == 3:
            datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
        elif datatype == 4:
            datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
        elif datatype == 5:
            datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
        else:
            raise ValueError, 'Data type was not defined'

        self.dtype = datatype_str
        # ipp is stored in km; 2*1000*ipp/c converts it to seconds (round trip)
        self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
        self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
#        self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
#        self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
        self.getBlockDimension()


    def __verifyFile(self, filename, msgFlag=True):
        """
        Check that *filename* can be opened and holds at least one complete
        data block (headers included).

        Inputs:
            filename : full path of the file to verify
            msgFlag  : when True, print diagnostics for rejected files

        Return:
            True when the file is usable, False otherwise.
        """
        msg = None
        try:
            fp = open(filename, 'rb')
            currentPosition = fp.tell()
        except:
            if msgFlag:
                print "The file %s can't be opened" % (filename)
            return False

        neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize

        if neededSize == 0:
            # headers not loaded yet: parse them from this file to learn the block size
            basicHeaderObj = BasicHeader()
            systemHeaderObj = SystemHeader()
            radarControllerHeaderObj = RadarControllerHeader()
            processingHeaderObj = ProcessingHeader()

            try:
                if not( basicHeaderObj.read(fp) ): raise ValueError
                if not( systemHeaderObj.read(fp) ): raise ValueError
                if not( radarControllerHeaderObj.read(fp) ): raise ValueError
                if not( processingHeaderObj.read(fp) ): raise ValueError
                data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))

                neededSize = processingHeaderObj.blockSize + basicHeaderObj.size

            except:
                if msgFlag:
                    print "\tThe file %s is empty or it hasn't enough data" % filename

                fp.close()
                return False
        else:
            msg = "\tSkipping the file %s due to it hasn't enough data" %filename

        fp.close()
        fileSize = os.path.getsize(filename)
        currentSize = fileSize - currentPosition
        if currentSize < neededSize:
            if msgFlag and (msg != None):
                print msg #print"\tSkipping the file %s due to it hasn't enough data" %filename
            return False

        return True

    def setup(self,
                path=None,
                startDate=None,
                endDate=None,
                startTime=datetime.time(0,0,0),
                endTime=datetime.time(23,59,59),
                set=0,
                expLabel = "",
                ext = None,
                online = False,
                delay = 60):
        """
        Configure the reader: find the data files (online or offline mode)
        and open the first one.

        Inputs:
            path      : root folder with the day sub-folders
            startDate, endDate, startTime, endTime : selection range (offline)
            set       : initial file set number
            expLabel  : sub-experiment name (subfolder)
            ext       : file extension (defaults to the class attribute)
            online    : True to poll for files being written in real time
            delay     : seconds to wait between online retries

        Return:
            self.dataOut on success; exits the process when no file is found
            in offline mode.
        """
        if path == None:
            raise ValueError, "The path is not valid"

        if ext == None:
            ext = self.ext

        if online:
            print "Searching files in online mode..."
            doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext)

            if not(doypath):
                for nTries in range( self.nTries ):
                    print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
                    time.sleep( self.delay )
                    doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext)
                    if doypath:
                        break

                if not(doypath):
                    print "There 'isn't valied files in %s" % path
                    return None

            self.year = year
            self.doy = doy
            self.set = set - 1
            self.path = path

        else:
            print "Searching files in offline mode ..."
            pathList, filenameList = self.__searchFilesOffLine(path, startDate, endDate, startTime, endTime, set, expLabel, ext)

            if not(pathList):
                print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
                                                                      datetime.datetime.combine(startDate,startTime).ctime(),
                                                                      datetime.datetime.combine(endDate,endTime).ctime())

                sys.exit(-1)


        self.fileIndex = -1
        # NOTE(review): in online mode pathList/filenameList are never bound
        # as locals, so the two assignments below raise NameError — confirm
        # whether online mode is expected to reach this point.
        self.pathList = pathList
        self.filenameList = filenameList

        self.online = online
        self.delay = delay
        ext = ext.lower()
        self.ext = ext

        if not(self.setNextFile()):
            if (startDate!=None) and (endDate!=None):
                print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
            elif startDate != None:
                print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
            else:
                print "No files"

            sys.exit(-1)

#        self.updateDataHeader()

        return self.dataOut

    # NOTE(review): the next three abstract methods are missing the 'self'
    # parameter, so calling them on an instance raises TypeError instead of
    # the intended ValueError — confirm and fix upstream.
    def getData():

        raise ValueError, "This method has not been implemented"

    def hasNotDataInBuffer():

        raise ValueError, "This method has not been implemented"

    def readBlock():

        raise ValueError, "This method has not been implemented"

    def isEndProcess(self):

        # True when every available file has been consumed
        return self.flagNoMoreFiles

    def printReadBlocks(self):

        print "Number of read blocks per file %04d" %self.nReadBlocks

    def printTotalBlocks(self):

        print "Number of read blocks %04d" %self.nTotalBlocks

    def run(self, **kwargs):

        # lazy one-time setup, then deliver the next data unit
        if not(self.isConfig):

            #            self.dataOut = dataOut
            self.setup(**kwargs)
            self.isConfig = True

        self.getData()
804 838
class JRODataWriter(JRODataIO, Operation):

    """
    Write processed data to files (.r or .pdata).  Data is always written
    block by block.
    """

    blockIndex = 0

    path = None

    setFile = None

    profilesPerBlock = None

    blocksPerFile = None

    nWriteBlocks = 0

    def __init__(self, dataOut=None):
        # abstract: subclasses must provide their own constructor
        raise ValueError, "Not implemented"


    def hasAllDataInBuffer(self):
        # abstract: True when a whole block is buffered and ready to write
        raise ValueError, "Not implemented"


    def setBlockDimension(self):
        # abstract: subclasses compute the output block shape
        raise ValueError, "Not implemented"


    def writeBlock(self):
        # abstract: subclasses serialize one data block to self.fp
        raise ValueError, "No implemented"


    def putData(self):
        # abstract: subclasses accumulate incoming data into the buffer
        raise ValueError, "No implemented"

    def getDataHeader(self):
        """
        Get a copy of the First Header.

        Affected:

            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        raise ValueError, "No implemented"

    def getBasicHeader(self):
        """
        Fill the basic header fields from the current output data
        (timestamp split into whole seconds and milliseconds).
        """
        self.basicHeaderObj.size = self.basicHeaderSize #bytes
        self.basicHeaderObj.version = self.versionFile
        self.basicHeaderObj.dataBlock = self.nTotalBlocks

        utc = numpy.floor(self.dataOut.utctime)
        milisecond = (self.dataOut.utctime - utc)* 1000.0

        self.basicHeaderObj.utc = utc
        self.basicHeaderObj.miliSecond = milisecond
        self.basicHeaderObj.timeZone = 0
        self.basicHeaderObj.dstFlag = 0
        self.basicHeaderObj.errorCount = 0

    def __writeFirstHeader(self):
        """
        Write the first header of the file, i.e. the Basic header followed by
        the Long header (SystemHeader, RadarControllerHeader, ProcessingHeader).

        Affected:
            __dataType

        Return:
            None
        """

        # the basic header size field covers the long header as well
        sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
        self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader

        self.basicHeaderObj.write(self.fp)
        self.systemHeaderObj.write(self.fp)
        self.radarControllerHeaderObj.write(self.fp)
        self.processingHeaderObj.write(self.fp)

        self.dtype = self.dataOut.dtype

    def __setNewBlock(self):
        """
        If this is a new file write the First Header; otherwise write only
        the Basic Header of the next block, rolling over to a new file when
        the current one is full.

        Return:
            0 : nothing could be written
            1 : the Basic or the First Header was written
        """
        if self.fp == None:
            self.setNextFile()

        if self.flagIsNewFile:
            return 1

        if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
            self.basicHeaderObj.write(self.fp)
            return 1

        if not( self.setNextFile() ):
            return 0

        return 1


    def writeNextBlock(self):
        """
        Select the next data block and write it to the file.

        Return:
            0 : the data block could not be written
            1 : the data block was written
        """
        if not( self.__setNewBlock() ):
            return 0

        self.writeBlock()

        return 1

    def setNextFile(self):
        """
        Determine and open the next file to be written.

        Affected:
            self.filename
            self.subfolder
            self.fp
            self.setFile
            self.flagIsNewFile

        Return:
            0 : the file cannot be written
            1 : the file is ready to be written
        """
        ext = self.ext
        path = self.path

        if self.fp != None:
            self.fp.close()

        timeTuple = time.localtime( self.dataOut.dataUtcTime)
        subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)

        doypath = os.path.join( path, subfolder )
        if not( os.path.exists(doypath) ):
            os.mkdir(doypath)
            self.setFile = -1 #initialize the set counter
        else:
            filesList = os.listdir( doypath )
            if len( filesList ) > 0:
                filesList = sorted( filesList, key=str.lower )
                filen = filesList[-1]
                # the filename is expected to follow this layout
                # 0 1234 567 89A BCDE (hex)
                # x YYYY DDD SSS .ext
                if isNumber( filen[8:11] ):
                    self.setFile = int( filen[8:11] ) #continue from the last file's set number
                else:
                    self.setFile = -1
            else:
                self.setFile = -1 #initialize the set counter

        setFile = self.setFile
        setFile += 1

        # build the new file name xYYYYDDDSSS.ext
        file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
                                        timeTuple.tm_year,
                                        timeTuple.tm_yday,
                                        setFile,
                                        ext )

        filename = os.path.join( path, subfolder, file )

        fp = open( filename,'wb' )

        self.blockIndex = 0

        # save the new-file attributes
        self.filename = filename
        self.subfolder = subfolder
        self.fp = fp
        self.setFile = setFile
        self.flagIsNewFile = 1

        self.getDataHeader()

        print 'Writing the file: %s'%self.filename

        self.__writeFirstHeader()

        return 1

    def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
        """
        Configure the output format and write the First Header.

        Inputs:
            path   : destination path where the files will be created
            format : format in which a file will be saved
            set    : set number of the file

        Return:
            0 : the setup failed
            1 : the setup succeeded
        """

        if ext == None:
            ext = self.ext

        ext = ext.lower()

        self.ext = ext

        self.path = path

        self.setFile = set - 1

        self.blocksPerFile = blocksPerFile

        self.profilesPerBlock = profilesPerBlock

        self.dataOut = dataOut

        if not(self.setNextFile()):
            print "There isn't a next file"
            return 0

        self.setBlockDimension()

        return 1

    def run(self, dataOut, **kwargs):

        # lazy one-time setup, then push the incoming data into the writer
        if not(self.isConfig):

            self.setup(dataOut, **kwargs)
            self.isConfig = True

        self.putData()
1057 1091
1058 1092 class VoltageReader(JRODataReader):
1059 1093 """
1060 1094 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1061 1095 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1062 1096 perfiles*alturas*canales) son almacenados en la variable "buffer".
1063 1097
1064 1098 perfiles * alturas * canales
1065 1099
1066 1100 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1067 1101 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1068 1102 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1069 1103 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1070 1104
1071 1105 Example:
1072 1106
1073 1107 dpath = "/home/myuser/data"
1074 1108
1075 1109 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1076 1110
1077 1111 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1078 1112
1079 1113 readerObj = VoltageReader()
1080 1114
1081 1115 readerObj.setup(dpath, startTime, endTime)
1082 1116
1083 1117 while(True):
1084 1118
1085 1119 #to get one profile
1086 1120 profile = readerObj.getData()
1087 1121
1088 1122 #print the profile
1089 1123 print profile
1090 1124
1091 1125 #If you want to see all datablock
1092 1126 print readerObj.datablock
1093 1127
1094 1128 if readerObj.flagNoMoreFiles:
1095 1129 break
1096 1130
1097 1131 """
1098 1132
1099 1133 ext = ".r"
1100 1134
1101 1135 optchar = "D"
1102 1136 dataOut = None
1103 1137
1104 1138
1105 1139 def __init__(self):
1106 1140 """
1107 1141 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1108 1142
1109 1143 Input:
1110 1144 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
1111 1145 almacenar un perfil de datos cada vez que se haga un requerimiento
1112 1146 (getData). El perfil sera obtenido a partir del buffer de datos,
1113 1147 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1114 1148 bloque de datos.
1115 1149 Si este parametro no es pasado se creara uno internamente.
1116 1150
1117 1151 Variables afectadas:
1118 1152 self.dataOut
1119 1153
1120 1154 Return:
1121 1155 None
1122 1156 """
1123 1157
1124 1158 self.isConfig = False
1125 1159
1126 1160 self.datablock = None
1127 1161
1128 1162 self.utc = 0
1129 1163
1130 1164 self.ext = ".r"
1131 1165
1132 1166 self.optchar = "D"
1133 1167
1134 1168 self.basicHeaderObj = BasicHeader()
1135 1169
1136 1170 self.systemHeaderObj = SystemHeader()
1137 1171
1138 1172 self.radarControllerHeaderObj = RadarControllerHeader()
1139 1173
1140 1174 self.processingHeaderObj = ProcessingHeader()
1141 1175
1142 1176 self.online = 0
1143 1177
1144 1178 self.fp = None
1145 1179
1146 1180 self.idFile = None
1147 1181
1148 1182 self.dtype = None
1149 1183
1150 1184 self.fileSizeByHeader = None
1151 1185
1152 1186 self.filenameList = []
1153 1187
1154 1188 self.filename = None
1155 1189
1156 1190 self.fileSize = None
1157 1191
1158 1192 self.firstHeaderSize = 0
1159 1193
1160 1194 self.basicHeaderSize = 24
1161 1195
1162 1196 self.pathList = []
1163 1197
1164 1198 self.filenameList = []
1165 1199
1166 1200 self.lastUTTime = 0
1167 1201
1168 1202 self.maxTimeStep = 30
1169 1203
1170 1204 self.flagNoMoreFiles = 0
1171 1205
1172 1206 self.set = 0
1173 1207
1174 1208 self.path = None
1175 1209
1176 1210 self.profileIndex = 9999
1177 1211
1178 1212 self.delay = 3 #seconds
1179 1213
1180 1214 self.nTries = 3 #quantity tries
1181 1215
1182 1216 self.nFiles = 3 #number of files for searching
1183 1217
1184 1218 self.nReadBlocks = 0
1185 1219
1186 1220 self.flagIsNewFile = 1
1187 1221
1188 1222 self.ippSeconds = 0
1189 1223
1190 1224 self.flagTimeBlock = 0
1191 1225
1192 1226 self.flagIsNewBlock = 0
1193 1227
1194 1228 self.nTotalBlocks = 0
1195 1229
1196 1230 self.blocksize = 0
1197 1231
1198 1232 self.dataOut = self.createObjByDefault()
1199 1233
1200 1234 def createObjByDefault(self):
1201 1235
1202 1236 dataObj = Voltage()
1203 1237
1204 1238 return dataObj
1205 1239
1206 1240 def __hasNotDataInBuffer(self):
1207 1241 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1208 1242 return 1
1209 1243 return 0
1210 1244
1211 1245
1212 1246 def getBlockDimension(self):
1213 1247 """
1214 1248 Obtiene la cantidad de puntos a leer por cada bloque de datos
1215 1249
1216 1250 Affected:
1217 1251 self.blocksize
1218 1252
1219 1253 Return:
1220 1254 None
1221 1255 """
1222 1256 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1223 1257 self.blocksize = pts2read
1224 1258
1225 1259
1226 1260 def readBlock(self):
1227 1261 """
1228 1262 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1229 1263 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1230 1264 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1231 1265 es seteado a 0
1232 1266
1233 1267 Inputs:
1234 1268 None
1235 1269
1236 1270 Return:
1237 1271 None
1238 1272
1239 1273 Affected:
1240 1274 self.profileIndex
1241 1275 self.datablock
1242 1276 self.flagIsNewFile
1243 1277 self.flagIsNewBlock
1244 1278 self.nTotalBlocks
1245 1279
1246 1280 Exceptions:
1247 1281 Si un bloque leido no es un bloque valido
1248 1282 """
1249 1283
1250 1284 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1251 1285
1252 1286 try:
1253 1287 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1254 1288 except:
1255 1289 print "The read block (%3d) has not enough data" %self.nReadBlocks
1256 1290 return 0
1257 1291
1258 1292 junk = numpy.transpose(junk, (2,0,1))
1259 1293 self.datablock = junk['real'] + junk['imag']*1j
1260 1294
1261 1295 self.profileIndex = 0
1262 1296
1263 1297 self.flagIsNewFile = 0
1264 1298 self.flagIsNewBlock = 1
1265 1299
1266 1300 self.nTotalBlocks += 1
1267 1301 self.nReadBlocks += 1
1268 1302
1269 1303 return 1
1270 1304
1271 1305
1272 1306 def getData(self):
1273 1307 """
1274 1308 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1275 1309 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1276 1310 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1277 1311
1278 1312 Ademas incrementa el contador del buffer en 1.
1279 1313
1280 1314 Return:
1281 1315 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1282 1316 buffer. Si no hay mas archivos a leer retorna None.
1283 1317
1284 1318 Variables afectadas:
1285 1319 self.dataOut
1286 1320 self.profileIndex
1287 1321
1288 1322 Affected:
1289 1323 self.dataOut
1290 1324 self.profileIndex
1291 1325 self.flagTimeBlock
1292 1326 self.flagIsNewBlock
1293 1327 """
1294 1328
1295 1329 if self.flagNoMoreFiles:
1296 1330 self.dataOut.flagNoData = True
1297 1331 print 'Process finished'
1298 1332 return 0
1299 1333
1300 1334 self.flagTimeBlock = 0
1301 1335 self.flagIsNewBlock = 0
1302 1336
1303 1337 if self.__hasNotDataInBuffer():
1304 1338
1305 1339 if not( self.readNextBlock() ):
1306 1340 return 0
1307 1341
1308 1342 # self.updateDataHeader()
1309 1343
1310 1344 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1311 1345
1312 1346 if self.datablock == None:
1313 1347 self.dataOut.flagNoData = True
1314 1348 return 0
1315 1349
1316 1350 self.dataOut.data = self.datablock[:,self.profileIndex,:]
1317 1351
1318 1352 self.dataOut.dtype = self.dtype
1319 1353
1320 1354 # self.dataOut.nChannels = self.systemHeaderObj.nChannels
1321 1355
1322 1356 # self.dataOut.nHeights = self.processingHeaderObj.nHeights
1323 1357
1324 1358 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1325 1359
1326 1360 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1327 1361
1328 1362 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1329 1363
1330 1364 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1331 1365
1332 1366 # self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1333 1367
1334 1368 self.dataOut.flagTimeBlock = self.flagTimeBlock
1335 1369
1336 1370 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1337 1371
1338 1372 self.dataOut.ippSeconds = self.ippSeconds
1339 1373
1340 1374 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1341 1375
1342 1376 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1343 1377
1344 1378 self.dataOut.flagShiftFFT = False
1345 1379
1346 1380 if self.processingHeaderObj.code != None:
1347 1381 self.dataOut.nCode = self.processingHeaderObj.nCode
1348 1382
1349 1383 self.dataOut.nBaud = self.processingHeaderObj.nBaud
1350 1384
1351 1385 self.dataOut.code = self.processingHeaderObj.code
1352 1386
1353 1387 self.profileIndex += 1
1354 1388
1355 1389 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1356 1390
1357 1391 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1358 1392
1359 1393 self.dataOut.flagNoData = False
1360 1394
1361 1395 # print self.profileIndex, self.dataOut.utctime
1362 1396 # if self.profileIndex == 800:
1363 1397 # a=1
1364 1398
1365 1399 return self.dataOut.data
1366 1400
1367 1401
class VoltageWriter(JRODataWriter):
    """
    Writes voltage data to processed files (.r). Data is always written
    block by block.
    """

    ext = ".r"

    optchar = "D"

    shapeBuffer = None


    def __init__(self):
        """
        Initialize the writer with default state; no output file is
        configured yet (that happens in setup()).

        Affected:
            self.dataOut

        Return: None
        """

        self.nTotalBlocks = 0

        self.profileIndex = 0

        self.isConfig = False

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.filename = None

        # metadata containers for the file headers
        self.basicHeaderObj = BasicHeader()

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()

    def hasAllDataInBuffer(self):
        # 1 when the block buffer holds a complete set of profiles,
        # 0 otherwise
        if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
            return 1
        return 0


    def setBlockDimension(self):
        """
        Set the shapes of the buffers that make up one data block.

        Affected:
            self.shapeBuffer
            self.datablock

        Return: None
        """
        # on-disk layout: (profiles, heights, channels)
        self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
                            self.processingHeaderObj.nHeights,
                            self.systemHeaderObj.nChannels)

        # in-memory layout: (channels, profiles, heights)
        self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
                                      self.processingHeaderObj.profilesPerBlock,
                                      self.processingHeaderObj.nHeights),
                                     dtype=numpy.dtype('complex'))


    def writeBlock(self):
        """
        Write the buffered block to the designated file.

        Affected:
            self.profileIndex
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.blockIndex

        Return: None
        """
        data = numpy.zeros( self.shapeBuffer, self.dtype )

        # back to on-disk order (profiles, heights, channels)
        junk = numpy.transpose(self.datablock, (1,2,0))

        data['real'] = junk.real
        data['imag'] = junk.imag

        data = data.reshape( (-1) )

        data.tofile( self.fp )

        self.datablock.fill(0)

        self.profileIndex = 0
        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1

        self.blockIndex += 1
        self.nTotalBlocks += 1

    def putData(self):
        """
        Store one profile in the block buffer and write the block to file
        once it is complete.

        Affected:
            self.flagIsNewBlock
            self.profileIndex

        Return:
            0 : no data available or no more files can be written
            1 : the profile was buffered (and the block possibly written)
        """
        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            # time discontinuity: discard the partial block and start a
            # new output file
            self.datablock.fill(0)
            self.profileIndex = 0
            self.setNextFile()

        if self.profileIndex == 0:
            self.getBasicHeader()

        self.datablock[:,self.profileIndex,:] = self.dataOut.data

        self.profileIndex += 1

        if self.hasAllDataInBuffer():
            self.writeNextBlock()

        return 1

    def __getProcessFlags(self):
        """
        Build the PROCFLAG bitmask (stored in the processing header) from
        the output dtype and the processing applied to the data.
        """

        processFlags = 0

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        # NOTE: raises NameError if self.dataOut.dtype is not one of the
        # six supported complex dtypes above
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        # "is not None": code may be a numpy array, for which "!= None"
        # is not a plain boolean
        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nCohInt > 1:
            processFlags += PROCFLAG.COHERENT_INTEGRATION

        return processFlags


    def __getBlockSize(self):
        '''
        Return the size in bytes of one Voltage data block.
        '''

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        # bytes per component (real or imag) for each dtype above
        datatypeValueList = [1,2,4,8,4,8]
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break

        # factor 2: real + imaginary components
        blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.dataOut.nProfiles * datatypeValue * 2)

        return blocksize

    def getDataHeader(self):

        """
        Build a copy of the First Header from the current data object.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40 # bytes
        self.processingHeaderObj.dtype = 0 # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1 # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
        self.processingHeaderObj.nIncohInt = 1 # source data is of Voltage type
        self.processingHeaderObj.totalSpectra = 0 # source data is of Voltage type

        # "is not None": code may be a numpy array
        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += codesize

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            processingHeaderSize += 12

        self.processingHeaderObj.size = processingHeaderSize
1632 1666
1633 1667 class SpectraReader(JRODataReader):
1634 1668 """
1635 1669 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1636 1670 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1637 1671 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1638 1672
1639 1673 paresCanalesIguales * alturas * perfiles (Self Spectra)
1640 1674 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1641 1675 canales * alturas (DC Channels)
1642 1676
1643 1677 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1644 1678 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1645 1679 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1646 1680 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1647 1681
1648 1682 Example:
1649 1683 dpath = "/home/myuser/data"
1650 1684
1651 1685 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1652 1686
1653 1687 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1654 1688
1655 1689 readerObj = SpectraReader()
1656 1690
1657 1691 readerObj.setup(dpath, startTime, endTime)
1658 1692
1659 1693 while(True):
1660 1694
1661 1695 readerObj.getData()
1662 1696
1663 1697 print readerObj.data_spc
1664 1698
1665 1699 print readerObj.data_cspc
1666 1700
1667 1701 print readerObj.data_dc
1668 1702
1669 1703 if readerObj.flagNoMoreFiles:
1670 1704 break
1671 1705
1672 1706 """
1673 1707
1674 1708 pts2read_SelfSpectra = 0
1675 1709
1676 1710 pts2read_CrossSpectra = 0
1677 1711
1678 1712 pts2read_DCchannels = 0
1679 1713
1680 1714 ext = ".pdata"
1681 1715
1682 1716 optchar = "P"
1683 1717
1684 1718 dataOut = None
1685 1719
1686 1720 nRdChannels = None
1687 1721
1688 1722 nRdPairs = None
1689 1723
1690 1724 rdPairList = []
1691 1725
1692 1726
1693 1727 def __init__(self):
1694 1728 """
1695 1729 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1696 1730
1697 1731 Inputs:
1698 1732 dataOut : Objeto de la clase Spectra. Este objeto sera utilizado para
1699 1733 almacenar un perfil de datos cada vez que se haga un requerimiento
1700 1734 (getData). El perfil sera obtenido a partir del buffer de datos,
1701 1735 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1702 1736 bloque de datos.
1703 1737 Si este parametro no es pasado se creara uno internamente.
1704 1738
1705 1739 Affected:
1706 1740 self.dataOut
1707 1741
1708 1742 Return : None
1709 1743 """
1710 1744
1711 1745 self.isConfig = False
1712 1746
1713 1747 self.pts2read_SelfSpectra = 0
1714 1748
1715 1749 self.pts2read_CrossSpectra = 0
1716 1750
1717 1751 self.pts2read_DCchannels = 0
1718 1752
1719 1753 self.datablock = None
1720 1754
1721 1755 self.utc = None
1722 1756
1723 1757 self.ext = ".pdata"
1724 1758
1725 1759 self.optchar = "P"
1726 1760
1727 1761 self.basicHeaderObj = BasicHeader()
1728 1762
1729 1763 self.systemHeaderObj = SystemHeader()
1730 1764
1731 1765 self.radarControllerHeaderObj = RadarControllerHeader()
1732 1766
1733 1767 self.processingHeaderObj = ProcessingHeader()
1734 1768
1735 1769 self.online = 0
1736 1770
1737 1771 self.fp = None
1738 1772
1739 1773 self.idFile = None
1740 1774
1741 1775 self.dtype = None
1742 1776
1743 1777 self.fileSizeByHeader = None
1744 1778
1745 1779 self.filenameList = []
1746 1780
1747 1781 self.filename = None
1748 1782
1749 1783 self.fileSize = None
1750 1784
1751 1785 self.firstHeaderSize = 0
1752 1786
1753 1787 self.basicHeaderSize = 24
1754 1788
1755 1789 self.pathList = []
1756 1790
1757 1791 self.lastUTTime = 0
1758 1792
1759 1793 self.maxTimeStep = 30
1760 1794
1761 1795 self.flagNoMoreFiles = 0
1762 1796
1763 1797 self.set = 0
1764 1798
1765 1799 self.path = None
1766 1800
1767 1801 self.delay = 3 #seconds
1768 1802
1769 1803 self.nTries = 3 #quantity tries
1770 1804
1771 1805 self.nFiles = 3 #number of files for searching
1772 1806
1773 1807 self.nReadBlocks = 0
1774 1808
1775 1809 self.flagIsNewFile = 1
1776 1810
1777 1811 self.ippSeconds = 0
1778 1812
1779 1813 self.flagTimeBlock = 0
1780 1814
1781 1815 self.flagIsNewBlock = 0
1782 1816
1783 1817 self.nTotalBlocks = 0
1784 1818
1785 1819 self.blocksize = 0
1786 1820
1787 1821 self.dataOut = self.createObjByDefault()
1788 1822
1789 1823
1790 1824 def createObjByDefault(self):
1791 1825
1792 1826 dataObj = Spectra()
1793 1827
1794 1828 return dataObj
1795 1829
1796 1830 def __hasNotDataInBuffer(self):
1797 1831 return 1
1798 1832
1799 1833
1800 1834 def getBlockDimension(self):
1801 1835 """
1802 1836 Obtiene la cantidad de puntos a leer por cada bloque de datos
1803 1837
1804 1838 Affected:
1805 1839 self.nRdChannels
1806 1840 self.nRdPairs
1807 1841 self.pts2read_SelfSpectra
1808 1842 self.pts2read_CrossSpectra
1809 1843 self.pts2read_DCchannels
1810 1844 self.blocksize
1811 1845 self.dataOut.nChannels
1812 1846 self.dataOut.nPairs
1813 1847
1814 1848 Return:
1815 1849 None
1816 1850 """
1817 1851 self.nRdChannels = 0
1818 1852 self.nRdPairs = 0
1819 1853 self.rdPairList = []
1820 1854
1821 1855 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1822 1856 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1823 1857 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1824 1858 else:
1825 1859 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1826 1860 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1827 1861
1828 1862 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1829 1863
1830 1864 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1831 1865 self.blocksize = self.pts2read_SelfSpectra
1832 1866
1833 1867 if self.processingHeaderObj.flag_cspc:
1834 1868 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1835 1869 self.blocksize += self.pts2read_CrossSpectra
1836 1870
1837 1871 if self.processingHeaderObj.flag_dc:
1838 1872 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1839 1873 self.blocksize += self.pts2read_DCchannels
1840 1874
1841 1875 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1842 1876
1843 1877
1844 1878 def readBlock(self):
1845 1879 """
1846 1880 Lee el bloque de datos desde la posicion actual del puntero del archivo
1847 1881 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1848 1882 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1849 1883 es seteado a 0
1850 1884
1851 1885 Return: None
1852 1886
1853 1887 Variables afectadas:
1854 1888
1855 1889 self.flagIsNewFile
1856 1890 self.flagIsNewBlock
1857 1891 self.nTotalBlocks
1858 1892 self.data_spc
1859 1893 self.data_cspc
1860 1894 self.data_dc
1861 1895
1862 1896 Exceptions:
1863 1897 Si un bloque leido no es un bloque valido
1864 1898 """
1865 1899 blockOk_flag = False
1866 1900 fpointer = self.fp.tell()
1867 1901
1868 1902 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1869 1903 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1870 1904
1871 1905 if self.processingHeaderObj.flag_cspc:
1872 1906 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1873 1907 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1874 1908
1875 1909 if self.processingHeaderObj.flag_dc:
1876 1910 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1877 1911 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1878 1912
1879 1913
1880 1914 if not(self.processingHeaderObj.shif_fft):
1881 1915 #desplaza a la derecha en el eje 2 determinadas posiciones
1882 1916 shift = int(self.processingHeaderObj.profilesPerBlock/2)
1883 1917 spc = numpy.roll( spc, shift , axis=2 )
1884 1918
1885 1919 if self.processingHeaderObj.flag_cspc:
1886 1920 #desplaza a la derecha en el eje 2 determinadas posiciones
1887 1921 cspc = numpy.roll( cspc, shift, axis=2 )
1888 1922
1889 1923
1890 1924 spc = numpy.transpose( spc, (0,2,1) )
1891 1925 self.data_spc = spc
1892 1926
1893 1927 if self.processingHeaderObj.flag_cspc:
1894 1928 cspc = numpy.transpose( cspc, (0,2,1) )
1895 1929 self.data_cspc = cspc['real'] + cspc['imag']*1j
1896 1930 else:
1897 1931 self.data_cspc = None
1898 1932
1899 1933 if self.processingHeaderObj.flag_dc:
1900 1934 self.data_dc = dc['real'] + dc['imag']*1j
1901 1935 else:
1902 1936 self.data_dc = None
1903 1937
1904 1938 self.flagIsNewFile = 0
1905 1939 self.flagIsNewBlock = 1
1906 1940
1907 1941 self.nTotalBlocks += 1
1908 1942 self.nReadBlocks += 1
1909 1943
1910 1944 return 1
1911 1945
1912 1946
1913 1947 def getData(self):
1914 1948 """
1915 1949 Copia el buffer de lectura a la clase "Spectra",
1916 1950 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1917 1951 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1918 1952
1919 1953 Return:
1920 1954 0 : Si no hay mas archivos disponibles
1921 1955 1 : Si hizo una buena copia del buffer
1922 1956
1923 1957 Affected:
1924 1958 self.dataOut
1925 1959
1926 1960 self.flagTimeBlock
1927 1961 self.flagIsNewBlock
1928 1962 """
1929 1963
1930 1964 if self.flagNoMoreFiles:
1931 1965 self.dataOut.flagNoData = True
1932 1966 print 'Process finished'
1933 1967 return 0
1934 1968
1935 1969 self.flagTimeBlock = 0
1936 1970 self.flagIsNewBlock = 0
1937 1971
1938 1972 if self.__hasNotDataInBuffer():
1939 1973
1940 1974 if not( self.readNextBlock() ):
1941 1975 self.dataOut.flagNoData = True
1942 1976 return 0
1943 1977
1944 1978 # self.updateDataHeader()
1945 1979
1946 1980 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1947 1981
1948 1982 if self.data_dc == None:
1949 1983 self.dataOut.flagNoData = True
1950 1984 return 0
1951 1985
1952 1986 self.dataOut.data_spc = self.data_spc
1953 1987
1954 1988 self.dataOut.data_cspc = self.data_cspc
1955 1989
1956 1990 self.dataOut.data_dc = self.data_dc
1957 1991
1958 1992 self.dataOut.flagTimeBlock = self.flagTimeBlock
1959 1993
1960 1994 self.dataOut.flagNoData = False
1961 1995
1962 1996 self.dataOut.dtype = self.dtype
1963 1997
1964 1998 # self.dataOut.nChannels = self.nRdChannels
1965 1999
1966 2000 self.dataOut.nPairs = self.nRdPairs
1967 2001
1968 2002 self.dataOut.pairsList = self.rdPairList
1969 2003
1970 2004 # self.dataOut.nHeights = self.processingHeaderObj.nHeights
1971 2005
1972 2006 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1973 2007
1974 2008 self.dataOut.nFFTPoints = self.processingHeaderObj.profilesPerBlock
1975 2009
1976 2010 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1977 2011
1978 2012 self.dataOut.nIncohInt = self.processingHeaderObj.nIncohInt
1979 2013
1980 2014 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1981 2015
1982 2016 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1983 2017
1984 2018 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1985 2019
1986 2020 # self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1987 2021
1988 2022 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
1989 2023
1990 2024 self.dataOut.ippSeconds = self.ippSeconds
1991 2025
1992 2026 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOut.nFFTPoints
1993 2027
1994 2028 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
1995 2029
1996 2030 # self.profileIndex += 1
1997 2031
1998 2032 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1999 2033
2000 2034 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
2001 2035
2002 2036 return self.dataOut.data_spc
2003 2037
2004 2038
class SpectraWriter(JRODataWriter):

    """
    Writes processed spectra data to .pdata files. Data is always written
    to disk one whole block (self-spectra + optional cross-spectra and DC
    channels) at a time.
    """

    ext = ".pdata"

    optchar = "P"

    # Dimensional shapes of the sub-blocks (self-spectra, cross-spectra,
    # DC channels) that compose one data block; set by setBlockDimension().
    shape_spc_Buffer = None

    shape_cspc_Buffer = None

    shape_dc_Buffer = None

    data_spc = None

    data_cspc = None

    data_dc = None

#    dataOut = None

    def __init__(self):
        """
        SpectraWriter initializer.

        Affected:
            self.dataOut
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return: None
        """

        self.isConfig = False

        self.nTotalBlocks = 0

        self.data_spc = None

        self.data_cspc = None

        self.data_dc = None

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.noMoreFiles = 0

        self.filename = None

        self.basicHeaderObj = BasicHeader()

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()


    def hasAllDataInBuffer(self):
        # Spectra blocks arrive complete, so the buffer is always ready
        # to be written.
        return 1


    def setBlockDimension(self):
        """
        Computes the dimensional shapes of the data sub-blocks that compose a block.

        Affected:
            self.shape_spc_Buffer
            self.shape_cspc_Buffer
            self.shape_dc_Buffer

        Return: None
        """
        self.shape_spc_Buffer = (self.dataOut.nChannels,
                                 self.processingHeaderObj.nHeights,
                                 self.processingHeaderObj.profilesPerBlock)

        self.shape_cspc_Buffer = (self.dataOut.nPairs,
                                  self.processingHeaderObj.nHeights,
                                  self.processingHeaderObj.profilesPerBlock)

        self.shape_dc_Buffer = (self.dataOut.nChannels,
                                self.processingHeaderObj.nHeights)


    def writeBlock(self):
        """
        Writes the buffered block to the currently open file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.nWriteBlocks

        Return: None
        """

        spc = numpy.transpose(self.data_spc, (0, 2, 1))
        if not self.processingHeaderObj.shif_fft:
            # Shift the spectrum along the frequency axis so the file keeps
            # the non-fft-shifted ordering. "//" keeps the shift an integer
            # (numpy.roll requires an int shift).
            spc = numpy.roll(spc, self.processingHeaderObj.profilesPerBlock // 2, axis=2)
        data = spc.reshape((-1))
        data.tofile(self.fp)

        # NOTE: "is not None" instead of "!= None" — comparing a numpy
        # array against None with != is elementwise and raises on truth test.
        if self.data_cspc is not None:
            data = numpy.zeros(self.shape_cspc_Buffer, self.dtype)
            cspc = numpy.transpose(self.data_cspc, (0, 2, 1))
            if not self.processingHeaderObj.shif_fft:
                cspc = numpy.roll(cspc, self.processingHeaderObj.profilesPerBlock // 2, axis=2)
            data['real'] = cspc.real
            data['imag'] = cspc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        if self.data_dc is not None:
            data = numpy.zeros(self.shape_dc_Buffer, self.dtype)
            dc = self.data_dc
            data['real'] = dc.real
            data['imag'] = dc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        self.data_spc.fill(0)
        if self.data_dc is not None:
            self.data_dc.fill(0)
        if self.data_cspc is not None:
            self.data_cspc.fill(0)

        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1
        self.nTotalBlocks += 1
        self.nWriteBlocks += 1
        self.blockIndex += 1


    def putData(self):
        """
        Stores one block of data and writes it to a file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc

        Return:
            0 : if there is no data or no more files can be written
            1 : if a block of data was written to a file
        """

        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            self.data_spc.fill(0)
            # Guard optional buffers: cross-spectra and DC channels may be
            # absent (None), as writeBlock() already assumes.
            if self.data_cspc is not None:
                self.data_cspc.fill(0)
            if self.data_dc is not None:
                self.data_dc.fill(0)
            self.setNextFile()

        if self.flagIsNewFile == 0:
            self.getBasicHeader()

        self.data_spc = self.dataOut.data_spc
        self.data_cspc = self.dataOut.data_cspc
        self.data_dc = self.dataOut.data_dc

        #            #self.processingHeaderObj.dataBlocksPerFile)
        if self.hasAllDataInBuffer():
#            self.getDataHeader()
            self.writeNextBlock()

        return 1


    def __getProcessFlags(self):
        # Builds the PROCFLAG bitmask describing data type and the
        # processing applied to the data.

        processFlags = 0

        dtype0 = numpy.dtype([('real', '<i1'), ('imag', '<i1')])
        dtype1 = numpy.dtype([('real', '<i2'), ('imag', '<i2')])
        dtype2 = numpy.dtype([('real', '<i4'), ('imag', '<i4')])
        dtype3 = numpy.dtype([('real', '<i8'), ('imag', '<i8')])
        dtype4 = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
        dtype5 = numpy.dtype([('real', '<f8'), ('imag', '<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        # "is not None": code is typically a numpy array.
        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nIncohInt > 1:
            processFlags += PROCFLAG.INCOHERENT_INTEGRATION

        if self.dataOut.data_dc is not None:
            processFlags += PROCFLAG.SAVE_CHANNELS_DC

        return processFlags


    def __getBlockSize(self):
        '''
        Determines the number of bytes of one Spectra data block.
        '''

        dtype0 = numpy.dtype([('real', '<i1'), ('imag', '<i1')])
        dtype1 = numpy.dtype([('real', '<i2'), ('imag', '<i2')])
        dtype2 = numpy.dtype([('real', '<i4'), ('imag', '<i4')])
        dtype3 = numpy.dtype([('real', '<i8'), ('imag', '<i8')])
        dtype4 = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
        dtype5 = numpy.dtype([('real', '<f8'), ('imag', '<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        datatypeValueList = [1, 2, 4, 8, 4, 8]  # bytes per component
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break

        pts2write = self.dataOut.nHeights * self.dataOut.nFFTPoints

        # Self-spectra are real-valued: one component per point.
        pts2write_SelfSpectra = int(self.dataOut.nChannels * pts2write)
        blocksize = (pts2write_SelfSpectra * datatypeValue)

        # Cross-spectra and DC are complex: two components per point.
        if self.dataOut.data_cspc is not None:
            pts2write_CrossSpectra = int(self.dataOut.nPairs * pts2write)
            blocksize += (pts2write_CrossSpectra * datatypeValue * 2)

        if self.dataOut.data_dc is not None:
            pts2write_DCchannels = int(self.dataOut.nChannels * self.dataOut.nHeights)
            blocksize += (pts2write_DCchannels * datatypeValue * 2)

        blocksize = blocksize  # * datatypeValue * 2 #CORREGIR ESTO

        return blocksize

    def getDataHeader(self):

        """
        Gets a copy of the First Header.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.dtype

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40  # bytes
        # NOTE(review): 0 is documented here as the Voltage dtype code even
        # though this is the Spectra writer — kept as-is; confirm intended.
        self.processingHeaderObj.dtype = 0  # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.dataOut.nFFTPoints
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1  # podria ser 1 o self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt  # Se requiere para determinar el valor de timeInterval
        self.processingHeaderObj.nIncohInt = self.dataOut.nIncohInt
        self.processingHeaderObj.totalSpectra = self.dataOut.nPairs + self.dataOut.nChannels

        if self.processingHeaderObj.totalSpectra > 0:
            # Each self-spectrum is encoded as a (channel, channel) pair,
            # followed by the cross-spectra pairs flattened.
            channelList = []
            for channel in range(self.dataOut.nChannels):
                channelList.append(channel)
                channelList.append(channel)

            pairsList = []
            for pair in self.dataOut.pairsList:
                pairsList.append(pair[0])
                pairsList.append(pair[1])
            spectraComb = channelList + pairsList
            spectraComb = numpy.array(spectraComb, dtype="u1")
            self.processingHeaderObj.spectraComb = spectraComb
            sizeOfSpcComb = len(spectraComb)
            processingHeaderSize += sizeOfSpcComb

        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            nCodeSize = 4  # bytes
            nBaudSize = 4  # bytes
            codeSize = 4  # bytes
            sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += sizeOfCode

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            sizeOfFirstHeight = 4
            sizeOfdeltaHeight = 4
            sizeOfnHeights = 4
            sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights) * self.processingHeaderObj.nWindows
            processingHeaderSize += sizeOfWindows

        self.processingHeaderObj.size = processingHeaderSize
2354 2388
class SpectraHeisWriter():
    """
    Writes Heis spectra data to FITS files (one file per block) under a
    per-day subfolder of the configured output path.
    """

    i = 0

    def __init__(self, dataOut):

        self.wrObj = FITS()
        self.dataOut = dataOut

    @staticmethod
    def isNumber(str):
        """
        Checks whether a string can be converted to a number.

        Input:
            str : string to analyze

        Return:
            True  : the string is numeric
            False : the string is not numeric
        """
        # Fixed: the original definition lacked 'self', so calling it on an
        # instance silently bound the instance to 'str'. A staticmethod
        # keeps both instance and class call styles working.
        try:
            float(str)
            return True
        except:
            return False

    def setup(self, wrpath):
        # Create the output directory if needed and reset the file counter.
        if not(os.path.exists(wrpath)):
            # NOTE(review): os.mkdir fails when parent dirs are missing;
            # confirm whether os.makedirs is wanted here.
            os.mkdir(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Writes the current spectra block to a new FITS file.

        Return:
            1 : always, after the file is written
        """
#        self.wrObj.writeHeader(nChannels=self.dataOut.nChannels, nFFTPoints=self.dataOut.nFFTPoints)
        #name = self.dataOut.utctime
        name = time.localtime(self.dataOut.utctime)
        ext = ".fits"
        #folder='D%4.4d%3.3d'%(name.tm_year,name.tm_yday)
        subfolder = 'D%4.4d%3.3d' % (name.tm_year, name.tm_yday)

        doypath = os.path.join(self.wrpath, subfolder)
        if not(os.path.exists(doypath)):
            os.mkdir(doypath)
        self.setFile += 1
        # Renamed local (was 'file', shadowing the builtin).
        fileName = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year, name.tm_yday, self.setFile, ext)

        filename = os.path.join(self.wrpath, subfolder, fileName)

#        print self.dataOut.ippSeconds
        # Frequency axis centered at zero.
        # NOTE(review): built from nHeights while the columns use
        # nFFTPoints — confirm these are meant to coincide here.
        freq = numpy.arange(-1 * self.dataOut.nHeights / 2., self.dataOut.nHeights / 2.) / (2 * self.dataOut.ippSeconds)

        col1 = self.wrObj.setColF(name="freq", format=str(self.dataOut.nFFTPoints) + 'E', array=freq)
        col2 = self.wrObj.writeData(name="P_Ch1", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[0, :]))
        col3 = self.wrObj.writeData(name="P_Ch2", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[1, :]))
        col4 = self.wrObj.writeData(name="P_Ch3", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[2, :]))
        col5 = self.wrObj.writeData(name="P_Ch4", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[3, :]))
        col6 = self.wrObj.writeData(name="P_Ch5", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[4, :]))
        col7 = self.wrObj.writeData(name="P_Ch6", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[5, :]))
        col8 = self.wrObj.writeData(name="P_Ch7", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[6, :]))
        col9 = self.wrObj.writeData(name="P_Ch8", format=str(self.dataOut.nFFTPoints) + 'E', data=10 * numpy.log10(self.dataOut.data_spc[7, :]))
        #n=numpy.arange((100))
        n = self.dataOut.data_spc[6, :]
        a = self.wrObj.cFImage(n)
        b = self.wrObj.Ctable(col1, col2, col3, col4, col5, col6, col7, col8, col9)
        self.wrObj.CFile(a, b)
        self.wrObj.wFile(filename)
        return 1
2426 2460
class FITS:
    """
    Thin helper around pyfits for assembling and writing a FITS file:
    build columns, wrap them in a table HDU next to a primary image HDU,
    and write the resulting HDU list to disk.
    """

    # Last column definition / written data, kept for inspection.
    name = None
    format = None
    array = None
    data = None
    thdulist = None

    def __init__(self):
        pass

    def setColF(self, name, format, array):
        """Build and return a float32 pyfits Column from *array*."""
        self.name = name
        self.format = format
        self.array = array
        wrapped = numpy.array([self.array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=self.name, format=self.format, array=wrapped)
        return self.col1

#    def setColP(self,name,format,data):
#        self.name=name
#        self.format=format
#        self.data=data
#        a2=numpy.array([self.data],dtype=numpy.float32)
#        self.col2 = pyfits.Column(name=self.name, format=self.format, array=a2)
#        return self.col2

    def writeHeader(self,):
        pass

    def writeData(self, name, format, data):
        """Build and return a float32 pyfits Column from *data*."""
        self.name = name
        self.format = format
        self.data = data
        wrapped = numpy.array([self.data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=self.name, format=self.format, array=wrapped)
        return self.col2

    def cFImage(self, n):
        """Create and return the primary HDU holding *n*."""
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        """Assemble the nine columns into a binary-table HDU."""
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        """Combine the primary and table HDUs into an HDU list."""
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        """Write the assembled HDU list to *filename*."""
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now