##// END OF EJS Templates
mensajes en la busqueda de datos en linea agregados
Miguel Valdez -
r292:a7eead55f08b
parent child
Show More
@@ -1,2580 +1,2584
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from jrodata import *
15 15 from jroheaderIO import *
16 16 from jroprocessing import *
17 17
18 18 LOCALTIME = -18000
19 19
def isNumber(str):
    """
    Check whether a string can be converted to a number.

    Input:
        str : value to test for numeric convertibility (typically a string)

    Return:
        True  : the value converts cleanly to float
        False : it does not (or is of a non-convertible type, e.g. None)
    """
    # Narrowed from a bare `except`: float() signals failure with
    # ValueError (bad string) or TypeError (wrong type); anything else
    # (e.g. KeyboardInterrupt) should propagate.
    try:
        float(str)
        return True
    except (TypeError, ValueError):
        return False
38 38
def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca data file holds data inside the given
    UT range, by reading only its first basic header.

    Inputs:
        filename       : full path of a Jicamarca-format data file (.r)
        startUTSeconds : range start, in seconds since 01/01/1970
        endUTSeconds   : range end, in seconds since 01/01/1970

    Return:
        1 if the header timestamp satisfies startUTSeconds <= utc < endUTSeconds,
        0 otherwise (including files whose header cannot be parsed).

    Raises:
        IOError if the file cannot be opened.
    """
    basicHeaderObj = BasicHeader(LOCALTIME)

    # Parenthesized raise/print keep this function valid under both
    # Python 2 and Python 3 (single-argument print is identical in py2).
    try:
        fp = open(filename, 'rb')
    except Exception:
        raise IOError("The file %s can't be opened" % (filename))

    sts = basicHeaderObj.read(fp)
    fp.close()

    if not(sts):
        print("Skipping the file %s because it has not a valid header" % (filename))
        return 0

    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
78 78
def isFileinThisTime(filename, startTime, endTime):
    """
    Return 1 if the data file's first block falls inside the given
    time-of-day range, 0 otherwise.

    Inputs:
        filename  : full path of a Jicamarca-format data file (.r)
        startTime : range start (datetime.time)
        endTime   : range end (datetime.time)

    Return:
        1 if startTime <= header time < endTime, 0 otherwise
        (including files whose header cannot be parsed).

    Raises:
        IOError if the file cannot be opened.
    """
    try:
        fp = open(filename, 'rb')
    except Exception:
        raise IOError("The file %s can't be opened" % (filename))

    basicHeaderObj = BasicHeader(LOCALTIME)
    sts = basicHeaderObj.read(fp)
    fp.close()

    # Bug fix: validate the header read BEFORE touching
    # basicHeaderObj.datatime — on a failed read that attribute may be
    # unset or garbage. (Mirrors the ordering in isThisFileinRange.)
    if not(sts):
        print("Skipping the file %s because it has not a valid header" % (filename))
        return 0

    thisTime = basicHeaderObj.datatime.time()

    if not ((startTime <= thisTime) and (endTime > thisTime)):
        return 0

    return 1
120 120
def getlastFileFromPath(path, ext):
    """
    Filter the files in *path*, keeping only names that match the
    "xYYYYDDDSSS.ext" layout, and return the last one (case-insensitive
    sort order).

    Input:
        path : folder containing the candidate files
        ext  : extension the files must carry (compared case-insensitively)

    Return:
        The last valid file name (without path), or None when no file
        in the folder matches the expected format.
    """
    validFilelist = []
    fileList = os.listdir(path)

    # Expected name layout (character positions):
    # 0 1234 567 89A BCDE
    # H YYYY DDD SSS .ext
    for thisFile in fileList:
        try:
            int(thisFile[1:5])  # year field must be numeric
            int(thisFile[5:8])  # day-of-year field must be numeric
        except ValueError:      # narrowed from a bare except; slices never raise
            continue

        if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):
            continue

        validFilelist.append(thisFile)

    if validFilelist:
        validFilelist = sorted(validFilelist, key=str.lower)
        return validFilelist[-1]

    return None
158 158
def checkForRealPath(path, year, doy, set, ext):
    """
    Resolve the real on-disk location of a data file on a case-sensitive
    filesystem (Linux) by probing upper/lower-case combinations of the
    directory prefix (none, 'd', 'D') and the file prefix ('d'/'D' for
    voltage .r files, 'p'/'P' for spectra .pdata files).

    Example: for .../D2009307/P2009307367.ext the probes are
        .../y2009307367.ext, .../Y2009307367.ext,
        .../x2009307/y2009307367.ext, ..., .../X2009307/Y2009307367.ext

    Return:
        (fullpath, filename) when one combination exists on disk;
        (None, lastTriedName) when none does;
        (None, None) for an unrecognized extension.
    """
    extLower = ext.lower()
    if extLower == ".r":          # voltage
        filePrefixes = ['d', 'D']
    elif extLower == ".pdata":    # spectra
        filePrefixes = ['p', 'P']
    else:
        return None, None

    lastTried = None

    # Probe every (directory prefix, file prefix) combination in order.
    for dirPrefix in [None, 'd', 'D']:
        if dirPrefix is None:
            searchPath = path
        else:
            # day folder named xYYYYDDD (x = 'd' or 'D')
            searchPath = os.path.join(path, "%s%04d%03d" % (dirPrefix, year, doy))

        for filePrefix in filePrefixes:
            # candidate file named xYYYYDDDSSS.ext
            lastTried = "%s%04d%03d%03d%s" % (filePrefix, year, doy, set, ext)
            candidate = os.path.join(searchPath, lastTried)

            if os.path.exists(candidate):
                return candidate, lastTried

    return None, lastTried
215 215
class JRODataIO:
    """
    Base class shared by the JRO data readers and writers.

    Holds the header objects, file bookkeeping attributes and block/flag
    state common to both directions of I/O. Abstract: __init__ and run()
    raise until overridden by a concrete subclass.
    """

    # speed of light (m/s), used to derive ippSeconds from the radar IPP
    c = 3E8

    # set to True once setup() has run (see subclasses' run())
    isConfig = False

    # NOTE(review): these header objects are created at class-definition
    # time and therefore shared by all instances until rebound — confirm
    # subclasses always reassign them per instance if isolation matters.
    basicHeaderObj = BasicHeader(LOCALTIME)

    systemHeaderObj = SystemHeader()

    radarControllerHeaderObj = RadarControllerHeader()

    processingHeaderObj = ProcessingHeader()

    # 1 = online (growing files) mode, 0 = offline
    online = 0

    # numpy dtype of a complex sample, set from the processing header
    dtype = None

    pathList = []

    filenameList = []

    filename = None

    ext = None

    # 1 right after a new file is opened, until its first block is consumed
    flagIsNewFile = 1

    # 1 when the time gap to the previous block exceeded maxTimeStep
    flagTimeBlock = 0

    flagIsNewBlock = 0

    # currently open file object (None when no file is open)
    fp = None

    # size in bytes of the full first header (basic + long headers)
    firstHeaderSize = 0

    # fixed size in bytes of a basic header
    basicHeaderSize = 24

    versionFile = 1103

    fileSize = None

    # inter-pulse period expressed in seconds
    ippSeconds = None

    # expected file size computed from the headers
    fileSizeByHeader = None

    fileIndex = None

    profileIndex = None

    blockIndex = None

    nTotalBlocks = None

    # max allowed gap (seconds) between blocks before flagTimeBlock is raised
    maxTimeStep = 30

    lastUTTime = None

    datablock = None

    dataOut = None

    blocksize = None

    def __init__(self):
        # Abstract: concrete readers/writers must provide their own __init__.
        raise ValueError, "Not implemented"

    def run(self):
        # Abstract: implemented by subclasses.
        raise ValueError, "Not implemented"

    def getOutput(self):
        # Return the output data holder produced/consumed by this unit.
        return self.dataOut
291 291
class JRODataReader(JRODataIO, ProcessingUnit):
    """
    Base reader for Jicamarca data files, in offline (fixed file list) or
    online (polling a growing folder) mode. Subclasses implement the
    block-shape and block-reading methods (getBlockDimension, readBlock,
    getData, hasNotDataInBuffer).
    """

    nReadBlocks = 0

    delay = 10 #number of seconds waiting a new file

    nTries = 3 #quantity tries

    nFiles = 3 #number of files for searching

    flagNoMoreFiles = 0

    def __init__(self):

        """
        Abstract: must be implemented by the concrete reader.
        """

        raise ValueError, "This method has not been implemented"


    def createObjByDefault(self):
        """
        Abstract: build a default output data object.
        """
        raise ValueError, "This method has not been implemented"

    def getBlockDimension(self):
        # Abstract: compute the block shape from the processing header.
        raise ValueError, "No implemented"

    def __searchFilesOffLine(self,
                            path,
                            startDate,
                            endDate,
                            startTime=datetime.time(0,0,0),
                            endTime=datetime.time(23,59,59),
                            set=None,
                            expLabel='',
                            ext='.r',
                            walk=True):
        """
        Build the list of data files to read in offline mode.

        When walk is True, day folders named ?YYYYDDD inside *path* are
        matched against the [startDate, endDate] range; otherwise *path*
        itself is used. Each candidate file is then filtered by
        isFileinThisTime over [startTime, endTime).

        Return:
            (pathList, filenameList), or (None, None) when nothing matches.
        """

        pathList = []

        if not walk:
            pathList.append(path)

        else:
            # keep only subdirectories of path
            dirList = []
            for thisPath in os.listdir(path):
                if os.path.isdir(os.path.join(path,thisPath)):
                    dirList.append(thisPath)

            if not(dirList):
                return None, None

            thisDate = startDate

            # one folder per day in the requested date range
            while(thisDate <= endDate):
                year = thisDate.timetuple().tm_year
                doy = thisDate.timetuple().tm_yday

                # day folder pattern: any prefix char + YYYYDDD
                match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
                if len(match) == 0:
                    thisDate += datetime.timedelta(1)
                    continue

                pathList.append(os.path.join(path,match[0],expLabel))
                thisDate += datetime.timedelta(1)

        if pathList == []:
            print "Any folder was found for the date range: %s-%s" %(startDate, endDate)
            return None, None

        print "%d folder(s) was(were) found for the date range: %s-%s" %(len(pathList), startDate, endDate)

        filenameList = []
        for thisPath in pathList:

            fileList = glob.glob1(thisPath, "*%s" %ext)
            fileList.sort()

            for file in fileList:

                filename = os.path.join(thisPath,file)

                # keep only files whose first header falls in the time range
                if isFileinThisTime(filename, startTime, endTime):
                    filenameList.append(filename)

        if not(filenameList):
            print "Any file was found for the time range %s - %s" %(startTime, endTime)
            return None, None

        print "%d file(s) was(were) found for the time range: %s - %s" %(len(filenameList), startTime, endTime)

        self.filenameList = filenameList

        return pathList, filenameList

    def __searchFilesOnLine(self, path, expLabel = "", ext = None, walk=True):

        """
        Look for the newest file of the newest day folder and return it
        together with its decoded name fields.

        Input:
            path     : folder holding the data files
            expLabel : sub-experiment name (subfolder)
            ext      : file extension
            walk     : if True, descend into the newest xYYYYDDD day
                       subfolder of *path*; if False, use *path* directly

        Return:
            (directory, filename, year, doy, set) of the last valid file,
            or five None values when nothing usable was found.
        """
        dirList = []

        if walk:

            # keep only the directories
            for thisPath in os.listdir(path):
                if os.path.isdir(os.path.join(path, thisPath)):
                    dirList.append(thisPath)

            if not(dirList):
                return None, None, None, None, None

            dirList = sorted( dirList, key=str.lower )

            # newest day folder is the last one in case-insensitive order
            doypath = dirList[-1]
            fullpath = os.path.join(path, doypath, expLabel)

        else:
            fullpath = path

        # NOTE(review): "%d" with a string path raises TypeError at
        # runtime — this should be "%s". Left as-is (doc-only pass).
        print "%d folder was found: " %(fullpath )

        filename = getlastFileFromPath(fullpath, ext)

        if not(filename):
            return None, None, None, None, None

        print "%s file was found" %(filename)

        if not(self.__verifyFile(os.path.join(fullpath, filename))):
            return None, None, None, None, None

        # decode xYYYYDDDSSS.ext name fields
        year = int( filename[1:5] )
        doy = int( filename[5:8] )
        set = int( filename[8:11] )

        return fullpath, filename, year, doy, set



    def __setNextFileOffline(self):
        # Advance to the next verified file of self.filenameList and open it.
        # Returns 1 on success, 0 (and flagNoMoreFiles) when the list is done.
        idFile = self.fileIndex

        while (True):
            idFile += 1
            if not(idFile < len(self.filenameList)):
                self.flagNoMoreFiles = 1
                print "No more Files"
                return 0

            filename = self.filenameList[idFile]

            # skip files that fail the size/header sanity check
            if not(self.__verifyFile(filename)):
                continue

            fileSize = os.path.getsize(filename)
            fp = open(filename,'rb')
            break

        self.flagIsNewFile = 1
        self.fileIndex = idFile
        self.filename = filename
        self.fileSize = fileSize
        self.fp = fp

        print "Setting the file: %s"%self.filename

        return 1

    def __setNextFileOnline(self):
        """
        Find the next file with enough data to be read inside the current
        folder; when no valid file is found, wait self.delay seconds and
        retry over the next self.nFiles candidate set numbers, rolling to
        the next day-of-year when the candidates are exhausted.

        Affected:
            self.flagIsNewFile
            self.filename
            self.fileSize
            self.fp
            self.set
            self.flagNoMoreFiles

        Return:
            0 : no valid next file could be found
            1 : a file was opened successfully and is ready to be read

        Exceptions:
            raised by open() if a candidate file cannot be opened
        """
        nFiles = 0
        fileOk_flag = False
        firstTime_flag = True

        self.set += 1

        # look for the first available file
        fullfilename, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
        if fullfilename:
            if self.__verifyFile(fullfilename, False):
                fileOk_flag = True

        # if no file was found, wait and search again
        if not(fileOk_flag):
            for nFiles in range(self.nFiles+1): # probe the next self.nFiles+1 candidate files

                if firstTime_flag: # on the first pass retry self.nTries times
                    tries = self.nTries
                else:
                    tries = 1 # afterwards only once per candidate

                for nTries in range( tries ):
                    if firstTime_flag:
                        print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
                        time.sleep( self.delay )
                    else:
                        # NOTE(review): self.optchar is not defined anywhere in
                        # this file — presumably set by a subclass; confirm.
                        print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)

                    fullfilename, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
                    if fullfilename:
                        if self.__verifyFile(fullfilename):
                            fileOk_flag = True
                            break

                if fileOk_flag:
                    break

                firstTime_flag = False

                print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
                self.set += 1

                if nFiles == (self.nFiles-1): # candidates exhausted: move to the next day folder
                    self.set = 0
                    self.doy += 1

        if fileOk_flag:
            self.fileSize = os.path.getsize( fullfilename )
            self.filename = fullfilename
            self.flagIsNewFile = 1
            if self.fp != None: self.fp.close()
            self.fp = open(fullfilename, 'rb')
            self.flagNoMoreFiles = 0
            print 'Setting the file: %s' % fullfilename
        else:
            self.fileSize = 0
            self.filename = None
            self.flagIsNewFile = 0
            self.fp = None
            self.flagNoMoreFiles = 1
            print 'No more Files'

        return fileOk_flag


    def setNextFile(self):
        # Close the current file (if any), open the next one according to
        # the reading mode, and read its full first header.
        if self.fp != None:
            self.fp.close()

        if self.online:
            newFile = self.__setNextFileOnline()
        else:
            newFile = self.__setNextFileOffline()

        if not(newFile):
            return 0

        self.__readFirstHeader()
        self.nReadBlocks = 0
        return 1

    def __waitNewBlock(self):
        """
        Return 1 when a new data block became available in the current
        file, 0 otherwise.

        In offline mode this always returns 0; in online mode the file is
        re-opened and its size re-checked up to self.nTries times, waiting
        self.delay seconds between tries.
        """
        if not self.online:
            return 0

        # current file already delivered all its declared blocks
        if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):
            return 0

        currentPointer = self.fp.tell()

        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        for nTries in range( self.nTries ):

            # re-open to refresh the OS view of a file being appended to
            self.fp.close()
            self.fp = open( self.filename, 'rb' )
            self.fp.seek( currentPointer )

            self.fileSize = os.path.getsize( self.filename )
            currentSize = self.fileSize - currentPointer

            if ( currentSize >= neededSize ):
                self.__rdBasicHeader()
                return 1

            print "\tWaiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries+1)
            time.sleep( self.delay )


        return 0

    def __setNewBlock(self):
        # Position the reader on the next block: either the current file
        # still holds one (read its basic header), or wait for one (online),
        # or move on to the next file. Returns 1 on success, 0 otherwise.
        if self.fp == None:
            return 0

        # a freshly opened file already read its first header
        if self.flagIsNewFile:
            return 1

        self.lastUTTime = self.basicHeaderObj.utc
        currentSize = self.fileSize - self.fp.tell()
        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        if (currentSize >= neededSize):
            self.__rdBasicHeader()
            return 1

        if self.__waitNewBlock():
            return 1

        if not(self.setNextFile()):
            return 0

        # flag a discontinuity when the inter-block gap is too large
        deltaTime = self.basicHeaderObj.utc - self.lastUTTime #

        self.flagTimeBlock = 0

        if deltaTime > self.maxTimeStep:
            self.flagTimeBlock = 1

        return 1


    def readNextBlock(self):
        # Advance to and read the next data block; 1 on success, 0 when done.
        if not(self.__setNewBlock()):
            return 0

        if not(self.readBlock()):
            return 0

        return 1

    def __rdProcessingHeader(self, fp=None):
        # Read the processing header from fp (defaults to the open file).
        if fp == None:
            fp = self.fp

        self.processingHeaderObj.read(fp)

    def __rdRadarControllerHeader(self, fp=None):
        # Read the radar-controller header from fp (defaults to the open file).
        if fp == None:
            fp = self.fp

        self.radarControllerHeaderObj.read(fp)

    def __rdSystemHeader(self, fp=None):
        # Read the system header from fp (defaults to the open file).
        if fp == None:
            fp = self.fp

        self.systemHeaderObj.read(fp)

    def __rdBasicHeader(self, fp=None):
        # Read a basic header from fp (defaults to the open file).
        if fp == None:
            fp = self.fp

        self.basicHeaderObj.read(fp)


    def __readFirstHeader(self):
        # Read the complete first header of a file (basic + long headers)
        # and derive dtype, ippSeconds, expected file size and block shape.
        self.__rdBasicHeader()
        self.__rdSystemHeader()
        self.__rdRadarControllerHeader()
        self.__rdProcessingHeader()

        self.firstHeaderSize = self.basicHeaderObj.size

        # decode the sample data type from the processing flags
        datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
        if datatype == 0:
            datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
        elif datatype == 1:
            datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
        elif datatype == 2:
            datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
        elif datatype == 3:
            datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
        elif datatype == 4:
            datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
        elif datatype == 5:
            datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
        else:
            raise ValueError, 'Data type was not defined'

        self.dtype = datatype_str
        # ipp is presumably in km: 2*1000*ipp/c gives the round-trip time
        # in seconds — TODO confirm units against RadarControllerHeader
        self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
        self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
#        self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
#        self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
        self.getBlockDimension()


    def __verifyFile(self, filename, msgFlag=True):
        # Check that *filename* can be opened and holds at least one full
        # block (first header + one data block). Returns True/False;
        # msgFlag controls diagnostic printing.
        msg = None
        try:
            fp = open(filename, 'rb')
            currentPosition = fp.tell()
        except:
            if msgFlag:
                print "The file %s can't be opened" % (filename)
            return False

        neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize

        if neededSize == 0:
            # no headers read yet for this run: parse this file's own
            # headers to learn the required size
            basicHeaderObj = BasicHeader(LOCALTIME)
            systemHeaderObj = SystemHeader()
            radarControllerHeaderObj = RadarControllerHeader()
            processingHeaderObj = ProcessingHeader()

            try:
                if not( basicHeaderObj.read(fp) ): raise IOError
                if not( systemHeaderObj.read(fp) ): raise IOError
                if not( radarControllerHeaderObj.read(fp) ): raise IOError
                if not( processingHeaderObj.read(fp) ): raise IOError
                data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))

                neededSize = processingHeaderObj.blockSize + basicHeaderObj.size

            except:
                if msgFlag:
                    print "\tThe file %s is empty or it hasn't enough data" % filename

                fp.close()
                return False
        else:
            msg = "\tSkipping the file %s due to it hasn't enough data" %filename

        fp.close()
        fileSize = os.path.getsize(filename)
        currentSize = fileSize - currentPosition
        if currentSize < neededSize:
            if msgFlag and (msg != None):
                print msg #print"\tSkipping the file %s due to it hasn't enough data" %filename
            return False

        return True

    def setup(self,
                path=None,
                startDate=None,
                endDate=None,
                startTime=datetime.time(0,0,0),
                endTime=datetime.time(23,59,59),
                set=0,
                expLabel = "",
                ext = None,
                online = False,
                delay = 60,
                walk = True):
        """
        Configure the reader (online or offline search), then open the
        first file via setNextFile(). Returns self.dataOut, or exits the
        process when no files are found.
        """

        if path == None:
            raise ValueError, "The path is not valid"

        if ext == None:
            ext = self.ext

        if online:
            print "Searching files in online mode..."

            for nTries in range( self.nTries ):
                fullpath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext, walk=walk)

                if fullpath:
                    break

                print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
                time.sleep( self.delay )

            if not(fullpath):
                print "There 'isn't valied files in %s" % path
                return None

            self.year = year
            self.doy = doy
            self.set = set - 1
            self.path = path

        else:
            print "Searching files in offline mode ..."
            pathList, filenameList = self.__searchFilesOffLine(path, startDate=startDate, endDate=endDate,
                                                              startTime=startTime, endTime=endTime,
                                                              set=set, expLabel=expLabel, ext=ext,
                                                              walk=walk)

            if not(pathList):
                print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
                                                datetime.datetime.combine(startDate,startTime).ctime(),
                                                datetime.datetime.combine(endDate,endTime).ctime())

                sys.exit(-1)


        self.fileIndex = -1
        # NOTE(review): in online mode pathList/filenameList are never
        # assigned as locals, so the next two lines raise
        # UnboundLocalError — verify the online path against callers.
        self.pathList = pathList
        self.filenameList = filenameList

        self.online = online
        self.delay = delay
        ext = ext.lower()
        self.ext = ext

        if not(self.setNextFile()):
            if (startDate!=None) and (endDate!=None):
                print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
            elif startDate != None:
                print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
            else:
                print "No files"

            sys.exit(-1)

#        self.updateDataHeader()

        return self.dataOut

    def getData():
        # Abstract. NOTE(review): missing `self` parameter — calling this
        # on an instance raises TypeError; confirm subclasses override it.
        raise ValueError, "This method has not been implemented"

    def hasNotDataInBuffer():
        # Abstract. NOTE(review): missing `self` parameter (see getData).
        raise ValueError, "This method has not been implemented"

    def readBlock():
        # Abstract. NOTE(review): missing `self` parameter (see getData).
        raise ValueError, "This method has not been implemented"

    def isEndProcess(self):
        # True once no more files remain to be read.
        return self.flagNoMoreFiles

    def printReadBlocks(self):

        print "Number of read blocks per file %04d" %self.nReadBlocks

    def printTotalBlocks(self):

        print "Number of read blocks %04d" %self.nTotalBlocks

    def printNumberOfBlock(self):
        # Print progress only when a fresh block was just read.
        if self.flagIsNewBlock:
            print "Block No. %04d, Total blocks %04d" %(self.basicHeaderObj.dataBlock, self.nTotalBlocks)

    def printInfo(self):
        # Dump all four headers of the current file.
        print self.basicHeaderObj.printInfo()
        print self.systemHeaderObj.printInfo()
        print self.radarControllerHeaderObj.printInfo()
        print self.processingHeaderObj.printInfo()


    def run(self, **kwargs):
        # ProcessingUnit entry point: configure once, then read one datum
        # per call into self.dataOut.
        if not(self.isConfig):

#            self.dataOut = dataOut
            self.setup(**kwargs)
            self.isConfig = True

        self.getData()
886 890
887 891 class JRODataWriter(JRODataIO, Operation):
888 892
889 893 """
890 894 Esta clase permite escribir datos a archivos procesados (.r o ,pdata). La escritura
891 895 de los datos siempre se realiza por bloques.
892 896 """
893 897
894 898 blockIndex = 0
895 899
896 900 path = None
897 901
898 902 setFile = None
899 903
900 904 profilesPerBlock = None
901 905
902 906 blocksPerFile = None
903 907
904 908 nWriteBlocks = 0
905 909
906 910 def __init__(self, dataOut=None):
907 911 raise ValueError, "Not implemented"
908 912
909 913
910 914 def hasAllDataInBuffer(self):
911 915 raise ValueError, "Not implemented"
912 916
913 917
914 918 def setBlockDimension(self):
915 919 raise ValueError, "Not implemented"
916 920
917 921
918 922 def writeBlock(self):
919 923 raise ValueError, "No implemented"
920 924
921 925
922 926 def putData(self):
923 927 raise ValueError, "No implemented"
924 928
925 929 def getDataHeader(self):
926 930 """
927 931 Obtiene una copia del First Header
928 932
929 933 Affected:
930 934
931 935 self.basicHeaderObj
932 936 self.systemHeaderObj
933 937 self.radarControllerHeaderObj
934 938 self.processingHeaderObj self.
935 939
936 940 Return:
937 941 None
938 942 """
939 943
940 944 raise ValueError, "No implemented"
941 945
942 946 def getBasicHeader(self):
943 947
944 948 self.basicHeaderObj.size = self.basicHeaderSize #bytes
945 949 self.basicHeaderObj.version = self.versionFile
946 950 self.basicHeaderObj.dataBlock = self.nTotalBlocks
947 951
948 952 utc = numpy.floor(self.dataOut.utctime)
949 953 milisecond = (self.dataOut.utctime - utc)* 1000.0
950 954
951 955 self.basicHeaderObj.utc = utc
952 956 self.basicHeaderObj.miliSecond = milisecond
953 957 self.basicHeaderObj.timeZone = 0
954 958 self.basicHeaderObj.dstFlag = 0
955 959 self.basicHeaderObj.errorCount = 0
956 960
957 961 def __writeFirstHeader(self):
958 962 """
959 963 Escribe el primer header del file es decir el Basic header y el Long header (SystemHeader, RadarControllerHeader, ProcessingHeader)
960 964
961 965 Affected:
962 966 __dataType
963 967
964 968 Return:
965 969 None
966 970 """
967 971
968 972 # CALCULAR PARAMETROS
969 973
970 974 sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
971 975 self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader
972 976
973 977 self.basicHeaderObj.write(self.fp)
974 978 self.systemHeaderObj.write(self.fp)
975 979 self.radarControllerHeaderObj.write(self.fp)
976 980 self.processingHeaderObj.write(self.fp)
977 981
978 982 self.dtype = self.dataOut.dtype
979 983
980 984 def __setNewBlock(self):
981 985 """
982 986 Si es un nuevo file escribe el First Header caso contrario escribe solo el Basic Header
983 987
984 988 Return:
985 989 0 : si no pudo escribir nada
986 990 1 : Si escribio el Basic el First Header
987 991 """
988 992 if self.fp == None:
989 993 self.setNextFile()
990 994
991 995 if self.flagIsNewFile:
992 996 return 1
993 997
994 998 if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
995 999 self.basicHeaderObj.write(self.fp)
996 1000 return 1
997 1001
998 1002 if not( self.setNextFile() ):
999 1003 return 0
1000 1004
1001 1005 return 1
1002 1006
1003 1007
1004 1008 def writeNextBlock(self):
1005 1009 """
1006 1010 Selecciona el bloque siguiente de datos y los escribe en un file
1007 1011
1008 1012 Return:
1009 1013 0 : Si no hizo pudo escribir el bloque de datos
1010 1014 1 : Si no pudo escribir el bloque de datos
1011 1015 """
1012 1016 if not( self.__setNewBlock() ):
1013 1017 return 0
1014 1018
1015 1019 self.writeBlock()
1016 1020
1017 1021 return 1
1018 1022
1019 1023 def setNextFile(self):
1020 1024 """
1021 1025 Determina el siguiente file que sera escrito
1022 1026
1023 1027 Affected:
1024 1028 self.filename
1025 1029 self.subfolder
1026 1030 self.fp
1027 1031 self.setFile
1028 1032 self.flagIsNewFile
1029 1033
1030 1034 Return:
1031 1035 0 : Si el archivo no puede ser escrito
1032 1036 1 : Si el archivo esta listo para ser escrito
1033 1037 """
1034 1038 ext = self.ext
1035 1039 path = self.path
1036 1040
1037 1041 if self.fp != None:
1038 1042 self.fp.close()
1039 1043
1040 1044 timeTuple = time.localtime( self.dataOut.utctime)
1041 1045 subfolder = 'd%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)
1042 1046
1043 1047 fullpath = os.path.join( path, subfolder )
1044 1048 if not( os.path.exists(fullpath) ):
1045 1049 os.mkdir(fullpath)
1046 1050 self.setFile = -1 #inicializo mi contador de seteo
1047 1051 else:
1048 1052 filesList = os.listdir( fullpath )
1049 1053 if len( filesList ) > 0:
1050 1054 filesList = sorted( filesList, key=str.lower )
1051 1055 filen = filesList[-1]
1052 1056 # el filename debera tener el siguiente formato
1053 1057 # 0 1234 567 89A BCDE (hex)
1054 1058 # x YYYY DDD SSS .ext
1055 1059 if isNumber( filen[8:11] ):
1056 1060 self.setFile = int( filen[8:11] ) #inicializo mi contador de seteo al seteo del ultimo file
1057 1061 else:
1058 1062 self.setFile = -1
1059 1063 else:
1060 1064 self.setFile = -1 #inicializo mi contador de seteo
1061 1065
1062 1066 setFile = self.setFile
1063 1067 setFile += 1
1064 1068
1065 1069 file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
1066 1070 timeTuple.tm_year,
1067 1071 timeTuple.tm_yday,
1068 1072 setFile,
1069 1073 ext )
1070 1074
1071 1075 filename = os.path.join( path, subfolder, file )
1072 1076
1073 1077 fp = open( filename,'wb' )
1074 1078
1075 1079 self.blockIndex = 0
1076 1080
1077 1081 #guardando atributos
1078 1082 self.filename = filename
1079 1083 self.subfolder = subfolder
1080 1084 self.fp = fp
1081 1085 self.setFile = setFile
1082 1086 self.flagIsNewFile = 1
1083 1087
1084 1088 self.getDataHeader()
1085 1089
1086 1090 print 'Writing the file: %s'%self.filename
1087 1091
1088 1092 self.__writeFirstHeader()
1089 1093
1090 1094 return 1
1091 1095
1092 1096 def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
1093 1097 """
1094 1098 Setea el tipo de formato en la cual sera guardada la data y escribe el First Header
1095 1099
1096 1100 Inputs:
1097 1101 path : el path destino en el cual se escribiran los files a crear
1098 1102 format : formato en el cual sera salvado un file
1099 1103 set : el setebo del file
1100 1104
1101 1105 Return:
1102 1106 0 : Si no realizo un buen seteo
1103 1107 1 : Si realizo un buen seteo
1104 1108 """
1105 1109
1106 1110 if ext == None:
1107 1111 ext = self.ext
1108 1112
1109 1113 ext = ext.lower()
1110 1114
1111 1115 self.ext = ext
1112 1116
1113 1117 self.path = path
1114 1118
1115 1119 self.setFile = set - 1
1116 1120
1117 1121 self.blocksPerFile = blocksPerFile
1118 1122
1119 1123 self.profilesPerBlock = profilesPerBlock
1120 1124
1121 1125 self.dataOut = dataOut
1122 1126
1123 1127 if not(self.setNextFile()):
1124 1128 print "There isn't a next file"
1125 1129 return 0
1126 1130
1127 1131 self.setBlockDimension()
1128 1132
1129 1133 return 1
1130 1134
def run(self, dataOut, **kwargs):
    """
    Writer entry point: perform the one-time setup on the first call and
    then push the current data unit to the output file.

    Inputs:
        dataOut : data object to be written
        kwargs  : forwarded to setup() on the first call only
    """

    configured = self.isConfig

    if not configured:
        self.setup(dataOut, **kwargs)
        self.isConfig = True

    self.putData()
1139 1143
1140 1144 class VoltageReader(JRODataReader):
1141 1145 """
1142 1146 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1143 1147 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1144 1148 perfiles*alturas*canales) son almacenados en la variable "buffer".
1145 1149
1146 1150 perfiles * alturas * canales
1147 1151
1148 1152 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1149 1153 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1150 1154 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1151 1155 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1152 1156
1153 1157 Example:
1154 1158
1155 1159 dpath = "/home/myuser/data"
1156 1160
1157 1161 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1158 1162
1159 1163 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1160 1164
1161 1165 readerObj = VoltageReader()
1162 1166
1163 1167 readerObj.setup(dpath, startTime, endTime)
1164 1168
1165 1169 while(True):
1166 1170
1167 1171 #to get one profile
1168 1172 profile = readerObj.getData()
1169 1173
1170 1174 #print the profile
1171 1175 print profile
1172 1176
1173 1177 #If you want to see all datablock
1174 1178 print readerObj.datablock
1175 1179
1176 1180 if readerObj.flagNoMoreFiles:
1177 1181 break
1178 1182
1179 1183 """
1180 1184
1181 1185 ext = ".r"
1182 1186
1183 1187 optchar = "D"
1184 1188 dataOut = None
1185 1189
1186 1190
1187 1191 def __init__(self):
1188 1192 """
1189 1193 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1190 1194
1191 1195 Input:
1192 1196 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
1193 1197 almacenar un perfil de datos cada vez que se haga un requerimiento
1194 1198 (getData). El perfil sera obtenido a partir del buffer de datos,
1195 1199 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1196 1200 bloque de datos.
1197 1201 Si este parametro no es pasado se creara uno internamente.
1198 1202
1199 1203 Variables afectadas:
1200 1204 self.dataOut
1201 1205
1202 1206 Return:
1203 1207 None
1204 1208 """
1205 1209
1206 1210 self.isConfig = False
1207 1211
1208 1212 self.datablock = None
1209 1213
1210 1214 self.utc = 0
1211 1215
1212 1216 self.ext = ".r"
1213 1217
1214 1218 self.optchar = "D"
1215 1219
1216 1220 self.basicHeaderObj = BasicHeader(LOCALTIME)
1217 1221
1218 1222 self.systemHeaderObj = SystemHeader()
1219 1223
1220 1224 self.radarControllerHeaderObj = RadarControllerHeader()
1221 1225
1222 1226 self.processingHeaderObj = ProcessingHeader()
1223 1227
1224 1228 self.online = 0
1225 1229
1226 1230 self.fp = None
1227 1231
1228 1232 self.idFile = None
1229 1233
1230 1234 self.dtype = None
1231 1235
1232 1236 self.fileSizeByHeader = None
1233 1237
1234 1238 self.filenameList = []
1235 1239
1236 1240 self.filename = None
1237 1241
1238 1242 self.fileSize = None
1239 1243
1240 1244 self.firstHeaderSize = 0
1241 1245
1242 1246 self.basicHeaderSize = 24
1243 1247
1244 1248 self.pathList = []
1245 1249
1246 1250 self.filenameList = []
1247 1251
1248 1252 self.lastUTTime = 0
1249 1253
1250 1254 self.maxTimeStep = 30
1251 1255
1252 1256 self.flagNoMoreFiles = 0
1253 1257
1254 1258 self.set = 0
1255 1259
1256 1260 self.path = None
1257 1261
1258 1262 self.profileIndex = 9999
1259 1263
1260 1264 self.delay = 3 #seconds
1261 1265
1262 1266 self.nTries = 3 #quantity tries
1263 1267
1264 1268 self.nFiles = 3 #number of files for searching
1265 1269
1266 1270 self.nReadBlocks = 0
1267 1271
1268 1272 self.flagIsNewFile = 1
1269 1273
1270 1274 self.ippSeconds = 0
1271 1275
1272 1276 self.flagTimeBlock = 0
1273 1277
1274 1278 self.flagIsNewBlock = 0
1275 1279
1276 1280 self.nTotalBlocks = 0
1277 1281
1278 1282 self.blocksize = 0
1279 1283
1280 1284 self.dataOut = self.createObjByDefault()
1281 1285
1282 1286 def createObjByDefault(self):
1283 1287
1284 1288 dataObj = Voltage()
1285 1289
1286 1290 return dataObj
1287 1291
1288 1292 def __hasNotDataInBuffer(self):
1289 1293 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1290 1294 return 1
1291 1295 return 0
1292 1296
1293 1297
1294 1298 def getBlockDimension(self):
1295 1299 """
1296 1300 Obtiene la cantidad de puntos a leer por cada bloque de datos
1297 1301
1298 1302 Affected:
1299 1303 self.blocksize
1300 1304
1301 1305 Return:
1302 1306 None
1303 1307 """
1304 1308 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1305 1309 self.blocksize = pts2read
1306 1310
1307 1311
1308 1312 def readBlock(self):
1309 1313 """
1310 1314 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1311 1315 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1312 1316 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1313 1317 es seteado a 0
1314 1318
1315 1319 Inputs:
1316 1320 None
1317 1321
1318 1322 Return:
1319 1323 None
1320 1324
1321 1325 Affected:
1322 1326 self.profileIndex
1323 1327 self.datablock
1324 1328 self.flagIsNewFile
1325 1329 self.flagIsNewBlock
1326 1330 self.nTotalBlocks
1327 1331
1328 1332 Exceptions:
1329 1333 Si un bloque leido no es un bloque valido
1330 1334 """
1331 1335
1332 1336 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1333 1337
1334 1338 try:
1335 1339 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1336 1340 except:
1337 1341 print "The read block (%3d) has not enough data" %self.nReadBlocks
1338 1342 return 0
1339 1343
1340 1344 junk = numpy.transpose(junk, (2,0,1))
1341 1345 self.datablock = junk['real'] + junk['imag']*1j
1342 1346
1343 1347 self.profileIndex = 0
1344 1348
1345 1349 self.flagIsNewFile = 0
1346 1350 self.flagIsNewBlock = 1
1347 1351
1348 1352 self.nTotalBlocks += 1
1349 1353 self.nReadBlocks += 1
1350 1354
1351 1355 return 1
1352 1356
1353 1357
1354 1358 def getData(self):
1355 1359 """
1356 1360 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1357 1361 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1358 1362 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1359 1363
1360 1364 Ademas incrementa el contador del buffer en 1.
1361 1365
1362 1366 Return:
1363 1367 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1364 1368 buffer. Si no hay mas archivos a leer retorna None.
1365 1369
1366 1370 Variables afectadas:
1367 1371 self.dataOut
1368 1372 self.profileIndex
1369 1373
1370 1374 Affected:
1371 1375 self.dataOut
1372 1376 self.profileIndex
1373 1377 self.flagTimeBlock
1374 1378 self.flagIsNewBlock
1375 1379 """
1376 1380
1377 1381 if self.flagNoMoreFiles:
1378 1382 self.dataOut.flagNoData = True
1379 1383 print 'Process finished'
1380 1384 return 0
1381 1385
1382 1386 self.flagTimeBlock = 0
1383 1387 self.flagIsNewBlock = 0
1384 1388
1385 1389 if self.__hasNotDataInBuffer():
1386 1390
1387 1391 if not( self.readNextBlock() ):
1388 1392 return 0
1389 1393
1390 1394 self.dataOut.dtype = self.dtype
1391 1395
1392 1396 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1393 1397
1394 1398 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1395 1399
1396 1400 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1397 1401
1398 1402 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1399 1403
1400 1404 self.dataOut.flagTimeBlock = self.flagTimeBlock
1401 1405
1402 1406 self.dataOut.ippSeconds = self.ippSeconds
1403 1407
1404 1408 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1405 1409
1406 1410 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1407 1411
1408 1412 self.dataOut.flagShiftFFT = False
1409 1413
1410 1414 if self.radarControllerHeaderObj.code != None:
1411 1415
1412 1416 self.dataOut.nCode = self.radarControllerHeaderObj.nCode
1413 1417
1414 1418 self.dataOut.nBaud = self.radarControllerHeaderObj.nBaud
1415 1419
1416 1420 self.dataOut.code = self.radarControllerHeaderObj.code
1417 1421
1418 1422 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1419 1423
1420 1424 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1421 1425
1422 1426 self.dataOut.flagDecodeData = False #asumo q la data no esta decodificada
1423 1427
1424 1428 self.dataOut.flagDeflipData = False #asumo q la data no esta sin flip
1425 1429
1426 1430 self.dataOut.flagShiftFFT = False
1427 1431
1428 1432
1429 1433 # self.updateDataHeader()
1430 1434
1431 1435 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1432 1436
1433 1437 if self.datablock == None:
1434 1438 self.dataOut.flagNoData = True
1435 1439 return 0
1436 1440
1437 1441 self.dataOut.data = self.datablock[:,self.profileIndex,:]
1438 1442
1439 1443 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1440 1444
1441 1445 self.profileIndex += 1
1442 1446
1443 1447 self.dataOut.flagNoData = False
1444 1448
1445 1449 # print self.profileIndex, self.dataOut.utctime
1446 1450 # if self.profileIndex == 800:
1447 1451 # a=1
1448 1452
1449 1453
1450 1454 return self.dataOut.data
1451 1455
1452 1456
class VoltageWriter(JRODataWriter):
    """
    Writes voltage data to processed files (.r). Writing is always done by
    whole blocks.
    """

    ext = ".r"

    optchar = "D"

    shapeBuffer = None

    def __init__(self):
        """
        Initialize the VoltageWriter state.

        Affected:
            self.dataOut

        Return: None
        """

        self.nTotalBlocks = 0

        self.profileIndex = 0

        self.isConfig = False

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.filename = None

        self.basicHeaderObj = BasicHeader(LOCALTIME)

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()

    def hasAllDataInBuffer(self):
        # the block is complete when every profile slot has been filled
        if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
            return 1
        return 0

    def setBlockDimension(self):
        """
        Set the shapes of the buffers that make up one data block.

        Affected:
            self.shapeBuffer
            self.datablock

        Return: None
        """
        # on-disk layout: profiles x heights x channels
        self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
                            self.processingHeaderObj.nHeights,
                            self.systemHeaderObj.nChannels)

        # in-memory layout: channels x profiles x heights
        self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
                                      self.processingHeaderObj.profilesPerBlock,
                                      self.processingHeaderObj.nHeights),
                                     dtype=numpy.dtype('complex64'))

    def writeBlock(self):
        """
        Write the buffered block to the current output file.

        Affected:
            self.profileIndex
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.blockIndex

        Return: None
        """
        data = numpy.zeros( self.shapeBuffer, self.dtype )

        # reorder from channels x profiles x heights to the on-disk layout
        junk = numpy.transpose(self.datablock, (1,2,0))

        data['real'] = junk.real
        data['imag'] = junk.imag

        data = data.reshape( (-1) )

        data.tofile( self.fp )

        self.datablock.fill(0)

        self.profileIndex = 0
        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1

        self.blockIndex += 1
        self.nTotalBlocks += 1

    def putData(self):
        """
        Store one data unit into the block buffer and, when the buffer is
        full, write the block to the current output file.

        Affected:
            self.flagIsNewBlock
            self.profileIndex

        Return:
            0 : there is no data, or no more files can be written
            1 : the data unit was stored (and possibly a block written)
        """
        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            # time discontinuity: drop the partial block and open a new file
            self.datablock.fill(0)
            self.profileIndex = 0
            self.setNextFile()

        if self.profileIndex == 0:
            self.getBasicHeader()

        self.datablock[:,self.profileIndex,:] = self.dataOut.data

        self.profileIndex += 1

        if self.hasAllDataInBuffer():
            self.writeNextBlock()

        return 1

    def __getProcessFlags(self):
        """
        Build the PROCFLAG bit field (stored in the processing header) from
        the output dtype and the processing steps applied to the data.
        """

        processFlags = 0

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        # identity check: code may be a numpy array, "!= None" would
        # compare elementwise
        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nCohInt > 1:
            processFlags += PROCFLAG.COHERENT_INTEGRATION

        return processFlags

    def __getBlockSize(self):
        '''
        Return the size in bytes of one Voltage data block.
        '''

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        # bytes per component (real or imag) for each dtype above
        datatypeValueList = [1,2,4,8,4,8]
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break

        # factor 2: each sample stores a real and an imaginary component
        blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.dataOut.nProfiles * datatypeValue * 2)

        return blocksize

    def getDataHeader(self):

        """
        Build a copy of the First Header from the current data object.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40 # bytes
        self.processingHeaderObj.dtype = 0 # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1 # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
        self.processingHeaderObj.nIncohInt = 1 # source data is of Voltage type
        self.processingHeaderObj.totalSpectra = 0 # source data is of Voltage type

        # identity check: code may be a numpy array
        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += codesize

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            processingHeaderSize += 12

        self.processingHeaderObj.size = processingHeaderSize
1717 1721
1718 1722 class SpectraReader(JRODataReader):
1719 1723 """
1720 1724 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1721 1725 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1722 1726 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1723 1727
1724 1728 paresCanalesIguales * alturas * perfiles (Self Spectra)
1725 1729 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1726 1730 canales * alturas (DC Channels)
1727 1731
1728 1732 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1729 1733 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1730 1734 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1731 1735 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1732 1736
1733 1737 Example:
1734 1738 dpath = "/home/myuser/data"
1735 1739
1736 1740 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1737 1741
1738 1742 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1739 1743
1740 1744 readerObj = SpectraReader()
1741 1745
1742 1746 readerObj.setup(dpath, startTime, endTime)
1743 1747
1744 1748 while(True):
1745 1749
1746 1750 readerObj.getData()
1747 1751
1748 1752 print readerObj.data_spc
1749 1753
1750 1754 print readerObj.data_cspc
1751 1755
1752 1756 print readerObj.data_dc
1753 1757
1754 1758 if readerObj.flagNoMoreFiles:
1755 1759 break
1756 1760
1757 1761 """
1758 1762
1759 1763 pts2read_SelfSpectra = 0
1760 1764
1761 1765 pts2read_CrossSpectra = 0
1762 1766
1763 1767 pts2read_DCchannels = 0
1764 1768
1765 1769 ext = ".pdata"
1766 1770
1767 1771 optchar = "P"
1768 1772
1769 1773 dataOut = None
1770 1774
1771 1775 nRdChannels = None
1772 1776
1773 1777 nRdPairs = None
1774 1778
1775 1779 rdPairList = []
1776 1780
1777 1781
1778 1782 def __init__(self):
1779 1783 """
1780 1784 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1781 1785
1782 1786 Inputs:
1783 1787 dataOut : Objeto de la clase Spectra. Este objeto sera utilizado para
1784 1788 almacenar un perfil de datos cada vez que se haga un requerimiento
1785 1789 (getData). El perfil sera obtenido a partir del buffer de datos,
1786 1790 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1787 1791 bloque de datos.
1788 1792 Si este parametro no es pasado se creara uno internamente.
1789 1793
1790 1794 Affected:
1791 1795 self.dataOut
1792 1796
1793 1797 Return : None
1794 1798 """
1795 1799
1796 1800 self.isConfig = False
1797 1801
1798 1802 self.pts2read_SelfSpectra = 0
1799 1803
1800 1804 self.pts2read_CrossSpectra = 0
1801 1805
1802 1806 self.pts2read_DCchannels = 0
1803 1807
1804 1808 self.datablock = None
1805 1809
1806 1810 self.utc = None
1807 1811
1808 1812 self.ext = ".pdata"
1809 1813
1810 1814 self.optchar = "P"
1811 1815
1812 1816 self.basicHeaderObj = BasicHeader(LOCALTIME)
1813 1817
1814 1818 self.systemHeaderObj = SystemHeader()
1815 1819
1816 1820 self.radarControllerHeaderObj = RadarControllerHeader()
1817 1821
1818 1822 self.processingHeaderObj = ProcessingHeader()
1819 1823
1820 1824 self.online = 0
1821 1825
1822 1826 self.fp = None
1823 1827
1824 1828 self.idFile = None
1825 1829
1826 1830 self.dtype = None
1827 1831
1828 1832 self.fileSizeByHeader = None
1829 1833
1830 1834 self.filenameList = []
1831 1835
1832 1836 self.filename = None
1833 1837
1834 1838 self.fileSize = None
1835 1839
1836 1840 self.firstHeaderSize = 0
1837 1841
1838 1842 self.basicHeaderSize = 24
1839 1843
1840 1844 self.pathList = []
1841 1845
1842 1846 self.lastUTTime = 0
1843 1847
1844 1848 self.maxTimeStep = 30
1845 1849
1846 1850 self.flagNoMoreFiles = 0
1847 1851
1848 1852 self.set = 0
1849 1853
1850 1854 self.path = None
1851 1855
1852 self.delay = 3 #seconds
1856 self.delay = 60 #seconds
1853 1857
1854 1858 self.nTries = 3 #quantity tries
1855 1859
1856 1860 self.nFiles = 3 #number of files for searching
1857 1861
1858 1862 self.nReadBlocks = 0
1859 1863
1860 1864 self.flagIsNewFile = 1
1861 1865
1862 1866 self.ippSeconds = 0
1863 1867
1864 1868 self.flagTimeBlock = 0
1865 1869
1866 1870 self.flagIsNewBlock = 0
1867 1871
1868 1872 self.nTotalBlocks = 0
1869 1873
1870 1874 self.blocksize = 0
1871 1875
1872 1876 self.dataOut = self.createObjByDefault()
1873 1877
1874 1878
1875 1879 def createObjByDefault(self):
1876 1880
1877 1881 dataObj = Spectra()
1878 1882
1879 1883 return dataObj
1880 1884
1881 1885 def __hasNotDataInBuffer(self):
1882 1886 return 1
1883 1887
1884 1888
1885 1889 def getBlockDimension(self):
1886 1890 """
1887 1891 Obtiene la cantidad de puntos a leer por cada bloque de datos
1888 1892
1889 1893 Affected:
1890 1894 self.nRdChannels
1891 1895 self.nRdPairs
1892 1896 self.pts2read_SelfSpectra
1893 1897 self.pts2read_CrossSpectra
1894 1898 self.pts2read_DCchannels
1895 1899 self.blocksize
1896 1900 self.dataOut.nChannels
1897 1901 self.dataOut.nPairs
1898 1902
1899 1903 Return:
1900 1904 None
1901 1905 """
1902 1906 self.nRdChannels = 0
1903 1907 self.nRdPairs = 0
1904 1908 self.rdPairList = []
1905 1909
1906 1910 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1907 1911 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1908 1912 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1909 1913 else:
1910 1914 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1911 1915 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1912 1916
1913 1917 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1914 1918
1915 1919 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1916 1920 self.blocksize = self.pts2read_SelfSpectra
1917 1921
1918 1922 if self.processingHeaderObj.flag_cspc:
1919 1923 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1920 1924 self.blocksize += self.pts2read_CrossSpectra
1921 1925
1922 1926 if self.processingHeaderObj.flag_dc:
1923 1927 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1924 1928 self.blocksize += self.pts2read_DCchannels
1925 1929
1926 1930 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1927 1931
1928 1932
1929 1933 def readBlock(self):
1930 1934 """
1931 1935 Lee el bloque de datos desde la posicion actual del puntero del archivo
1932 1936 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1933 1937 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1934 1938 es seteado a 0
1935 1939
1936 1940 Return: None
1937 1941
1938 1942 Variables afectadas:
1939 1943
1940 1944 self.flagIsNewFile
1941 1945 self.flagIsNewBlock
1942 1946 self.nTotalBlocks
1943 1947 self.data_spc
1944 1948 self.data_cspc
1945 1949 self.data_dc
1946 1950
1947 1951 Exceptions:
1948 1952 Si un bloque leido no es un bloque valido
1949 1953 """
1950 1954 blockOk_flag = False
1951 1955 fpointer = self.fp.tell()
1952 1956
1953 1957 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1954 1958 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1955 1959
1956 1960 if self.processingHeaderObj.flag_cspc:
1957 1961 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1958 1962 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1959 1963
1960 1964 if self.processingHeaderObj.flag_dc:
1961 1965 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1962 1966 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1963 1967
1964 1968
1965 1969 if not(self.processingHeaderObj.shif_fft):
1966 1970 #desplaza a la derecha en el eje 2 determinadas posiciones
1967 1971 shift = int(self.processingHeaderObj.profilesPerBlock/2)
1968 1972 spc = numpy.roll( spc, shift , axis=2 )
1969 1973
1970 1974 if self.processingHeaderObj.flag_cspc:
1971 1975 #desplaza a la derecha en el eje 2 determinadas posiciones
1972 1976 cspc = numpy.roll( cspc, shift, axis=2 )
1973 1977
1974 1978 # self.processingHeaderObj.shif_fft = True
1975 1979
1976 1980 spc = numpy.transpose( spc, (0,2,1) )
1977 1981 self.data_spc = spc
1978 1982
1979 1983 if self.processingHeaderObj.flag_cspc:
1980 1984 cspc = numpy.transpose( cspc, (0,2,1) )
1981 1985 self.data_cspc = cspc['real'] + cspc['imag']*1j
1982 1986 else:
1983 1987 self.data_cspc = None
1984 1988
1985 1989 if self.processingHeaderObj.flag_dc:
1986 1990 self.data_dc = dc['real'] + dc['imag']*1j
1987 1991 else:
1988 1992 self.data_dc = None
1989 1993
1990 1994 self.flagIsNewFile = 0
1991 1995 self.flagIsNewBlock = 1
1992 1996
1993 1997 self.nTotalBlocks += 1
1994 1998 self.nReadBlocks += 1
1995 1999
1996 2000 return 1
1997 2001
1998 2002
1999 2003 def getData(self):
2000 2004 """
2001 2005 Copia el buffer de lectura a la clase "Spectra",
2002 2006 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
2003 2007 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
2004 2008
2005 2009 Return:
2006 2010 0 : Si no hay mas archivos disponibles
2007 2011 1 : Si hizo una buena copia del buffer
2008 2012
2009 2013 Affected:
2010 2014 self.dataOut
2011 2015
2012 2016 self.flagTimeBlock
2013 2017 self.flagIsNewBlock
2014 2018 """
2015 2019
2016 2020 if self.flagNoMoreFiles:
2017 2021 self.dataOut.flagNoData = True
2018 2022 print 'Process finished'
2019 2023 return 0
2020 2024
2021 2025 self.flagTimeBlock = 0
2022 2026 self.flagIsNewBlock = 0
2023 2027
2024 2028 if self.__hasNotDataInBuffer():
2025 2029
2026 2030 if not( self.readNextBlock() ):
2027 2031 self.dataOut.flagNoData = True
2028 2032 return 0
2029 2033
2030 2034 # self.updateDataHeader()
2031 2035
2032 2036 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
2033 2037
2034 2038 if self.data_dc == None:
2035 2039 self.dataOut.flagNoData = True
2036 2040 return 0
2037 2041
2038 2042 self.dataOut.data_spc = self.data_spc
2039 2043
2040 2044 self.dataOut.data_cspc = self.data_cspc
2041 2045
2042 2046 self.dataOut.data_dc = self.data_dc
2043 2047
2044 2048 self.dataOut.flagTimeBlock = self.flagTimeBlock
2045 2049
2046 2050 self.dataOut.flagNoData = False
2047 2051
2048 2052 self.dataOut.dtype = self.dtype
2049 2053
2050 2054 # self.dataOut.nChannels = self.nRdChannels
2051 2055
2052 2056 self.dataOut.nPairs = self.nRdPairs
2053 2057
2054 2058 self.dataOut.pairsList = self.rdPairList
2055 2059
2056 2060 # self.dataOut.nHeights = self.processingHeaderObj.nHeights
2057 2061
2058 2062 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
2059 2063
2060 2064 self.dataOut.nFFTPoints = self.processingHeaderObj.profilesPerBlock
2061 2065
2062 2066 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
2063 2067
2064 2068 self.dataOut.nIncohInt = self.processingHeaderObj.nIncohInt
2065 2069
2066 2070 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
2067 2071
2068 2072 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
2069 2073
2070 2074 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
2071 2075
2072 2076 # self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
2073 2077
2074 2078 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
2075 2079
2076 2080 self.dataOut.ippSeconds = self.ippSeconds
2077 2081
2078 2082 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOut.nFFTPoints
2079 2083
2080 2084 # self.profileIndex += 1
2081 2085
2082 2086 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
2083 2087
2084 2088 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
2085 2089
2086 2090 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
2087 2091
2088 2092 self.dataOut.flagDecodeData = False #asumo q la data no esta decodificada
2089 2093
2090 2094 self.dataOut.flagDeflipData = True #asumo q la data no esta sin flip
2091 2095
2092 2096 if self.processingHeaderObj.code != None:
2093 2097
2094 2098 self.dataOut.nCode = self.processingHeaderObj.nCode
2095 2099
2096 2100 self.dataOut.nBaud = self.processingHeaderObj.nBaud
2097 2101
2098 2102 self.dataOut.code = self.processingHeaderObj.code
2099 2103
2100 2104 self.dataOut.flagDecodeData = True
2101 2105
2102 2106 return self.dataOut.data_spc
2103 2107
2104 2108
class SpectraWriter(JRODataWriter):

    """
    Writes processed spectra data to .pdata files. Data is always
    written to disk one whole block at a time.
    """

    ext = ".pdata"

    optchar = "P"

    # Buffer shapes, filled in by setBlockDimension():
    shape_spc_Buffer = None     # self-spectra:  (nChannels, nHeights, profilesPerBlock)

    shape_cspc_Buffer = None    # cross-spectra: (nPairs, nHeights, profilesPerBlock)

    shape_dc_Buffer = None      # DC channels:   (nChannels, nHeights)

    data_spc = None

    data_cspc = None

    data_dc = None

#    dataOut = None

    def __init__(self):
        """
        SpectraWriter initializer.

        Affected:
            self.dataOut
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return: None
        """

        self.isConfig = False

        self.nTotalBlocks = 0

        self.data_spc = None

        self.data_cspc = None

        self.data_dc = None

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.noMoreFiles = 0

        self.filename = None

        self.basicHeaderObj = BasicHeader(LOCALTIME)

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()


    def hasAllDataInBuffer(self):
        # A single dataOut always fills one complete block.
        return 1


    def setBlockDimension(self):
        """
        Computes the shapes of the sub-blocks (self-spectra, cross-spectra
        and DC channels) that make up one data block.

        Affected:
            self.shape_spc_Buffer
            self.shape_cspc_Buffer
            self.shape_dc_Buffer

        Return: None
        """
        self.shape_spc_Buffer = (self.dataOut.nChannels,
                                 self.processingHeaderObj.nHeights,
                                 self.processingHeaderObj.profilesPerBlock)

        self.shape_cspc_Buffer = (self.dataOut.nPairs,
                                  self.processingHeaderObj.nHeights,
                                  self.processingHeaderObj.profilesPerBlock)

        self.shape_dc_Buffer = (self.dataOut.nChannels,
                                self.processingHeaderObj.nHeights)


    def writeBlock(self):
        """
        Writes the buffered block to the file currently open in self.fp.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.nWriteBlocks

        Return: None
        """

        spc = numpy.transpose( self.data_spc, (0,2,1) )
        if not( self.processingHeaderObj.shif_fft ):
            spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #undo the FFT shift along the profiles axis
        data = spc.reshape((-1))
        data = data.astype(self.dtype[0])
        data.tofile(self.fp)

        # was "!= None": comparison against None must use "is (not) None",
        # since "!=" is ambiguous/elementwise for numpy arrays
        if self.data_cspc is not None:
            data = numpy.zeros( self.shape_cspc_Buffer, self.dtype )
            cspc = numpy.transpose( self.data_cspc, (0,2,1) )
            if not( self.processingHeaderObj.shif_fft ):
                cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #undo the FFT shift along the profiles axis
            data['real'] = cspc.real
            data['imag'] = cspc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        if self.data_dc is not None:
            data = numpy.zeros( self.shape_dc_Buffer, self.dtype )
            dc = self.data_dc
            data['real'] = dc.real
            data['imag'] = dc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        self.data_spc.fill(0)
        if self.data_dc is not None:    # guard: DC buffer is optional
            self.data_dc.fill(0)
        if self.data_cspc is not None:  # guard: cross-spectra buffer is optional
            self.data_cspc.fill(0)

        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1
        self.nTotalBlocks += 1
        self.nWriteBlocks += 1
        self.blockIndex += 1


    def putData(self):
        """
        Buffers one block of data and then writes it to a file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc

        Return:
            0 : if there is no data or no more files can be written
            1 : if one data block was written to a file
        """

        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            self.data_spc.fill(0)
            if self.data_cspc is not None:  # guard: may never have been filled
                self.data_cspc.fill(0)
            if self.data_dc is not None:
                self.data_dc.fill(0)
            self.setNextFile()

        if self.flagIsNewFile == 0:
            self.getBasicHeader()

        self.data_spc = self.dataOut.data_spc.copy()
        if self.dataOut.data_cspc is not None:  # cross-spectra/DC are optional products
            self.data_cspc = self.dataOut.data_cspc.copy()
        if self.dataOut.data_dc is not None:
            self.data_dc = self.dataOut.data_dc.copy()

        # print "sale",self.processingHeaderObj.dataBlocksPerFile)
        if self.hasAllDataInBuffer():
#            self.getDataHeader()
            self.writeNextBlock()

        return 1


    def __getProcessFlags(self):
        """
        Builds the PROCFLAG bitmask describing the datatype and the
        processing applied to the data.

        Return:
            processFlags : integer bitmask of PROCFLAG values
        """

        processFlags = 0

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        if self.dataOut.code is not None:   # code may be a numpy array
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nIncohInt > 1:
            processFlags += PROCFLAG.INCOHERENT_INTEGRATION

        if self.dataOut.data_dc is not None:
            processFlags += PROCFLAG.SAVE_CHANNELS_DC

        return processFlags


    def __getBlockSize(self):
        '''
        Determines the number of bytes of one Spectra data block.
        '''

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        datatypeValueList = [1,2,4,8,4,8]   # bytes per sample for each dtype
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break


        pts2write = self.dataOut.nHeights * self.dataOut.nFFTPoints

        pts2write_SelfSpectra = int(self.dataOut.nChannels * pts2write)
        blocksize = (pts2write_SelfSpectra*datatypeValue)

        # cross-spectra and DC channels are complex, hence the extra factor 2
        if self.dataOut.data_cspc is not None:
            pts2write_CrossSpectra = int(self.dataOut.nPairs * pts2write)
            blocksize += (pts2write_CrossSpectra*datatypeValue*2)

        if self.dataOut.data_dc is not None:
            pts2write_DCchannels = int(self.dataOut.nChannels * self.dataOut.nHeights)
            blocksize += (pts2write_DCchannels*datatypeValue*2)

        blocksize = blocksize #* datatypeValue * 2 # TODO(review): original had this factor commented out as suspect

        return blocksize

    def getDataHeader(self):

        """
        Builds the first-block headers from the current dataOut object.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.dtype

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40 # bytes
        self.processingHeaderObj.dtype = 0 # NOTE(review): 0 is the Voltage datatype code; confirm a Spectra writer should not use its own code here
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.dataOut.nFFTPoints
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1 # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt # needed to compute timeInterval
        self.processingHeaderObj.nIncohInt = self.dataOut.nIncohInt
        self.processingHeaderObj.totalSpectra = self.dataOut.nPairs + self.dataOut.nChannels

        if self.processingHeaderObj.totalSpectra > 0:
            channelList = []
            for channel in range(self.dataOut.nChannels):
                # each self-spectrum is stored as the pair (channel, channel)
                channelList.append(channel)
                channelList.append(channel)

            pairsList = []
            for pair in self.dataOut.pairsList:
                pairsList.append(pair[0])
                pairsList.append(pair[1])
            spectraComb = channelList + pairsList
            spectraComb = numpy.array(spectraComb,dtype="u1")
            self.processingHeaderObj.spectraComb = spectraComb
            sizeOfSpcComb = len(spectraComb)
            processingHeaderSize += sizeOfSpcComb

        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            nCodeSize = 4 # bytes
            nBaudSize = 4 # bytes
            codeSize = 4 # bytes
            sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += sizeOfCode

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            sizeOfFirstHeight = 4
            sizeOfdeltaHeight = 4
            sizeOfnHeights = 4
            sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights)*self.processingHeaderObj.nWindows
            processingHeaderSize += sizeOfWindows

        self.processingHeaderObj.size = processingHeaderSize
2455 2459
class SpectraHeisWriter():
    """
    Writes Heis spectra to FITS files, one new file per call to putData().
    """

    i=0

    def __init__(self, dataOut):

        # FITS (defined in this module) wraps pyfits HDU/table creation
        self.wrObj = FITS()
        self.dataOut = dataOut

    @staticmethod
    def isNumber(str):
        """
        Checks whether a string can be converted to a number.

        Input:
            str : string to analyze

        Return:
            True  : the string is numeric
            False : the string is not numeric
        """
        # NOTE: this used to be an instance method whose first parameter
        # swallowed "self", so instance calls could never work as intended;
        # @staticmethod makes it callable both on the class and on instances.
        try:
            float( str )
            return True
        except:
            return False

    def setup(self, wrpath):

        if not(os.path.exists(wrpath)):
            # makedirs also creates any missing parent directories
            os.makedirs(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Writes the current dataOut spectra (8 channels, in dB) to a new
        FITS file under <wrpath>/D<year><doy>/.

        Return:
            1 : the file was written
        """
        name = time.localtime( self.dataOut.utctime)
        ext = ".fits"
        subfolder = 'D%4.4d%3.3d' % (name.tm_year,name.tm_yday)

        fullpath = os.path.join( self.wrpath, subfolder )
        if not( os.path.exists(fullpath) ):
            os.mkdir(fullpath)
        self.setFile += 1
        # renamed from "file": avoid shadowing the builtin
        basename = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year,name.tm_yday,self.setFile,ext)

        filename = os.path.join(self.wrpath, subfolder, basename)

        # frequency axis shared by all power columns
        freq = numpy.arange(-1*self.dataOut.nHeights/2., self.dataOut.nHeights/2.)/(2*self.dataOut.ippSeconds)

        colFormat = str(self.dataOut.nFFTPoints)+'E'
        cols = [ self.wrObj.setColF(name="freq", format=colFormat, array=freq) ]
        # one power column (in dB) per channel: P_Ch1 .. P_Ch8
        for channel in range(8):
            cols.append(self.wrObj.writeData(name="P_Ch%d" % (channel+1),
                                             format=colFormat,
                                             data=10*numpy.log10(self.dataOut.data_spc[channel,:])))

        n = self.dataOut.data_spc[6,:]
        a = self.wrObj.cFImage(n)
        b = self.wrObj.Ctable(*cols)
        self.wrObj.CFile(a,b)
        self.wrObj.wFile(filename)
        return 1
2527 2531
class FITS:
    """
    Small convenience wrapper around pyfits: builds float32 columns, a
    primary image HDU and a binary table, then writes everything out as
    a single FITS file.
    """

    name = None
    format = None
    array = None
    data = None
    thdulist = None

    def __init__(self):

        pass

    def setColF(self, name, format, array):
        """Build and return a float32 FITS column from *array*."""
        self.name = name
        self.format = format
        self.array = array
        values = numpy.array([array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=name, format=format, array=values)
        return self.col1

    def writeHeader(self):
        pass

    def writeData(self, name, format, data):
        """Build and return a float32 FITS column from *data*."""
        self.name = name
        self.format = format
        self.data = data
        samples = numpy.array([data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=name, format=format, array=samples)
        return self.col2

    def cFImage(self, n):
        """Wrap the array *n* in a primary HDU."""
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        """Build a binary-table HDU from the nine given columns."""
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        """Assemble the HDU list to be written later by wFile()."""
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        """Write the assembled HDU list to *filename*."""
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now