##// END OF EJS Templates
imprime numero de bloques
Daniel Valdez -
r270:e2cf46a421f2
parent child
Show More
@@ -1,2574 +1,2579
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from jrodata import *
15 15 from jroheaderIO import *
16 16 from jroprocessing import *
17 17
18 18 LOCALTIME = -18000
19 19
def isNumber(str):
    """
    Check whether the given value can be converted to a number.

    Input:
        str : value to test for numeric convertibility

    Return:
        True  : the value is convertible to float
        False : it is not
    """
    try:
        float( str )
        return True
    # Narrowed from a bare except: float() only raises these two for bad
    # input; a bare except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return False
38 38
def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca raw data file contains data inside the
    specified UTC range.

    Inputs:
        filename       : full path of the data file in Jicamarca format (.r)

        startUTSeconds : start of the selected range, given in seconds
                         counted from 01/01/1970.
        endUTSeconds   : end of the selected range, given in seconds
                         counted from 01/01/1970.

    Return:
        1 if the file's first basic-header timestamp falls inside
        [startUTSeconds, endUTSeconds), 0 otherwise.

    Exceptions:
        IOError if the file does not exist or cannot be opened.
        Files whose header cannot be read are skipped (return 0).
    """
    basicHeaderObj = BasicHeader(LOCALTIME)

    try:
        fp = open(filename,'rb')
    except:
        raise IOError, "The file %s can't be opened" %(filename)

    # only the first basic header is needed to classify the file
    sts = basicHeaderObj.read(fp)
    fp.close()

    if not(sts):
        print "Skipping the file %s because it has not a valid header" %(filename)
        return 0

    # inclusive at the start, exclusive at the end
    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
78 78
79 79 def isFileinThisTime(filename, startTime, endTime):
80 80 """
81 81 Retorna 1 si el archivo de datos se encuentra dentro del rango de horas especificado.
82 82
83 83 Inputs:
84 84 filename : nombre completo del archivo de datos en formato Jicamarca (.r)
85 85
86 86 startTime : tiempo inicial del rango seleccionado en formato datetime.time
87 87
88 88 endTime : tiempo final del rango seleccionado en formato datetime.time
89 89
90 90 Return:
91 91 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
92 92 fecha especificado, de lo contrario retorna False.
93 93
94 94 Excepciones:
95 95 Si el archivo no existe o no puede ser abierto
96 96 Si la cabecera no puede ser leida.
97 97
98 98 """
99 99
100 100
101 101 try:
102 102 fp = open(filename,'rb')
103 103 except:
104 104 raise IOError, "The file %s can't be opened" %(filename)
105 105
106 106 basicHeaderObj = BasicHeader(LOCALTIME)
107 107 sts = basicHeaderObj.read(fp)
108 108 fp.close()
109 109
110 110 thisTime = basicHeaderObj.datatime.time()
111 111
112 112 if not(sts):
113 113 print "Skipping the file %s because it has not a valid header" %(filename)
114 114 return 0
115 115
116 116 if not ((startTime <= thisTime) and (endTime > thisTime)):
117 117 return 0
118 118
119 119 return 1
120 120
def getlastFileFromPath(path, ext):
    """
    Keep only the directory entries that follow the "PYYYYDDDSSS.ext" naming
    convention and return the last one in case-insensitive alphabetical order.

    Input:
        path : folder containing the data files
        ext  : extension of the files in that folder

    Return:
        The last valid filename (without path), or None if nothing matches.
    """
    # 0 1234 567 89A BCDE
    # H YYYY DDD SSS .ext
    def _looksLikeDataFile(name):
        # year and day-of-year fields must be numeric
        try:
            int(name[1:5])
            int(name[5:8])
        except Exception:
            return False
        # extension comparison is case-insensitive
        return os.path.splitext(name)[-1].lower() == ext.lower()

    candidates = sorted(
        (name for name in os.listdir(path) if _looksLikeDataFile(name)),
        key=str.lower)

    if candidates:
        return candidates[-1]

    return None
158 158
def checkForRealPath(path, year, doy, set, ext):
    """
    Because Linux is case sensitive, find the actual on-disk spelling of a
    data file path by probing upper/lower-case combinations of the folder
    prefix (none, 'd', 'D') and the file prefix ('d'/'D' for .r voltage
    files, 'p'/'P' for .pdata spectra files).

    Example:
        real file is .../.../D2009307/P2009307367.ext

        the function probes, in order:
            .../.../y2009307367.ext
            .../.../Y2009307367.ext
            .../.../x2009307/y2009307367.ext
            .../.../x2009307/Y2009307367.ext
            .../.../X2009307/y2009307367.ext
            .../.../X2009307/Y2009307367.ext

    Return:
        (fullpath, filename) of the first existing combination; otherwise
        (None, last_probed_filename). For an unknown extension the filename
        is None as well.
    """
    filename = None

    ext_lower = ext.lower()
    if ext_lower == ".r":        # voltage
        filePrefixes = ['d', 'D']
    elif ext_lower == ".pdata":  # spectra
        filePrefixes = ['p', 'P']
    else:
        return None, filename

    # None means "look directly in path" (no xYYYYDDD subfolder)
    for dirPrefix in [None, 'd', 'D']:
        if dirPrefix is None:
            searchPath = path
        else:
            # build the xYYYYDDD folder name (x = d or D)
            searchPath = os.path.join(path, "%s%04d%03d" % ( dirPrefix, year, doy ))

        for filePrefix in filePrefixes:
            # build the candidate xYYYYDDDSSS.ext filename
            filename = "%s%04d%03d%03d%s" % ( filePrefix, year, doy, set, ext )
            fullfilename = os.path.join( searchPath, filename )

            if os.path.exists( fullfilename ):
                return fullfilename, filename

    return None, filename
215 215
class JRODataIO:
    """
    Base class shared by the JRO readers and writers: holds the four
    Jicamarca file headers, the open-file state and the per-block
    bookkeeping used while reading or writing raw data files.
    """

    # speed of light [m/s]; used to convert the radar IPP into seconds
    c = 3E8

    # set to True once setup() has run (see run() in subclasses)
    isConfig = False

    basicHeaderObj = BasicHeader(LOCALTIME)

    systemHeaderObj = SystemHeader()

    radarControllerHeaderObj = RadarControllerHeader()

    processingHeaderObj = ProcessingHeader()

    # 1 when reading files as they are being produced, 0 for archived data
    online = 0

    # numpy dtype of a complex sample, derived from the processing header
    dtype = None

    pathList = []

    filenameList = []

    filename = None

    ext = None

    flagIsNewFile = 1

    # raised when the gap between consecutive blocks exceeds maxTimeStep
    flagTimeBlock = 0

    flagIsNewBlock = 0

    # currently open file object (None when no file is open)
    fp = None

    firstHeaderSize = 0

    # fixed size in bytes of a basic header record
    basicHeaderSize = 24

    versionFile = 1103

    fileSize = None

    ippSeconds = None

    # expected file size computed from the processing header fields
    fileSizeByHeader = None

    fileIndex = None

    profileIndex = None

    blockIndex = None

    nTotalBlocks = None

    # maximum allowed gap (seconds) between blocks before flagTimeBlock is set
    maxTimeStep = 30

    lastUTTime = None

    datablock = None

    # data container exchanged with the processing chain
    dataOut = None

    blocksize = None

    def __init__(self):

        # Abstract base: subclasses must provide their own constructor.
        raise ValueError, "Not implemented"

    def run(self):

        # Abstract: implemented by reader/writer subclasses.
        raise ValueError, "Not implemented"

    def getOutput(self):

        # Return the data container produced/consumed by this unit.
        return self.dataOut
291 291
292 292 class JRODataReader(JRODataIO, ProcessingUnit):
293 293
294 294 nReadBlocks = 0
295 295
296 296 delay = 10 #number of seconds waiting a new file
297 297
298 298 nTries = 3 #quantity tries
299 299
300 300 nFiles = 3 #number of files for searching
301 301
302 302 flagNoMoreFiles = 0
303 303
304 304 def __init__(self):
305 305
306 306 """
307 307
308 308 """
309 309
310 310 raise ValueError, "This method has not been implemented"
311 311
312 312
313 313 def createObjByDefault(self):
314 314 """
315 315
316 316 """
317 317 raise ValueError, "This method has not been implemented"
318 318
319 319 def getBlockDimension(self):
320 320
321 321 raise ValueError, "No implemented"
322 322
323 323 def __searchFilesOffLine(self,
324 324 path,
325 325 startDate,
326 326 endDate,
327 327 startTime=datetime.time(0,0,0),
328 328 endTime=datetime.time(23,59,59),
329 329 set=None,
330 330 expLabel='',
331 331 ext='.r',
332 332 walk=True):
333 333
334 334 pathList = []
335 335
336 336 if not walk:
337 337 pathList.append(path)
338 338
339 339 else:
340 340 dirList = []
341 341 for thisPath in os.listdir(path):
342 342 if os.path.isdir(os.path.join(path,thisPath)):
343 343 dirList.append(thisPath)
344 344
345 345 if not(dirList):
346 346 return None, None
347 347
348 348 thisDate = startDate
349 349
350 350 while(thisDate <= endDate):
351 351 year = thisDate.timetuple().tm_year
352 352 doy = thisDate.timetuple().tm_yday
353 353
354 354 match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
355 355 if len(match) == 0:
356 356 thisDate += datetime.timedelta(1)
357 357 continue
358 358
359 359 pathList.append(os.path.join(path,match[0],expLabel))
360 360 thisDate += datetime.timedelta(1)
361 361
362 362 if pathList == []:
363 363 print "Any folder was found for the date range: %s-%s" %(startDate, endDate)
364 364 return None, None
365 365
366 366 print "%d folder(s) was(were) found for the date range: %s-%s" %(len(pathList), startDate, endDate)
367 367
368 368 filenameList = []
369 369 for thisPath in pathList:
370 370
371 371 fileList = glob.glob1(thisPath, "*%s" %ext)
372 372 fileList.sort()
373 373
374 374 for file in fileList:
375 375
376 376 filename = os.path.join(thisPath,file)
377 377
378 378 if isFileinThisTime(filename, startTime, endTime):
379 379 filenameList.append(filename)
380 380
381 381 if not(filenameList):
382 382 print "Any file was found for the time range %s - %s" %(startTime, endTime)
383 383 return None, None
384 384
385 385 print "%d file(s) was(were) found for the time range: %s - %s" %(len(filenameList), startTime, endTime)
386 386
387 387 self.filenameList = filenameList
388 388
389 389 return pathList, filenameList
390 390
391 391 def __searchFilesOnLine(self, path, expLabel = "", ext = None, walk=True):
392 392
393 393 """
394 394 Busca el ultimo archivo de la ultima carpeta (determinada o no por startDateTime) y
395 395 devuelve el archivo encontrado ademas de otros datos.
396 396
397 397 Input:
398 398 path : carpeta donde estan contenidos los files que contiene data
399 399
400 400 expLabel : Nombre del subexperimento (subfolder)
401 401
402 402 ext : extension de los files
403 403
404 404 walk : Si es habilitado no realiza busquedas dentro de los ubdirectorios (doypath)
405 405
406 406 Return:
407 407 directory : eL directorio donde esta el file encontrado
408 408 filename : el ultimo file de una determinada carpeta
409 409 year : el anho
410 410 doy : el numero de dia del anho
411 411 set : el set del archivo
412 412
413 413
414 414 """
415 415 dirList = []
416 416
417 417 if walk:
418 418
419 419 #Filtra solo los directorios
420 420 for thisPath in os.listdir(path):
421 421 if os.path.isdir(os.path.join(path, thisPath)):
422 422 dirList.append(thisPath)
423 423
424 424 if not(dirList):
425 425 return None, None, None, None, None
426 426
427 427 dirList = sorted( dirList, key=str.lower )
428 428
429 429 doypath = dirList[-1]
430 430 fullpath = os.path.join(path, doypath, expLabel)
431 431
432 432 else:
433 433 fullpath = path
434 434
435 435 filename = getlastFileFromPath(fullpath, ext)
436 436
437 437 if not(filename):
438 438 return None, None, None, None, None
439 439
440 440 if not(self.__verifyFile(os.path.join(fullpath, filename))):
441 441 return None, None, None, None, None
442 442
443 443 year = int( filename[1:5] )
444 444 doy = int( filename[5:8] )
445 445 set = int( filename[8:11] )
446 446
447 447 return fullpath, filename, year, doy, set
448 448
449 449
450 450
451 451 def __setNextFileOffline(self):
452 452
453 453 idFile = self.fileIndex
454 454
455 455 while (True):
456 456 idFile += 1
457 457 if not(idFile < len(self.filenameList)):
458 458 self.flagNoMoreFiles = 1
459 459 print "No more Files"
460 460 return 0
461 461
462 462 filename = self.filenameList[idFile]
463 463
464 464 if not(self.__verifyFile(filename)):
465 465 continue
466 466
467 467 fileSize = os.path.getsize(filename)
468 468 fp = open(filename,'rb')
469 469 break
470 470
471 471 self.flagIsNewFile = 1
472 472 self.fileIndex = idFile
473 473 self.filename = filename
474 474 self.fileSize = fileSize
475 475 self.fp = fp
476 476
477 477 print "Setting the file: %s"%self.filename
478 478
479 479 return 1
480 480
481 481 def __setNextFileOnline(self):
482 482 """
483 483 Busca el siguiente file que tenga suficiente data para ser leida, dentro de un folder especifico, si
484 484 no encuentra un file valido espera un tiempo determinado y luego busca en los posibles n files
485 485 siguientes.
486 486
487 487 Affected:
488 488 self.flagIsNewFile
489 489 self.filename
490 490 self.fileSize
491 491 self.fp
492 492 self.set
493 493 self.flagNoMoreFiles
494 494
495 495 Return:
496 496 0 : si luego de una busqueda del siguiente file valido este no pudo ser encontrado
497 497 1 : si el file fue abierto con exito y esta listo a ser leido
498 498
499 499 Excepciones:
500 500 Si un determinado file no puede ser abierto
501 501 """
502 502 nFiles = 0
503 503 fileOk_flag = False
504 504 firstTime_flag = True
505 505
506 506 self.set += 1
507 507
508 508 #busca el 1er file disponible
509 509 fullfilename, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
510 510 if fullfilename:
511 511 if self.__verifyFile(fullfilename, False):
512 512 fileOk_flag = True
513 513
514 514 #si no encuentra un file entonces espera y vuelve a buscar
515 515 if not(fileOk_flag):
516 516 for nFiles in range(self.nFiles+1): #busco en los siguientes self.nFiles+1 files posibles
517 517
518 518 if firstTime_flag: #si es la 1era vez entonces hace el for self.nTries veces
519 519 tries = self.nTries
520 520 else:
521 521 tries = 1 #si no es la 1era vez entonces solo lo hace una vez
522 522
523 523 for nTries in range( tries ):
524 524 if firstTime_flag:
525 525 print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
526 526 time.sleep( self.delay )
527 527 else:
528 528 print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)
529 529
530 530 fullfilename, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
531 531 if fullfilename:
532 532 if self.__verifyFile(fullfilename):
533 533 fileOk_flag = True
534 534 break
535 535
536 536 if fileOk_flag:
537 537 break
538 538
539 539 firstTime_flag = False
540 540
541 541 print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
542 542 self.set += 1
543 543
544 544 if nFiles == (self.nFiles-1): #si no encuentro el file buscado cambio de carpeta y busco en la siguiente carpeta
545 545 self.set = 0
546 546 self.doy += 1
547 547
548 548 if fileOk_flag:
549 549 self.fileSize = os.path.getsize( fullfilename )
550 550 self.filename = fullfilename
551 551 self.flagIsNewFile = 1
552 552 if self.fp != None: self.fp.close()
553 553 self.fp = open(fullfilename, 'rb')
554 554 self.flagNoMoreFiles = 0
555 555 print 'Setting the file: %s' % fullfilename
556 556 else:
557 557 self.fileSize = 0
558 558 self.filename = None
559 559 self.flagIsNewFile = 0
560 560 self.fp = None
561 561 self.flagNoMoreFiles = 1
562 562 print 'No more Files'
563 563
564 564 return fileOk_flag
565 565
566 566
567 567 def setNextFile(self):
568 568 if self.fp != None:
569 569 self.fp.close()
570 570
571 571 if self.online:
572 572 newFile = self.__setNextFileOnline()
573 573 else:
574 574 newFile = self.__setNextFileOffline()
575 575
576 576 if not(newFile):
577 577 return 0
578 578
579 579 self.__readFirstHeader()
580 580 self.nReadBlocks = 0
581 581 return 1
582 582
583 583 def __waitNewBlock(self):
584 584 """
585 585 Return 1 si se encontro un nuevo bloque de datos, 0 de otra forma.
586 586
587 587 Si el modo de lectura es OffLine siempre retorn 0
588 588 """
589 589 if not self.online:
590 590 return 0
591 591
592 592 if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):
593 593 return 0
594 594
595 595 currentPointer = self.fp.tell()
596 596
597 597 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
598 598
599 599 for nTries in range( self.nTries ):
600 600
601 601 self.fp.close()
602 602 self.fp = open( self.filename, 'rb' )
603 603 self.fp.seek( currentPointer )
604 604
605 605 self.fileSize = os.path.getsize( self.filename )
606 606 currentSize = self.fileSize - currentPointer
607 607
608 608 if ( currentSize >= neededSize ):
609 609 self.__rdBasicHeader()
610 610 return 1
611 611
612 612 print "\tWaiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries+1)
613 613 time.sleep( self.delay )
614 614
615 615
616 616 return 0
617 617
618 618 def __setNewBlock(self):
619 619
620 620 if self.fp == None:
621 621 return 0
622 622
623 623 if self.flagIsNewFile:
624 624 return 1
625 625
626 626 self.lastUTTime = self.basicHeaderObj.utc
627 627 currentSize = self.fileSize - self.fp.tell()
628 628 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
629 629
630 630 if (currentSize >= neededSize):
631 631 self.__rdBasicHeader()
632 632 return 1
633 633
634 634 if self.__waitNewBlock():
635 635 return 1
636 636
637 637 if not(self.setNextFile()):
638 638 return 0
639 639
640 640 deltaTime = self.basicHeaderObj.utc - self.lastUTTime #
641 641
642 642 self.flagTimeBlock = 0
643 643
644 644 if deltaTime > self.maxTimeStep:
645 645 self.flagTimeBlock = 1
646 646
647 647 return 1
648 648
649 649
650 650 def readNextBlock(self):
651 651 if not(self.__setNewBlock()):
652 652 return 0
653 653
654 654 if not(self.readBlock()):
655 655 return 0
656 656
657 657 return 1
658 658
659 659 def __rdProcessingHeader(self, fp=None):
660 660 if fp == None:
661 661 fp = self.fp
662 662
663 663 self.processingHeaderObj.read(fp)
664 664
665 665 def __rdRadarControllerHeader(self, fp=None):
666 666 if fp == None:
667 667 fp = self.fp
668 668
669 669 self.radarControllerHeaderObj.read(fp)
670 670
671 671 def __rdSystemHeader(self, fp=None):
672 672 if fp == None:
673 673 fp = self.fp
674 674
675 675 self.systemHeaderObj.read(fp)
676 676
677 677 def __rdBasicHeader(self, fp=None):
678 678 if fp == None:
679 679 fp = self.fp
680 680
681 681 self.basicHeaderObj.read(fp)
682 682
683 683
684 684 def __readFirstHeader(self):
685 685 self.__rdBasicHeader()
686 686 self.__rdSystemHeader()
687 687 self.__rdRadarControllerHeader()
688 688 self.__rdProcessingHeader()
689 689
690 690 self.firstHeaderSize = self.basicHeaderObj.size
691 691
692 692 datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
693 693 if datatype == 0:
694 694 datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
695 695 elif datatype == 1:
696 696 datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
697 697 elif datatype == 2:
698 698 datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
699 699 elif datatype == 3:
700 700 datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
701 701 elif datatype == 4:
702 702 datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
703 703 elif datatype == 5:
704 704 datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
705 705 else:
706 706 raise ValueError, 'Data type was not defined'
707 707
708 708 self.dtype = datatype_str
709 709 self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
710 710 self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
711 711 # self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
712 712 # self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
713 713 self.getBlockDimension()
714 714
715 715
716 716 def __verifyFile(self, filename, msgFlag=True):
717 717 msg = None
718 718 try:
719 719 fp = open(filename, 'rb')
720 720 currentPosition = fp.tell()
721 721 except:
722 722 if msgFlag:
723 723 print "The file %s can't be opened" % (filename)
724 724 return False
725 725
726 726 neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize
727 727
728 728 if neededSize == 0:
729 729 basicHeaderObj = BasicHeader(LOCALTIME)
730 730 systemHeaderObj = SystemHeader()
731 731 radarControllerHeaderObj = RadarControllerHeader()
732 732 processingHeaderObj = ProcessingHeader()
733 733
734 734 try:
735 735 if not( basicHeaderObj.read(fp) ): raise IOError
736 736 if not( systemHeaderObj.read(fp) ): raise IOError
737 737 if not( radarControllerHeaderObj.read(fp) ): raise IOError
738 738 if not( processingHeaderObj.read(fp) ): raise IOError
739 739 data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
740 740
741 741 neededSize = processingHeaderObj.blockSize + basicHeaderObj.size
742 742
743 743 except:
744 744 if msgFlag:
745 745 print "\tThe file %s is empty or it hasn't enough data" % filename
746 746
747 747 fp.close()
748 748 return False
749 749 else:
750 750 msg = "\tSkipping the file %s due to it hasn't enough data" %filename
751 751
752 752 fp.close()
753 753 fileSize = os.path.getsize(filename)
754 754 currentSize = fileSize - currentPosition
755 755 if currentSize < neededSize:
756 756 if msgFlag and (msg != None):
757 757 print msg #print"\tSkipping the file %s due to it hasn't enough data" %filename
758 758 return False
759 759
760 760 return True
761 761
762 762 def setup(self,
763 763 path=None,
764 764 startDate=None,
765 765 endDate=None,
766 766 startTime=datetime.time(0,0,0),
767 767 endTime=datetime.time(23,59,59),
768 768 set=0,
769 769 expLabel = "",
770 770 ext = None,
771 771 online = False,
772 772 delay = 60,
773 773 walk = True):
774 774
775 775 if path == None:
776 776 raise ValueError, "The path is not valid"
777 777
778 778 if ext == None:
779 779 ext = self.ext
780 780
781 781 if online:
782 782 print "Searching files in online mode..."
783 783
784 784 for nTries in range( self.nTries ):
785 785 fullpath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext, walk=walk)
786 786
787 787 if fullpath:
788 788 break
789 789
790 790 print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
791 791 time.sleep( self.delay )
792 792
793 793 if not(fullpath):
794 794 print "There 'isn't valied files in %s" % path
795 795 return None
796 796
797 797 self.year = year
798 798 self.doy = doy
799 799 self.set = set - 1
800 800 self.path = path
801 801
802 802 else:
803 803 print "Searching files in offline mode ..."
804 804 pathList, filenameList = self.__searchFilesOffLine(path, startDate=startDate, endDate=endDate,
805 805 startTime=startTime, endTime=endTime,
806 806 set=set, expLabel=expLabel, ext=ext,
807 807 walk=walk)
808 808
809 809 if not(pathList):
810 810 print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
811 811 datetime.datetime.combine(startDate,startTime).ctime(),
812 812 datetime.datetime.combine(endDate,endTime).ctime())
813 813
814 814 sys.exit(-1)
815 815
816 816
817 817 self.fileIndex = -1
818 818 self.pathList = pathList
819 819 self.filenameList = filenameList
820 820
821 821 self.online = online
822 822 self.delay = delay
823 823 ext = ext.lower()
824 824 self.ext = ext
825 825
826 826 if not(self.setNextFile()):
827 827 if (startDate!=None) and (endDate!=None):
828 828 print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
829 829 elif startDate != None:
830 830 print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
831 831 else:
832 832 print "No files"
833 833
834 834 sys.exit(-1)
835 835
836 836 # self.updateDataHeader()
837 837
838 838 return self.dataOut
839 839
840 840 def getData():
841 841
842 842 raise ValueError, "This method has not been implemented"
843 843
844 844 def hasNotDataInBuffer():
845 845
846 846 raise ValueError, "This method has not been implemented"
847 847
848 848 def readBlock():
849 849
850 850 raise ValueError, "This method has not been implemented"
851 851
852 852 def isEndProcess(self):
853 853
854 854 return self.flagNoMoreFiles
855 855
856 856 def printReadBlocks(self):
857 857
858 858 print "Number of read blocks per file %04d" %self.nReadBlocks
859 859
860 860 def printTotalBlocks(self):
861 861
862 862 print "Number of read blocks %04d" %self.nTotalBlocks
863 863
864 def printNumberOfBlock(self):
865
866 if self.flagIsNewBlock:
867 print "Block No. %04d, Total blocks %04d" %(self.basicHeaderObj.dataBlock, self.nTotalBlocks)
868
864 869 def printInfo(self):
865 870
866 871 print self.basicHeaderObj.printInfo()
867 872 print self.systemHeaderObj.printInfo()
868 873 print self.radarControllerHeaderObj.printInfo()
869 874 print self.processingHeaderObj.printInfo()
870 875
871 876
872 877 def run(self, **kwargs):
873 878
874 879 if not(self.isConfig):
875 880
876 881 # self.dataOut = dataOut
877 882 self.setup(**kwargs)
878 883 self.isConfig = True
879 884
880 885 self.getData()
881 886
class JRODataWriter(JRODataIO, Operation):

    """
    This class writes data to processed-data files (.r or .pdata). Data is
    always written one block at a time.
    """

    # index of the next block inside the current output file
    blockIndex = 0

    path = None

    # running SSS set number used to name output files
    setFile = None

    profilesPerBlock = None

    blocksPerFile = None

    nWriteBlocks = 0

    def __init__(self, dataOut=None):
        # Abstract base: concrete writers implement their own constructor.
        raise ValueError, "Not implemented"


    def hasAllDataInBuffer(self):
        # Abstract: True when a complete block is buffered and can be written.
        raise ValueError, "Not implemented"


    def setBlockDimension(self):
        # Abstract: size the output buffer from the processing header.
        raise ValueError, "Not implemented"


    def writeBlock(self):
        # Abstract: serialize one data block to self.fp.
        raise ValueError, "No implemented"


    def putData(self):
        # Abstract: accept data from the chain and trigger block writes.
        raise ValueError, "No implemented"

    def getDataHeader(self):
        """
        Obtain a copy of the First Header.

        Affected:

            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        raise ValueError, "No implemented"

    def getBasicHeader(self):

        # Fill the basic header from the current output data container.
        self.basicHeaderObj.size = self.basicHeaderSize #bytes
        self.basicHeaderObj.version = self.versionFile
        self.basicHeaderObj.dataBlock = self.nTotalBlocks

        # split utctime into whole seconds plus milliseconds
        utc = numpy.floor(self.dataOut.utctime)
        milisecond = (self.dataOut.utctime - utc)* 1000.0

        self.basicHeaderObj.utc = utc
        self.basicHeaderObj.miliSecond = milisecond
        self.basicHeaderObj.timeZone = 0
        self.basicHeaderObj.dstFlag = 0
        self.basicHeaderObj.errorCount = 0

    def __writeFirstHeader(self):
        """
        Write the file's first header, i.e. the Basic header plus the Long
        header (SystemHeader, RadarControllerHeader, ProcessingHeader).

        Affected:
            __dataType

        Return:
            None
        """

        # COMPUTE PARAMETERS

        sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
        self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader

        self.basicHeaderObj.write(self.fp)
        self.systemHeaderObj.write(self.fp)
        self.radarControllerHeaderObj.write(self.fp)
        self.processingHeaderObj.write(self.fp)

        self.dtype = self.dataOut.dtype

    def __setNewBlock(self):
        """
        Write the First Header if this is a new file; otherwise write only
        the Basic Header.

        Return:
            0 : nothing could be written
            1 : the Basic or the First Header was written
        """
        if self.fp == None:
            self.setNextFile()

        if self.flagIsNewFile:
            return 1

        # still room in the current file: prepend the per-block basic header
        if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
            self.basicHeaderObj.write(self.fp)
            return 1

        if not( self.setNextFile() ):
            return 0

        return 1


    def writeNextBlock(self):
        """
        Select the next data block and write it to a file.

        Return:
            0 : the data block could not be written
            1 : the data block was written
        """
        if not( self.__setNewBlock() ):
            return 0

        self.writeBlock()

        return 1

    def setNextFile(self):
        """
        Determine the next file to be written.

        Affected:
            self.filename
            self.subfolder
            self.fp
            self.setFile
            self.flagIsNewFile

        Return:
            0 : the file cannot be written
            1 : the file is ready to be written
        """
        ext = self.ext
        path = self.path

        if self.fp != None:
            self.fp.close()

        timeTuple = time.localtime( self.dataOut.utctime)
        subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)

        fullpath = os.path.join( path, subfolder )
        if not( os.path.exists(fullpath) ):
            os.mkdir(fullpath)
            self.setFile = -1 # initialize the set counter
        else:
            filesList = os.listdir( fullpath )
            if len( filesList ) > 0:
                filesList = sorted( filesList, key=str.lower )
                filen = filesList[-1]
                # the filename must have the following format
                # 0 1234 567 89A BCDE (hex)
                # x YYYY DDD SSS .ext
                if isNumber( filen[8:11] ):
                    self.setFile = int( filen[8:11] ) # resume the set counter from the last file
                else:
                    self.setFile = -1
            else:
                self.setFile = -1 # initialize the set counter

        setFile = self.setFile
        setFile += 1

        file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
                                        timeTuple.tm_year,
                                        timeTuple.tm_yday,
                                        setFile,
                                        ext )

        filename = os.path.join( path, subfolder, file )

        fp = open( filename,'wb' )

        self.blockIndex = 0

        # store attributes
        self.filename = filename
        self.subfolder = subfolder
        self.fp = fp
        self.setFile = setFile
        self.flagIsNewFile = 1

        self.getDataHeader()

        print 'Writing the file: %s'%self.filename

        self.__writeFirstHeader()

        return 1

    def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
        """
        Set the format in which the data will be saved and write the First
        Header.

        Inputs:
            path : destination path where the files will be created
            format : format in which a file will be saved
            set : the file's set number

        Return:
            0 : setup failed
            1 : setup succeeded
        """

        if ext == None:
            ext = self.ext

        ext = ext.lower()

        self.ext = ext

        self.path = path

        # setNextFile pre-increments, so start one set before
        self.setFile = set - 1

        self.blocksPerFile = blocksPerFile

        self.profilesPerBlock = profilesPerBlock

        self.dataOut = dataOut

        if not(self.setNextFile()):
            print "There isn't a next file"
            return 0

        self.setBlockDimension()

        return 1

    def run(self, dataOut, **kwargs):

        # Lazy one-time setup, then push the incoming data to disk.
        if not(self.isConfig):

            self.setup(dataOut, **kwargs)
            self.isConfig = True

        self.putData()
1134 1139
1135 1140 class VoltageReader(JRODataReader):
1136 1141 """
1137 1142 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1138 1143 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1139 1144 perfiles*alturas*canales) son almacenados en la variable "buffer".
1140 1145
1141 1146 perfiles * alturas * canales
1142 1147
1143 1148 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1144 1149 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1145 1150 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1146 1151 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1147 1152
1148 1153 Example:
1149 1154
1150 1155 dpath = "/home/myuser/data"
1151 1156
1152 1157 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1153 1158
1154 1159 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1155 1160
1156 1161 readerObj = VoltageReader()
1157 1162
1158 1163 readerObj.setup(dpath, startTime, endTime)
1159 1164
1160 1165 while(True):
1161 1166
1162 1167 #to get one profile
1163 1168 profile = readerObj.getData()
1164 1169
1165 1170 #print the profile
1166 1171 print profile
1167 1172
1168 1173 #If you want to see all datablock
1169 1174 print readerObj.datablock
1170 1175
1171 1176 if readerObj.flagNoMoreFiles:
1172 1177 break
1173 1178
1174 1179 """
1175 1180
1176 1181 ext = ".r"
1177 1182
1178 1183 optchar = "D"
1179 1184 dataOut = None
1180 1185
1181 1186
1182 1187 def __init__(self):
1183 1188 """
1184 1189 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1185 1190
1186 1191 Input:
1187 1192 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
1188 1193 almacenar un perfil de datos cada vez que se haga un requerimiento
1189 1194 (getData). El perfil sera obtenido a partir del buffer de datos,
1190 1195 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1191 1196 bloque de datos.
1192 1197 Si este parametro no es pasado se creara uno internamente.
1193 1198
1194 1199 Variables afectadas:
1195 1200 self.dataOut
1196 1201
1197 1202 Return:
1198 1203 None
1199 1204 """
1200 1205
1201 1206 self.isConfig = False
1202 1207
1203 1208 self.datablock = None
1204 1209
1205 1210 self.utc = 0
1206 1211
1207 1212 self.ext = ".r"
1208 1213
1209 1214 self.optchar = "D"
1210 1215
1211 1216 self.basicHeaderObj = BasicHeader(LOCALTIME)
1212 1217
1213 1218 self.systemHeaderObj = SystemHeader()
1214 1219
1215 1220 self.radarControllerHeaderObj = RadarControllerHeader()
1216 1221
1217 1222 self.processingHeaderObj = ProcessingHeader()
1218 1223
1219 1224 self.online = 0
1220 1225
1221 1226 self.fp = None
1222 1227
1223 1228 self.idFile = None
1224 1229
1225 1230 self.dtype = None
1226 1231
1227 1232 self.fileSizeByHeader = None
1228 1233
1229 1234 self.filenameList = []
1230 1235
1231 1236 self.filename = None
1232 1237
1233 1238 self.fileSize = None
1234 1239
1235 1240 self.firstHeaderSize = 0
1236 1241
1237 1242 self.basicHeaderSize = 24
1238 1243
1239 1244 self.pathList = []
1240 1245
1241 1246 self.filenameList = []
1242 1247
1243 1248 self.lastUTTime = 0
1244 1249
1245 1250 self.maxTimeStep = 30
1246 1251
1247 1252 self.flagNoMoreFiles = 0
1248 1253
1249 1254 self.set = 0
1250 1255
1251 1256 self.path = None
1252 1257
1253 1258 self.profileIndex = 9999
1254 1259
1255 1260 self.delay = 3 #seconds
1256 1261
1257 1262 self.nTries = 3 #quantity tries
1258 1263
1259 1264 self.nFiles = 3 #number of files for searching
1260 1265
1261 1266 self.nReadBlocks = 0
1262 1267
1263 1268 self.flagIsNewFile = 1
1264 1269
1265 1270 self.ippSeconds = 0
1266 1271
1267 1272 self.flagTimeBlock = 0
1268 1273
1269 1274 self.flagIsNewBlock = 0
1270 1275
1271 1276 self.nTotalBlocks = 0
1272 1277
1273 1278 self.blocksize = 0
1274 1279
1275 1280 self.dataOut = self.createObjByDefault()
1276 1281
1277 1282 def createObjByDefault(self):
1278 1283
1279 1284 dataObj = Voltage()
1280 1285
1281 1286 return dataObj
1282 1287
1283 1288 def __hasNotDataInBuffer(self):
1284 1289 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1285 1290 return 1
1286 1291 return 0
1287 1292
1288 1293
1289 1294 def getBlockDimension(self):
1290 1295 """
1291 1296 Obtiene la cantidad de puntos a leer por cada bloque de datos
1292 1297
1293 1298 Affected:
1294 1299 self.blocksize
1295 1300
1296 1301 Return:
1297 1302 None
1298 1303 """
1299 1304 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1300 1305 self.blocksize = pts2read
1301 1306
1302 1307
1303 1308 def readBlock(self):
1304 1309 """
1305 1310 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1306 1311 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1307 1312 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1308 1313 es seteado a 0
1309 1314
1310 1315 Inputs:
1311 1316 None
1312 1317
1313 1318 Return:
1314 1319 None
1315 1320
1316 1321 Affected:
1317 1322 self.profileIndex
1318 1323 self.datablock
1319 1324 self.flagIsNewFile
1320 1325 self.flagIsNewBlock
1321 1326 self.nTotalBlocks
1322 1327
1323 1328 Exceptions:
1324 1329 Si un bloque leido no es un bloque valido
1325 1330 """
1326 1331
1327 1332 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1328 1333
1329 1334 try:
1330 1335 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1331 1336 except:
1332 1337 print "The read block (%3d) has not enough data" %self.nReadBlocks
1333 1338 return 0
1334 1339
1335 1340 junk = numpy.transpose(junk, (2,0,1))
1336 1341 self.datablock = junk['real'] + junk['imag']*1j
1337 1342
1338 1343 self.profileIndex = 0
1339 1344
1340 1345 self.flagIsNewFile = 0
1341 1346 self.flagIsNewBlock = 1
1342 1347
1343 1348 self.nTotalBlocks += 1
1344 1349 self.nReadBlocks += 1
1345 1350
1346 1351 return 1
1347 1352
1348 1353
1349 1354 def getData(self):
1350 1355 """
1351 1356 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1352 1357 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1353 1358 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1354 1359
1355 1360 Ademas incrementa el contador del buffer en 1.
1356 1361
1357 1362 Return:
1358 1363 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1359 1364 buffer. Si no hay mas archivos a leer retorna None.
1360 1365
1361 1366 Variables afectadas:
1362 1367 self.dataOut
1363 1368 self.profileIndex
1364 1369
1365 1370 Affected:
1366 1371 self.dataOut
1367 1372 self.profileIndex
1368 1373 self.flagTimeBlock
1369 1374 self.flagIsNewBlock
1370 1375 """
1371 1376
1372 1377 if self.flagNoMoreFiles:
1373 1378 self.dataOut.flagNoData = True
1374 1379 print 'Process finished'
1375 1380 return 0
1376 1381
1377 1382 self.flagTimeBlock = 0
1378 1383 self.flagIsNewBlock = 0
1379 1384
1380 1385 if self.__hasNotDataInBuffer():
1381 1386
1382 1387 if not( self.readNextBlock() ):
1383 1388 return 0
1384 1389
1385 1390 self.dataOut.dtype = numpy.dtype([('real','<f8'),('imag','<f8')]) #self.dtype
1386 1391
1387 1392 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1388 1393
1389 1394 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1390 1395
1391 1396 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1392 1397
1393 1398 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1394 1399
1395 1400 self.dataOut.flagTimeBlock = self.flagTimeBlock
1396 1401
1397 1402 self.dataOut.ippSeconds = self.ippSeconds
1398 1403
1399 1404 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1400 1405
1401 1406 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1402 1407
1403 1408 self.dataOut.flagShiftFFT = False
1404 1409
1405 1410 if self.radarControllerHeaderObj.code != None:
1406 1411
1407 1412 self.dataOut.nCode = self.radarControllerHeaderObj.nCode
1408 1413
1409 1414 self.dataOut.nBaud = self.radarControllerHeaderObj.nBaud
1410 1415
1411 1416 self.dataOut.code = self.radarControllerHeaderObj.code
1412 1417
1413 1418 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1414 1419
1415 1420 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1416 1421
1417 1422 self.dataOut.flagDecodeData = False #asumo q la data no esta decodificada
1418 1423
1419 1424 self.dataOut.flagDeflipData = False #asumo q la data no esta sin flip
1420 1425
1421 1426 self.dataOut.flagShiftFFT = False
1422 1427
1423 1428
1424 1429 # self.updateDataHeader()
1425 1430
1426 1431 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1427 1432
1428 1433 if self.datablock == None:
1429 1434 self.dataOut.flagNoData = True
1430 1435 return 0
1431 1436
1432 1437 self.dataOut.data = self.datablock[:,self.profileIndex,:]
1433 1438
1434 1439 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1435 1440
1436 1441 self.profileIndex += 1
1437 1442
1438 1443 self.dataOut.flagNoData = False
1439 1444
1440 1445 # print self.profileIndex, self.dataOut.utctime
1441 1446 # if self.profileIndex == 800:
1442 1447 # a=1
1443 1448
1444 1449
1445 1450 return self.dataOut.data
1446 1451
1447 1452
class VoltageWriter(JRODataWriter):
    """
    Writes voltage data to processed files (.r). Data is always written by
    blocks.
    """

    ext = ".r"

    optchar = "D"

    shapeBuffer = None


    def __init__(self):
        """
        VoltageWriter initializer (writes voltage data, not spectra).

        Affected:
            self.dataOut

        Return: None
        """

        # (duplicate 'self.nTotalBlocks = 0' assignment removed)
        self.nTotalBlocks = 0

        self.profileIndex = 0

        self.isConfig = False

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.filename = None

        self.basicHeaderObj = BasicHeader(LOCALTIME)

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()

    def hasAllDataInBuffer(self):
        # True (1) when the buffer holds a complete block (all profiles filled).
        if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
            return 1
        return 0


    def setBlockDimension(self):
        """
        Set the dimensional shapes of the sub-blocks that compose a data block.

        Affected:
            self.shapeBuffer
            self.datablock

        Return: None
        """
        self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
                            self.processingHeaderObj.nHeights,
                            self.systemHeaderObj.nChannels)

        self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
                                      self.processingHeaderObj.profilesPerBlock,
                                      self.processingHeaderObj.nHeights),
                                     dtype=numpy.dtype('complex'))


    def writeBlock(self):
        """
        Write the buffer to the designated file.

        The complex buffer (channels, profiles, heights) is transposed to the
        on-file layout (profiles, heights, channels) and split into the
        structured (real, imag) dtype before dumping.

        Affected:
            self.profileIndex
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.blockIndex

        Return: None
        """
        data = numpy.zeros( self.shapeBuffer, self.dtype )

        junk = numpy.transpose(self.datablock, (1,2,0))

        data['real'] = junk.real
        data['imag'] = junk.imag

        data = data.reshape( (-1) )

        data.tofile( self.fp )

        self.datablock.fill(0)

        self.profileIndex = 0
        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1

        self.blockIndex += 1
        self.nTotalBlocks += 1

    def putData(self):
        """
        Store one profile in the data block and, when the block is complete,
        write it to the current file.

        Affected:
            self.flagIsNewBlock
            self.profileIndex

        Return:
            0 : if there is no data or no more files can be written
            1 : if the data of one block was written to a file
        """
        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            # time discontinuity: discard the partial buffer and open a new file
            self.datablock.fill(0)
            self.profileIndex = 0
            self.setNextFile()

        if self.profileIndex == 0:
            self.getBasicHeader()

        self.datablock[:,self.profileIndex,:] = self.dataOut.data

        self.profileIndex += 1

        if self.hasAllDataInBuffer():
            self.writeNextBlock()

        return 1

    def __getProcessFlags(self):
        # Build the PROCFLAG bitmask describing the datatype and the
        # processing already applied to the data.
        processFlags = 0

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        # 'is not None' (was '!= None'): code is a numpy array, so an
        # elementwise comparison against None cannot be used as a condition
        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nCohInt > 1:
            processFlags += PROCFLAG.COHERENT_INTEGRATION

        return processFlags


    def __getBlockSize(self):
        '''
        Return the number of bytes of one data block of Voltage type.
        '''

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        datatypeValueList = [1,2,4,8,4,8]
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break

        # nHeights * nChannels * nProfiles samples, 2 components (real, imag)
        blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.dataOut.nProfiles * datatypeValue * 2)

        return blocksize

    def getDataHeader(self):

        """
        Build a copy of the First Header from the current dataOut.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40 # bytes
        self.processingHeaderObj.dtype = 0 # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1 # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
        self.processingHeaderObj.nIncohInt = 1 # source data is of Voltage type
        self.processingHeaderObj.totalSpectra = 0 # source data is of Voltage type

        # 'is not None' (was '!= None'): code is a numpy array
        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            # code block on file: 8 bytes of (nCode, nBaud) + 4 bytes per chip
            codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += codesize

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            processingHeaderSize += 12

        self.processingHeaderObj.size = processingHeaderSize
1712 1717
1713 1718 class SpectraReader(JRODataReader):
1714 1719 """
1715 1720 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1716 1721 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1717 1722 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1718 1723
1719 1724 paresCanalesIguales * alturas * perfiles (Self Spectra)
1720 1725 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1721 1726 canales * alturas (DC Channels)
1722 1727
1723 1728 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1724 1729 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1725 1730 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1726 1731 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1727 1732
1728 1733 Example:
1729 1734 dpath = "/home/myuser/data"
1730 1735
1731 1736 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1732 1737
1733 1738 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1734 1739
1735 1740 readerObj = SpectraReader()
1736 1741
1737 1742 readerObj.setup(dpath, startTime, endTime)
1738 1743
1739 1744 while(True):
1740 1745
1741 1746 readerObj.getData()
1742 1747
1743 1748 print readerObj.data_spc
1744 1749
1745 1750 print readerObj.data_cspc
1746 1751
1747 1752 print readerObj.data_dc
1748 1753
1749 1754 if readerObj.flagNoMoreFiles:
1750 1755 break
1751 1756
1752 1757 """
1753 1758
1754 1759 pts2read_SelfSpectra = 0
1755 1760
1756 1761 pts2read_CrossSpectra = 0
1757 1762
1758 1763 pts2read_DCchannels = 0
1759 1764
1760 1765 ext = ".pdata"
1761 1766
1762 1767 optchar = "P"
1763 1768
1764 1769 dataOut = None
1765 1770
1766 1771 nRdChannels = None
1767 1772
1768 1773 nRdPairs = None
1769 1774
1770 1775 rdPairList = []
1771 1776
1772 1777
1773 1778 def __init__(self):
1774 1779 """
1775 1780 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1776 1781
1777 1782 Inputs:
1778 1783 dataOut : Objeto de la clase Spectra. Este objeto sera utilizado para
1779 1784 almacenar un perfil de datos cada vez que se haga un requerimiento
1780 1785 (getData). El perfil sera obtenido a partir del buffer de datos,
1781 1786 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1782 1787 bloque de datos.
1783 1788 Si este parametro no es pasado se creara uno internamente.
1784 1789
1785 1790 Affected:
1786 1791 self.dataOut
1787 1792
1788 1793 Return : None
1789 1794 """
1790 1795
1791 1796 self.isConfig = False
1792 1797
1793 1798 self.pts2read_SelfSpectra = 0
1794 1799
1795 1800 self.pts2read_CrossSpectra = 0
1796 1801
1797 1802 self.pts2read_DCchannels = 0
1798 1803
1799 1804 self.datablock = None
1800 1805
1801 1806 self.utc = None
1802 1807
1803 1808 self.ext = ".pdata"
1804 1809
1805 1810 self.optchar = "P"
1806 1811
1807 1812 self.basicHeaderObj = BasicHeader(LOCALTIME)
1808 1813
1809 1814 self.systemHeaderObj = SystemHeader()
1810 1815
1811 1816 self.radarControllerHeaderObj = RadarControllerHeader()
1812 1817
1813 1818 self.processingHeaderObj = ProcessingHeader()
1814 1819
1815 1820 self.online = 0
1816 1821
1817 1822 self.fp = None
1818 1823
1819 1824 self.idFile = None
1820 1825
1821 1826 self.dtype = None
1822 1827
1823 1828 self.fileSizeByHeader = None
1824 1829
1825 1830 self.filenameList = []
1826 1831
1827 1832 self.filename = None
1828 1833
1829 1834 self.fileSize = None
1830 1835
1831 1836 self.firstHeaderSize = 0
1832 1837
1833 1838 self.basicHeaderSize = 24
1834 1839
1835 1840 self.pathList = []
1836 1841
1837 1842 self.lastUTTime = 0
1838 1843
1839 1844 self.maxTimeStep = 30
1840 1845
1841 1846 self.flagNoMoreFiles = 0
1842 1847
1843 1848 self.set = 0
1844 1849
1845 1850 self.path = None
1846 1851
1847 1852 self.delay = 3 #seconds
1848 1853
1849 1854 self.nTries = 3 #quantity tries
1850 1855
1851 1856 self.nFiles = 3 #number of files for searching
1852 1857
1853 1858 self.nReadBlocks = 0
1854 1859
1855 1860 self.flagIsNewFile = 1
1856 1861
1857 1862 self.ippSeconds = 0
1858 1863
1859 1864 self.flagTimeBlock = 0
1860 1865
1861 1866 self.flagIsNewBlock = 0
1862 1867
1863 1868 self.nTotalBlocks = 0
1864 1869
1865 1870 self.blocksize = 0
1866 1871
1867 1872 self.dataOut = self.createObjByDefault()
1868 1873
1869 1874
1870 1875 def createObjByDefault(self):
1871 1876
1872 1877 dataObj = Spectra()
1873 1878
1874 1879 return dataObj
1875 1880
1876 1881 def __hasNotDataInBuffer(self):
1877 1882 return 1
1878 1883
1879 1884
1880 1885 def getBlockDimension(self):
1881 1886 """
1882 1887 Obtiene la cantidad de puntos a leer por cada bloque de datos
1883 1888
1884 1889 Affected:
1885 1890 self.nRdChannels
1886 1891 self.nRdPairs
1887 1892 self.pts2read_SelfSpectra
1888 1893 self.pts2read_CrossSpectra
1889 1894 self.pts2read_DCchannels
1890 1895 self.blocksize
1891 1896 self.dataOut.nChannels
1892 1897 self.dataOut.nPairs
1893 1898
1894 1899 Return:
1895 1900 None
1896 1901 """
1897 1902 self.nRdChannels = 0
1898 1903 self.nRdPairs = 0
1899 1904 self.rdPairList = []
1900 1905
1901 1906 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1902 1907 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1903 1908 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1904 1909 else:
1905 1910 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1906 1911 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1907 1912
1908 1913 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1909 1914
1910 1915 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1911 1916 self.blocksize = self.pts2read_SelfSpectra
1912 1917
1913 1918 if self.processingHeaderObj.flag_cspc:
1914 1919 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1915 1920 self.blocksize += self.pts2read_CrossSpectra
1916 1921
1917 1922 if self.processingHeaderObj.flag_dc:
1918 1923 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1919 1924 self.blocksize += self.pts2read_DCchannels
1920 1925
1921 1926 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1922 1927
1923 1928
1924 1929 def readBlock(self):
1925 1930 """
1926 1931 Lee el bloque de datos desde la posicion actual del puntero del archivo
1927 1932 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1928 1933 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1929 1934 es seteado a 0
1930 1935
1931 1936 Return: None
1932 1937
1933 1938 Variables afectadas:
1934 1939
1935 1940 self.flagIsNewFile
1936 1941 self.flagIsNewBlock
1937 1942 self.nTotalBlocks
1938 1943 self.data_spc
1939 1944 self.data_cspc
1940 1945 self.data_dc
1941 1946
1942 1947 Exceptions:
1943 1948 Si un bloque leido no es un bloque valido
1944 1949 """
1945 1950 blockOk_flag = False
1946 1951 fpointer = self.fp.tell()
1947 1952
1948 1953 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1949 1954 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1950 1955
1951 1956 if self.processingHeaderObj.flag_cspc:
1952 1957 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1953 1958 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1954 1959
1955 1960 if self.processingHeaderObj.flag_dc:
1956 1961 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1957 1962 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1958 1963
1959 1964
1960 1965 if not(self.processingHeaderObj.shif_fft):
1961 1966 #desplaza a la derecha en el eje 2 determinadas posiciones
1962 1967 shift = int(self.processingHeaderObj.profilesPerBlock/2)
1963 1968 spc = numpy.roll( spc, shift , axis=2 )
1964 1969
1965 1970 if self.processingHeaderObj.flag_cspc:
1966 1971 #desplaza a la derecha en el eje 2 determinadas posiciones
1967 1972 cspc = numpy.roll( cspc, shift, axis=2 )
1968 1973
1969 1974 # self.processingHeaderObj.shif_fft = True
1970 1975
1971 1976 spc = numpy.transpose( spc, (0,2,1) )
1972 1977 self.data_spc = spc
1973 1978
1974 1979 if self.processingHeaderObj.flag_cspc:
1975 1980 cspc = numpy.transpose( cspc, (0,2,1) )
1976 1981 self.data_cspc = cspc['real'] + cspc['imag']*1j
1977 1982 else:
1978 1983 self.data_cspc = None
1979 1984
1980 1985 if self.processingHeaderObj.flag_dc:
1981 1986 self.data_dc = dc['real'] + dc['imag']*1j
1982 1987 else:
1983 1988 self.data_dc = None
1984 1989
1985 1990 self.flagIsNewFile = 0
1986 1991 self.flagIsNewBlock = 1
1987 1992
1988 1993 self.nTotalBlocks += 1
1989 1994 self.nReadBlocks += 1
1990 1995
1991 1996 return 1
1992 1997
1993 1998
1994 1999 def getData(self):
1995 2000 """
1996 2001 Copia el buffer de lectura a la clase "Spectra",
1997 2002 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1998 2003 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1999 2004
2000 2005 Return:
2001 2006 0 : Si no hay mas archivos disponibles
2002 2007 1 : Si hizo una buena copia del buffer
2003 2008
2004 2009 Affected:
2005 2010 self.dataOut
2006 2011
2007 2012 self.flagTimeBlock
2008 2013 self.flagIsNewBlock
2009 2014 """
2010 2015
2011 2016 if self.flagNoMoreFiles:
2012 2017 self.dataOut.flagNoData = True
2013 2018 print 'Process finished'
2014 2019 return 0
2015 2020
2016 2021 self.flagTimeBlock = 0
2017 2022 self.flagIsNewBlock = 0
2018 2023
2019 2024 if self.__hasNotDataInBuffer():
2020 2025
2021 2026 if not( self.readNextBlock() ):
2022 2027 self.dataOut.flagNoData = True
2023 2028 return 0
2024 2029
2025 2030 # self.updateDataHeader()
2026 2031
2027 2032 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
2028 2033
2029 2034 if self.data_dc == None:
2030 2035 self.dataOut.flagNoData = True
2031 2036 return 0
2032 2037
2033 2038 self.dataOut.data_spc = self.data_spc
2034 2039
2035 2040 self.dataOut.data_cspc = self.data_cspc
2036 2041
2037 2042 self.dataOut.data_dc = self.data_dc
2038 2043
2039 2044 self.dataOut.flagTimeBlock = self.flagTimeBlock
2040 2045
2041 2046 self.dataOut.flagNoData = False
2042 2047
2043 2048 self.dataOut.dtype = numpy.dtype([('real','<f8'),('imag','<f8')])#self.dtype
2044 2049
2045 2050 # self.dataOut.nChannels = self.nRdChannels
2046 2051
2047 2052 self.dataOut.nPairs = self.nRdPairs
2048 2053
2049 2054 self.dataOut.pairsList = self.rdPairList
2050 2055
2051 2056 # self.dataOut.nHeights = self.processingHeaderObj.nHeights
2052 2057
2053 2058 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
2054 2059
2055 2060 self.dataOut.nFFTPoints = self.processingHeaderObj.profilesPerBlock
2056 2061
2057 2062 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
2058 2063
2059 2064 self.dataOut.nIncohInt = self.processingHeaderObj.nIncohInt
2060 2065
2061 2066 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
2062 2067
2063 2068 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
2064 2069
2065 2070 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
2066 2071
2067 2072 # self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
2068 2073
2069 2074 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
2070 2075
2071 2076 self.dataOut.ippSeconds = self.ippSeconds
2072 2077
2073 2078 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOut.nFFTPoints
2074 2079
2075 2080 # self.profileIndex += 1
2076 2081
2077 2082 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
2078 2083
2079 2084 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
2080 2085
2081 2086 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
2082 2087
2083 2088 self.dataOut.flagDecodeData = False #asumo q la data no esta decodificada
2084 2089
2085 2090 self.dataOut.flagDeflipData = True #asumo q la data no esta sin flip
2086 2091
2087 2092 if self.processingHeaderObj.code != None:
2088 2093
2089 2094 self.dataOut.nCode = self.processingHeaderObj.nCode
2090 2095
2091 2096 self.dataOut.nBaud = self.processingHeaderObj.nBaud
2092 2097
2093 2098 self.dataOut.code = self.processingHeaderObj.code
2094 2099
2095 2100 self.dataOut.flagDecodeData = True
2096 2101
2097 2102 return self.dataOut.data_spc
2098 2103
2099 2104
class SpectraWriter(JRODataWriter):

    """
    Writes Spectra data to processed data files (.pdata). Data is always
    written one block at a time.
    """

    ext = ".pdata"

    optchar = "P"

    # (nChannels, nHeights, profilesPerBlock) -- self-spectra buffer shape
    shape_spc_Buffer = None

    # (nPairs, nHeights, profilesPerBlock) -- cross-spectra buffer shape
    shape_cspc_Buffer = None

    # (nChannels, nHeights) -- DC-channels buffer shape
    shape_dc_Buffer = None

    data_spc = None

    data_cspc = None

    data_dc = None

#    dataOut = None

    def __init__(self):
        """
        Initializes the SpectraWriter used to write spectra data to file.

        Affected:
            self.dataOut
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return: None
        """

        self.isConfig = False

        self.nTotalBlocks = 0

        self.data_spc = None

        self.data_cspc = None

        self.data_dc = None

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.noMoreFiles = 0

        self.filename = None

        self.basicHeaderObj = BasicHeader(LOCALTIME)

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()


    def hasAllDataInBuffer(self):
        # Spectra data arrives one complete block at a time, so the buffer
        # is always ready to be written.
        return 1


    def setBlockDimension(self):
        """
        Computes the dimensional shapes of the sub-blocks that make up a
        data block.

        Affected:
            self.shape_spc_Buffer
            self.shape_cspc_Buffer
            self.shape_dc_Buffer

        Return: None
        """
        self.shape_spc_Buffer = (self.dataOut.nChannels,
                                 self.processingHeaderObj.nHeights,
                                 self.processingHeaderObj.profilesPerBlock)

        self.shape_cspc_Buffer = (self.dataOut.nPairs,
                                  self.processingHeaderObj.nHeights,
                                  self.processingHeaderObj.profilesPerBlock)

        self.shape_dc_Buffer = (self.dataOut.nChannels,
                                self.processingHeaderObj.nHeights)


    def writeBlock(self):
        """
        Writes the buffered block to the designated file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.nWriteBlocks

        Return: None
        """

        spc = numpy.transpose(self.data_spc, (0, 2, 1))
        if not(self.processingHeaderObj.shif_fft):
            # shift positions to the right along axis 2 (undo fftshift);
            # // keeps the shift an integer (identical result for int operands)
            spc = numpy.roll(spc, self.processingHeaderObj.profilesPerBlock//2, axis=2)
        data = spc.reshape((-1))
        data.tofile(self.fp)

        # NOTE: 'is not None' instead of '!= None' -- the latter is an
        # elementwise comparison on numpy arrays with ambiguous truth value.
        if self.data_cspc is not None:
            data = numpy.zeros(self.shape_cspc_Buffer, self.dtype)
            cspc = numpy.transpose(self.data_cspc, (0, 2, 1))
            if not(self.processingHeaderObj.shif_fft):
                cspc = numpy.roll(cspc, self.processingHeaderObj.profilesPerBlock//2, axis=2)
            data['real'] = cspc.real
            data['imag'] = cspc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        if self.data_dc is not None:
            data = numpy.zeros(self.shape_dc_Buffer, self.dtype)
            dc = self.data_dc
            data['real'] = dc.real
            data['imag'] = dc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        self.data_spc.fill(0)
        if self.data_dc is not None:
            self.data_dc.fill(0)
        if self.data_cspc is not None:
            self.data_cspc.fill(0)

        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1
        self.nTotalBlocks += 1
        self.nWriteBlocks += 1
        self.blockIndex += 1


    def putData(self):
        """
        Stores one block of data and then writes it to file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc

        Return:
            0 : if there is no data or no more files can be written
            1 : if one block of data was written to file
        """

        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            self.data_spc.fill(0)
            if self.data_cspc is not None:
                # cross-spectra may be absent; guard as writeBlock() does
                self.data_cspc.fill(0)
            self.data_dc.fill(0)
            self.setNextFile()

        if self.flagIsNewFile == 0:
            self.getBasicHeader()

        self.data_spc = self.dataOut.data_spc.copy()
        self.data_cspc = self.dataOut.data_cspc.copy()
        self.data_dc = self.dataOut.data_dc.copy()

        # #self.processingHeaderObj.dataBlocksPerFile)
        if self.hasAllDataInBuffer():
#            self.getDataHeader()
            self.writeNextBlock()

        return 1


    def __getProcessFlags(self):
        # Builds the PROCFLAG bitmask describing dtype and the processing
        # steps already applied to the data.

        processFlags = 0

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        # NOTE(review): if self.dataOut.dtype matches none of the entries,
        # dtypeValue is unbound and the next line raises -- confirm callers
        # always set a supported dtype.
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nIncohInt > 1:
            processFlags += PROCFLAG.INCOHERENT_INTEGRATION

        if self.dataOut.data_dc is not None:
            processFlags += PROCFLAG.SAVE_CHANNELS_DC

        return processFlags


    def __getBlockSize(self):
        '''
        Computes the number of bytes of one Spectra data block.
        '''

        dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
        dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
        dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
        dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
        dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
        dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])

        dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
        datatypeValueList = [1,2,4,8,4,8]
        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break


        pts2write = self.dataOut.nHeights * self.dataOut.nFFTPoints

        pts2write_SelfSpectra = int(self.dataOut.nChannels * pts2write)
        blocksize = (pts2write_SelfSpectra*datatypeValue)

        # cross-spectra and DC channels are complex: 2 values per point
        if self.dataOut.data_cspc is not None:
            pts2write_CrossSpectra = int(self.dataOut.nPairs * pts2write)
            blocksize += (pts2write_CrossSpectra*datatypeValue*2)

        if self.dataOut.data_dc is not None:
            pts2write_DCchannels = int(self.dataOut.nChannels * self.dataOut.nHeights)
            blocksize += (pts2write_DCchannels*datatypeValue*2)

        blocksize = blocksize #* datatypeValue * 2 #FIX THIS

        return blocksize

    def getDataHeader(self):

        """
        Builds a copy of the First Header from self.dataOut.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.dtype

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40 # bytes
        self.processingHeaderObj.dtype = 0 # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.dataOut.nFFTPoints
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1 # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt # required to compute timeInterval
        self.processingHeaderObj.nIncohInt = self.dataOut.nIncohInt
        self.processingHeaderObj.totalSpectra = self.dataOut.nPairs + self.dataOut.nChannels

        if self.processingHeaderObj.totalSpectra > 0:
            channelList = []
            # each channel appears twice: the self-spectrum pair (ch, ch)
            for channel in range(self.dataOut.nChannels):
                channelList.append(channel)
                channelList.append(channel)

            pairsList = []
            for pair in self.dataOut.pairsList:
                pairsList.append(pair[0])
                pairsList.append(pair[1])
            spectraComb = channelList + pairsList
            spectraComb = numpy.array(spectraComb,dtype="u1")
            self.processingHeaderObj.spectraComb = spectraComb
            sizeOfSpcComb = len(spectraComb)
            processingHeaderSize += sizeOfSpcComb

        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            nCodeSize = 4 # bytes
            nBaudSize = 4 # bytes
            codeSize = 4 # bytes
            sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += sizeOfCode

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            sizeOfFirstHeight = 4
            sizeOfdeltaHeight = 4
            sizeOfnHeights = 4
            sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights)*self.processingHeaderObj.nWindows
            processingHeaderSize += sizeOfWindows

        self.processingHeaderObj.size = processingHeaderSize
class SpectraHeisWriter():
    """
    Writes SpectraHeis data to FITS files, one file per processed block,
    under a D<year><doy>/ subfolder of the configured output path.
    """

    # sequential counter (class attribute kept for backward compatibility)
    i = 0

    def __init__(self, dataOut):

        self.wrObj = FITS()
        self.dataOut = dataOut

    @staticmethod
    def isNumber(str):
        """
        Checks whether a string can be converted to a number.

        Fixed: originally declared as an instance method without 'self',
        so calling it on an instance bound the instance to 'str' and
        raised TypeError; @staticmethod makes both class-level and
        instance-level calls work.

        Input:
            str : string to analyze

        Return:
            True  : the string is numeric
            False : the string is not numeric
        """
        try:
            float(str)
            return True
        except:
            return False

    def setup(self, wrpath):
        # Create the output directory if needed and reset the file counter.
        # NOTE(review): os.mkdir fails if parent dirs are missing -- confirm
        # wrpath's parent always exists, else os.makedirs would be needed.
        if not(os.path.exists(wrpath)):
            os.mkdir(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Writes the current 8-channel spectra block to a new FITS file
        named D<year><doy><set>.fits.

        Return:
            1 : the block was written
        """
#        self.wrObj.writeHeader(nChannels=self.dataOut.nChannels, nFFTPoints=self.dataOut.nFFTPoints)
        name = time.localtime(self.dataOut.utctime)
        ext = ".fits"
        subfolder = 'D%4.4d%3.3d' % (name.tm_year, name.tm_yday)

        fullpath = os.path.join(self.wrpath, subfolder)
        if not(os.path.exists(fullpath)):
            os.mkdir(fullpath)
        self.setFile += 1
        # 'outfile' avoids shadowing the builtin 'file'
        outfile = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year, name.tm_yday, self.setFile, ext)

        filename = os.path.join(self.wrpath, subfolder, outfile)

        # NOTE(review): the frequency axis uses nHeights while the columns
        # use nFFTPoints -- confirm these match upstream.
        freq = numpy.arange(-1*self.dataOut.nHeights/2., self.dataOut.nHeights/2.)/(2*self.dataOut.ippSeconds)

        colFormat = str(self.dataOut.nFFTPoints) + 'E'
        cols = [self.wrObj.setColF(name="freq", format=colFormat, array=freq)]
        # one power column per channel (P_Ch1 .. P_Ch8), in dB
        for idx in range(8):
            cols.append(self.wrObj.writeData(name="P_Ch%d" % (idx + 1),
                                             format=colFormat,
                                             data=10*numpy.log10(self.dataOut.data_spc[idx, :])))

        n = self.dataOut.data_spc[6, :]
        a = self.wrObj.cFImage(n)
        b = self.wrObj.Ctable(*cols)
        self.wrObj.CFile(a, b)
        self.wrObj.wFile(filename)
        return 1
2521 2526
class FITS:
    """
    Thin convenience wrapper around pyfits for assembling a FITS file
    made of a primary image HDU plus one binary table of columns.
    """

    name = None
    format = None
    array = None
    data = None
    thdulist = None

    def __init__(self):

        pass

    def setColF(self, name, format, array):
        # Build a float32 column from *array* and remember its metadata.
        self.name = name
        self.format = format
        self.array = array
        values = numpy.array([array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=name, format=format, array=values)
        return self.col1

#    def setColP(self,name,format,data):
#        self.name=name
#        self.format=format
#        self.data=data
#        a2=numpy.array([self.data],dtype=numpy.float32)
#        self.col2 = pyfits.Column(name=self.name, format=self.format, array=a2)
#        return self.col2

    def writeHeader(self,):
        pass

    def writeData(self, name, format, data):
        # Same construction as setColF, but sourced from *data*.
        self.name = name
        self.format = format
        self.data = data
        values = numpy.array([data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=name, format=format, array=values)
        return self.col2

    def cFImage(self, n):
        # Primary HDU carrying the image/array *n*.
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        # Binary table HDU built from the nine columns.
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5,
                                    col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        # Assemble the HDU list: primary image first, then the table.
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now