##// END OF EJS Templates
Creacion de los objetos dataOut en las clases Voltagereader y SpectraReader en el metodo __init__
Miguel Valdez -
r186:d9fa53b18840
parent child
Show More
@@ -1,2485 +1,2474
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from jrodata import *
15 15 from jroheaderIO import *
16 16
def isNumber(str):
    """
    Check whether a string can be converted to a number.

    Input:
        str : string to analyze (parameter name kept for backward
              compatibility even though it shadows the builtin)

    Return:
        True  : the string represents a number
        False : it does not
    """
    # Catch only conversion errors; the previous bare "except" also
    # swallowed unrelated exceptions such as KeyboardInterrupt.
    try:
        float( str )
        return True
    except (TypeError, ValueError):
        return False
35 35
def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca data file contains data inside the given
    UTC time range.

    Inputs:
        filename       : full path of the data file (Jicamarca ".r" format)
        startUTSeconds : start of the range, in seconds since 01/01/1970
        endUTSeconds   : end of the range, in seconds since 01/01/1970

    Return:
        1 if the file's first basic-header timestamp falls inside
        [startUTSeconds, endUTSeconds), 0 otherwise (also 0 when the
        header cannot be read).

    Exceptions:
        IOError when the file does not exist or cannot be opened.
    """
    basicHeaderObj = BasicHeader()

    try:
        fp = open(filename,'rb')
    except:
        raise IOError, "The file %s can't be opened" %(filename)

    # Only the basic header is needed: it carries the block timestamp (utc).
    sts = basicHeaderObj.read(fp)
    fp.close()

    if not(sts):
        print "Skipping the file %s because it has not a valid header" %(filename)
        return 0

    # NOTE(review): only the FIRST block's timestamp is tested, so a file
    # whose data starts before the range but extends into it is rejected.
    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
75 75
def getlastFileFromPath(path, ext):
    """
    Keep only the files in *path* that follow the "PYYYYDDDSSS.ext" naming
    convention and return the last one in case-insensitive order.

    Input:
        path : folder containing the data files
        ext  : extension of the files to consider

    Return:
        The last matching filename (without path), or None when the folder
        holds no valid file.
    """
    # Expected layout:  0 1234 567 89A BCDE
    #                   H YYYY DDD SSS .ext
    candidates = []

    for entry in os.listdir(path):
        try:
            int(entry[1:5])     # year field must be numeric
            int(entry[5:8])     # day-of-year field must be numeric

            if os.path.splitext(entry)[-1].upper() != ext.upper():
                continue
        except:
            # anything that does not parse is simply not a data file
            continue

        candidates.append(entry)

    if not candidates:
        return None

    return sorted(candidates, key=str.lower)[-1]
110 110
def checkForRealPath(path, year, doy, set, ext):
    """
    Find the real, case-sensitive path of a data file on Linux.

    Tries every upper/lower-case combination of the day-folder prefix and
    the file prefix. E.g. for .../D2009307/P2009307367.ext it probes:
        .../x2009307/y2009307367.ext
        .../x2009307/Y2009307367.ext
        .../X2009307/y2009307367.ext
        .../X2009307/Y2009307367.ext

    Return:
        (filepath, filename) when a combination exists on disk;
        (None, filename) otherwise, where filename is the last combination
        tried (upper-case prefixes). Unknown extensions yield (None, None).
    """
    extLower = ext.lower()

    if extLower == ".r":           # voltage data
        dirPrefixes, filPrefixes = "dD", "dD"
    elif extLower == ".pdata":     # spectra data
        dirPrefixes, filPrefixes = "dD", "pP"
    else:
        return None, None

    filename = None

    for dirChar in dirPrefixes:
        for filChar in filPrefixes:
            doyFolder = "%s%04d%03d" % (dirChar, year, doy)             # xYYYYDDD
            filename = "%s%04d%03d%03d%s" % (filChar, year, doy, set, ext)  # xYYYYDDDSSS.ext
            filepath = os.path.join(path, doyFolder, filename)
            if os.path.exists(filepath):
                # first existing combination wins
                return filepath, filename

    return None, filename
160 160
class JRODataIO:
    """
    Base class shared by the JRO data readers and writers. Groups the
    header objects and the bookkeeping attributes used while traversing
    Jicamarca raw-data files.
    """

    # Speed of light [m/s]; used to convert the IPP to seconds.
    c = 3E8

    # True once setup() has been executed by a subclass.
    isConfig = False

    basicHeaderObj = BasicHeader()

    systemHeaderObj = SystemHeader()

    radarControllerHeaderObj = RadarControllerHeader()

    processingHeaderObj = ProcessingHeader()

    # 1 when reading files that are still being written (online mode).
    online = 0

    # numpy dtype of the samples, derived from the processing header.
    dtype = None

    pathList = []

    filenameList = []

    # Currently opened file (full path) and its extension.
    filename = None

    ext = None

    flagNoMoreFiles = 0

    flagIsNewFile = 1

    # Set to 1 when the time gap between blocks exceeds maxTimeStep.
    flagTimeBlock = 0

    flagIsNewBlock = 0

    # Open file object of the current data file.
    fp = None

    firstHeaderSize = 0

    basicHeaderSize = 24

    versionFile = 1103

    fileSize = None

    # Inter-pulse period in seconds.
    ippSeconds = None

    # Expected file size computed from the processing header.
    fileSizeByHeader = None

    fileIndex = None

    profileIndex = None

    blockIndex = None

    nTotalBlocks = None

    # Maximum allowed gap [s] between consecutive blocks before
    # flagTimeBlock is raised.
    maxTimeStep = 30

    lastUTTime = None

    datablock = None

    # Output data object (e.g. Voltage/Spectra) filled by subclasses.
    dataOut = None

    blocksize = None

    def __init__(self):
        # Abstract base: subclasses must provide their own constructor.
        raise ValueError, "Not implemented"

    def run(self):
        # Abstract base: subclasses must implement run().
        raise ValueError, "Not implemented"
234 234
235 235
236 236
class JRODataReader(JRODataIO):
    """
    Abstract reader of Jicamarca raw-data files. Subclasses implement the
    per-block decoding; this class provides the file searching (online and
    offline) and the block-traversal machinery.
    """

    # Number of data blocks read from the current file.
    nReadBlocks = 0

    delay = 60 #number of seconds waiting a new file

    nTries = 3 #quantity tries

    nFiles = 3 #number of files for searching


    def __init__(self):

        """
        Abstract: subclasses must create their own dataOut object here.
        """

        raise ValueError, "This method has not been implemented"


    def createObjByDefault(self):
        """
        Abstract: return a default dataOut object for this reader type.
        """
        raise ValueError, "This method has not been implemented"

    def getBlockDimension(self):
        # Abstract: compute the block shape from the headers.
        raise ValueError, "No implemented"
266 266
    def __searchFilesOffLine(self,
                            path,
                            startDate,
                            endDate,
                            startTime=datetime.time(0,0,0),
                            endTime=datetime.time(23,59,59),
                            set=None,
                            expLabel="",
                            ext=".r"):
        """
        Search, below *path*, every data file whose day-folder date lies in
        [startDate, endDate] and whose first block falls inside the daily
        window [startTime, endTime]. Folders are expected to be named
        "?YYYYDDD" (one prefix character, year, day of year).

        NOTE(review): the 'set' argument is currently unused.

        Return:
            (pathList, filenameList) with the matching folders and the full
            paths of the matching files, or (None, None) when nothing is
            found. The list is also stored in self.filenameList.
        """
        dirList = []
        # keep only the sub-directories of path
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path,thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None

        pathList = []
        dateList = []

        thisDate = startDate

        # one folder per day: match "?YYYYDDD" for every date in the range
        while(thisDate <= endDate):
            year = thisDate.timetuple().tm_year
            doy = thisDate.timetuple().tm_yday

            match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
            if len(match) == 0:
                thisDate += datetime.timedelta(1)
                continue

            pathList.append(os.path.join(path,match[0],expLabel))
            dateList.append(thisDate)
            thisDate += datetime.timedelta(1)

        filenameList = []
        for index in range(len(pathList)):

            thisPath = pathList[index]
            fileList = glob.glob1(thisPath, "*%s" %ext)
            fileList.sort()

            # keep only the files whose data falls inside the daily window
            thisDate = dateList[index]
            startDT = datetime.datetime.combine(thisDate, startTime)
            endDT = datetime.datetime.combine(thisDate, endTime)

            startUtSeconds = time.mktime(startDT.timetuple())
            endUtSeconds = time.mktime(endDT.timetuple())

            for file in fileList:

                filename = os.path.join(thisPath,file)

                if isThisFileinRange(filename, startUtSeconds, endUtSeconds):
                    filenameList.append(filename)

        if not(filenameList):
            return None, None

        self.filenameList = filenameList

        return pathList, filenameList
330 330
    def __searchFilesOnLine(self, path, startDate=None, endDate=None, startTime=None, endTime=None, expLabel = "", ext = None):

        """
        Find the last file of the last folder (optionally constrained by
        startDate/startTime) and return it together with its date fields.

        Input:
            path      : folder containing the day sub-folders with data
            startDate : rejects folders with file end time < startDate (datetime.date)
            endDate   : rejects folders with file start time > endDate (datetime.date)
            startTime : rejects files with file end time < startTime (datetime.time)
            endTime   : rejects files with file start time > endTime (datetime.time)
            expLabel  : name of the sub-experiment (sub-folder)
            ext       : file extension

        Return:
            directory : folder where the file was found
            filename  : last file of that folder
            year      : the year
            doy       : the day of year
            set       : the file's set number
        """
        dirList = []
        pathList = []
        directory = None

        # keep only the sub-directories
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path, thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None, None, None, None

        dirList = sorted( dirList, key=str.lower )

        if startDate:
            startDateTime = datetime.datetime.combine(startDate, startTime)
            thisDateTime = startDateTime
            if endDate == None: endDateTime = startDateTime
            else: endDateTime = datetime.datetime.combine(endDate, endTime)

            # collect every "?YYYYDDD" folder inside the date range
            while(thisDateTime <= endDateTime):
                year = thisDateTime.timetuple().tm_year
                doy = thisDateTime.timetuple().tm_yday

                match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
                if len(match) == 0:
                    thisDateTime += datetime.timedelta(1)
                    continue

                pathList.append(os.path.join(path,match[0], expLabel))
                thisDateTime += datetime.timedelta(1)

            if not(pathList):
                print "\tNo files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime())
                return None, None, None, None, None

            directory = pathList[0]

        else:
            # no date constraint: take the newest folder
            directory = dirList[-1]
            directory = os.path.join(path,directory)

        filename = getlastFileFromPath(directory, ext)

        if not(filename):
            return None, None, None, None, None

        # the file must contain at least one complete header/block
        if not(self.__verifyFile(os.path.join(directory, filename))):
            return None, None, None, None, None

        # filename layout: x YYYY DDD SSS .ext
        year = int( filename[1:5] )
        doy = int( filename[5:8] )
        set = int( filename[8:11] )

        return directory, filename, year, doy, set
420 420
421 def setup(self,dataOut=None,
421 def setup(self,
422 422 path=None,
423 423 startDate=None,
424 424 endDate=None,
425 425 startTime=datetime.time(0,0,0),
426 426 endTime=datetime.time(23,59,59),
427 427 set=0,
428 428 expLabel = "",
429 429 ext = None,
430 430 online = False,
431 431 delay = 60):
432 432
433 433 if path == None:
434 434 raise ValueError, "The path is not valid"
435 435
436 436 if ext == None:
437 437 ext = self.ext
438 438
439 if dataOut == None:
440 dataOut = self.createObjByDefault()
441
442 self.dataOut = dataOut
439 if dataOut != None:
440 self.dataOut = dataOut
443 441
444 442 if online:
445 443 print "Searching files in online mode..."
446 444 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext)
447 445
448 446 if not(doypath):
449 447 for nTries in range( self.nTries ):
450 448 print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
451 449 time.sleep( self.delay )
452 450 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=exp)
453 451 if doypath:
454 452 break
455 453
456 454 if not(doypath):
457 455 print "There 'isn't valied files in %s" % path
458 456 return None
459 457
460 458 self.year = year
461 459 self.doy = doy
462 460 self.set = set - 1
463 461 self.path = path
464 462
465 463 else:
466 464 print "Searching files in offline mode ..."
467 465 pathList, filenameList = self.__searchFilesOffLine(path, startDate, endDate, startTime, endTime, set, expLabel, ext)
468 466
469 467 if not(pathList):
470 468 print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
471 469 datetime.datetime.combine(startDate,startTime).ctime(),
472 470 datetime.datetime.combine(endDate,endTime).ctime())
473 471
474 472 sys.exit(-1)
475 473
476 474
477 475 self.fileIndex = -1
478 476 self.pathList = pathList
479 477 self.filenameList = filenameList
480 478
481 479 self.online = online
482 480 self.delay = delay
483 481 ext = ext.lower()
484 482 self.ext = ext
485 483
486 484 if not(self.setNextFile()):
487 485 if (startDate!=None) and (endDate!=None):
488 486 print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
489 487 elif startDate != None:
490 488 print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
491 489 else:
492 490 print "No files"
493 491
494 492 sys.exit(-1)
495 493
496 494 # self.updateDataHeader()
497 495
498 496 return self.dataOut
499 497
    def __setNextFileOffline(self):
        """
        Open the next valid file from self.filenameList.

        Affected:
            self.flagIsNewFile, self.fileIndex, self.filename,
            self.fileSize, self.fp, self.flagNoMoreFiles

        Return:
            1 when a file was opened, 0 when the list is exhausted.
        """
        idFile = self.fileIndex

        while (True):
            idFile += 1
            if not(idFile < len(self.filenameList)):
                self.flagNoMoreFiles = 1
                print "No more Files"
                return 0

            filename = self.filenameList[idFile]

            # skip files that don't hold at least one complete block
            if not(self.__verifyFile(filename)):
                continue

            fileSize = os.path.getsize(filename)
            fp = open(filename,'rb')
            break

        self.flagIsNewFile = 1
        self.fileIndex = idFile
        self.filename = filename
        self.fileSize = fileSize
        self.fp = fp

        print "Setting the file: %s"%self.filename

        return 1
529 527
    def __setNextFileOnline(self):
        """
        Search for the next file with enough data to be read inside the
        current folder; when no valid file is found, wait a fixed delay and
        retry over the next few candidate files.

        Affected:
            self.flagIsNewFile
            self.filename
            self.fileSize
            self.fp
            self.set
            self.flagNoMoreFiles

        Return:
            0 : no valid next file could be found
            1 : the file was opened successfully and is ready to be read

        Exceptions:
            When a given file cannot be opened
        """
        nFiles = 0
        fileOk_flag = False
        firstTime_flag = True

        self.set += 1

        # look for the first available file
        file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
        if file:
            if self.__verifyFile(file, False):
                fileOk_flag = True

        # if no file was found, wait and search again
        if not(fileOk_flag):
            # probe the next self.nFiles+1 candidate files
            for nFiles in range(self.nFiles+1):

                if firstTime_flag:  # on the first pass retry self.nTries times
                    tries = self.nTries
                else:
                    tries = 1       # afterwards only try once per candidate

                for nTries in range( tries ):
                    if firstTime_flag:
                        print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
                        time.sleep( self.delay )
                    else:
                        print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)

                    file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
                    if file:
                        if self.__verifyFile(file):
                            fileOk_flag = True
                            break

                if fileOk_flag:
                    break

                firstTime_flag = False

                print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
                self.set += 1

                # after exhausting the candidates, roll over to the next day
                if nFiles == (self.nFiles-1):
                    self.set = 0
                    self.doy += 1

        if fileOk_flag:
            self.fileSize = os.path.getsize( file )
            self.filename = file
            self.flagIsNewFile = 1
            if self.fp != None: self.fp.close()
            self.fp = open(file)
            self.flagNoMoreFiles = 0
            print 'Setting the file: %s' % file
        else:
            self.fileSize = 0
            self.filename = None
            self.flagIsNewFile = 0
            self.fp = None
            self.flagNoMoreFiles = 1
            print 'No more Files'

        return fileOk_flag
614 612
615 613
616 614 def setNextFile(self):
617 615 if self.fp != None:
618 616 self.fp.close()
619 617
620 618 if self.online:
621 619 newFile = self.__setNextFileOnline()
622 620 else:
623 621 newFile = self.__setNextFileOffline()
624 622
625 623 if not(newFile):
626 624 return 0
627 625
628 626 self.__readFirstHeader()
629 627 self.nReadBlocks = 0
630 628 return 1
631 629
632 630 def __setNewBlock(self):
633 631 if self.fp == None:
634 632 return 0
635 633
636 634 if self.flagIsNewFile:
637 635 return 1
638 636
639 637 self.lastUTTime = self.basicHeaderObj.utc
640 638 currentSize = self.fileSize - self.fp.tell()
641 639 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
642 640
643 641 if (currentSize >= neededSize):
644 642 self.__rdBasicHeader()
645 643 return 1
646 644
647 645 if not(self.setNextFile()):
648 646 return 0
649 647
650 648 deltaTime = self.basicHeaderObj.utc - self.lastUTTime #
651 649
652 650 self.flagTimeBlock = 0
653 651
654 652 if deltaTime > self.maxTimeStep:
655 653 self.flagTimeBlock = 1
656 654
657 655 return 1
658 656
659 657
660 658 def readNextBlock(self):
661 659 if not(self.__setNewBlock()):
662 660 return 0
663 661
664 662 if not(self.readBlock()):
665 663 return 0
666 664
667 665 return 1
668 666
669 667 def __rdProcessingHeader(self, fp=None):
670 668 if fp == None:
671 669 fp = self.fp
672 670
673 671 self.processingHeaderObj.read(fp)
674 672
675 673 def __rdRadarControllerHeader(self, fp=None):
676 674 if fp == None:
677 675 fp = self.fp
678 676
679 677 self.radarControllerHeaderObj.read(fp)
680 678
681 679 def __rdSystemHeader(self, fp=None):
682 680 if fp == None:
683 681 fp = self.fp
684 682
685 683 self.systemHeaderObj.read(fp)
686 684
687 685 def __rdBasicHeader(self, fp=None):
688 686 if fp == None:
689 687 fp = self.fp
690 688
691 689 self.basicHeaderObj.read(fp)
692 690
693 691
694 692 def __readFirstHeader(self):
695 693 self.__rdBasicHeader()
696 694 self.__rdSystemHeader()
697 695 self.__rdRadarControllerHeader()
698 696 self.__rdProcessingHeader()
699 697
700 698 self.firstHeaderSize = self.basicHeaderObj.size
701 699
702 700 datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
703 701 if datatype == 0:
704 702 datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
705 703 elif datatype == 1:
706 704 datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
707 705 elif datatype == 2:
708 706 datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
709 707 elif datatype == 3:
710 708 datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
711 709 elif datatype == 4:
712 710 datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
713 711 elif datatype == 5:
714 712 datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
715 713 else:
716 714 raise ValueError, 'Data type was not defined'
717 715
718 716 self.dtype = datatype_str
719 717 self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
720 718 self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
721 719 # self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
722 720 # self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
723 721 self.getBlockDimension()
724 722
725 723
    def __verifyFile(self, filename, msgFlag=True):
        """
        Check that *filename* can be opened and holds at least one complete
        data block (headers included).

        Input:
            filename : full path of the file to check
            msgFlag  : when True, print diagnostic messages

        Return:
            True when the file has enough data, False otherwise.
        """
        msg = None
        try:
            fp = open(filename, 'rb')
            currentPosition = fp.tell()
        except:
            if msgFlag:
                print "The file %s can't be opened" % (filename)
            return False

        neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize

        if neededSize == 0:
            # no headers read yet for this run: parse them from this file
            # to learn the block size
            basicHeaderObj = BasicHeader()
            systemHeaderObj = SystemHeader()
            radarControllerHeaderObj = RadarControllerHeader()
            processingHeaderObj = ProcessingHeader()

            try:
                if not( basicHeaderObj.read(fp) ): raise ValueError
                if not( systemHeaderObj.read(fp) ): raise ValueError
                if not( radarControllerHeaderObj.read(fp) ): raise ValueError
                if not( processingHeaderObj.read(fp) ): raise ValueError
                data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))

                neededSize = processingHeaderObj.blockSize + basicHeaderObj.size

            except:
                if msgFlag:
                    print "\tThe file %s is empty or it hasn't enough data" % filename

                fp.close()
                return False
        else:
            msg = "\tSkipping the file %s due to it hasn't enough data" %filename

        fp.close()
        fileSize = os.path.getsize(filename)
        currentSize = fileSize - currentPosition
        if currentSize < neededSize:
            if msgFlag and (msg != None):
                print msg
            return False

        return True
771 769
772 770 def getData():
773 771 pass
774 772
775 773 def hasNotDataInBuffer():
776 774 pass
777 775
778 776 def readBlock():
779 777 pass
780 778
781 779 def run(self, **kwargs):
782 780
783 781 if not(self.isConfig):
784 782
785 783 # self.dataOut = dataOut
786 784 self.setup(**kwargs)
787 785 self.isConfig = True
788 786
789 787 self.getData()
790 788
class JRODataWriter(JRODataIO):

    """
    Abstract writer of processed data files (.r or .pdata). Data is always
    written block by block; subclasses implement the per-block encoding.
    """

    # Index of the block being written inside the current file.
    blockIndex = 0

    # Destination folder for the generated files.
    path = None

    # Running "set" counter used to build the SSS part of the filename.
    setFile = None

    profilesPerBlock = None

    blocksPerFile = None

    # Total number of blocks written so far.
    nWriteBlocks = 0

    def __init__(self, dataOut=None):
        # Abstract base: subclasses must provide their own constructor.
        raise ValueError, "Not implemented"


    def hasAllDataInBuffer(self):
        # Abstract: report whether a full block is buffered and ready.
        raise ValueError, "Not implemented"


    def setBlockDimension(self):
        # Abstract: define the shape of the block buffers.
        raise ValueError, "Not implemented"


    def writeBlock(self):
        # Abstract: encode and write one data block.
        raise ValueError, "No implemented"


    def putData(self):
        # Abstract: feed one data unit into the writer.
        raise ValueError, "No implemented"

    def getDataHeader(self):
        """
        Abstract: take a copy of the first header from self.dataOut.

        Affected:
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        raise ValueError, "No implemented"
845 843
846 844 def getBasicHeader(self):
847 845
848 846 self.basicHeaderObj.size = self.basicHeaderSize #bytes
849 847 self.basicHeaderObj.version = self.versionFile
850 848 self.basicHeaderObj.dataBlock = self.nTotalBlocks
851 849
852 850 utc = numpy.floor(self.dataOut.utctime)
853 851 milisecond = (self.dataOut.utctime - utc)* 1000.0
854 852
855 853 self.basicHeaderObj.utc = utc
856 854 self.basicHeaderObj.miliSecond = milisecond
857 855 self.basicHeaderObj.timeZone = 0
858 856 self.basicHeaderObj.dstFlag = 0
859 857 self.basicHeaderObj.errorCount = 0
860 858
    def __writeFirstHeader(self):
        """
        Write the first header of the file: the basic header followed by
        the long header (SystemHeader, RadarControllerHeader,
        ProcessingHeader).

        Affected:
            self.basicHeaderObj.size
            self.dtype

        Return:
            None
        """

        # COMPUTE PARAMETERS

        # the basic header "size" field counts the whole first header
        sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
        self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader

        self.basicHeaderObj.write(self.fp)
        self.systemHeaderObj.write(self.fp)
        self.radarControllerHeaderObj.write(self.fp)
        self.processingHeaderObj.write(self.fp)

        self.dtype = self.dataOut.dtype
883 881
884 882 def __setNewBlock(self):
885 883 """
886 884 Si es un nuevo file escribe el First Header caso contrario escribe solo el Basic Header
887 885
888 886 Return:
889 887 0 : si no pudo escribir nada
890 888 1 : Si escribio el Basic el First Header
891 889 """
892 890 if self.fp == None:
893 891 self.setNextFile()
894 892
895 893 if self.flagIsNewFile:
896 894 return 1
897 895
898 896 if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
899 897 self.basicHeaderObj.write(self.fp)
900 898 return 1
901 899
902 900 if not( self.setNextFile() ):
903 901 return 0
904 902
905 903 return 1
906 904
907 905
908 906 def writeNextBlock(self):
909 907 """
910 908 Selecciona el bloque siguiente de datos y los escribe en un file
911 909
912 910 Return:
913 911 0 : Si no hizo pudo escribir el bloque de datos
914 912 1 : Si no pudo escribir el bloque de datos
915 913 """
916 914 if not( self.__setNewBlock() ):
917 915 return 0
918 916
919 917 self.writeBlock()
920 918
921 919 return 1
922 920
    def setNextFile(self):
        """
        Create and open the next output file.

        The file goes into a "DYYYYDDD" sub-folder of self.path and is
        named "xYYYYDDDSSS.ext", where SSS continues the set counter of any
        files already present in that folder.

        Affected:
            self.filename
            self.subfolder
            self.fp
            self.setFile
            self.flagIsNewFile

        Return:
            0 : the file could not be written
            1 : the file is ready to be written
        """
        ext = self.ext
        path = self.path

        if self.fp != None:
            self.fp.close()

        # NOTE(review): the reader side uses dataOut.utctime; confirm that
        # dataUtcTime is the attribute actually exposed by dataOut here.
        timeTuple = time.localtime( self.dataOut.dataUtcTime)
        subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)

        doypath = os.path.join( path, subfolder )
        if not( os.path.exists(doypath) ):
            os.mkdir(doypath)
            self.setFile = -1   # reset the set counter for a new folder
        else:
            filesList = os.listdir( doypath )
            if len( filesList ) > 0:
                filesList = sorted( filesList, key=str.lower )
                filen = filesList[-1]
                # expected filename layout (hex offsets):
                # 0 1234 567 89A BCDE
                # x YYYY DDD SSS .ext
                if isNumber( filen[8:11] ):
                    # continue from the set number of the last file present
                    self.setFile = int( filen[8:11] )
                else:
                    self.setFile = -1
            else:
                self.setFile = -1   # empty folder: start the counter over

        setFile = self.setFile
        setFile += 1

        # build "xYYYYDDDSSS.ext"
        file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
                                        timeTuple.tm_year,
                                        timeTuple.tm_yday,
                                        setFile,
                                        ext )

        filename = os.path.join( path, subfolder, file )

        fp = open( filename,'wb' )

        self.blockIndex = 0

        # store attributes
        self.filename = filename
        self.subfolder = subfolder
        self.fp = fp
        self.setFile = setFile
        self.flagIsNewFile = 1

        self.getDataHeader()

        print 'Writing the file: %s'%self.filename

        self.__writeFirstHeader()

        return 1
995 993
996 def setup(self, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
994 def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
997 995 """
998 996 Setea el tipo de formato en la cual sera guardada la data y escribe el First Header
999 997
1000 998 Inputs:
1001 999 path : el path destino en el cual se escribiran los files a crear
1002 1000 format : formato en el cual sera salvado un file
1003 1001 set : el setebo del file
1004 1002
1005 1003 Return:
1006 1004 0 : Si no realizo un buen seteo
1007 1005 1 : Si realizo un buen seteo
1008 1006 """
1009 1007
1010 1008 if ext == None:
1011 1009 ext = self.ext
1012 1010
1013 1011 ext = ext.lower()
1014 1012
1015 1013 self.ext = ext
1016 1014
1017 1015 self.path = path
1018 1016
1019 1017 self.setFile = set - 1
1020 1018
1021 1019 self.blocksPerFile = blocksPerFile
1022 1020
1023 1021 self.profilesPerBlock = profilesPerBlock
1024 1022
1023 self.dataOut = dataOut
1024
1025 1025 if not(self.setNextFile()):
1026 1026 print "There isn't a next file"
1027 1027 return 0
1028 1028
1029 1029 self.setBlockDimension()
1030 1030
1031 1031 return 1
1032 1032
1033 1033 def run(self, dataOut, **kwargs):
1034 1034
1035 1035 if not(self.isConfig):
1036 1036
1037 self.dataOut = dataOut
1038 self.setup(**kwargs)
1037 self.setup(dataOut, **kwargs)
1039 1038 self.isConfig = True
1040 1039
1041 1040 self.putData()
1042 1041
1043 1042 class VoltageReader(JRODataReader):
1044 1043 """
1045 1044 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1046 1045 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1047 1046 perfiles*alturas*canales) son almacenados en la variable "buffer".
1048 1047
1049 1048 perfiles * alturas * canales
1050 1049
1051 1050 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1052 1051 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1053 1052 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1054 1053 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1055 1054
1056 1055 Example:
1057 1056
1058 1057 dpath = "/home/myuser/data"
1059 1058
1060 1059 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1061 1060
1062 1061 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1063 1062
1064 1063 readerObj = VoltageReader()
1065 1064
1066 1065 readerObj.setup(dpath, startTime, endTime)
1067 1066
1068 1067 while(True):
1069 1068
1070 1069 #to get one profile
1071 1070 profile = readerObj.getData()
1072 1071
1073 1072 #print the profile
1074 1073 print profile
1075 1074
1076 1075 #If you want to see all datablock
1077 1076 print readerObj.datablock
1078 1077
1079 1078 if readerObj.flagNoMoreFiles:
1080 1079 break
1081 1080
1082 1081 """
1083 1082
1084 1083 ext = ".r"
1085 1084
1086 1085 optchar = "D"
1087 1086 dataOut = None
1088 1087
1089 1088
1090 def __init__(self, dataOut=None):
1089 def __init__(self):
1091 1090 """
1092 1091 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1093 1092
1094 1093 Input:
1095 1094 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
1096 1095 almacenar un perfil de datos cada vez que se haga un requerimiento
1097 1096 (getData). El perfil sera obtenido a partir del buffer de datos,
1098 1097 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1099 1098 bloque de datos.
1100 1099 Si este parametro no es pasado se creara uno internamente.
1101 1100
1102 1101 Variables afectadas:
1103 1102 self.dataOut
1104 1103
1105 1104 Return:
1106 1105 None
1107 1106 """
1108 1107
1109 1108 self.isConfig = False
1110 1109
1111 1110 self.datablock = None
1112 1111
1113 1112 self.utc = 0
1114 1113
1115 1114 self.ext = ".r"
1116 1115
1117 1116 self.optchar = "D"
1118 1117
1119 1118 self.basicHeaderObj = BasicHeader()
1120 1119
1121 1120 self.systemHeaderObj = SystemHeader()
1122 1121
1123 1122 self.radarControllerHeaderObj = RadarControllerHeader()
1124 1123
1125 1124 self.processingHeaderObj = ProcessingHeader()
1126 1125
1127 1126 self.online = 0
1128 1127
1129 1128 self.fp = None
1130 1129
1131 1130 self.idFile = None
1132 1131
1133 1132 self.dtype = None
1134 1133
1135 1134 self.fileSizeByHeader = None
1136 1135
1137 1136 self.filenameList = []
1138 1137
1139 1138 self.filename = None
1140 1139
1141 1140 self.fileSize = None
1142 1141
1143 1142 self.firstHeaderSize = 0
1144 1143
1145 1144 self.basicHeaderSize = 24
1146 1145
1147 1146 self.pathList = []
1148 1147
1149 1148 self.filenameList = []
1150 1149
1151 1150 self.lastUTTime = 0
1152 1151
1153 1152 self.maxTimeStep = 30
1154 1153
1155 1154 self.flagNoMoreFiles = 0
1156 1155
1157 1156 self.set = 0
1158 1157
1159 1158 self.path = None
1160 1159
1161 1160 self.profileIndex = 9999
1162 1161
1163 1162 self.delay = 3 #seconds
1164 1163
1165 1164 self.nTries = 3 #quantity tries
1166 1165
1167 1166 self.nFiles = 3 #number of files for searching
1168 1167
1169 1168 self.nReadBlocks = 0
1170 1169
1171 1170 self.flagIsNewFile = 1
1172 1171
1173 1172 self.ippSeconds = 0
1174 1173
1175 1174 self.flagTimeBlock = 0
1176 1175
1177 1176 self.flagIsNewBlock = 0
1178 1177
1179 1178 self.nTotalBlocks = 0
1180 1179
1181 1180 self.blocksize = 0
1181
1182 dataOut = self.createObjByDefault()
1182 1183
1183 1184 def createObjByDefault(self):
1184 1185
1185 1186 dataObj = Voltage()
1186 1187
1187 1188 return dataObj
1188 1189
1189 1190 def __hasNotDataInBuffer(self):
1190 1191 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1191 1192 return 1
1192 1193 return 0
1193 1194
1194 1195
1195 1196 def getBlockDimension(self):
1196 1197 """
1197 1198 Obtiene la cantidad de puntos a leer por cada bloque de datos
1198 1199
1199 1200 Affected:
1200 1201 self.blocksize
1201 1202
1202 1203 Return:
1203 1204 None
1204 1205 """
1205 1206 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1206 1207 self.blocksize = pts2read
1207 1208
1208 1209
1209 1210 def readBlock(self):
1210 1211 """
1211 1212 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1212 1213 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1213 1214 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1214 1215 es seteado a 0
1215 1216
1216 1217 Inputs:
1217 1218 None
1218 1219
1219 1220 Return:
1220 1221 None
1221 1222
1222 1223 Affected:
1223 1224 self.profileIndex
1224 1225 self.datablock
1225 1226 self.flagIsNewFile
1226 1227 self.flagIsNewBlock
1227 1228 self.nTotalBlocks
1228 1229
1229 1230 Exceptions:
1230 1231 Si un bloque leido no es un bloque valido
1231 1232 """
1232 1233
1233 1234 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1234 1235
1235 1236 try:
1236 1237 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1237 1238 except:
1238 1239 print "The read block (%3d) has not enough data" %self.nReadBlocks
1239 1240 return 0
1240 1241
1241 1242 junk = numpy.transpose(junk, (2,0,1))
1242 1243 self.datablock = junk['real'] + junk['imag']*1j
1243 1244
1244 1245 self.profileIndex = 0
1245 1246
1246 1247 self.flagIsNewFile = 0
1247 1248 self.flagIsNewBlock = 1
1248 1249
1249 1250 self.nTotalBlocks += 1
1250 1251 self.nReadBlocks += 1
1251 1252
1252 1253 return 1
1253 1254
1254 1255
1255 1256 def getData(self):
1256 1257 """
1257 1258 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1258 1259 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1259 1260 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1260 1261
1261 1262 Ademas incrementa el contador del buffer en 1.
1262 1263
1263 1264 Return:
1264 1265 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1265 1266 buffer. Si no hay mas archivos a leer retorna None.
1266 1267
1267 1268 Variables afectadas:
1268 1269 self.dataOut
1269 1270 self.profileIndex
1270 1271
1271 1272 Affected:
1272 1273 self.dataOut
1273 1274 self.profileIndex
1274 1275 self.flagTimeBlock
1275 1276 self.flagIsNewBlock
1276 1277 """
1277 1278 if self.flagNoMoreFiles: return 0
1278 1279
1279 1280 self.flagTimeBlock = 0
1280 1281 self.flagIsNewBlock = 0
1281 1282
1282 1283 if self.__hasNotDataInBuffer():
1283 1284
1284 1285 if not( self.readNextBlock() ):
1285 1286 return 0
1286 1287
1287 1288 # self.updateDataHeader()
1288 1289
1289 1290 if self.flagNoMoreFiles == 1:
1290 1291 print 'Process finished'
1291 1292 return 0
1292 1293
1293 1294 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1294 1295
1295 1296 if self.datablock == None:
1296 1297 self.dataOut.flagNoData = True
1297 1298 return 0
1298 1299
1299 1300 self.dataOut.data = self.datablock[:,self.profileIndex,:]
1300 1301
1301 1302 self.dataOut.dtype = self.dtype
1302 1303
1303 1304 self.dataOut.nChannels = self.systemHeaderObj.nChannels
1304 1305
1305 1306 self.dataOut.nHeights = self.processingHeaderObj.nHeights
1306 1307
1307 1308 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1308 1309
1309 1310 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1310 1311
1311 1312 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1312 1313
1313 1314 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1314 1315
1315 1316 self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1316 1317
1317 1318 self.dataOut.flagTimeBlock = self.flagTimeBlock
1318 1319
1319 1320 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1320 1321
1321 1322 self.dataOut.ippSeconds = self.ippSeconds
1322 1323
1323 1324 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1324 1325
1325 1326 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1326 1327
1327 1328 self.dataOut.flagShiftFFT = False
1328 1329
1329 1330 if self.processingHeaderObj.code != None:
1330 1331 self.dataOut.nCode = self.processingHeaderObj.nCode
1331 1332
1332 1333 self.dataOut.nBaud = self.processingHeaderObj.nBaud
1333 1334
1334 1335 self.dataOut.code = self.processingHeaderObj.code
1335 1336
1336 1337 self.profileIndex += 1
1337 1338
1338 1339 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1339 1340
1340 1341 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1341 1342
1342 1343 self.dataOut.flagNoData = False
1343 1344
1344 1345 # print self.profileIndex, self.dataOut.utctime
1345 1346 # if self.profileIndex == 800:
1346 1347 # a=1
1347 1348
1348 1349 return self.dataOut.data
1349 1350
1350 1351
1351 1352 class VoltageWriter(JRODataWriter):
1352 1353 """
1353 1354 Esta clase permite escribir datos de voltajes a archivos procesados (.r). La escritura
1354 1355 de los datos siempre se realiza por bloques.
1355 1356 """
1356 1357
1357 1358 ext = ".r"
1358 1359
1359 1360 optchar = "D"
1360 1361
1361 1362 shapeBuffer = None
1362 1363
1363 1364
1364 def __init__(self, dataOut=None):
1365 def __init__(self):
1365 1366 """
1366 1367 Inicializador de la clase VoltageWriter para la escritura de datos de espectros.
1367 1368
1368 1369 Affected:
1369 1370 self.dataOut
1370 1371
1371 1372 Return: None
1372 1373 """
1373 if dataOut == None:
1374 dataOut = Voltage()
1375
1376 if not( isinstance(dataOut, Voltage) ):
1377 raise ValueError, "in VoltageReader, dataOut must be an Spectra class object"
1378
1379 self.dataOut = dataOut
1380 1374
1381 1375 self.nTotalBlocks = 0
1382 1376
1383 1377 self.profileIndex = 0
1384 1378
1385 1379 self.isConfig = False
1386 1380
1387 1381 self.fp = None
1388 1382
1389 1383 self.flagIsNewFile = 1
1390 1384
1391 1385 self.nTotalBlocks = 0
1392 1386
1393 1387 self.flagIsNewBlock = 0
1394 1388
1395 1389 self.flagNoMoreFiles = 0
1396 1390
1397 1391 self.setFile = None
1398 1392
1399 1393 self.dtype = None
1400 1394
1401 1395 self.path = None
1402 1396
1403 1397 self.noMoreFiles = 0
1404 1398
1405 1399 self.filename = None
1406 1400
1407 1401 self.basicHeaderObj = BasicHeader()
1408 1402
1409 1403 self.systemHeaderObj = SystemHeader()
1410 1404
1411 1405 self.radarControllerHeaderObj = RadarControllerHeader()
1412 1406
1413 1407 self.processingHeaderObj = ProcessingHeader()
1414 1408
1415 1409 def hasAllDataInBuffer(self):
1416 1410 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1417 1411 return 1
1418 1412 return 0
1419 1413
1420 1414
1421 1415 def setBlockDimension(self):
1422 1416 """
1423 1417 Obtiene las formas dimensionales del los subbloques de datos que componen un bloque
1424 1418
1425 1419 Affected:
1426 1420 self.shape_spc_Buffer
1427 1421 self.shape_cspc_Buffer
1428 1422 self.shape_dc_Buffer
1429 1423
1430 1424 Return: None
1431 1425 """
1432 1426 self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
1433 1427 self.processingHeaderObj.nHeights,
1434 1428 self.systemHeaderObj.nChannels)
1435 1429
1436 1430 self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
1437 1431 self.processingHeaderObj.profilesPerBlock,
1438 1432 self.processingHeaderObj.nHeights),
1439 1433 dtype=numpy.dtype('complex'))
1440 1434
1441 1435
1442 1436 def writeBlock(self):
1443 1437 """
1444 1438 Escribe el buffer en el file designado
1445 1439
1446 1440 Affected:
1447 1441 self.profileIndex
1448 1442 self.flagIsNewFile
1449 1443 self.flagIsNewBlock
1450 1444 self.nTotalBlocks
1451 1445 self.blockIndex
1452 1446
1453 1447 Return: None
1454 1448 """
1455 1449 data = numpy.zeros( self.shapeBuffer, self.dtype )
1456 1450
1457 1451 junk = numpy.transpose(self.datablock, (1,2,0))
1458 1452
1459 1453 data['real'] = junk.real
1460 1454 data['imag'] = junk.imag
1461 1455
1462 1456 data = data.reshape( (-1) )
1463 1457
1464 1458 data.tofile( self.fp )
1465 1459
1466 1460 self.datablock.fill(0)
1467 1461
1468 1462 self.profileIndex = 0
1469 1463 self.flagIsNewFile = 0
1470 1464 self.flagIsNewBlock = 1
1471 1465
1472 1466 self.blockIndex += 1
1473 1467 self.nTotalBlocks += 1
1474 1468
1475 1469 def putData(self):
1476 1470 """
1477 1471 Setea un bloque de datos y luego los escribe en un file
1478 1472
1479 1473 Affected:
1480 1474 self.flagIsNewBlock
1481 1475 self.profileIndex
1482 1476
1483 1477 Return:
1484 1478 0 : Si no hay data o no hay mas files que puedan escribirse
1485 1479 1 : Si se escribio la data de un bloque en un file
1486 1480 """
1487 1481 if self.dataOut.flagNoData:
1488 1482 return 0
1489 1483
1490 1484 self.flagIsNewBlock = 0
1491 1485
1492 1486 if self.dataOut.flagTimeBlock:
1493 1487
1494 1488 self.datablock.fill(0)
1495 1489 self.profileIndex = 0
1496 1490 self.setNextFile()
1497 1491
1498 1492 if self.profileIndex == 0:
1499 1493 self.getBasicHeader()
1500 1494
1501 1495 self.datablock[:,self.profileIndex,:] = self.dataOut.data
1502 1496
1503 1497 self.profileIndex += 1
1504 1498
1505 1499 if self.hasAllDataInBuffer():
1506 1500 #if self.flagIsNewFile:
1507 1501 self.writeNextBlock()
1508 1502 # self.getDataHeader()
1509 1503
1510 1504 if self.flagNoMoreFiles:
1511 1505 #print 'Process finished'
1512 1506 return 0
1513 1507
1514 1508 return 1
1515 1509
1516 1510 def __getProcessFlags(self):
1517 1511
1518 1512 processFlags = 0
1519 1513
1520 1514 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1521 1515 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1522 1516 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1523 1517 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1524 1518 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1525 1519 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1526 1520
1527 1521 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1528 1522
1529 1523
1530 1524
1531 1525 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
1532 1526 PROCFLAG.DATATYPE_SHORT,
1533 1527 PROCFLAG.DATATYPE_LONG,
1534 1528 PROCFLAG.DATATYPE_INT64,
1535 1529 PROCFLAG.DATATYPE_FLOAT,
1536 1530 PROCFLAG.DATATYPE_DOUBLE]
1537 1531
1538 1532
1539 1533 for index in range(len(dtypeList)):
1540 1534 if self.dataOut.dtype == dtypeList[index]:
1541 1535 dtypeValue = datatypeValueList[index]
1542 1536 break
1543 1537
1544 1538 processFlags += dtypeValue
1545 1539
1546 1540 if self.dataOut.flagDecodeData:
1547 1541 processFlags += PROCFLAG.DECODE_DATA
1548 1542
1549 1543 if self.dataOut.flagDeflipData:
1550 1544 processFlags += PROCFLAG.DEFLIP_DATA
1551 1545
1552 1546 if self.dataOut.code != None:
1553 1547 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
1554 1548
1555 1549 if self.dataOut.nCohInt > 1:
1556 1550 processFlags += PROCFLAG.COHERENT_INTEGRATION
1557 1551
1558 1552 return processFlags
1559 1553
1560 1554
1561 1555 def __getBlockSize(self):
1562 1556 '''
1563 1557 Este metodos determina el cantidad de bytes para un bloque de datos de tipo Voltage
1564 1558 '''
1565 1559
1566 1560 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1567 1561 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1568 1562 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1569 1563 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1570 1564 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1571 1565 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1572 1566
1573 1567 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1574 1568 datatypeValueList = [1,2,4,8,4,8]
1575 1569 for index in range(len(dtypeList)):
1576 1570 if self.dataOut.dtype == dtypeList[index]:
1577 1571 datatypeValue = datatypeValueList[index]
1578 1572 break
1579 1573
1580 1574 blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.dataOut.nProfiles * datatypeValue * 2)
1581 1575
1582 1576 return blocksize
1583 1577
1584 1578 def getDataHeader(self):
1585 1579
1586 1580 """
1587 1581 Obtiene una copia del First Header
1588 1582
1589 1583 Affected:
1590 1584 self.systemHeaderObj
1591 1585 self.radarControllerHeaderObj
1592 1586 self.dtype
1593 1587
1594 1588 Return:
1595 1589 None
1596 1590 """
1597 1591
1598 1592 self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
1599 1593 self.systemHeaderObj.nChannels = self.dataOut.nChannels
1600 1594 self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()
1601 1595
1602 1596 self.getBasicHeader()
1603 1597
1604 1598 processingHeaderSize = 40 # bytes
1605 1599 self.processingHeaderObj.dtype = 0 # Voltage
1606 1600 self.processingHeaderObj.blockSize = self.__getBlockSize()
1607 1601 self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
1608 1602 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
1609 1603 self.processingHeaderObj.nWindows = 1 #podria ser 1 o self.dataOut.processingHeaderObj.nWindows
1610 1604 self.processingHeaderObj.processFlags = self.__getProcessFlags()
1611 1605 self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
1612 1606 self.processingHeaderObj.nIncohInt = 1 # Cuando la data de origen es de tipo Voltage
1613 1607 self.processingHeaderObj.totalSpectra = 0 # Cuando la data de origen es de tipo Voltage
1614 1608
1615 1609 if self.dataOut.code != None:
1616 1610 self.processingHeaderObj.code = self.dataOut.code
1617 1611 self.processingHeaderObj.nCode = self.dataOut.nCode
1618 1612 self.processingHeaderObj.nBaud = self.dataOut.nBaud
1619 1613 codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
1620 1614 processingHeaderSize += codesize
1621 1615
1622 1616 if self.processingHeaderObj.nWindows != 0:
1623 1617 self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
1624 1618 self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
1625 1619 self.processingHeaderObj.nHeights = self.dataOut.nHeights
1626 1620 self.processingHeaderObj.samplesWin = self.dataOut.nHeights
1627 1621 processingHeaderSize += 12
1628 1622
1629 1623 self.processingHeaderObj.size = processingHeaderSize
1630 1624
1631 1625 class SpectraReader(JRODataReader):
1632 1626 """
1633 1627 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1634 1628 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1635 1629 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1636 1630
1637 1631 paresCanalesIguales * alturas * perfiles (Self Spectra)
1638 1632 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1639 1633 canales * alturas (DC Channels)
1640 1634
1641 1635 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1642 1636 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1643 1637 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1644 1638 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1645 1639
1646 1640 Example:
1647 1641 dpath = "/home/myuser/data"
1648 1642
1649 1643 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1650 1644
1651 1645 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1652 1646
1653 1647 readerObj = SpectraReader()
1654 1648
1655 1649 readerObj.setup(dpath, startTime, endTime)
1656 1650
1657 1651 while(True):
1658 1652
1659 1653 readerObj.getData()
1660 1654
1661 1655 print readerObj.data_spc
1662 1656
1663 1657 print readerObj.data_cspc
1664 1658
1665 1659 print readerObj.data_dc
1666 1660
1667 1661 if readerObj.flagNoMoreFiles:
1668 1662 break
1669 1663
1670 1664 """
1671 1665
1672 1666 pts2read_SelfSpectra = 0
1673 1667
1674 1668 pts2read_CrossSpectra = 0
1675 1669
1676 1670 pts2read_DCchannels = 0
1677 1671
1678 1672 ext = ".pdata"
1679 1673
1680 1674 optchar = "P"
1681 1675
1682 1676 dataOut = None
1683 1677
1684 1678 nRdChannels = None
1685 1679
1686 1680 nRdPairs = None
1687 1681
1688 1682 rdPairList = []
1689 1683
1690 1684
1691 def __init__(self, dataOut=None):
1685 def __init__(self):
1692 1686 """
1693 1687 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1694 1688
1695 1689 Inputs:
1696 1690 dataOut : Objeto de la clase Spectra. Este objeto sera utilizado para
1697 1691 almacenar un perfil de datos cada vez que se haga un requerimiento
1698 1692 (getData). El perfil sera obtenido a partir del buffer de datos,
1699 1693 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1700 1694 bloque de datos.
1701 1695 Si este parametro no es pasado se creara uno internamente.
1702 1696
1703 1697 Affected:
1704 1698 self.dataOut
1705 1699
1706 1700 Return : None
1707 1701 """
1708 1702
1709 1703 self.isConfig = False
1710 1704
1711 1705 self.pts2read_SelfSpectra = 0
1712 1706
1713 1707 self.pts2read_CrossSpectra = 0
1714 1708
1715 1709 self.pts2read_DCchannels = 0
1716 1710
1717 1711 self.datablock = None
1718 1712
1719 1713 self.utc = None
1720 1714
1721 1715 self.ext = ".pdata"
1722 1716
1723 1717 self.optchar = "P"
1724 1718
1725 1719 self.basicHeaderObj = BasicHeader()
1726 1720
1727 1721 self.systemHeaderObj = SystemHeader()
1728 1722
1729 1723 self.radarControllerHeaderObj = RadarControllerHeader()
1730 1724
1731 1725 self.processingHeaderObj = ProcessingHeader()
1732 1726
1733 1727 self.online = 0
1734 1728
1735 1729 self.fp = None
1736 1730
1737 1731 self.idFile = None
1738 1732
1739 1733 self.dtype = None
1740 1734
1741 1735 self.fileSizeByHeader = None
1742 1736
1743 1737 self.filenameList = []
1744 1738
1745 1739 self.filename = None
1746 1740
1747 1741 self.fileSize = None
1748 1742
1749 1743 self.firstHeaderSize = 0
1750 1744
1751 1745 self.basicHeaderSize = 24
1752 1746
1753 1747 self.pathList = []
1754 1748
1755 1749 self.lastUTTime = 0
1756 1750
1757 1751 self.maxTimeStep = 30
1758 1752
1759 1753 self.flagNoMoreFiles = 0
1760 1754
1761 1755 self.set = 0
1762 1756
1763 1757 self.path = None
1764 1758
1765 1759 self.delay = 3 #seconds
1766 1760
1767 1761 self.nTries = 3 #quantity tries
1768 1762
1769 1763 self.nFiles = 3 #number of files for searching
1770 1764
1771 1765 self.nReadBlocks = 0
1772 1766
1773 1767 self.flagIsNewFile = 1
1774 1768
1775 1769 self.ippSeconds = 0
1776 1770
1777 1771 self.flagTimeBlock = 0
1778 1772
1779 1773 self.flagIsNewBlock = 0
1780 1774
1781 1775 self.nTotalBlocks = 0
1782 1776
1783 1777 self.blocksize = 0
1778
1779 dataOut = self.createObjByDefault()
1784 1780
1785 1781
1786 1782 def createObjByDefault(self):
1787 1783
1788 1784 dataObj = Spectra()
1789 1785
1790 1786 return dataObj
1791 1787
1792 1788 def __hasNotDataInBuffer(self):
1793 1789 return 1
1794 1790
1795 1791
1796 1792 def getBlockDimension(self):
1797 1793 """
1798 1794 Obtiene la cantidad de puntos a leer por cada bloque de datos
1799 1795
1800 1796 Affected:
1801 1797 self.nRdChannels
1802 1798 self.nRdPairs
1803 1799 self.pts2read_SelfSpectra
1804 1800 self.pts2read_CrossSpectra
1805 1801 self.pts2read_DCchannels
1806 1802 self.blocksize
1807 1803 self.dataOut.nChannels
1808 1804 self.dataOut.nPairs
1809 1805
1810 1806 Return:
1811 1807 None
1812 1808 """
1813 1809 self.nRdChannels = 0
1814 1810 self.nRdPairs = 0
1815 1811 self.rdPairList = []
1816 1812
1817 1813 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1818 1814 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1819 1815 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1820 1816 else:
1821 1817 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1822 1818 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1823 1819
1824 1820 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1825 1821
1826 1822 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1827 1823 self.blocksize = self.pts2read_SelfSpectra
1828 1824
1829 1825 if self.processingHeaderObj.flag_cspc:
1830 1826 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1831 1827 self.blocksize += self.pts2read_CrossSpectra
1832 1828
1833 1829 if self.processingHeaderObj.flag_dc:
1834 1830 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1835 1831 self.blocksize += self.pts2read_DCchannels
1836 1832
1837 1833 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1838 1834
1839 1835
1840 1836 def readBlock(self):
1841 1837 """
1842 1838 Lee el bloque de datos desde la posicion actual del puntero del archivo
1843 1839 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1844 1840 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1845 1841 es seteado a 0
1846 1842
1847 1843 Return: None
1848 1844
1849 1845 Variables afectadas:
1850 1846
1851 1847 self.flagIsNewFile
1852 1848 self.flagIsNewBlock
1853 1849 self.nTotalBlocks
1854 1850 self.data_spc
1855 1851 self.data_cspc
1856 1852 self.data_dc
1857 1853
1858 1854 Exceptions:
1859 1855 Si un bloque leido no es un bloque valido
1860 1856 """
1861 1857 blockOk_flag = False
1862 1858 fpointer = self.fp.tell()
1863 1859
1864 1860 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1865 1861 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1866 1862
1867 1863 if self.processingHeaderObj.flag_cspc:
1868 1864 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1869 1865 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1870 1866
1871 1867 if self.processingHeaderObj.flag_dc:
1872 1868 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1873 1869 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1874 1870
1875 1871
1876 1872 if not(self.processingHeaderObj.shif_fft):
1877 1873 spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1878 1874
1879 1875 if self.processingHeaderObj.flag_cspc:
1880 1876 cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1881 1877
1882 1878
1883 1879 spc = numpy.transpose( spc, (0,2,1) )
1884 1880 self.data_spc = spc
1885 1881
1886 1882 if self.processingHeaderObj.flag_cspc:
1887 1883 cspc = numpy.transpose( cspc, (0,2,1) )
1888 1884 self.data_cspc = cspc['real'] + cspc['imag']*1j
1889 1885 else:
1890 1886 self.data_cspc = None
1891 1887
1892 1888 if self.processingHeaderObj.flag_dc:
1893 1889 self.data_dc = dc['real'] + dc['imag']*1j
1894 1890 else:
1895 1891 self.data_dc = None
1896 1892
1897 1893 self.flagIsNewFile = 0
1898 1894 self.flagIsNewBlock = 1
1899 1895
1900 1896 self.nTotalBlocks += 1
1901 1897 self.nReadBlocks += 1
1902 1898
1903 1899 return 1
1904 1900
1905 1901
1906 1902 def getData(self):
1907 1903 """
1908 1904 Copia el buffer de lectura a la clase "Spectra",
1909 1905 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1910 1906 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1911 1907
1912 1908 Return:
1913 1909 0 : Si no hay mas archivos disponibles
1914 1910 1 : Si hizo una buena copia del buffer
1915 1911
1916 1912 Affected:
1917 1913 self.dataOut
1918 1914
1919 1915 self.flagTimeBlock
1920 1916 self.flagIsNewBlock
1921 1917 """
1922 1918
1923 1919 if self.flagNoMoreFiles: return 0
1924 1920
1925 1921 self.flagTimeBlock = 0
1926 1922 self.flagIsNewBlock = 0
1927 1923
1928 1924 if self.__hasNotDataInBuffer():
1929 1925
1930 1926 if not( self.readNextBlock() ):
1931 1927 return 0
1932 1928
1933 1929 # self.updateDataHeader()
1934 1930
1935 1931 if self.flagNoMoreFiles == 1:
1936 1932 print 'Process finished'
1937 1933 return 0
1938 1934
1939 1935 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1940 1936
1941 1937 if self.data_dc == None:
1942 1938 self.dataOut.flagNoData = True
1943 1939 return 0
1944 1940
1945 1941
1946 1942 self.dataOut.data_spc = self.data_spc
1947 1943
1948 1944 self.dataOut.data_cspc = self.data_cspc
1949 1945
1950 1946 self.dataOut.data_dc = self.data_dc
1951 1947
1952 1948 self.dataOut.flagTimeBlock = self.flagTimeBlock
1953 1949
1954 1950 self.dataOut.flagNoData = False
1955 1951
1956 1952 self.dataOut.dtype = self.dtype
1957 1953
1958 1954 self.dataOut.nChannels = self.nRdChannels
1959 1955
1960 1956 self.dataOut.nPairs = self.nRdPairs
1961 1957
1962 1958 self.dataOut.pairsList = self.rdPairList
1963 1959
1964 1960 self.dataOut.nHeights = self.processingHeaderObj.nHeights
1965 1961
1966 1962 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1967 1963
1968 1964 self.dataOut.nFFTPoints = self.processingHeaderObj.profilesPerBlock
1969 1965
1970 1966 self.dataOut.nIncohInt = self.processingHeaderObj.nIncohInt
1971 1967
1972 1968
1973 1969 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1974 1970
1975 1971 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1976 1972
1977 1973 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1978 1974
1979 1975 self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1980 1976
1981 1977 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
1982 1978
1983 1979 self.dataOut.ippSeconds = self.ippSeconds
1984 1980
1985 1981 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOut.nFFTPoints
1986 1982
1987 1983 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
1988 1984
1989 1985 # self.profileIndex += 1
1990 1986
1991 1987 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1992 1988
1993 1989 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1994 1990
1995 1991 return self.dataOut.data_spc
1996 1992
1997 1993
class SpectraWriter(JRODataWriter):
    """
    Writes spectra data to processed data files (.pdata).

    Data is always written one whole block at a time: the self-spectra
    buffer, then (when present) the cross-spectra buffer and the
    DC-channel buffer, laid out as described by the processing header.
    """

    ext = ".pdata"

    optchar = "P"

    # Expected buffer shapes, computed from the processing header in
    # setBlockDimension()
    shape_spc_Buffer = None     # (nChannels, nHeights, profilesPerBlock)

    shape_cspc_Buffer = None    # (nPairs, nHeights, profilesPerBlock)

    shape_dc_Buffer = None      # (nChannels, nHeights)

    data_spc = None     # self-spectra block (real-valued)

    data_cspc = None    # cross-spectra block (complex); may be None

    data_dc = None      # DC-channel block (complex); may be None

    def __init__(self):
        """
        Initializes the SpectraWriter with empty data buffers and fresh
        header objects.  The output file/stream is configured later
        (self.isConfig starts as False).

        Affected:
            self.isConfig
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return: None
        """

        self.isConfig = False

        self.nTotalBlocks = 0

        self.data_spc = None

        self.data_cspc = None

        self.data_dc = None

        self.fp = None

        self.flagIsNewFile = 1

        self.flagIsNewBlock = 0

        self.flagNoMoreFiles = 0

        self.setFile = None

        self.dtype = None

        self.path = None

        self.noMoreFiles = 0

        self.filename = None

        self.basicHeaderObj = BasicHeader()

        self.systemHeaderObj = SystemHeader()

        self.radarControllerHeaderObj = RadarControllerHeader()

        self.processingHeaderObj = ProcessingHeader()

    def hasAllDataInBuffer(self):
        # Every putData() call delivers a complete block, so the buffer
        # is always "full".
        return 1

    def setBlockDimension(self):
        """
        Computes the dimensional shapes of the sub-blocks that compose a
        data block, from the processing header and the current dataOut.

        Affected:
            self.shape_spc_Buffer
            self.shape_cspc_Buffer
            self.shape_dc_Buffer

        Return: None
        """
        self.shape_spc_Buffer = (self.dataOut.nChannels,
                                 self.processingHeaderObj.nHeights,
                                 self.processingHeaderObj.profilesPerBlock)

        self.shape_cspc_Buffer = (self.dataOut.nPairs,
                                  self.processingHeaderObj.nHeights,
                                  self.processingHeaderObj.profilesPerBlock)

        self.shape_dc_Buffer = (self.dataOut.nChannels,
                                self.processingHeaderObj.nHeights)

    def writeBlock(self):
        """
        Writes the buffered block to the designated file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc
            self.flagIsNewFile
            self.flagIsNewBlock
            self.nTotalBlocks
            self.nWriteBlocks

        Return: None
        """

        spc = numpy.transpose(self.data_spc, (0, 2, 1))
        if not self.processingHeaderObj.shif_fft:
            # roll along the profiles axis so the spectrum is stored
            # FFT-shifted, as the file format expects
            spc = numpy.roll(spc, self.processingHeaderObj.profilesPerBlock // 2, axis=2)
        data = spc.reshape((-1))
        data.tofile(self.fp)

        # NOTE: use "is not None" -- comparing a numpy array with
        # "!= None" is an elementwise comparison, not an identity test
        if self.data_cspc is not None:
            data = numpy.zeros(self.shape_cspc_Buffer, self.dtype)
            cspc = numpy.transpose(self.data_cspc, (0, 2, 1))
            if not self.processingHeaderObj.shif_fft:
                cspc = numpy.roll(cspc, self.processingHeaderObj.profilesPerBlock // 2, axis=2)
            data['real'] = cspc.real
            data['imag'] = cspc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        if self.data_dc is not None:
            data = numpy.zeros(self.shape_dc_Buffer, self.dtype)
            dc = self.data_dc
            data['real'] = dc.real
            data['imag'] = dc.imag
            data = data.reshape((-1))
            data.tofile(self.fp)

        self.data_spc.fill(0)
        # guard the optional buffers: data_dc/data_cspc may be None
        if self.data_dc is not None:
            self.data_dc.fill(0)
        if self.data_cspc is not None:
            self.data_cspc.fill(0)

        self.flagIsNewFile = 0
        self.flagIsNewBlock = 1
        self.nTotalBlocks += 1
        self.nWriteBlocks += 1
        self.blockIndex += 1

    def putData(self):
        """
        Stores a block of data and then writes it to a file.

        Affected:
            self.data_spc
            self.data_cspc
            self.data_dc

        Return:
            0 : if there is no data or no more files can be written
            1 : if one block of data was written to a file
        """

        if self.dataOut.flagNoData:
            return 0

        self.flagIsNewBlock = 0

        if self.dataOut.flagTimeBlock:
            # a time discontinuity starts a new file; clear whatever is
            # buffered (buffers may still be None before the first block)
            if self.data_spc is not None:
                self.data_spc.fill(0)
            if self.data_cspc is not None:
                self.data_cspc.fill(0)
            if self.data_dc is not None:
                self.data_dc.fill(0)
            self.setNextFile()

        if self.flagIsNewFile == 0:
            self.getBasicHeader()

        self.data_spc = self.dataOut.data_spc
        self.data_cspc = self.dataOut.data_cspc
        self.data_dc = self.dataOut.data_dc

        # #self.processingHeaderObj.dataBlocksPerFile)
        if self.hasAllDataInBuffer():
            # self.getDataHeader()
            self.writeNextBlock()

        if self.flagNoMoreFiles:
            #print 'Process finished'
            return 0

        return 1

    def __getProcessFlags(self):
        """
        Builds the processing-flags bitmask from the dataOut dtype and
        its processing state.

        Return:
            processFlags : integer PROCFLAG bitmask

        Raises:
            ValueError : if the dataOut dtype is not a supported format
        """

        processFlags = 0

        dtypeList = [numpy.dtype([('real', '<i1'), ('imag', '<i1')]),
                     numpy.dtype([('real', '<i2'), ('imag', '<i2')]),
                     numpy.dtype([('real', '<i4'), ('imag', '<i4')]),
                     numpy.dtype([('real', '<i8'), ('imag', '<i8')]),
                     numpy.dtype([('real', '<f4'), ('imag', '<f4')]),
                     numpy.dtype([('real', '<f8'), ('imag', '<f8')])]

        datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
                             PROCFLAG.DATATYPE_SHORT,
                             PROCFLAG.DATATYPE_LONG,
                             PROCFLAG.DATATYPE_INT64,
                             PROCFLAG.DATATYPE_FLOAT,
                             PROCFLAG.DATATYPE_DOUBLE]

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                dtypeValue = datatypeValueList[index]
                break
        else:
            # previously this fell through and raised NameError below
            raise ValueError("unsupported dtype: %s" % str(self.dataOut.dtype))

        processFlags += dtypeValue

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nIncohInt > 1:
            processFlags += PROCFLAG.INCOHERENT_INTEGRATION

        if self.dataOut.data_dc is not None:
            processFlags += PROCFLAG.SAVE_CHANNELS_DC

        return processFlags

    def __getBlockSize(self):
        """
        Determines the number of bytes of one Spectra data block:
        self-spectra plus optional cross-spectra and DC channels.

        Return:
            blocksize : block size in bytes

        Raises:
            ValueError : if the dataOut dtype is not a supported format
        """

        dtypeList = [numpy.dtype([('real', '<i1'), ('imag', '<i1')]),
                     numpy.dtype([('real', '<i2'), ('imag', '<i2')]),
                     numpy.dtype([('real', '<i4'), ('imag', '<i4')]),
                     numpy.dtype([('real', '<i8'), ('imag', '<i8')]),
                     numpy.dtype([('real', '<f4'), ('imag', '<f4')]),
                     numpy.dtype([('real', '<f8'), ('imag', '<f8')])]

        datatypeValueList = [1, 2, 4, 8, 4, 8]  # bytes per scalar component

        for index in range(len(dtypeList)):
            if self.dataOut.dtype == dtypeList[index]:
                datatypeValue = datatypeValueList[index]
                break
        else:
            raise ValueError("unsupported dtype: %s" % str(self.dataOut.dtype))

        pts2write = self.dataOut.nHeights * self.dataOut.nFFTPoints

        # self-spectra are real-valued; cross-spectra and DC are complex
        # (hence the factor of 2)
        pts2write_SelfSpectra = int(self.dataOut.nChannels * pts2write)
        blocksize = (pts2write_SelfSpectra * datatypeValue)

        if self.dataOut.data_cspc is not None:
            pts2write_CrossSpectra = int(self.dataOut.nPairs * pts2write)
            blocksize += (pts2write_CrossSpectra * datatypeValue * 2)

        if self.dataOut.data_dc is not None:
            pts2write_DCchannels = int(self.dataOut.nChannels * self.dataOut.nHeights)
            blocksize += (pts2write_DCchannels * datatypeValue * 2)

        return blocksize

    def getDataHeader(self):
        """
        Builds the output headers as a copy of the dataOut first header.

        Affected:
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj
            self.dtype

        Return:
            None
        """

        self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
        self.systemHeaderObj.nChannels = self.dataOut.nChannels
        self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()

        self.getBasicHeader()

        processingHeaderSize = 40  # bytes
        # NOTE(review): 0 is the Voltage dtype code, apparently inherited
        # from the voltage writer -- confirm the spectra dtype code
        self.processingHeaderObj.dtype = 0  # Voltage
        self.processingHeaderObj.blockSize = self.__getBlockSize()
        self.processingHeaderObj.profilesPerBlock = self.dataOut.nFFTPoints
        self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
        self.processingHeaderObj.nWindows = 1  # could be 1 or self.dataOut.processingHeaderObj.nWindows
        self.processingHeaderObj.processFlags = self.__getProcessFlags()
        self.processingHeaderObj.nCohInt = self.dataOut.nCohInt  # required to compute timeInterval
        self.processingHeaderObj.nIncohInt = self.dataOut.nIncohInt
        self.processingHeaderObj.totalSpectra = self.dataOut.nPairs + self.dataOut.nChannels

        if self.processingHeaderObj.totalSpectra > 0:
            # each self-spectrum is encoded as the pair (channel, channel)
            channelList = []
            for channel in range(self.dataOut.nChannels):
                channelList.append(channel)
                channelList.append(channel)

            pairsList = []
            for pair in self.dataOut.pairsList:
                pairsList.append(pair[0])
                pairsList.append(pair[1])
            spectraComb = channelList + pairsList
            spectraComb = numpy.array(spectraComb, dtype="u1")
            self.processingHeaderObj.spectraComb = spectraComb
            sizeOfSpcComb = len(spectraComb)
            processingHeaderSize += sizeOfSpcComb

        if self.dataOut.code is not None:
            self.processingHeaderObj.code = self.dataOut.code
            self.processingHeaderObj.nCode = self.dataOut.nCode
            self.processingHeaderObj.nBaud = self.dataOut.nBaud
            nCodeSize = 4  # bytes
            nBaudSize = 4  # bytes
            codeSize = 4   # bytes
            sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOut.nCode * self.dataOut.nBaud)
            processingHeaderSize += sizeOfCode

        if self.processingHeaderObj.nWindows != 0:
            self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
            self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
            self.processingHeaderObj.nHeights = self.dataOut.nHeights
            self.processingHeaderObj.samplesWin = self.dataOut.nHeights
            sizeOfFirstHeight = 4
            sizeOfdeltaHeight = 4
            sizeOfnHeights = 4
            sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights) * self.processingHeaderObj.nWindows
            processingHeaderSize += sizeOfWindows

        self.processingHeaderObj.size = processingHeaderSize
2360 2349
class SpectraHeisWriter():
    """
    Writes one block of Heis spectra per putData() call to a FITS
    (.fits) file, organized in per-day subfolders (DYYYYddd).
    """

    # kept for backward compatibility; not used internally
    i = 0

    def __init__(self, dataOut):
        """
        dataOut : spectra data object read on each putData() call
        """
        self.wrObj = FITS()
        self.dataOut = dataOut

    @staticmethod
    def isNumber(str):
        # NOTE: the parameter name shadows the builtin; kept for
        # interface compatibility.  Previously this method lacked
        # @staticmethod, so instance calls passed the instance as "str".
        """
        Checks whether a string can be converted to a number.

        Input:
            str : string to analyze

        Return:
            True  : the string is numeric
            False : the string is not numeric
        """
        try:
            float(str)
            return True
        except:
            return False

    def setup(self, wrpath):
        """
        Sets (and creates, if needed) the output root path and resets
        the per-run file counter.
        """
        if not(os.path.exists(wrpath)):
            os.mkdir(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Writes the current dataOut spectra block as a FITS file with a
        frequency column plus one power column per channel (8 channels).

        Return:
            1 : always, after the file is written
        """
        name = time.localtime(self.dataOut.utctime)
        ext = ".fits"
        subfolder = 'D%4.4d%3.3d' % (name.tm_year, name.tm_yday)

        doypath = os.path.join(self.wrpath, subfolder)
        if not(os.path.exists(doypath)):
            os.mkdir(doypath)
        self.setFile += 1
        # renamed local: "file" shadowed the builtin
        fileName = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year, name.tm_yday, self.setFile, ext)

        filename = os.path.join(self.wrpath, subfolder, fileName)

        # NOTE(review): freq is sized with nHeights while the columns are
        # formatted with nFFTPoints -- confirm these always agree here
        freq = numpy.arange(-1 * self.dataOut.nHeights / 2., self.dataOut.nHeights / 2.) / (2 * self.dataOut.ippSeconds)

        colFormat = str(self.dataOut.nFFTPoints) + 'E'
        cols = [self.wrObj.setColF(name="freq", format=colFormat, array=freq)]
        # one power column (in dB) per channel: P_Ch1 .. P_Ch8
        for channel in range(8):
            cols.append(self.wrObj.writeData(name="P_Ch%d" % (channel + 1),
                                             format=colFormat,
                                             data=10 * numpy.log10(self.dataOut.data_spc[channel, :])))

        n = self.dataOut.data_spc[6, :]
        a = self.wrObj.cFImage(n)
        b = self.wrObj.Ctable(*cols)
        self.wrObj.CFile(a, b)
        self.wrObj.wFile(filename)
        return 1
2432 2421
class FITS:
    """
    Small convenience wrapper around pyfits for assembling a FITS file
    made of one primary image HDU and one binary-table HDU.
    """

    # Last column definition handed in via setColF()/writeData()
    name = None
    format = None
    array = None
    data = None
    thdulist = None

    def __init__(self):

        pass

    def setColF(self, name, format, array):
        """Builds and returns a float32 pyfits Column from *array*."""
        self.name = name
        self.format = format
        self.array = array
        values = numpy.array([self.array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col1

    def writeHeader(self,):
        pass

    def writeData(self, name, format, data):
        """Builds and returns a float32 pyfits Column from *data*."""
        self.name = name
        self.format = format
        self.data = data
        values = numpy.array([self.data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col2

    def cFImage(self, n):
        """Creates the primary image HDU holding *n*."""
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        """Creates a binary-table HDU from the nine given columns."""
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        """Assembles the HDU list (image HDU + table HDU)."""
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        """Writes the assembled HDU list to *filename*."""
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now