##// END OF EJS Templates
Adicion del metodo run a la clase jrodataio
Miguel Valdez -
r176:9bf673cfa96e
parent child
Show More
@@ -1,2461 +1,2478
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from Data.JROData import *
15 15 from JROHeaderIO import *
16 16
def isNumber(str):
    """
    Check whether a string can be converted to a number.

    Input:
        str : string to test for numeric convertibility

    Return:
        True  : the string represents a number
        False : it does not
    """
    try:
        float( str )
        return True
    except (TypeError, ValueError):
        # float() raises ValueError for non-numeric text and TypeError for
        # non-string/number inputs; any other exception should propagate
        # instead of being silently swallowed by a bare except.
        return False
35 35
def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca data file contains data inside the given
    date range.

    Inputs:
        filename       : full path of the data file in Jicamarca format (.r)
        startUTSeconds : start of the range, in seconds since 01/01/1970
        endUTSeconds   : end of the range, in seconds since 01/01/1970

    Return:
        1 if the file's basic-header timestamp falls inside the half-open
        interval [startUTSeconds, endUTSeconds), 0 otherwise.

    Exceptions:
        IOError if the file does not exist or cannot be opened.
    """
    basicHeaderObj = BasicHeader()

    try:
        fp = open(filename,'rb')
    except:
        raise IOError, "The file %s can't be opened" %(filename)

    sts = basicHeaderObj.read(fp)
    fp.close()

    # An unreadable header means the file cannot be classified; skip it.
    if not(sts):
        print "Skipping the file %s because it has not a valid header" %(filename)
        return 0

    # Half-open comparison: start <= utc < end.
    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
75 75
def getlastFileFromPath(path, ext):
    """
    Filter the files of *path*, keeping only those that follow the
    "xYYYYDDDSSS.ext" naming convention, and return the last one in
    case-insensitive alphabetical order.

    Input:
        path : directory containing the candidate files
        ext  : extension of the files (compared case-insensitively)

    Return:
        The name (without path) of the last valid file, or None if the
        directory holds no valid file.
    """
    validFilelist = []
    fileList = os.listdir(path)

    # Expected layout (character positions):
    # 0 1234 567 89A BCDE
    # x YYYY DDD SSS .ext
    for thisFile in fileList:
        try:
            int(thisFile[1:5])  # year field must be numeric
            int(thisFile[5:8])  # day-of-year field must be numeric
        except ValueError:
            # Non-conforming names (too short or non-numeric fields) are
            # simply skipped; a bare except here used to hide real errors.
            continue

        if os.path.splitext(thisFile)[-1].upper() != ext.upper():
            continue

        validFilelist.append(thisFile)

    if validFilelist:
        return sorted(validFilelist, key=str.lower)[-1]

    return None
110 110
def checkForRealPath(path, year, doy, set, ext):
    """
    Because Linux is case sensitive, probe the upper/lower-case prefix
    combinations of the day folder ("xYYYYDDD") and the file name
    ("xYYYYDDDSSS.ext") to locate the real path of a data file.

    Example: for .../D2009307/P2009307367.ext the candidates tried are
        .../x2009307/y2009307367.ext
        .../x2009307/Y2009307367.ext
        .../X2009307/y2009307367.ext
        .../X2009307/Y2009307367.ext

    Return:
        (filepath, filename) when a combination exists on disk;
        (None, last filename tried) when none exists;
        (None, None) for an unknown extension.
    """
    if ext.lower() == ".r":        # voltage files
        dirPrefixes, filePrefixes = "dD", "dD"
    elif ext.lower() == ".pdata":  # spectra files
        dirPrefixes, filePrefixes = "dD", "pP"
    else:
        return None, None

    filename = None
    for dirPrefix in dirPrefixes:
        for filePrefix in filePrefixes:
            doypath = "%s%04d%03d" % (dirPrefix, year, doy)
            filename = "%s%04d%03d%03d%s" % (filePrefix, year, doy, set, ext)
            candidate = os.path.join(path, doypath, filename)
            if os.path.exists(candidate):
                return candidate, filename

    # Nothing matched: report the last combination that was tried.
    return None, filename
160 160
class JRODataIO:
    """
    Common state shared by the JRO data readers and writers: header
    objects, current-file bookkeeping and status flags.
    """

    c = 3E8  # speed of light [m/s]; used to derive ippSeconds from ipp

    # One-shot configuration flag consulted by the run() methods.
    # NOTE(review): as a double-underscore attribute this mangles to
    # _JRODataIO__isConfig, so subclass code referencing self.__isConfig
    # resolves to a *different* mangled name -- confirm intended.
    __isConfig = False

    # Header objects describing the current file.
    basicHeaderObj = BasicHeader()

    systemHeaderObj = SystemHeader()

    radarControllerHeaderObj = RadarControllerHeader()

    processingHeaderObj = ProcessingHeader()

    online = 0  # nonzero when reading files that are still being produced

    dtype = None  # numpy dtype of the raw samples (set from the headers)

    # NOTE(review): mutable class-level lists are shared by all instances
    # until rebound -- confirm instances always reassign them.
    pathList = []

    filenameList = []

    filename = None

    ext = None  # file extension: ".r" (voltage) or ".pdata" (spectra)

    flagNoMoreFiles = 0  # set when the file list / search is exhausted

    flagIsNewFile = 1  # set right after a new file is opened

    flagTimeBlock = 0  # set when a gap larger than maxTimeStep is detected

    flagIsNewBlock = 0

    fp = None  # file object of the currently open data file

    firstHeaderSize = 0

    basicHeaderSize = 24  # fixed size in bytes of the basic header

    versionFile = 1103

    fileSize = None

    ippSeconds = None  # inter-pulse period in seconds

    fileSizeByHeader = None  # expected file size computed from the headers

    fileIndex = None

    profileIndex = None

    blockIndex = None

    nTotalBlocks = None

    maxTimeStep = 30  # seconds; larger gaps between blocks set flagTimeBlock

    lastUTTime = None

    datablock = None

    dataOutObj = None  # output data object filled/consumed by subclasses

    blocksize = None

    def __init__(self):
        pass
227 229
class JRODataReader(JRODataIO):
    """
    Abstract reader of JRO data files. Concrete subclasses (e.g.
    VoltageReader) implement createObjByDefault, getBlockDimension,
    readBlock and getData.
    """

    nReadBlocks = 0  # blocks read so far from the current file

    delay = 60  # number of seconds waiting a new file

    nTries = 3  # quantity tries

    nFiles = 3  # number of files for searching
238 240
    def __init__(self):

        """
        Abstract constructor: concrete readers must provide their own
        __init__ (see VoltageReader).
        """

        raise ValueError, "This method has not been implemented"
247 249
    def createObjByDefault(self):
        """
        Abstract factory: create and return a default output data object.
        Must be implemented by each concrete reader.
        """
        raise ValueError, "This method has not been implemented"
253 255
    def getBlockDimension(self):
        # Abstract: derive the shape/size of a data block from the headers.
        raise ValueError, "No implemented"
257 259
    def __searchFilesOffLine(self,
                            path,
                            startDate,
                            endDate,
                            startTime=datetime.time(0,0,0),
                            endTime=datetime.time(23,59,59),
                            set=None,
                            expLabel="",
                            ext=".r"):
        """
        Build the list of data files under *path* whose basic-header time
        falls inside [startDate+startTime, endDate+endTime], scanning the
        day folders "?YYYYDDD".

        Return:
            (pathList, filenameList), or (None, None) when nothing matches.

        NOTE(review): the *set* parameter is accepted but never used here.
        """
        dirList = []
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path,thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None

        pathList = []
        dateList = []

        thisDate = startDate

        # Collect one day folder per date in the range (any case prefix).
        while(thisDate <= endDate):
            year = thisDate.timetuple().tm_year
            doy = thisDate.timetuple().tm_yday

            match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
            if len(match) == 0:
                thisDate += datetime.timedelta(1)
                continue

            pathList.append(os.path.join(path,match[0],expLabel))
            dateList.append(thisDate)
            thisDate += datetime.timedelta(1)

        filenameList = []
        for index in range(len(pathList)):

            thisPath = pathList[index]
            fileList = glob.glob1(thisPath, "*%s" %ext)
            fileList.sort()

            # Search for data within the requested hours of this date.
            thisDate = dateList[index]
            startDT = datetime.datetime.combine(thisDate, startTime)
            endDT = datetime.datetime.combine(thisDate, endTime)

            startUtSeconds = time.mktime(startDT.timetuple())
            endUtSeconds = time.mktime(endDT.timetuple())

            for file in fileList:

                filename = os.path.join(thisPath,file)

                if isThisFileinRange(filename, startUtSeconds, endUtSeconds):
                    filenameList.append(filename)

        if not(filenameList):
            return None, None

        self.filenameList = filenameList

        return pathList, filenameList
    def __searchFilesOnLine(self, path, startDate=None, endDate=None, startTime=None, endTime=None, expLabel = "", ext = None):

        """
        Look for the last file of the last folder (optionally constrained by
        startDate/endDate) and return it together with its date fields.

        Input:
            path      : folder containing the day subfolders with data files
            startDate : start date; folders before it are rejected
                        (datetime.date object)
            endDate   : end date; folders after it are rejected
                        (datetime.date object)
            startTime : start time (datetime.time object)
            endTime   : end time (datetime.time object)
            expLabel  : sub-experiment name (subfolder)
            ext       : file extension

        Return:
            directory : the folder where the file was found
            filename  : the last file of that folder
            year      : the year
            doy       : the day-of-year number
            set       : the set number of the file
            (five Nones when nothing valid is found)
        """
        dirList = []
        pathList = []
        directory = None

        # Keep only the subdirectories of *path*.
        for thisPath in os.listdir(path):
            if os.path.isdir(os.path.join(path, thisPath)):
                dirList.append(thisPath)

        if not(dirList):
            return None, None, None, None, None

        dirList = sorted( dirList, key=str.lower )

        if startDate:
            startDateTime = datetime.datetime.combine(startDate, startTime)
            thisDateTime = startDateTime
            if endDate == None: endDateTime = startDateTime
            else: endDateTime = datetime.datetime.combine(endDate, endTime)

            # Collect the day folders inside the requested range.
            while(thisDateTime <= endDateTime):
                year = thisDateTime.timetuple().tm_year
                doy = thisDateTime.timetuple().tm_yday

                match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
                if len(match) == 0:
                    thisDateTime += datetime.timedelta(1)
                    continue

                pathList.append(os.path.join(path,match[0], expLabel))
                thisDateTime += datetime.timedelta(1)

            if not(pathList):
                print "\tNo files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime())
                return None, None, None, None, None

            directory = pathList[0]

        else:
            # No date constraint: use the last folder alphabetically.
            directory = dirList[-1]
            directory = os.path.join(path,directory)

        filename = getlastFileFromPath(directory, ext)

        if not(filename):
            return None, None, None, None, None

        if not(self.__verifyFile(os.path.join(directory, filename))):
            return None, None, None, None, None

        # Decode the "xYYYYDDDSSS.ext" fields from the file name.
        year = int( filename[1:5] )
        doy = int( filename[5:8] )
        set = int( filename[8:11] )

        return directory, filename, year, doy, set
411 413
412 414 def setup(self,dataOutObj=None,
413 415 path=None,
414 416 startDate=None,
415 417 endDate=None,
416 418 startTime=datetime.time(0,0,0),
417 419 endTime=datetime.time(23,59,59),
418 420 set=0,
419 421 expLabel = "",
420 422 ext = None,
421 423 online = False,
422 424 delay = 60):
423 425
424 426 if path == None:
425 427 raise ValueError, "The path is not valid"
426 428
427 429 if ext == None:
428 430 ext = self.ext
429 431
430 432 if dataOutObj == None:
431 433 dataOutObj = self.createObjByDefault()
432 434
433 435 self.dataOutObj = dataOutObj
434 436
435 437 if online:
436 438 print "Searching files in online mode..."
437 439 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext)
438 440
439 441 if not(doypath):
440 442 for nTries in range( self.nTries ):
441 443 print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
442 444 time.sleep( self.delay )
443 445 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=exp)
444 446 if doypath:
445 447 break
446 448
447 449 if not(doypath):
448 450 print "There 'isn't valied files in %s" % path
449 451 return None
450 452
451 453 self.year = year
452 454 self.doy = doy
453 455 self.set = set - 1
454 456 self.path = path
455 457
456 458 else:
457 459 print "Searching files in offline mode ..."
458 460 pathList, filenameList = self.__searchFilesOffLine(path, startDate, endDate, startTime, endTime, set, expLabel, ext)
459 461
460 462 if not(pathList):
461 463 print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
462 464 datetime.datetime.combine(startDate,startTime).ctime(),
463 465 datetime.datetime.combine(endDate,endTime).ctime())
464 466
465 467 sys.exit(-1)
466 468
467 469
468 470 self.fileIndex = -1
469 471 self.pathList = pathList
470 472 self.filenameList = filenameList
471 473
472 474 self.online = online
473 475 self.delay = delay
474 476 ext = ext.lower()
475 477 self.ext = ext
476 478
477 479 if not(self.setNextFile()):
478 480 if (startDate!=None) and (endDate!=None):
479 481 print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
480 482 elif startDate != None:
481 483 print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
482 484 else:
483 485 print "No files"
484 486
485 487 sys.exit(-1)
486 488
487 489 # self.updateDataHeader()
488 490
489 491 return self.dataOutObj
490 492
    def __setNextFileOffline(self):
        """
        Advance to the next readable file of self.filenameList and open it.

        Affected:
            self.flagIsNewFile, self.fileIndex, self.filename,
            self.fileSize, self.fp, self.flagNoMoreFiles

        Return:
            1 when a file was opened, 0 when the list is exhausted.
        """
        idFile = self.fileIndex

        while (True):
            idFile += 1
            if not(idFile < len(self.filenameList)):
                self.flagNoMoreFiles = 1
                print "No more Files"
                return 0

            filename = self.filenameList[idFile]

            # Skip files without a full data block.
            if not(self.__verifyFile(filename)):
                continue

            fileSize = os.path.getsize(filename)
            fp = open(filename,'rb')
            break

        self.flagIsNewFile = 1
        self.fileIndex = idFile
        self.filename = filename
        self.fileSize = fileSize
        self.fp = fp

        print "Setting the file: %s"%self.filename

        return 1
520 522
    def __setNextFileOnline(self):
        """
        Look inside the current folder for the next file with enough data to
        be read; when no valid file is found, wait a fixed delay and retry
        over the next possible files.

        Affected:
            self.flagIsNewFile
            self.filename
            self.fileSize
            self.fp
            self.set
            self.flagNoMoreFiles

        Return:
            0 : no valid file could be found after the search
            1 : the file was opened successfully and is ready to be read

        Exceptions:
            If a given file cannot be opened
        """
        nFiles = 0
        fileOk_flag = False
        firstTime_flag = True

        self.set += 1

        # Look for the first available file.
        file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
        if file:
            if self.__verifyFile(file, False):
                fileOk_flag = True

        # If no file was found, wait and search again.
        if not(fileOk_flag):
            for nFiles in range(self.nFiles+1): # check the next self.nFiles+1 candidate files

                if firstTime_flag: # on the first pass retry self.nTries times
                    tries = self.nTries
                else:
                    tries = 1 # afterwards only try once per candidate

                for nTries in range( tries ):
                    if firstTime_flag:
                        print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
                        time.sleep( self.delay )
                    else:
                        print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)

                    file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
                    if file:
                        if self.__verifyFile(file):
                            fileOk_flag = True
                            break

                if fileOk_flag:
                    break

                firstTime_flag = False

                print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
                self.set += 1

                if nFiles == (self.nFiles-1): # candidates exhausted: move to the next day folder
                    self.set = 0
                    self.doy += 1

        if fileOk_flag:
            self.fileSize = os.path.getsize( file )
            self.filename = file
            self.flagIsNewFile = 1
            if self.fp != None: self.fp.close()
            # NOTE(review): opened in text mode here while the offline path
            # uses 'rb' -- confirm intended (harmless on POSIX/Python 2,
            # wrong on Windows).
            self.fp = open(file)
            self.flagNoMoreFiles = 0
            print 'Setting the file: %s' % file
        else:
            self.fileSize = 0
            self.filename = None
            self.flagIsNewFile = 0
            self.fp = None
            self.flagNoMoreFiles = 1
            print 'No more Files'

        return fileOk_flag
605 607
606 608
607 609 def setNextFile(self):
608 610 if self.fp != None:
609 611 self.fp.close()
610 612
611 613 if self.online:
612 614 newFile = self.__setNextFileOnline()
613 615 else:
614 616 newFile = self.__setNextFileOffline()
615 617
616 618 if not(newFile):
617 619 return 0
618 620
619 621 self.__readFirstHeader()
620 622 self.nReadBlocks = 0
621 623 return 1
622 624
    def __setNewBlock(self):
        """
        Position the file cursor at the next data block, switching to the
        next file when the current one has no complete block left.

        Return:
            1 when a block is available, 0 otherwise.
        """
        if self.fp == None:
            return 0

        # A freshly opened file is already positioned at its first block.
        if self.flagIsNewFile:
            return 1

        self.lastUTTime = self.basicHeaderObj.utc
        currentSize = self.fileSize - self.fp.tell()
        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        # Enough bytes left in this file: just read the next basic header.
        if (currentSize >= neededSize):
            self.__rdBasicHeader()
            return 1

        if not(self.setNextFile()):
            return 0

        # Flag a discontinuity when the gap between files is too large.
        deltaTime = self.basicHeaderObj.utc - self.lastUTTime

        self.flagTimeBlock = 0

        if deltaTime > self.maxTimeStep:
            self.flagTimeBlock = 1

        return 1
649 651
650 652
651 653 def readNextBlock(self):
652 654 if not(self.__setNewBlock()):
653 655 return 0
654 656
655 657 if not(self.readBlock()):
656 658 return 0
657 659
658 660 return 1
659 661
    def __rdProcessingHeader(self, fp=None):
        # Read the processing header from *fp* (defaults to the current file).
        if fp == None:
            fp = self.fp

        self.processingHeaderObj.read(fp)

    def __rdRadarControllerHeader(self, fp=None):
        # Read the radar-controller header from *fp*.
        if fp == None:
            fp = self.fp

        self.radarControllerHeaderObj.read(fp)

    def __rdSystemHeader(self, fp=None):
        # Read the system header from *fp*.
        if fp == None:
            fp = self.fp

        self.systemHeaderObj.read(fp)

    def __rdBasicHeader(self, fp=None):
        # Read the basic header from *fp*.
        if fp == None:
            fp = self.fp

        self.basicHeaderObj.read(fp)
683 685
684 686
    def __readFirstHeader(self):
        """
        Read the four headers at the start of the file and derive the sample
        dtype, ippSeconds, the expected file size and the block dimensions.
        """
        self.__rdBasicHeader()
        self.__rdSystemHeader()
        self.__rdRadarControllerHeader()
        self.__rdProcessingHeader()

        self.firstHeaderSize = self.basicHeaderObj.size

        # Decode the sample data type code (0..5) from the processing flags.
        datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
        if datatype == 0:
            datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
        elif datatype == 1:
            datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
        elif datatype == 2:
            datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
        elif datatype == 3:
            datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
        elif datatype == 4:
            datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
        elif datatype == 5:
            datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
        else:
            raise ValueError, 'Data type was not defined'

        self.dtype = datatype_str
        # ipp presumably in km: 2*1000*ipp/c gives the round-trip time in
        # seconds -- TODO confirm units against RadarControllerHeader.
        self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
        self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
#        self.dataOutObj.channelList = numpy.arange(self.systemHeaderObj.numChannels)
#        self.dataOutObj.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
        self.getBlockDimension()
715 717
716 718
    def __verifyFile(self, filename, msgFlag=True):
        """
        Check that *filename* can be opened and holds at least one complete
        data block (headers + blockSize bytes).

        Inputs:
            filename : full path of the file to check
            msgFlag  : when True, print diagnostic messages

        Return:
            True when the file has enough data to be read, False otherwise.
        """
        msg = None
        try:
            fp = open(filename, 'rb')
            currentPosition = fp.tell()
        except:
            if msgFlag:
                print "The file %s can't be opened" % (filename)
            return False

        neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize

        # neededSize == 0 means no file has been read yet: take the sizes
        # from this file's own headers.
        if neededSize == 0:
            basicHeaderObj = BasicHeader()
            systemHeaderObj = SystemHeader()
            radarControllerHeaderObj = RadarControllerHeader()
            processingHeaderObj = ProcessingHeader()

            try:
                if not( basicHeaderObj.read(fp) ): raise ValueError
                if not( systemHeaderObj.read(fp) ): raise ValueError
                if not( radarControllerHeaderObj.read(fp) ): raise ValueError
                if not( processingHeaderObj.read(fp) ): raise ValueError
                # NOTE(review): data_type is computed but never used here.
                data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))

                neededSize = processingHeaderObj.blockSize + basicHeaderObj.size

            except:
                if msgFlag:
                    print "\tThe file %s is empty or it hasn't enough data" % filename

                fp.close()
                return False
        else:
            msg = "\tSkipping the file %s due to it hasn't enough data" %filename

        fp.close()
        fileSize = os.path.getsize(filename)
        currentSize = fileSize - currentPosition
        if currentSize < neededSize:
            if msgFlag and (msg != None):
                print msg
            return False

        return True
762 764
763 765 def getData():
764 766 pass
765 767
766 768 def hasNotDataInBuffer():
767 769 pass
768 770
769 771 def readBlock():
770 772 pass
773
774 def run(self, **kwargs):
775
776 if not(self.__isConfig):
777
778 self.dataOutObj = dataOut
779 self.setup(**kwargs)
780 self.__isConfig = True
781
782 self.putData()
771 783
class JRODataWriter(JRODataIO):

    """
    Base class for writing processed data files (.r or .pdata). Data is
    always written block by block.
    """

    blockIndex = 0  # blocks already written into the current file

    path = None  # destination directory for the output files

    setFile = None  # set number (SSS field) of the current output file

    profilesPerBlock = None

    blocksPerFile = None

    nWriteBlocks = 0

    # NOTE(review): legacy flag; run() checks the mangled __isConfig instead.
    isConfig = False
792
    def __init__(self, dataOutObj=None):
        # Abstract: concrete writers must implement their own constructor.
        raise ValueError, "Not implemented"


    def hasAllDataInBuffer(self):
        # Abstract: True when a full block is buffered and can be written.
        raise ValueError, "Not implemented"


    def setBlockDimension(self):
        # Abstract: define the shape of the data block to be written.
        raise ValueError, "Not implemented"


    def writeBlock(self):
        # Abstract: write one data block to the current file.
        raise ValueError, "No implemented"


    def putData(self):
        # Abstract: push the current data towards the output file.
        raise ValueError, "No implemented"
811 821
    def getDataHeader(self):
        """
        Get a copy of the First Header from the data object.

        Affected:

            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """

        raise ValueError, "No implemented"
828 838
829 839 def getBasicHeader(self):
830 840
831 841 self.basicHeaderObj.size = self.basicHeaderSize #bytes
832 842 self.basicHeaderObj.version = self.versionFile
833 843 self.basicHeaderObj.dataBlock = self.nTotalBlocks
834 844
835 845 utc = numpy.floor(self.dataOutObj.utctime)
836 846 milisecond = (self.dataOutObj.utctime - utc)* 1000.0
837 847
838 848 self.basicHeaderObj.utc = utc
839 849 self.basicHeaderObj.miliSecond = milisecond
840 850 self.basicHeaderObj.timeZone = 0
841 851 self.basicHeaderObj.dstFlag = 0
842 852 self.basicHeaderObj.errorCount = 0
843 853
    def __writeFirstHeader(self):
        """
        Write the first header of the file: the Basic header plus the Long
        header (SystemHeader, RadarControllerHeader, ProcessingHeader).

        Affected:
            self.basicHeaderObj.size, self.dtype

        Return:
            None
        """

        # The basic header's size field covers the whole first header.
        sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
        self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader

        self.basicHeaderObj.write(self.fp)
        self.systemHeaderObj.write(self.fp)
        self.radarControllerHeaderObj.write(self.fp)
        self.processingHeaderObj.write(self.fp)

        self.dtype = self.dataOutObj.dtype
866 876
    def __setNewBlock(self):
        """
        When a new file is started write the First Header; otherwise write
        only the Basic Header for the next block.

        Return:
            0 : nothing could be written
            1 : the Basic (or the full First) Header was written
        """
        if self.fp == None:
            self.setNextFile()

        # setNextFile() already wrote the first header of a fresh file.
        if self.flagIsNewFile:
            return 1

        if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
            self.basicHeaderObj.write(self.fp)
            return 1

        # Current file is full: roll over to the next one.
        if not( self.setNextFile() ):
            return 0

        return 1
889 899
890 900
    def writeNextBlock(self):
        """
        Select the next data block and write it to the file.

        Return:
            0 : the data block could not be written
            1 : the data block was written
        """
        if not( self.__setNewBlock() ):
            return 0

        self.writeBlock()

        return 1
905 915
    def setNextFile(self):
        """
        Determine and open the next file to be written.

        Affected:
            self.filename
            self.subfolder
            self.fp
            self.setFile
            self.flagIsNewFile

        Return:
            0 : the file cannot be written
            1 : the file is ready to be written
        """
        ext = self.ext
        path = self.path

        if self.fp != None:
            self.fp.close()

        # NOTE(review): getBasicHeader uses dataOutObj.utctime while this
        # uses dataUtcTime -- confirm both attributes exist on the data object.
        timeTuple = time.localtime( self.dataOutObj.dataUtcTime)
        subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)

        doypath = os.path.join( path, subfolder )
        if not( os.path.exists(doypath) ):
            os.mkdir(doypath)
            self.setFile = -1 # reset the set counter
        else:
            filesList = os.listdir( doypath )
            if len( filesList ) > 0:
                filesList = sorted( filesList, key=str.lower )
                filen = filesList[-1]
                # the filename should have this format:
                # 0 1234 567 89A BCDE (hex)
                # x YYYY DDD SSS .ext
                if isNumber( filen[8:11] ):
                    self.setFile = int( filen[8:11] ) # continue from the last file's set number
                else:
                    self.setFile = -1
            else:
                self.setFile = -1 # empty folder: reset the set counter

        setFile = self.setFile
        setFile += 1

        file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
                                        timeTuple.tm_year,
                                        timeTuple.tm_yday,
                                        setFile,
                                        ext )

        filename = os.path.join( path, subfolder, file )

        fp = open( filename,'wb' )

        self.blockIndex = 0

        # Keep the new state.
        self.filename = filename
        self.subfolder = subfolder
        self.fp = fp
        self.setFile = setFile
        self.flagIsNewFile = 1

        self.getDataHeader()

        print 'Writing the file: %s'%self.filename

        self.__writeFirstHeader()

        return 1
978 988
    def setup(self, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
        """
        Configure the output format and write the First Header of the first
        file.

        Inputs:
            path             : destination path for the files to be created
            blocksPerFile    : number of data blocks per output file
            profilesPerBlock : number of profiles per block
            set              : initial set number of the file
            ext              : extension of the output files (defaults to
                               self.ext)

        Return:
            0 : the setup failed
            1 : the setup succeeded
        """

        if ext == None:
            ext = self.ext

        ext = ext.lower()

        self.ext = ext

        self.path = path

        # setNextFile() increments before use, so start one below *set*.
        self.setFile = set - 1

        self.blocksPerFile = blocksPerFile

        self.profilesPerBlock = profilesPerBlock

        if not(self.setNextFile()):
            print "There isn't a next file"
            return 0

        self.setBlockDimension()

        return 1
1015 1025
1016 1026 def run(self, dataOut, **kwargs):
1017 1027
1018 if not(self.isConfig):
1028 if not(self.__isConfig):
1019 1029
1020 1030 self.dataOutObj = dataOut
1021 1031 self.setup(**kwargs)
1032 self.__isConfig = True
1022 1033
1023 1034 self.putData()
1024 1035
1025 1036 class VoltageReader(JRODataReader):
1026 1037 """
1027 1038 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1028 1039 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1029 1040 perfiles*alturas*canales) son almacenados en la variable "buffer".
1030 1041
1031 1042 perfiles * alturas * canales
1032 1043
1033 1044 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1034 1045 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1035 1046 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1036 1047 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1037 1048
1038 1049 Example:
1039 1050
1040 1051 dpath = "/home/myuser/data"
1041 1052
1042 1053 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1043 1054
1044 1055 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1045 1056
1046 1057 readerObj = VoltageReader()
1047 1058
1048 1059 readerObj.setup(dpath, startTime, endTime)
1049 1060
1050 1061 while(True):
1051 1062
1052 1063 #to get one profile
1053 1064 profile = readerObj.getData()
1054 1065
1055 1066 #print the profile
1056 1067 print profile
1057 1068
1058 1069 #If you want to see all datablock
1059 1070 print readerObj.datablock
1060 1071
1061 1072 if readerObj.flagNoMoreFiles:
1062 1073 break
1063 1074
1064 1075 """
1065 1076
1066 1077 ext = ".r"
1067 1078
1068 1079 optchar = "D"
1069 1080 dataOutObj = None
1070 1081
1071 1082
1072 1083 def __init__(self, dataOutObj=None):
1073 1084 """
1074 1085 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1075 1086
1076 1087 Input:
1077 1088 dataOutObj : Objeto de la clase Voltage. Este objeto sera utilizado para
1078 1089 almacenar un perfil de datos cada vez que se haga un requerimiento
1079 1090 (getData). El perfil sera obtenido a partir del buffer de datos,
1080 1091 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1081 1092 bloque de datos.
1082 1093 Si este parametro no es pasado se creara uno internamente.
1083 1094
1084 1095 Variables afectadas:
1085 1096 self.dataOutObj
1086 1097
1087 1098 Return:
1088 1099 None
1089 1100 """
1090 1101
1102 self.__isConfig = False
1103
1091 1104 self.datablock = None
1092 1105
1093 1106 self.utc = 0
1094 1107
1095 1108 self.ext = ".r"
1096 1109
1097 1110 self.optchar = "D"
1098 1111
1099 1112 self.basicHeaderObj = BasicHeader()
1100 1113
1101 1114 self.systemHeaderObj = SystemHeader()
1102 1115
1103 1116 self.radarControllerHeaderObj = RadarControllerHeader()
1104 1117
1105 1118 self.processingHeaderObj = ProcessingHeader()
1106 1119
1107 1120 self.online = 0
1108 1121
1109 1122 self.fp = None
1110 1123
1111 1124 self.idFile = None
1112 1125
1113 1126 self.dtype = None
1114 1127
1115 1128 self.fileSizeByHeader = None
1116 1129
1117 1130 self.filenameList = []
1118 1131
1119 1132 self.filename = None
1120 1133
1121 1134 self.fileSize = None
1122 1135
1123 1136 self.firstHeaderSize = 0
1124 1137
1125 1138 self.basicHeaderSize = 24
1126 1139
1127 1140 self.pathList = []
1128 1141
1129 1142 self.filenameList = []
1130 1143
1131 1144 self.lastUTTime = 0
1132 1145
1133 1146 self.maxTimeStep = 30
1134 1147
1135 1148 self.flagNoMoreFiles = 0
1136 1149
1137 1150 self.set = 0
1138 1151
1139 1152 self.path = None
1140 1153
1141 1154 self.profileIndex = 9999
1142 1155
1143 1156 self.delay = 3 #seconds
1144 1157
1145 1158 self.nTries = 3 #quantity tries
1146 1159
1147 1160 self.nFiles = 3 #number of files for searching
1148 1161
1149 1162 self.nReadBlocks = 0
1150 1163
1151 1164 self.flagIsNewFile = 1
1152 1165
1153 1166 self.ippSeconds = 0
1154 1167
1155 1168 self.flagTimeBlock = 0
1156 1169
1157 1170 self.flagIsNewBlock = 0
1158 1171
1159 1172 self.nTotalBlocks = 0
1160 1173
1161 1174 self.blocksize = 0
1162 1175
1163 1176 def createObjByDefault(self):
1164 1177
1165 1178 dataObj = Voltage()
1166 1179
1167 1180 return dataObj
1168 1181
1169 1182 def __hasNotDataInBuffer(self):
1170 1183 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1171 1184 return 1
1172 1185 return 0
1173 1186
1174 1187
1175 1188 def getBlockDimension(self):
1176 1189 """
1177 1190 Obtiene la cantidad de puntos a leer por cada bloque de datos
1178 1191
1179 1192 Affected:
1180 1193 self.blocksize
1181 1194
1182 1195 Return:
1183 1196 None
1184 1197 """
1185 1198 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1186 1199 self.blocksize = pts2read
1187 1200
1188 1201
1189 1202 def readBlock(self):
1190 1203 """
1191 1204 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1192 1205 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1193 1206 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1194 1207 es seteado a 0
1195 1208
1196 1209 Inputs:
1197 1210 None
1198 1211
1199 1212 Return:
1200 1213 None
1201 1214
1202 1215 Affected:
1203 1216 self.profileIndex
1204 1217 self.datablock
1205 1218 self.flagIsNewFile
1206 1219 self.flagIsNewBlock
1207 1220 self.nTotalBlocks
1208 1221
1209 1222 Exceptions:
1210 1223 Si un bloque leido no es un bloque valido
1211 1224 """
1212 1225
1213 1226 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1214 1227
1215 1228 try:
1216 1229 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1217 1230 except:
1218 1231 print "The read block (%3d) has not enough data" %self.nReadBlocks
1219 1232 return 0
1220 1233
1221 1234 junk = numpy.transpose(junk, (2,0,1))
1222 1235 self.datablock = junk['real'] + junk['imag']*1j
1223 1236
1224 1237 self.profileIndex = 0
1225 1238
1226 1239 self.flagIsNewFile = 0
1227 1240 self.flagIsNewBlock = 1
1228 1241
1229 1242 self.nTotalBlocks += 1
1230 1243 self.nReadBlocks += 1
1231 1244
1232 1245 return 1
1233 1246
1234 1247
1235 1248 def getData(self):
1236 1249 """
1237 1250 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1238 1251 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1239 1252 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1240 1253
1241 1254 Ademas incrementa el contador del buffer en 1.
1242 1255
1243 1256 Return:
1244 1257 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1245 1258 buffer. Si no hay mas archivos a leer retorna None.
1246 1259
1247 1260 Variables afectadas:
1248 1261 self.dataOutObj
1249 1262 self.profileIndex
1250 1263
1251 1264 Affected:
1252 1265 self.dataOutObj
1253 1266 self.profileIndex
1254 1267 self.flagTimeBlock
1255 1268 self.flagIsNewBlock
1256 1269 """
1257 1270 if self.flagNoMoreFiles: return 0
1258 1271
1259 1272 self.flagTimeBlock = 0
1260 1273 self.flagIsNewBlock = 0
1261 1274
1262 1275 if self.__hasNotDataInBuffer():
1263 1276
1264 1277 if not( self.readNextBlock() ):
1265 1278 return 0
1266 1279
1267 1280 # self.updateDataHeader()
1268 1281
1269 1282 if self.flagNoMoreFiles == 1:
1270 1283 print 'Process finished'
1271 1284 return 0
1272 1285
1273 1286 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1274 1287
1275 1288 if self.datablock == None:
1276 1289 self.dataOutObj.flagNoData = True
1277 1290 return 0
1278 1291
1279 1292 self.dataOutObj.data = self.datablock[:,self.profileIndex,:]
1280 1293
1281 1294 self.dataOutObj.dtype = self.dtype
1282 1295
1283 1296 self.dataOutObj.nChannels = self.systemHeaderObj.nChannels
1284 1297
1285 1298 self.dataOutObj.nHeights = self.processingHeaderObj.nHeights
1286 1299
1287 1300 self.dataOutObj.nProfiles = self.processingHeaderObj.profilesPerBlock
1288 1301
1289 1302 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1290 1303
1291 1304 self.dataOutObj.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1292 1305
1293 1306 self.dataOutObj.channelList = range(self.systemHeaderObj.nChannels)
1294 1307
1295 1308 self.dataOutObj.channelIndexList = range(self.systemHeaderObj.nChannels)
1296 1309
1297 1310 self.dataOutObj.flagTimeBlock = self.flagTimeBlock
1298 1311
1299 1312 self.dataOutObj.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1300 1313
1301 1314 self.dataOutObj.ippSeconds = self.ippSeconds
1302 1315
1303 1316 self.dataOutObj.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1304 1317
1305 1318 self.dataOutObj.nCohInt = self.processingHeaderObj.nCohInt
1306 1319
1307 1320 self.dataOutObj.flagShiftFFT = False
1308 1321
1309 1322 if self.processingHeaderObj.code != None:
1310 1323 self.dataOutObj.nCode = self.processingHeaderObj.nCode
1311 1324
1312 1325 self.dataOutObj.nBaud = self.processingHeaderObj.nBaud
1313 1326
1314 1327 self.dataOutObj.code = self.processingHeaderObj.code
1315 1328
1316 1329 self.profileIndex += 1
1317 1330
1318 1331 self.dataOutObj.systemHeaderObj = self.systemHeaderObj.copy()
1319 1332
1320 1333 self.dataOutObj.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1321 1334
1322 1335 self.dataOutObj.flagNoData = False
1323 1336
1324 1337 # print self.profileIndex, self.dataOutObj.utctime
1325 1338 # if self.profileIndex == 800:
1326 1339 # a=1
1327 1340
1328 1341 return self.dataOutObj.data
1329 1342
1330 1343
1331 1344 class VoltageWriter(JRODataWriter):
1332 1345 """
1333 1346 Esta clase permite escribir datos de voltajes a archivos procesados (.r). La escritura
1334 1347 de los datos siempre se realiza por bloques.
1335 1348 """
1336 1349
1337 1350 ext = ".r"
1338 1351
1339 1352 optchar = "D"
1340 1353
1341 1354 shapeBuffer = None
1342 1355
1343 1356
1344 1357 def __init__(self, dataOutObj=None):
1345 1358 """
1346 1359 Inicializador de la clase VoltageWriter para la escritura de datos de espectros.
1347 1360
1348 1361 Affected:
1349 1362 self.dataOutObj
1350 1363
1351 1364 Return: None
1352 1365 """
1353 1366 if dataOutObj == None:
1354 1367 dataOutObj = Voltage()
1355 1368
1356 1369 if not( isinstance(dataOutObj, Voltage) ):
1357 1370 raise ValueError, "in VoltageReader, dataOutObj must be an Spectra class object"
1358 1371
1359 1372 self.dataOutObj = dataOutObj
1360 1373
1361 1374 self.nTotalBlocks = 0
1362 1375
1363 1376 self.profileIndex = 0
1364 1377
1365 self.isConfig = False
1378 self.__isConfig = False
1366 1379
1367 1380 self.fp = None
1368 1381
1369 1382 self.flagIsNewFile = 1
1370 1383
1371 1384 self.nTotalBlocks = 0
1372 1385
1373 1386 self.flagIsNewBlock = 0
1374 1387
1375 1388 self.flagNoMoreFiles = 0
1376 1389
1377 1390 self.setFile = None
1378 1391
1379 1392 self.dtype = None
1380 1393
1381 1394 self.path = None
1382 1395
1383 1396 self.noMoreFiles = 0
1384 1397
1385 1398 self.filename = None
1386 1399
1387 1400 self.basicHeaderObj = BasicHeader()
1388 1401
1389 1402 self.systemHeaderObj = SystemHeader()
1390 1403
1391 1404 self.radarControllerHeaderObj = RadarControllerHeader()
1392 1405
1393 1406 self.processingHeaderObj = ProcessingHeader()
1394 1407
1395 1408 def hasAllDataInBuffer(self):
1396 1409 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1397 1410 return 1
1398 1411 return 0
1399 1412
1400 1413
1401 1414 def setBlockDimension(self):
1402 1415 """
1403 1416 Obtiene las formas dimensionales del los subbloques de datos que componen un bloque
1404 1417
1405 1418 Affected:
1406 1419 self.shape_spc_Buffer
1407 1420 self.shape_cspc_Buffer
1408 1421 self.shape_dc_Buffer
1409 1422
1410 1423 Return: None
1411 1424 """
1412 1425 self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
1413 1426 self.processingHeaderObj.nHeights,
1414 1427 self.systemHeaderObj.nChannels)
1415 1428
1416 1429 self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
1417 1430 self.processingHeaderObj.profilesPerBlock,
1418 1431 self.processingHeaderObj.nHeights),
1419 1432 dtype=numpy.dtype('complex'))
1420 1433
1421 1434
1422 1435 def writeBlock(self):
1423 1436 """
1424 1437 Escribe el buffer en el file designado
1425 1438
1426 1439 Affected:
1427 1440 self.profileIndex
1428 1441 self.flagIsNewFile
1429 1442 self.flagIsNewBlock
1430 1443 self.nTotalBlocks
1431 1444 self.blockIndex
1432 1445
1433 1446 Return: None
1434 1447 """
1435 1448 data = numpy.zeros( self.shapeBuffer, self.dtype )
1436 1449
1437 1450 junk = numpy.transpose(self.datablock, (1,2,0))
1438 1451
1439 1452 data['real'] = junk.real
1440 1453 data['imag'] = junk.imag
1441 1454
1442 1455 data = data.reshape( (-1) )
1443 1456
1444 1457 data.tofile( self.fp )
1445 1458
1446 1459 self.datablock.fill(0)
1447 1460
1448 1461 self.profileIndex = 0
1449 1462 self.flagIsNewFile = 0
1450 1463 self.flagIsNewBlock = 1
1451 1464
1452 1465 self.blockIndex += 1
1453 1466 self.nTotalBlocks += 1
1454 1467
1455 1468 def putData(self):
1456 1469 """
1457 1470 Setea un bloque de datos y luego los escribe en un file
1458 1471
1459 1472 Affected:
1460 1473 self.flagIsNewBlock
1461 1474 self.profileIndex
1462 1475
1463 1476 Return:
1464 1477 0 : Si no hay data o no hay mas files que puedan escribirse
1465 1478 1 : Si se escribio la data de un bloque en un file
1466 1479 """
1467 1480 if self.dataOutObj.flagNoData:
1468 1481 return 0
1469 1482
1470 1483 self.flagIsNewBlock = 0
1471 1484
1472 1485 if self.dataOutObj.flagTimeBlock:
1473 1486
1474 1487 self.datablock.fill(0)
1475 1488 self.profileIndex = 0
1476 1489 self.setNextFile()
1477 1490
1478 1491 if self.profileIndex == 0:
1479 1492 self.getBasicHeader()
1480 1493
1481 1494 self.datablock[:,self.profileIndex,:] = self.dataOutObj.data
1482 1495
1483 1496 self.profileIndex += 1
1484 1497
1485 1498 if self.hasAllDataInBuffer():
1486 1499 #if self.flagIsNewFile:
1487 1500 self.writeNextBlock()
1488 1501 # self.getDataHeader()
1489 1502
1490 1503 if self.flagNoMoreFiles:
1491 1504 #print 'Process finished'
1492 1505 return 0
1493 1506
1494 1507 return 1
1495 1508
1496 1509 def __getProcessFlags(self):
1497 1510
1498 1511 processFlags = 0
1499 1512
1500 1513 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1501 1514 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1502 1515 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1503 1516 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1504 1517 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1505 1518 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1506 1519
1507 1520 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1508 1521
1509 1522
1510 1523
1511 1524 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
1512 1525 PROCFLAG.DATATYPE_SHORT,
1513 1526 PROCFLAG.DATATYPE_LONG,
1514 1527 PROCFLAG.DATATYPE_INT64,
1515 1528 PROCFLAG.DATATYPE_FLOAT,
1516 1529 PROCFLAG.DATATYPE_DOUBLE]
1517 1530
1518 1531
1519 1532 for index in range(len(dtypeList)):
1520 1533 if self.dataOutObj.dtype == dtypeList[index]:
1521 1534 dtypeValue = datatypeValueList[index]
1522 1535 break
1523 1536
1524 1537 processFlags += dtypeValue
1525 1538
1526 1539 if self.dataOutObj.flagDecodeData:
1527 1540 processFlags += PROCFLAG.DECODE_DATA
1528 1541
1529 1542 if self.dataOutObj.flagDeflipData:
1530 1543 processFlags += PROCFLAG.DEFLIP_DATA
1531 1544
1532 1545 if self.dataOutObj.code != None:
1533 1546 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
1534 1547
1535 1548 if self.dataOutObj.nCohInt > 1:
1536 1549 processFlags += PROCFLAG.COHERENT_INTEGRATION
1537 1550
1538 1551 return processFlags
1539 1552
1540 1553
1541 1554 def __getBlockSize(self):
1542 1555 '''
1543 1556 Este metodos determina el cantidad de bytes para un bloque de datos de tipo Voltage
1544 1557 '''
1545 1558
1546 1559 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1547 1560 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1548 1561 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1549 1562 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1550 1563 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1551 1564 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1552 1565
1553 1566 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1554 1567 datatypeValueList = [1,2,4,8,4,8]
1555 1568 for index in range(len(dtypeList)):
1556 1569 if self.dataOutObj.dtype == dtypeList[index]:
1557 1570 datatypeValue = datatypeValueList[index]
1558 1571 break
1559 1572
1560 1573 blocksize = int(self.dataOutObj.nHeights * self.dataOutObj.nChannels * self.dataOutObj.nProfiles * datatypeValue * 2)
1561 1574
1562 1575 return blocksize
1563 1576
1564 1577 def getDataHeader(self):
1565 1578
1566 1579 """
1567 1580 Obtiene una copia del First Header
1568 1581
1569 1582 Affected:
1570 1583 self.systemHeaderObj
1571 1584 self.radarControllerHeaderObj
1572 1585 self.dtype
1573 1586
1574 1587 Return:
1575 1588 None
1576 1589 """
1577 1590
1578 1591 self.systemHeaderObj = self.dataOutObj.systemHeaderObj.copy()
1579 1592 self.systemHeaderObj.nChannels = self.dataOutObj.nChannels
1580 1593 self.radarControllerHeaderObj = self.dataOutObj.radarControllerHeaderObj.copy()
1581 1594
1582 1595 self.getBasicHeader()
1583 1596
1584 1597 processingHeaderSize = 40 # bytes
1585 1598 self.processingHeaderObj.dtype = 0 # Voltage
1586 1599 self.processingHeaderObj.blockSize = self.__getBlockSize()
1587 1600 self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
1588 1601 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
1589 1602 self.processingHeaderObj.nWindows = 1 #podria ser 1 o self.dataOutObj.processingHeaderObj.nWindows
1590 1603 self.processingHeaderObj.processFlags = self.__getProcessFlags()
1591 1604 self.processingHeaderObj.nCohInt = self.dataOutObj.nCohInt
1592 1605 self.processingHeaderObj.nIncohInt = 1 # Cuando la data de origen es de tipo Voltage
1593 1606 self.processingHeaderObj.totalSpectra = 0 # Cuando la data de origen es de tipo Voltage
1594 1607
1595 1608 if self.dataOutObj.code != None:
1596 1609 self.processingHeaderObj.code = self.dataOutObj.code
1597 1610 self.processingHeaderObj.nCode = self.dataOutObj.nCode
1598 1611 self.processingHeaderObj.nBaud = self.dataOutObj.nBaud
1599 1612 codesize = int(8 + 4 * self.dataOutObj.nCode * self.dataOutObj.nBaud)
1600 1613 processingHeaderSize += codesize
1601 1614
1602 1615 if self.processingHeaderObj.nWindows != 0:
1603 1616 self.processingHeaderObj.firstHeight = self.dataOutObj.heightList[0]
1604 1617 self.processingHeaderObj.deltaHeight = self.dataOutObj.heightList[1] - self.dataOutObj.heightList[0]
1605 1618 self.processingHeaderObj.nHeights = self.dataOutObj.nHeights
1606 1619 self.processingHeaderObj.samplesWin = self.dataOutObj.nHeights
1607 1620 processingHeaderSize += 12
1608 1621
1609 1622 self.processingHeaderObj.size = processingHeaderSize
1610 1623
1611 1624 class SpectraReader(JRODataReader):
1612 1625 """
1613 1626 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1614 1627 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1615 1628 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1616 1629
1617 1630 paresCanalesIguales * alturas * perfiles (Self Spectra)
1618 1631 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1619 1632 canales * alturas (DC Channels)
1620 1633
1621 1634 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1622 1635 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1623 1636 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1624 1637 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1625 1638
1626 1639 Example:
1627 1640 dpath = "/home/myuser/data"
1628 1641
1629 1642 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1630 1643
1631 1644 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1632 1645
1633 1646 readerObj = SpectraReader()
1634 1647
1635 1648 readerObj.setup(dpath, startTime, endTime)
1636 1649
1637 1650 while(True):
1638 1651
1639 1652 readerObj.getData()
1640 1653
1641 1654 print readerObj.data_spc
1642 1655
1643 1656 print readerObj.data_cspc
1644 1657
1645 1658 print readerObj.data_dc
1646 1659
1647 1660 if readerObj.flagNoMoreFiles:
1648 1661 break
1649 1662
1650 1663 """
1651 1664
1652 1665 pts2read_SelfSpectra = 0
1653 1666
1654 1667 pts2read_CrossSpectra = 0
1655 1668
1656 1669 pts2read_DCchannels = 0
1657 1670
1658 1671 ext = ".pdata"
1659 1672
1660 1673 optchar = "P"
1661 1674
1662 1675 dataOutObj = None
1663 1676
1664 1677 nRdChannels = None
1665 1678
1666 1679 nRdPairs = None
1667 1680
1668 1681 rdPairList = []
1669 1682
1670 1683
1671 1684 def __init__(self, dataOutObj=None):
1672 1685 """
1673 1686 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1674 1687
1675 1688 Inputs:
1676 1689 dataOutObj : Objeto de la clase Spectra. Este objeto sera utilizado para
1677 1690 almacenar un perfil de datos cada vez que se haga un requerimiento
1678 1691 (getData). El perfil sera obtenido a partir del buffer de datos,
1679 1692 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1680 1693 bloque de datos.
1681 1694 Si este parametro no es pasado se creara uno internamente.
1682 1695
1683 1696 Affected:
1684 1697 self.dataOutObj
1685 1698
1686 1699 Return : None
1687 1700 """
1688
1701
1702 self.__isConfig = False
1703
1689 1704 self.pts2read_SelfSpectra = 0
1690 1705
1691 1706 self.pts2read_CrossSpectra = 0
1692 1707
1693 1708 self.pts2read_DCchannels = 0
1694 1709
1695 1710 self.datablock = None
1696 1711
1697 1712 self.utc = None
1698 1713
1699 1714 self.ext = ".pdata"
1700 1715
1701 1716 self.optchar = "P"
1702 1717
1703 1718 self.basicHeaderObj = BasicHeader()
1704 1719
1705 1720 self.systemHeaderObj = SystemHeader()
1706 1721
1707 1722 self.radarControllerHeaderObj = RadarControllerHeader()
1708 1723
1709 1724 self.processingHeaderObj = ProcessingHeader()
1710 1725
1711 1726 self.online = 0
1712 1727
1713 1728 self.fp = None
1714 1729
1715 1730 self.idFile = None
1716 1731
1717 1732 self.dtype = None
1718 1733
1719 1734 self.fileSizeByHeader = None
1720 1735
1721 1736 self.filenameList = []
1722 1737
1723 1738 self.filename = None
1724 1739
1725 1740 self.fileSize = None
1726 1741
1727 1742 self.firstHeaderSize = 0
1728 1743
1729 1744 self.basicHeaderSize = 24
1730 1745
1731 1746 self.pathList = []
1732 1747
1733 1748 self.lastUTTime = 0
1734 1749
1735 1750 self.maxTimeStep = 30
1736 1751
1737 1752 self.flagNoMoreFiles = 0
1738 1753
1739 1754 self.set = 0
1740 1755
1741 1756 self.path = None
1742 1757
1743 1758 self.delay = 3 #seconds
1744 1759
1745 1760 self.nTries = 3 #quantity tries
1746 1761
1747 1762 self.nFiles = 3 #number of files for searching
1748 1763
1749 1764 self.nReadBlocks = 0
1750 1765
1751 1766 self.flagIsNewFile = 1
1752 1767
1753 1768 self.ippSeconds = 0
1754 1769
1755 1770 self.flagTimeBlock = 0
1756 1771
1757 1772 self.flagIsNewBlock = 0
1758 1773
1759 1774 self.nTotalBlocks = 0
1760 1775
1761 1776 self.blocksize = 0
1762 1777
1763 1778
1764 1779 def createObjByDefault(self):
1765 1780
1766 1781 dataObj = Spectra()
1767 1782
1768 1783 return dataObj
1769 1784
1770 1785 def __hasNotDataInBuffer(self):
1771 1786 return 1
1772 1787
1773 1788
1774 1789 def getBlockDimension(self):
1775 1790 """
1776 1791 Obtiene la cantidad de puntos a leer por cada bloque de datos
1777 1792
1778 1793 Affected:
1779 1794 self.nRdChannels
1780 1795 self.nRdPairs
1781 1796 self.pts2read_SelfSpectra
1782 1797 self.pts2read_CrossSpectra
1783 1798 self.pts2read_DCchannels
1784 1799 self.blocksize
1785 1800 self.dataOutObj.nChannels
1786 1801 self.dataOutObj.nPairs
1787 1802
1788 1803 Return:
1789 1804 None
1790 1805 """
1791 1806 self.nRdChannels = 0
1792 1807 self.nRdPairs = 0
1793 1808 self.rdPairList = []
1794 1809
1795 1810 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1796 1811 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1797 1812 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1798 1813 else:
1799 1814 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1800 1815 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1801 1816
1802 1817 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1803 1818
1804 1819 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1805 1820 self.blocksize = self.pts2read_SelfSpectra
1806 1821
1807 1822 if self.processingHeaderObj.flag_cspc:
1808 1823 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1809 1824 self.blocksize += self.pts2read_CrossSpectra
1810 1825
1811 1826 if self.processingHeaderObj.flag_dc:
1812 1827 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1813 1828 self.blocksize += self.pts2read_DCchannels
1814 1829
1815 1830 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1816 1831
1817 1832
1818 1833 def readBlock(self):
1819 1834 """
1820 1835 Lee el bloque de datos desde la posicion actual del puntero del archivo
1821 1836 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1822 1837 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1823 1838 es seteado a 0
1824 1839
1825 1840 Return: None
1826 1841
1827 1842 Variables afectadas:
1828 1843
1829 1844 self.flagIsNewFile
1830 1845 self.flagIsNewBlock
1831 1846 self.nTotalBlocks
1832 1847 self.data_spc
1833 1848 self.data_cspc
1834 1849 self.data_dc
1835 1850
1836 1851 Exceptions:
1837 1852 Si un bloque leido no es un bloque valido
1838 1853 """
1839 1854 blockOk_flag = False
1840 1855 fpointer = self.fp.tell()
1841 1856
1842 1857 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1843 1858 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1844 1859
1845 1860 if self.processingHeaderObj.flag_cspc:
1846 1861 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1847 1862 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1848 1863
1849 1864 if self.processingHeaderObj.flag_dc:
1850 1865 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1851 1866 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1852 1867
1853 1868
1854 1869 if not(self.processingHeaderObj.shif_fft):
1855 1870 spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1856 1871
1857 1872 if self.processingHeaderObj.flag_cspc:
1858 1873 cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1859 1874
1860 1875
1861 1876 spc = numpy.transpose( spc, (0,2,1) )
1862 1877 self.data_spc = spc
1863 1878
1864 1879 if self.processingHeaderObj.flag_cspc:
1865 1880 cspc = numpy.transpose( cspc, (0,2,1) )
1866 1881 self.data_cspc = cspc['real'] + cspc['imag']*1j
1867 1882 else:
1868 1883 self.data_cspc = None
1869 1884
1870 1885 if self.processingHeaderObj.flag_dc:
1871 1886 self.data_dc = dc['real'] + dc['imag']*1j
1872 1887 else:
1873 1888 self.data_dc = None
1874 1889
1875 1890 self.flagIsNewFile = 0
1876 1891 self.flagIsNewBlock = 1
1877 1892
1878 1893 self.nTotalBlocks += 1
1879 1894 self.nReadBlocks += 1
1880 1895
1881 1896 return 1
1882 1897
1883 1898
1884 1899 def getData(self):
1885 1900 """
1886 1901 Copia el buffer de lectura a la clase "Spectra",
1887 1902 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1888 1903 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1889 1904
1890 1905 Return:
1891 1906 0 : Si no hay mas archivos disponibles
1892 1907 1 : Si hizo una buena copia del buffer
1893 1908
1894 1909 Affected:
1895 1910 self.dataOutObj
1896 1911
1897 1912 self.flagTimeBlock
1898 1913 self.flagIsNewBlock
1899 1914 """
1900 1915
1901 1916 if self.flagNoMoreFiles: return 0
1902 1917
1903 1918 self.flagTimeBlock = 0
1904 1919 self.flagIsNewBlock = 0
1905 1920
1906 1921 if self.__hasNotDataInBuffer():
1907 1922
1908 1923 if not( self.readNextBlock() ):
1909 1924 return 0
1910 1925
1911 1926 # self.updateDataHeader()
1912 1927
1913 1928 if self.flagNoMoreFiles == 1:
1914 1929 print 'Process finished'
1915 1930 return 0
1916 1931
1917 1932 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1918 1933
1919 1934 if self.data_dc == None:
1920 1935 self.dataOutObj.flagNoData = True
1921 1936 return 0
1922 1937
1923 1938
1924 1939 self.dataOutObj.data_spc = self.data_spc
1925 1940
1926 1941 self.dataOutObj.data_cspc = self.data_cspc
1927 1942
1928 1943 self.dataOutObj.data_dc = self.data_dc
1929 1944
1930 1945 self.dataOutObj.flagTimeBlock = self.flagTimeBlock
1931 1946
1932 1947 self.dataOutObj.flagNoData = False
1933 1948
1934 1949 self.dataOutObj.dtype = self.dtype
1935 1950
1936 1951 self.dataOutObj.nChannels = self.nRdChannels
1937 1952
1938 1953 self.dataOutObj.nPairs = self.nRdPairs
1939 1954
1940 1955 self.dataOutObj.pairsList = self.rdPairList
1941 1956
1942 1957 self.dataOutObj.nHeights = self.processingHeaderObj.nHeights
1943 1958
1944 1959 self.dataOutObj.nProfiles = self.processingHeaderObj.profilesPerBlock
1945 1960
1946 1961 self.dataOutObj.nFFTPoints = self.processingHeaderObj.profilesPerBlock
1947 1962
1948 1963 self.dataOutObj.nIncohInt = self.processingHeaderObj.nIncohInt
1949 1964
1950 1965
1951 1966 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1952 1967
1953 1968 self.dataOutObj.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1954 1969
1955 1970 self.dataOutObj.channelList = range(self.systemHeaderObj.nChannels)
1956 1971
1957 1972 self.dataOutObj.channelIndexList = range(self.systemHeaderObj.nChannels)
1958 1973
1959 1974 self.dataOutObj.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
1960 1975
1961 1976 self.dataOutObj.ippSeconds = self.ippSeconds
1962 1977
1963 1978 self.dataOutObj.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOutObj.nFFTPoints
1964 1979
1965 1980 self.dataOutObj.flagShiftFFT = self.processingHeaderObj.shif_fft
1966 1981
1967 1982 # self.profileIndex += 1
1968 1983
1969 1984 self.dataOutObj.systemHeaderObj = self.systemHeaderObj.copy()
1970 1985
1971 1986 self.dataOutObj.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1972 1987
1973 1988 return self.dataOutObj.data_spc
1974 1989
1975 1990
class SpectraWriter(JRODataWriter):

    """
    Writes spectra data to processed (.pdata) files. Data is always written
    block by block.
    """

    ext = ".pdata"

    optchar = "P"

    # buffer shapes, filled by setBlockDimension()
    shape_spc_Buffer = None

    shape_cspc_Buffer = None

    shape_dc_Buffer = None

    # current block buffers (self spectra, cross spectra, DC channels)
    data_spc = None

    data_cspc = None

    data_dc = None

#    dataOutObj = None
2000 2015
2001 2016 def __init__(self, dataOutObj=None):
2002 2017 """
2003 2018 Inicializador de la clase SpectraWriter para la escritura de datos de espectros.
2004 2019
2005 2020 Affected:
2006 2021 self.dataOutObj
2007 2022 self.basicHeaderObj
2008 2023 self.systemHeaderObj
2009 2024 self.radarControllerHeaderObj
2010 2025 self.processingHeaderObj
2011 2026
2012 2027 Return: None
2013 2028 """
2014 2029 if dataOutObj == None:
2015 2030 dataOutObj = Spectra()
2016 2031
2017 2032 if not( isinstance(dataOutObj, Spectra) ):
2018 2033 raise ValueError, "in SpectraReader, dataOutObj must be an Spectra class object"
2019 2034
2020 2035 self.dataOutObj = dataOutObj
2021 2036
2037 self.__isConfig = False
2038
2022 2039 self.nTotalBlocks = 0
2023 2040
2024 2041 self.data_spc = None
2025 2042
2026 2043 self.data_cspc = None
2027 2044
2028 2045 self.data_dc = None
2029 2046
2030 2047 self.fp = None
2031 2048
2032 2049 self.flagIsNewFile = 1
2033 2050
2034 2051 self.nTotalBlocks = 0
2035 2052
2036 2053 self.flagIsNewBlock = 0
2037 2054
2038 2055 self.flagNoMoreFiles = 0
2039 2056
2040 2057 self.setFile = None
2041 2058
2042 2059 self.dtype = None
2043 2060
2044 2061 self.path = None
2045 2062
2046 2063 self.noMoreFiles = 0
2047 2064
2048 2065 self.filename = None
2049 2066
2050 2067 self.basicHeaderObj = BasicHeader()
2051 2068
2052 2069 self.systemHeaderObj = SystemHeader()
2053 2070
2054 2071 self.radarControllerHeaderObj = RadarControllerHeader()
2055 2072
2056 2073 self.processingHeaderObj = ProcessingHeader()
2057 2074
2058 2075
2059 2076 def hasAllDataInBuffer(self):
2060 2077 return 1
2061 2078
2062 2079
2063 2080 def setBlockDimension(self):
2064 2081 """
2065 2082 Obtiene las formas dimensionales del los subbloques de datos que componen un bloque
2066 2083
2067 2084 Affected:
2068 2085 self.shape_spc_Buffer
2069 2086 self.shape_cspc_Buffer
2070 2087 self.shape_dc_Buffer
2071 2088
2072 2089 Return: None
2073 2090 """
2074 2091 self.shape_spc_Buffer = (self.dataOutObj.nChannels,
2075 2092 self.processingHeaderObj.nHeights,
2076 2093 self.processingHeaderObj.profilesPerBlock)
2077 2094
2078 2095 self.shape_cspc_Buffer = (self.dataOutObj.nPairs,
2079 2096 self.processingHeaderObj.nHeights,
2080 2097 self.processingHeaderObj.profilesPerBlock)
2081 2098
2082 2099 self.shape_dc_Buffer = (self.dataOutObj.nChannels,
2083 2100 self.processingHeaderObj.nHeights)
2084 2101
2085 2102
2086 2103 def writeBlock(self):
2087 2104 """
2088 2105 Escribe el buffer en el file designado
2089 2106
2090 2107 Affected:
2091 2108 self.data_spc
2092 2109 self.data_cspc
2093 2110 self.data_dc
2094 2111 self.flagIsNewFile
2095 2112 self.flagIsNewBlock
2096 2113 self.nTotalBlocks
2097 2114 self.nWriteBlocks
2098 2115
2099 2116 Return: None
2100 2117 """
2101 2118
2102 2119 spc = numpy.transpose( self.data_spc, (0,2,1) )
2103 2120 if not( self.processingHeaderObj.shif_fft ):
2104 2121 spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
2105 2122 data = spc.reshape((-1))
2106 2123 data.tofile(self.fp)
2107 2124
2108 2125 if self.data_cspc != None:
2109 2126 data = numpy.zeros( self.shape_cspc_Buffer, self.dtype )
2110 2127 cspc = numpy.transpose( self.data_cspc, (0,2,1) )
2111 2128 if not( self.processingHeaderObj.shif_fft ):
2112 2129 cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
2113 2130 data['real'] = cspc.real
2114 2131 data['imag'] = cspc.imag
2115 2132 data = data.reshape((-1))
2116 2133 data.tofile(self.fp)
2117 2134
2118 2135 if self.data_dc != None:
2119 2136 data = numpy.zeros( self.shape_dc_Buffer, self.dtype )
2120 2137 dc = self.data_dc
2121 2138 data['real'] = dc.real
2122 2139 data['imag'] = dc.imag
2123 2140 data = data.reshape((-1))
2124 2141 data.tofile(self.fp)
2125 2142
2126 2143 self.data_spc.fill(0)
2127 2144 self.data_dc.fill(0)
2128 2145 if self.data_cspc != None:
2129 2146 self.data_cspc.fill(0)
2130 2147
2131 2148 self.flagIsNewFile = 0
2132 2149 self.flagIsNewBlock = 1
2133 2150 self.nTotalBlocks += 1
2134 2151 self.nWriteBlocks += 1
2135 2152 self.blockIndex += 1
2136 2153
2137 2154
2138 2155 def putData(self):
2139 2156 """
2140 2157 Setea un bloque de datos y luego los escribe en un file
2141 2158
2142 2159 Affected:
2143 2160 self.data_spc
2144 2161 self.data_cspc
2145 2162 self.data_dc
2146 2163
2147 2164 Return:
2148 2165 0 : Si no hay data o no hay mas files que puedan escribirse
2149 2166 1 : Si se escribio la data de un bloque en un file
2150 2167 """
2151 2168
2152 2169 if self.dataOutObj.flagNoData:
2153 2170 return 0
2154 2171
2155 2172 self.flagIsNewBlock = 0
2156 2173
2157 2174 if self.dataOutObj.flagTimeBlock:
2158 2175 self.data_spc.fill(0)
2159 2176 self.data_cspc.fill(0)
2160 2177 self.data_dc.fill(0)
2161 2178 self.setNextFile()
2162 2179
2163 2180 if self.flagIsNewFile == 0:
2164 2181 self.getBasicHeader()
2165 2182
2166 2183 self.data_spc = self.dataOutObj.data_spc
2167 2184 self.data_cspc = self.dataOutObj.data_cspc
2168 2185 self.data_dc = self.dataOutObj.data_dc
2169 2186
2170 2187 # #self.processingHeaderObj.dataBlocksPerFile)
2171 2188 if self.hasAllDataInBuffer():
2172 2189 # self.getDataHeader()
2173 2190 self.writeNextBlock()
2174 2191
2175 2192 if self.flagNoMoreFiles:
2176 2193 #print 'Process finished'
2177 2194 return 0
2178 2195
2179 2196 return 1
2180 2197
2181 2198
2182 2199 def __getProcessFlags(self):
2183 2200
2184 2201 processFlags = 0
2185 2202
2186 2203 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
2187 2204 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
2188 2205 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
2189 2206 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
2190 2207 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
2191 2208 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
2192 2209
2193 2210 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
2194 2211
2195 2212
2196 2213
2197 2214 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
2198 2215 PROCFLAG.DATATYPE_SHORT,
2199 2216 PROCFLAG.DATATYPE_LONG,
2200 2217 PROCFLAG.DATATYPE_INT64,
2201 2218 PROCFLAG.DATATYPE_FLOAT,
2202 2219 PROCFLAG.DATATYPE_DOUBLE]
2203 2220
2204 2221
2205 2222 for index in range(len(dtypeList)):
2206 2223 if self.dataOutObj.dtype == dtypeList[index]:
2207 2224 dtypeValue = datatypeValueList[index]
2208 2225 break
2209 2226
2210 2227 processFlags += dtypeValue
2211 2228
2212 2229 if self.dataOutObj.flagDecodeData:
2213 2230 processFlags += PROCFLAG.DECODE_DATA
2214 2231
2215 2232 if self.dataOutObj.flagDeflipData:
2216 2233 processFlags += PROCFLAG.DEFLIP_DATA
2217 2234
2218 2235 if self.dataOutObj.code != None:
2219 2236 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
2220 2237
2221 2238 if self.dataOutObj.nIncohInt > 1:
2222 2239 processFlags += PROCFLAG.INCOHERENT_INTEGRATION
2223 2240
2224 2241 if self.dataOutObj.data_dc != None:
2225 2242 processFlags += PROCFLAG.SAVE_CHANNELS_DC
2226 2243
2227 2244 return processFlags
2228 2245
2229 2246
2230 2247 def __getBlockSize(self):
2231 2248 '''
2232 2249 Este metodos determina el cantidad de bytes para un bloque de datos de tipo Spectra
2233 2250 '''
2234 2251
2235 2252 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
2236 2253 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
2237 2254 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
2238 2255 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
2239 2256 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
2240 2257 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
2241 2258
2242 2259 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
2243 2260 datatypeValueList = [1,2,4,8,4,8]
2244 2261 for index in range(len(dtypeList)):
2245 2262 if self.dataOutObj.dtype == dtypeList[index]:
2246 2263 datatypeValue = datatypeValueList[index]
2247 2264 break
2248 2265
2249 2266
2250 2267 pts2write = self.dataOutObj.nHeights * self.dataOutObj.nFFTPoints
2251 2268
2252 2269 pts2write_SelfSpectra = int(self.dataOutObj.nChannels * pts2write)
2253 2270 blocksize = (pts2write_SelfSpectra*datatypeValue)
2254 2271
2255 2272 if self.dataOutObj.data_cspc != None:
2256 2273 pts2write_CrossSpectra = int(self.dataOutObj.nPairs * pts2write)
2257 2274 blocksize += (pts2write_CrossSpectra*datatypeValue*2)
2258 2275
2259 2276 if self.dataOutObj.data_dc != None:
2260 2277 pts2write_DCchannels = int(self.dataOutObj.nChannels * self.dataOutObj.nHeights)
2261 2278 blocksize += (pts2write_DCchannels*datatypeValue*2)
2262 2279
2263 2280 blocksize = blocksize #* datatypeValue * 2 #CORREGIR ESTO
2264 2281
2265 2282 return blocksize
2266 2283
2267 2284 def getDataHeader(self):
2268 2285
2269 2286 """
2270 2287 Obtiene una copia del First Header
2271 2288
2272 2289 Affected:
2273 2290 self.systemHeaderObj
2274 2291 self.radarControllerHeaderObj
2275 2292 self.dtype
2276 2293
2277 2294 Return:
2278 2295 None
2279 2296 """
2280 2297
2281 2298 self.systemHeaderObj = self.dataOutObj.systemHeaderObj.copy()
2282 2299 self.systemHeaderObj.nChannels = self.dataOutObj.nChannels
2283 2300 self.radarControllerHeaderObj = self.dataOutObj.radarControllerHeaderObj.copy()
2284 2301
2285 2302 self.getBasicHeader()
2286 2303
2287 2304 processingHeaderSize = 40 # bytes
2288 2305 self.processingHeaderObj.dtype = 0 # Voltage
2289 2306 self.processingHeaderObj.blockSize = self.__getBlockSize()
2290 2307 self.processingHeaderObj.profilesPerBlock = self.dataOutObj.nFFTPoints
2291 2308 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
2292 2309 self.processingHeaderObj.nWindows = 1 #podria ser 1 o self.dataOutObj.processingHeaderObj.nWindows
2293 2310 self.processingHeaderObj.processFlags = self.__getProcessFlags()
2294 2311 self.processingHeaderObj.nCohInt = self.dataOutObj.nCohInt# Se requiere para determinar el valor de timeInterval
2295 2312 self.processingHeaderObj.nIncohInt = self.dataOutObj.nIncohInt
2296 2313 self.processingHeaderObj.totalSpectra = self.dataOutObj.nPairs + self.dataOutObj.nChannels
2297 2314
2298 2315 if self.processingHeaderObj.totalSpectra > 0:
2299 2316 channelList = []
2300 2317 for channel in range(self.dataOutObj.nChannels):
2301 2318 channelList.append(channel)
2302 2319 channelList.append(channel)
2303 2320
2304 2321 pairsList = []
2305 2322 for pair in self.dataOutObj.pairsList:
2306 2323 pairsList.append(pair[0])
2307 2324 pairsList.append(pair[1])
2308 2325 spectraComb = channelList + pairsList
2309 2326 spectraComb = numpy.array(spectraComb,dtype="u1")
2310 2327 self.processingHeaderObj.spectraComb = spectraComb
2311 2328 sizeOfSpcComb = len(spectraComb)
2312 2329 processingHeaderSize += sizeOfSpcComb
2313 2330
2314 2331 if self.dataOutObj.code != None:
2315 2332 self.processingHeaderObj.code = self.dataOutObj.code
2316 2333 self.processingHeaderObj.nCode = self.dataOutObj.nCode
2317 2334 self.processingHeaderObj.nBaud = self.dataOutObj.nBaud
2318 2335 nCodeSize = 4 # bytes
2319 2336 nBaudSize = 4 # bytes
2320 2337 codeSize = 4 # bytes
2321 2338 sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOutObj.nCode * self.dataOutObj.nBaud)
2322 2339 processingHeaderSize += sizeOfCode
2323 2340
2324 2341 if self.processingHeaderObj.nWindows != 0:
2325 2342 self.processingHeaderObj.firstHeight = self.dataOutObj.heightList[0]
2326 2343 self.processingHeaderObj.deltaHeight = self.dataOutObj.heightList[1] - self.dataOutObj.heightList[0]
2327 2344 self.processingHeaderObj.nHeights = self.dataOutObj.nHeights
2328 2345 self.processingHeaderObj.samplesWin = self.dataOutObj.nHeights
2329 2346 sizeOfFirstHeight = 4
2330 2347 sizeOfdeltaHeight = 4
2331 2348 sizeOfnHeights = 4
2332 2349 sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights)*self.processingHeaderObj.nWindows
2333 2350 processingHeaderSize += sizeOfWindows
2334 2351
2335 2352 self.processingHeaderObj.size = processingHeaderSize
2336 2353
class SpectraHeisWriter():
    """
    Writes Heis spectra to FITS files, one file per putData() call, using
    the FITS helper class below.
    """

    # Counter attribute; not referenced anywhere in the code shown here.
    i=0

    def __init__(self, dataOutObj):

        # pyfits-backed helper that assembles and writes the output file.
        self.wrObj = FITS()
        self.dataOutObj = dataOutObj

    def isNumber(str):
        """
        Checks whether a string can be converted to a number.

        Input:
            str : string to analyse.

        Return:
            True : the string is numeric
            False : the string is not numeric

        NOTE(review): declared without 'self', so it cannot be called as an
        instance method; it duplicates the module-level isNumber(). Confirm
        whether it is dead code.
        """
        try:
            float( str )
            return True
        except:
            return False

    def setup(self, wrpath,):
        """
        Creates the output directory when missing and resets the file counter.
        """
        if not(os.path.exists(wrpath)):
            os.mkdir(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Writes the current spectra to a FITS file named
        D<year><doy><set>.fits inside a D<year><doy> subfolder of wrpath.

        NOTE(review): hard-codes exactly 8 channels (P_Ch1..P_Ch8) -- would
        raise IndexError with fewer channels; confirm against callers.

        Return:
            1 : always, once the file has been written.
        """
#        self.wrObj.writeHeader(nChannels=self.dataOutObj.nChannels, nFFTPoints=self.dataOutObj.nFFTPoints)
        #name = self.dataOutObj.utctime
        name= time.localtime( self.dataOutObj.utctime)
        ext=".fits"
        #folder='D%4.4d%3.3d'%(name.tm_year,name.tm_yday)
        subfolder = 'D%4.4d%3.3d' % (name.tm_year,name.tm_yday)

        doypath = os.path.join( self.wrpath, subfolder )
        if not( os.path.exists(doypath) ):
            os.mkdir(doypath)
        self.setFile += 1
        # NOTE(review): 'file' shadows the Python builtin of the same name.
        file = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year,name.tm_yday,self.setFile,ext)

        filename = os.path.join(self.wrpath,subfolder, file)

#        print self.dataOutObj.ippSeconds
        # Frequency axis, symmetric around 0, scaled by the IPP.
        freq=numpy.arange(-1*self.dataOutObj.nHeights/2.,self.dataOutObj.nHeights/2.)/(2*self.dataOutObj.ippSeconds)

        # One column per channel, power in dB (10*log10).
        col1=self.wrObj.setColF(name="freq", format=str(self.dataOutObj.nFFTPoints)+'E', array=freq)
        col2=self.wrObj.writeData(name="P_Ch1",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[0,:]))
        col3=self.wrObj.writeData(name="P_Ch2",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[1,:]))
        col4=self.wrObj.writeData(name="P_Ch3",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[2,:]))
        col5=self.wrObj.writeData(name="P_Ch4",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[3,:]))
        col6=self.wrObj.writeData(name="P_Ch5",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[4,:]))
        col7=self.wrObj.writeData(name="P_Ch6",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[5,:]))
        col8=self.wrObj.writeData(name="P_Ch7",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[6,:]))
        col9=self.wrObj.writeData(name="P_Ch8",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[7,:]))
        #n=numpy.arange((100))
        # Primary HDU image data: raw spectrum of channel index 6.
        n=self.dataOutObj.data_spc[6,:]
        a=self.wrObj.cFImage(n)
        b=self.wrObj.Ctable(col1,col2,col3,col4,col5,col6,col7,col8,col9)
        self.wrObj.CFile(a,b)
        self.wrObj.wFile(filename)
        return 1
class FITS:
    """
    Small pyfits helper: builds float32 table columns, a primary image HDU
    and a binary table HDU, then writes them out as one FITS file.
    """

    # Last column definition handed to setColF/writeData.
    name = None
    format = None
    array = None
    data = None
    # HDUList assembled by CFile(), written by wFile().
    thdulist = None

    def __init__(self):

        pass

    def setColF(self, name, format, array):
        """Build and return a float32 pyfits Column from *array*."""
        self.name = name
        self.format = format
        self.array = array
        values = numpy.array([self.array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col1

    def writeHeader(self):
        # Placeholder: header writing is not implemented.
        pass

    def writeData(self, name, format, data):
        """Build and return a float32 pyfits Column from *data*."""
        self.name = name
        self.format = format
        self.data = data
        values = numpy.array([self.data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col2

    def cFImage(self, n):
        """Create the primary image HDU holding the array *n*."""
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        """Create a binary table HDU from the nine given columns."""
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        """Assemble the HDU list (primary image + table) to be written."""
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        """Write the assembled HDU list to *filename*."""
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now