##// END OF EJS Templates
Miguel Valdez -
r187:ddf8fff0f71b
parent child
Show More
@@ -1,2474 +1,2474
1 1 '''
2 2
3 3 $Author: murco $
4 4 $Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $
5 5 '''
6 6
7 7 import os, sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13
14 14 from jrodata import *
15 15 from jroheaderIO import *
16 16
def isNumber(str):
    """
    Check whether a string (or any value) can be converted to a number.

    Input:
        str : value to analyze (typically a string).

    Return:
        True  : the value is numeric (float() accepts it)
        False : the value is not numeric
    """
    try:
        float( str )
        return True
    # float() only raises TypeError / ValueError; catching exactly those
    # avoids hiding unrelated errors (KeyboardInterrupt, etc.)
    except (TypeError, ValueError):
        return False
35 35
36 36 def isThisFileinRange(filename, startUTSeconds, endUTSeconds):
37 37 """
38 38 Esta funcion determina si un archivo de datos se encuentra o no dentro del rango de fecha especificado.
39 39
40 40 Inputs:
41 41 filename : nombre completo del archivo de datos en formato Jicamarca (.r)
42 42
43 43 startUTSeconds : fecha inicial del rango seleccionado. La fecha esta dada en
44 44 segundos contados desde 01/01/1970.
45 45 endUTSeconds : fecha final del rango seleccionado. La fecha esta dada en
46 46 segundos contados desde 01/01/1970.
47 47
48 48 Return:
49 49 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
50 50 fecha especificado, de lo contrario retorna False.
51 51
52 52 Excepciones:
53 53 Si el archivo no existe o no puede ser abierto
54 54 Si la cabecera no puede ser leida.
55 55
56 56 """
57 57 basicHeaderObj = BasicHeader()
58 58
59 59 try:
60 60 fp = open(filename,'rb')
61 61 except:
62 62 raise IOError, "The file %s can't be opened" %(filename)
63 63
64 64 sts = basicHeaderObj.read(fp)
65 65 fp.close()
66 66
67 67 if not(sts):
68 68 print "Skipping the file %s because it has not a valid header" %(filename)
69 69 return 0
70 70
71 71 if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
72 72 return 0
73 73
74 74 return 1
75 75
def getlastFileFromPath(path, ext):
    """
    Filter the directory listing of `path` down to the files that follow
    the "PYYYYDDDSSS.ext" naming convention and return the last one in
    case-insensitive string order.

    Input:
        path : folder holding the data files.
        ext  : extension of the files (compared case-insensitively).

    Return:
        The last matching filename (without path), or None when no file
        matches.
    """
    # Expected name layout:
    #   0 1234 567 89A BCDE
    #   H YYYY DDD SSS .ext
    validFilelist = []

    for entry in os.listdir(path):
        try:
            int(entry[1:5])     # year field must be numeric
            int(entry[5:8])     # doy field must be numeric

            if os.path.splitext(entry)[-1].upper() != ext.upper():
                continue
        except:
            continue

        validFilelist.append(entry)

    if not validFilelist:
        return None

    return sorted(validFilelist, key=str.lower)[-1]
110 110
def checkForRealPath(path, year, doy, set, ext):
    """
    Resolve the real on-disk path of a data file on a case-sensitive
    filesystem (Linux) by probing every upper/lower-case combination of
    the day-folder prefix and the file prefix.

    Example:
        real file:  .../.../D2009307/P2009307367.ext
        probed:     .../.../x2009307/y2009307367.ext
                    .../.../x2009307/Y2009307367.ext
                    .../.../X2009307/y2009307367.ext
                    .../.../X2009307/Y2009307367.ext

    Return:
        (filepath, filename) when one combination exists on disk;
        (None, filename)     when none exists (filename is the last
                             combination tried, in upper case);
        (None, None)         for an unrecognized extension.
    """
    if ext.lower() == ".r":         # voltage files
        dirChars, fileChars = "dD", "dD"
    elif ext.lower() == ".pdata":   # spectra files
        dirChars, fileChars = "dD", "pP"
    else:
        return None, None

    filename = None

    for dirChar in dirChars:            # both cases of the folder prefix
        for fileChar in fileChars:      # both cases of the file prefix
            doypath = "%s%04d%03d" % ( dirChar, year, doy )                     # folder xYYYYDDD
            filename = "%s%04d%03d%03d%s" % ( fileChar, year, doy, set, ext )   # file xYYYYDDDSSS.ext
            filepath = os.path.join( path, doypath, filename )
            if os.path.exists( filepath ):
                return filepath, filename

    return None, filename
160 160
161 161 class JRODataIO:
162 162
163 163 c = 3E8
164 164
165 165 isConfig = False
166 166
167 167 basicHeaderObj = BasicHeader()
168 168
169 169 systemHeaderObj = SystemHeader()
170 170
171 171 radarControllerHeaderObj = RadarControllerHeader()
172 172
173 173 processingHeaderObj = ProcessingHeader()
174 174
175 175 online = 0
176 176
177 177 dtype = None
178 178
179 179 pathList = []
180 180
181 181 filenameList = []
182 182
183 183 filename = None
184 184
185 185 ext = None
186 186
187 187 flagNoMoreFiles = 0
188 188
189 189 flagIsNewFile = 1
190 190
191 191 flagTimeBlock = 0
192 192
193 193 flagIsNewBlock = 0
194 194
195 195 fp = None
196 196
197 197 firstHeaderSize = 0
198 198
199 199 basicHeaderSize = 24
200 200
201 201 versionFile = 1103
202 202
203 203 fileSize = None
204 204
205 205 ippSeconds = None
206 206
207 207 fileSizeByHeader = None
208 208
209 209 fileIndex = None
210 210
211 211 profileIndex = None
212 212
213 213 blockIndex = None
214 214
215 215 nTotalBlocks = None
216 216
217 217 maxTimeStep = 30
218 218
219 219 lastUTTime = None
220 220
221 221 datablock = None
222 222
223 223 dataOut = None
224 224
225 225 blocksize = None
226 226
227 227 def __init__(self):
228 228
229 229 raise ValueError, "Not implemented"
230 230
231 231 def run(self):
232 232
233 233 raise ValueError, "Not implemented"
234 234
235 235
236 236
237 237 class JRODataReader(JRODataIO):
238 238
239 239 nReadBlocks = 0
240 240
241 241 delay = 60 #number of seconds waiting a new file
242 242
243 243 nTries = 3 #quantity tries
244 244
245 245 nFiles = 3 #number of files for searching
246 246
247 247
248 248 def __init__(self):
249 249
250 250 """
251 251
252 252 """
253 253
254 254 raise ValueError, "This method has not been implemented"
255 255
256 256
257 257 def createObjByDefault(self):
258 258 """
259 259
260 260 """
261 261 raise ValueError, "This method has not been implemented"
262 262
263 263 def getBlockDimension(self):
264 264
265 265 raise ValueError, "No implemented"
266 266
267 267 def __searchFilesOffLine(self,
268 268 path,
269 269 startDate,
270 270 endDate,
271 271 startTime=datetime.time(0,0,0),
272 272 endTime=datetime.time(23,59,59),
273 273 set=None,
274 274 expLabel="",
275 275 ext=".r"):
276 276 dirList = []
277 277 for thisPath in os.listdir(path):
278 278 if os.path.isdir(os.path.join(path,thisPath)):
279 279 dirList.append(thisPath)
280 280
281 281 if not(dirList):
282 282 return None, None
283 283
284 284 pathList = []
285 285 dateList = []
286 286
287 287 thisDate = startDate
288 288
289 289 while(thisDate <= endDate):
290 290 year = thisDate.timetuple().tm_year
291 291 doy = thisDate.timetuple().tm_yday
292 292
293 293 match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
294 294 if len(match) == 0:
295 295 thisDate += datetime.timedelta(1)
296 296 continue
297 297
298 298 pathList.append(os.path.join(path,match[0],expLabel))
299 299 dateList.append(thisDate)
300 300 thisDate += datetime.timedelta(1)
301 301
302 302 filenameList = []
303 303 for index in range(len(pathList)):
304 304
305 305 thisPath = pathList[index]
306 306 fileList = glob.glob1(thisPath, "*%s" %ext)
307 307 fileList.sort()
308 308
309 309 #Busqueda de datos en el rango de horas indicados
310 310 thisDate = dateList[index]
311 311 startDT = datetime.datetime.combine(thisDate, startTime)
312 312 endDT = datetime.datetime.combine(thisDate, endTime)
313 313
314 314 startUtSeconds = time.mktime(startDT.timetuple())
315 315 endUtSeconds = time.mktime(endDT.timetuple())
316 316
317 317 for file in fileList:
318 318
319 319 filename = os.path.join(thisPath,file)
320 320
321 321 if isThisFileinRange(filename, startUtSeconds, endUtSeconds):
322 322 filenameList.append(filename)
323 323
324 324 if not(filenameList):
325 325 return None, None
326 326
327 327 self.filenameList = filenameList
328 328
329 329 return pathList, filenameList
330 330
331 331 def __searchFilesOnLine(self, path, startDate=None, endDate=None, startTime=None, endTime=None, expLabel = "", ext = None):
332 332
333 333 """
334 334 Busca el ultimo archivo de la ultima carpeta (determinada o no por startDateTime) y
335 335 devuelve el archivo encontrado ademas de otros datos.
336 336
337 337 Input:
338 338 path : carpeta donde estan contenidos los files que contiene data
339 339
340 340 startDate : Fecha inicial. Rechaza todos los directorios donde
341 341 file end time < startDate (obejto datetime.date)
342 342
343 343 endDate : Fecha final. Rechaza todos los directorios donde
344 344 file start time > endDate (obejto datetime.date)
345 345
346 346 startTime : Tiempo inicial. Rechaza todos los archivos donde
347 347 file end time < startTime (obejto datetime.time)
348 348
349 349 endTime : Tiempo final. Rechaza todos los archivos donde
350 350 file start time > endTime (obejto datetime.time)
351 351
352 352 expLabel : Nombre del subexperimento (subfolder)
353 353
354 354 ext : extension de los files
355 355
356 356 Return:
357 357 directory : eL directorio donde esta el file encontrado
358 358 filename : el ultimo file de una determinada carpeta
359 359 year : el anho
360 360 doy : el numero de dia del anho
361 361 set : el set del archivo
362 362
363 363
364 364 """
365 365 dirList = []
366 366 pathList = []
367 367 directory = None
368 368
369 369 #Filtra solo los directorios
370 370 for thisPath in os.listdir(path):
371 371 if os.path.isdir(os.path.join(path, thisPath)):
372 372 dirList.append(thisPath)
373 373
374 374 if not(dirList):
375 375 return None, None, None, None, None
376 376
377 377 dirList = sorted( dirList, key=str.lower )
378 378
379 379 if startDate:
380 380 startDateTime = datetime.datetime.combine(startDate, startTime)
381 381 thisDateTime = startDateTime
382 382 if endDate == None: endDateTime = startDateTime
383 383 else: endDateTime = datetime.datetime.combine(endDate, endTime)
384 384
385 385 while(thisDateTime <= endDateTime):
386 386 year = thisDateTime.timetuple().tm_year
387 387 doy = thisDateTime.timetuple().tm_yday
388 388
389 389 match = fnmatch.filter(dirList, '?' + '%4.4d%3.3d' % (year,doy))
390 390 if len(match) == 0:
391 391 thisDateTime += datetime.timedelta(1)
392 392 continue
393 393
394 394 pathList.append(os.path.join(path,match[0], expLabel))
395 395 thisDateTime += datetime.timedelta(1)
396 396
397 397 if not(pathList):
398 398 print "\tNo files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime())
399 399 return None, None, None, None, None
400 400
401 401 directory = pathList[0]
402 402
403 403 else:
404 404 directory = dirList[-1]
405 405 directory = os.path.join(path,directory)
406 406
407 407 filename = getlastFileFromPath(directory, ext)
408 408
409 409 if not(filename):
410 410 return None, None, None, None, None
411 411
412 412 if not(self.__verifyFile(os.path.join(directory, filename))):
413 413 return None, None, None, None, None
414 414
415 415 year = int( filename[1:5] )
416 416 doy = int( filename[5:8] )
417 417 set = int( filename[8:11] )
418 418
419 419 return directory, filename, year, doy, set
420 420
421 421 def setup(self,
422 422 path=None,
423 423 startDate=None,
424 424 endDate=None,
425 425 startTime=datetime.time(0,0,0),
426 426 endTime=datetime.time(23,59,59),
427 427 set=0,
428 428 expLabel = "",
429 429 ext = None,
430 430 online = False,
431 431 delay = 60):
432 432
433 433 if path == None:
434 434 raise ValueError, "The path is not valid"
435 435
436 436 if ext == None:
437 437 ext = self.ext
438 438
439 439 if dataOut != None:
440 440 self.dataOut = dataOut
441 441
442 442 if online:
443 443 print "Searching files in online mode..."
444 444 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext)
445 445
446 446 if not(doypath):
447 447 for nTries in range( self.nTries ):
448 448 print '\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
449 449 time.sleep( self.delay )
450 450 doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=exp)
451 451 if doypath:
452 452 break
453 453
454 454 if not(doypath):
455 455 print "There 'isn't valied files in %s" % path
456 456 return None
457 457
458 458 self.year = year
459 459 self.doy = doy
460 460 self.set = set - 1
461 461 self.path = path
462 462
463 463 else:
464 464 print "Searching files in offline mode ..."
465 465 pathList, filenameList = self.__searchFilesOffLine(path, startDate, endDate, startTime, endTime, set, expLabel, ext)
466 466
467 467 if not(pathList):
468 468 print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path,
469 469 datetime.datetime.combine(startDate,startTime).ctime(),
470 470 datetime.datetime.combine(endDate,endTime).ctime())
471 471
472 472 sys.exit(-1)
473 473
474 474
475 475 self.fileIndex = -1
476 476 self.pathList = pathList
477 477 self.filenameList = filenameList
478 478
479 479 self.online = online
480 480 self.delay = delay
481 481 ext = ext.lower()
482 482 self.ext = ext
483 483
484 484 if not(self.setNextFile()):
485 485 if (startDate!=None) and (endDate!=None):
486 486 print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
487 487 elif startDate != None:
488 488 print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
489 489 else:
490 490 print "No files"
491 491
492 492 sys.exit(-1)
493 493
494 494 # self.updateDataHeader()
495 495
496 496 return self.dataOut
497 497
498 498 def __setNextFileOffline(self):
499 499
500 500 idFile = self.fileIndex
501 501
502 502 while (True):
503 503 idFile += 1
504 504 if not(idFile < len(self.filenameList)):
505 505 self.flagNoMoreFiles = 1
506 506 print "No more Files"
507 507 return 0
508 508
509 509 filename = self.filenameList[idFile]
510 510
511 511 if not(self.__verifyFile(filename)):
512 512 continue
513 513
514 514 fileSize = os.path.getsize(filename)
515 515 fp = open(filename,'rb')
516 516 break
517 517
518 518 self.flagIsNewFile = 1
519 519 self.fileIndex = idFile
520 520 self.filename = filename
521 521 self.fileSize = fileSize
522 522 self.fp = fp
523 523
524 524 print "Setting the file: %s"%self.filename
525 525
526 526 return 1
527 527
528 528 def __setNextFileOnline(self):
529 529 """
530 530 Busca el siguiente file que tenga suficiente data para ser leida, dentro de un folder especifico, si
531 531 no encuentra un file valido espera un tiempo determinado y luego busca en los posibles n files
532 532 siguientes.
533 533
534 534 Affected:
535 535 self.flagIsNewFile
536 536 self.filename
537 537 self.fileSize
538 538 self.fp
539 539 self.set
540 540 self.flagNoMoreFiles
541 541
542 542 Return:
543 543 0 : si luego de una busqueda del siguiente file valido este no pudo ser encontrado
544 544 1 : si el file fue abierto con exito y esta listo a ser leido
545 545
546 546 Excepciones:
547 547 Si un determinado file no puede ser abierto
548 548 """
549 549 nFiles = 0
550 550 fileOk_flag = False
551 551 firstTime_flag = True
552 552
553 553 self.set += 1
554 554
555 555 #busca el 1er file disponible
556 556 file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
557 557 if file:
558 558 if self.__verifyFile(file, False):
559 559 fileOk_flag = True
560 560
561 561 #si no encuentra un file entonces espera y vuelve a buscar
562 562 if not(fileOk_flag):
563 563 for nFiles in range(self.nFiles+1): #busco en los siguientes self.nFiles+1 files posibles
564 564
565 565 if firstTime_flag: #si es la 1era vez entonces hace el for self.nTries veces
566 566 tries = self.nTries
567 567 else:
568 568 tries = 1 #si no es la 1era vez entonces solo lo hace una vez
569 569
570 570 for nTries in range( tries ):
571 571 if firstTime_flag:
572 572 print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
573 573 time.sleep( self.delay )
574 574 else:
575 575 print "\tSearching next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)
576 576
577 577 file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext )
578 578 if file:
579 579 if self.__verifyFile(file):
580 580 fileOk_flag = True
581 581 break
582 582
583 583 if fileOk_flag:
584 584 break
585 585
586 586 firstTime_flag = False
587 587
588 588 print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename
589 589 self.set += 1
590 590
591 591 if nFiles == (self.nFiles-1): #si no encuentro el file buscado cambio de carpeta y busco en la siguiente carpeta
592 592 self.set = 0
593 593 self.doy += 1
594 594
595 595 if fileOk_flag:
596 596 self.fileSize = os.path.getsize( file )
597 597 self.filename = file
598 598 self.flagIsNewFile = 1
599 599 if self.fp != None: self.fp.close()
600 600 self.fp = open(file)
601 601 self.flagNoMoreFiles = 0
602 602 print 'Setting the file: %s' % file
603 603 else:
604 604 self.fileSize = 0
605 605 self.filename = None
606 606 self.flagIsNewFile = 0
607 607 self.fp = None
608 608 self.flagNoMoreFiles = 1
609 609 print 'No more Files'
610 610
611 611 return fileOk_flag
612 612
613 613
614 614 def setNextFile(self):
615 615 if self.fp != None:
616 616 self.fp.close()
617 617
618 618 if self.online:
619 619 newFile = self.__setNextFileOnline()
620 620 else:
621 621 newFile = self.__setNextFileOffline()
622 622
623 623 if not(newFile):
624 624 return 0
625 625
626 626 self.__readFirstHeader()
627 627 self.nReadBlocks = 0
628 628 return 1
629 629
630 630 def __setNewBlock(self):
631 631 if self.fp == None:
632 632 return 0
633 633
634 634 if self.flagIsNewFile:
635 635 return 1
636 636
637 637 self.lastUTTime = self.basicHeaderObj.utc
638 638 currentSize = self.fileSize - self.fp.tell()
639 639 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
640 640
641 641 if (currentSize >= neededSize):
642 642 self.__rdBasicHeader()
643 643 return 1
644 644
645 645 if not(self.setNextFile()):
646 646 return 0
647 647
648 648 deltaTime = self.basicHeaderObj.utc - self.lastUTTime #
649 649
650 650 self.flagTimeBlock = 0
651 651
652 652 if deltaTime > self.maxTimeStep:
653 653 self.flagTimeBlock = 1
654 654
655 655 return 1
656 656
657 657
658 658 def readNextBlock(self):
659 659 if not(self.__setNewBlock()):
660 660 return 0
661 661
662 662 if not(self.readBlock()):
663 663 return 0
664 664
665 665 return 1
666 666
667 667 def __rdProcessingHeader(self, fp=None):
668 668 if fp == None:
669 669 fp = self.fp
670 670
671 671 self.processingHeaderObj.read(fp)
672 672
673 673 def __rdRadarControllerHeader(self, fp=None):
674 674 if fp == None:
675 675 fp = self.fp
676 676
677 677 self.radarControllerHeaderObj.read(fp)
678 678
679 679 def __rdSystemHeader(self, fp=None):
680 680 if fp == None:
681 681 fp = self.fp
682 682
683 683 self.systemHeaderObj.read(fp)
684 684
685 685 def __rdBasicHeader(self, fp=None):
686 686 if fp == None:
687 687 fp = self.fp
688 688
689 689 self.basicHeaderObj.read(fp)
690 690
691 691
692 692 def __readFirstHeader(self):
693 693 self.__rdBasicHeader()
694 694 self.__rdSystemHeader()
695 695 self.__rdRadarControllerHeader()
696 696 self.__rdProcessingHeader()
697 697
698 698 self.firstHeaderSize = self.basicHeaderObj.size
699 699
700 700 datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
701 701 if datatype == 0:
702 702 datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
703 703 elif datatype == 1:
704 704 datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
705 705 elif datatype == 2:
706 706 datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
707 707 elif datatype == 3:
708 708 datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
709 709 elif datatype == 4:
710 710 datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
711 711 elif datatype == 5:
712 712 datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
713 713 else:
714 714 raise ValueError, 'Data type was not defined'
715 715
716 716 self.dtype = datatype_str
717 717 self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
718 718 self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
719 719 # self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
720 720 # self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
721 721 self.getBlockDimension()
722 722
723 723
724 724 def __verifyFile(self, filename, msgFlag=True):
725 725 msg = None
726 726 try:
727 727 fp = open(filename, 'rb')
728 728 currentPosition = fp.tell()
729 729 except:
730 730 if msgFlag:
731 731 print "The file %s can't be opened" % (filename)
732 732 return False
733 733
734 734 neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize
735 735
736 736 if neededSize == 0:
737 737 basicHeaderObj = BasicHeader()
738 738 systemHeaderObj = SystemHeader()
739 739 radarControllerHeaderObj = RadarControllerHeader()
740 740 processingHeaderObj = ProcessingHeader()
741 741
742 742 try:
743 743 if not( basicHeaderObj.read(fp) ): raise ValueError
744 744 if not( systemHeaderObj.read(fp) ): raise ValueError
745 745 if not( radarControllerHeaderObj.read(fp) ): raise ValueError
746 746 if not( processingHeaderObj.read(fp) ): raise ValueError
747 747 data_type = int(numpy.log2((processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
748 748
749 749 neededSize = processingHeaderObj.blockSize + basicHeaderObj.size
750 750
751 751 except:
752 752 if msgFlag:
753 753 print "\tThe file %s is empty or it hasn't enough data" % filename
754 754
755 755 fp.close()
756 756 return False
757 757 else:
758 758 msg = "\tSkipping the file %s due to it hasn't enough data" %filename
759 759
760 760 fp.close()
761 761 fileSize = os.path.getsize(filename)
762 762 currentSize = fileSize - currentPosition
763 763 if currentSize < neededSize:
764 764 if msgFlag and (msg != None):
765 765 print msg #print"\tSkipping the file %s due to it hasn't enough data" %filename
766 766 return False
767 767
768 768 return True
769 769
770 770 def getData():
771 771 pass
772 772
773 773 def hasNotDataInBuffer():
774 774 pass
775 775
776 776 def readBlock():
777 777 pass
778 778
779 779 def run(self, **kwargs):
780 780
781 781 if not(self.isConfig):
782 782
783 783 # self.dataOut = dataOut
784 784 self.setup(**kwargs)
785 785 self.isConfig = True
786 786
787 787 self.getData()
788 788
789 789 class JRODataWriter(JRODataIO):
790 790
791 791 """
792 792 Esta clase permite escribir datos a archivos procesados (.r o ,pdata). La escritura
793 793 de los datos siempre se realiza por bloques.
794 794 """
795 795
796 796 blockIndex = 0
797 797
798 798 path = None
799 799
800 800 setFile = None
801 801
802 802 profilesPerBlock = None
803 803
804 804 blocksPerFile = None
805 805
806 806 nWriteBlocks = 0
807 807
808 808 def __init__(self, dataOut=None):
809 809 raise ValueError, "Not implemented"
810 810
811 811
812 812 def hasAllDataInBuffer(self):
813 813 raise ValueError, "Not implemented"
814 814
815 815
816 816 def setBlockDimension(self):
817 817 raise ValueError, "Not implemented"
818 818
819 819
820 820 def writeBlock(self):
821 821 raise ValueError, "No implemented"
822 822
823 823
824 824 def putData(self):
825 825 raise ValueError, "No implemented"
826 826
827 827 def getDataHeader(self):
828 828 """
829 829 Obtiene una copia del First Header
830 830
831 831 Affected:
832 832
833 833 self.basicHeaderObj
834 834 self.systemHeaderObj
835 835 self.radarControllerHeaderObj
836 836 self.processingHeaderObj self.
837 837
838 838 Return:
839 839 None
840 840 """
841 841
842 842 raise ValueError, "No implemented"
843 843
844 844 def getBasicHeader(self):
845 845
846 846 self.basicHeaderObj.size = self.basicHeaderSize #bytes
847 847 self.basicHeaderObj.version = self.versionFile
848 848 self.basicHeaderObj.dataBlock = self.nTotalBlocks
849 849
850 850 utc = numpy.floor(self.dataOut.utctime)
851 851 milisecond = (self.dataOut.utctime - utc)* 1000.0
852 852
853 853 self.basicHeaderObj.utc = utc
854 854 self.basicHeaderObj.miliSecond = milisecond
855 855 self.basicHeaderObj.timeZone = 0
856 856 self.basicHeaderObj.dstFlag = 0
857 857 self.basicHeaderObj.errorCount = 0
858 858
859 859 def __writeFirstHeader(self):
860 860 """
861 861 Escribe el primer header del file es decir el Basic header y el Long header (SystemHeader, RadarControllerHeader, ProcessingHeader)
862 862
863 863 Affected:
864 864 __dataType
865 865
866 866 Return:
867 867 None
868 868 """
869 869
870 870 # CALCULAR PARAMETROS
871 871
872 872 sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
873 873 self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader
874 874
875 875 self.basicHeaderObj.write(self.fp)
876 876 self.systemHeaderObj.write(self.fp)
877 877 self.radarControllerHeaderObj.write(self.fp)
878 878 self.processingHeaderObj.write(self.fp)
879 879
880 880 self.dtype = self.dataOut.dtype
881 881
882 882 def __setNewBlock(self):
883 883 """
884 884 Si es un nuevo file escribe el First Header caso contrario escribe solo el Basic Header
885 885
886 886 Return:
887 887 0 : si no pudo escribir nada
888 888 1 : Si escribio el Basic el First Header
889 889 """
890 890 if self.fp == None:
891 891 self.setNextFile()
892 892
893 893 if self.flagIsNewFile:
894 894 return 1
895 895
896 896 if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
897 897 self.basicHeaderObj.write(self.fp)
898 898 return 1
899 899
900 900 if not( self.setNextFile() ):
901 901 return 0
902 902
903 903 return 1
904 904
905 905
906 906 def writeNextBlock(self):
907 907 """
908 908 Selecciona el bloque siguiente de datos y los escribe en un file
909 909
910 910 Return:
911 911 0 : Si no hizo pudo escribir el bloque de datos
912 912 1 : Si no pudo escribir el bloque de datos
913 913 """
914 914 if not( self.__setNewBlock() ):
915 915 return 0
916 916
917 917 self.writeBlock()
918 918
919 919 return 1
920 920
921 921 def setNextFile(self):
922 922 """
923 923 Determina el siguiente file que sera escrito
924 924
925 925 Affected:
926 926 self.filename
927 927 self.subfolder
928 928 self.fp
929 929 self.setFile
930 930 self.flagIsNewFile
931 931
932 932 Return:
933 933 0 : Si el archivo no puede ser escrito
934 934 1 : Si el archivo esta listo para ser escrito
935 935 """
936 936 ext = self.ext
937 937 path = self.path
938 938
939 939 if self.fp != None:
940 940 self.fp.close()
941 941
942 942 timeTuple = time.localtime( self.dataOut.dataUtcTime)
943 943 subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)
944 944
945 945 doypath = os.path.join( path, subfolder )
946 946 if not( os.path.exists(doypath) ):
947 947 os.mkdir(doypath)
948 948 self.setFile = -1 #inicializo mi contador de seteo
949 949 else:
950 950 filesList = os.listdir( doypath )
951 951 if len( filesList ) > 0:
952 952 filesList = sorted( filesList, key=str.lower )
953 953 filen = filesList[-1]
954 954 # el filename debera tener el siguiente formato
955 955 # 0 1234 567 89A BCDE (hex)
956 956 # x YYYY DDD SSS .ext
957 957 if isNumber( filen[8:11] ):
958 958 self.setFile = int( filen[8:11] ) #inicializo mi contador de seteo al seteo del ultimo file
959 959 else:
960 960 self.setFile = -1
961 961 else:
962 962 self.setFile = -1 #inicializo mi contador de seteo
963 963
964 964 setFile = self.setFile
965 965 setFile += 1
966 966
967 967 file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar,
968 968 timeTuple.tm_year,
969 969 timeTuple.tm_yday,
970 970 setFile,
971 971 ext )
972 972
973 973 filename = os.path.join( path, subfolder, file )
974 974
975 975 fp = open( filename,'wb' )
976 976
977 977 self.blockIndex = 0
978 978
979 979 #guardando atributos
980 980 self.filename = filename
981 981 self.subfolder = subfolder
982 982 self.fp = fp
983 983 self.setFile = setFile
984 984 self.flagIsNewFile = 1
985 985
986 986 self.getDataHeader()
987 987
988 988 print 'Writing the file: %s'%self.filename
989 989
990 990 self.__writeFirstHeader()
991 991
992 992 return 1
993 993
994 994 def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None):
995 995 """
996 996 Setea el tipo de formato en la cual sera guardada la data y escribe el First Header
997 997
998 998 Inputs:
999 999 path : el path destino en el cual se escribiran los files a crear
1000 1000 format : formato en el cual sera salvado un file
1001 1001 set : el setebo del file
1002 1002
1003 1003 Return:
1004 1004 0 : Si no realizo un buen seteo
1005 1005 1 : Si realizo un buen seteo
1006 1006 """
1007 1007
1008 1008 if ext == None:
1009 1009 ext = self.ext
1010 1010
1011 1011 ext = ext.lower()
1012 1012
1013 1013 self.ext = ext
1014 1014
1015 1015 self.path = path
1016 1016
1017 1017 self.setFile = set - 1
1018 1018
1019 1019 self.blocksPerFile = blocksPerFile
1020 1020
1021 1021 self.profilesPerBlock = profilesPerBlock
1022 1022
1023 1023 self.dataOut = dataOut
1024 1024
1025 1025 if not(self.setNextFile()):
1026 1026 print "There isn't a next file"
1027 1027 return 0
1028 1028
1029 1029 self.setBlockDimension()
1030 1030
1031 1031 return 1
1032 1032
1033 1033 def run(self, dataOut, **kwargs):
1034 1034
1035 1035 if not(self.isConfig):
1036 1036
1037 1037 self.setup(dataOut, **kwargs)
1038 1038 self.isConfig = True
1039 1039
1040 1040 self.putData()
1041 1041
1042 1042 class VoltageReader(JRODataReader):
1043 1043 """
1044 1044 Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura
1045 1045 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones:
1046 1046 perfiles*alturas*canales) son almacenados en la variable "buffer".
1047 1047
1048 1048 perfiles * alturas * canales
1049 1049
1050 1050 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1051 1051 RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la
1052 1052 cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de
1053 1053 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1054 1054
1055 1055 Example:
1056 1056
1057 1057 dpath = "/home/myuser/data"
1058 1058
1059 1059 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1060 1060
1061 1061 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1062 1062
1063 1063 readerObj = VoltageReader()
1064 1064
1065 1065 readerObj.setup(dpath, startTime, endTime)
1066 1066
1067 1067 while(True):
1068 1068
1069 1069 #to get one profile
1070 1070 profile = readerObj.getData()
1071 1071
1072 1072 #print the profile
1073 1073 print profile
1074 1074
1075 1075 #If you want to see all datablock
1076 1076 print readerObj.datablock
1077 1077
1078 1078 if readerObj.flagNoMoreFiles:
1079 1079 break
1080 1080
1081 1081 """
1082 1082
1083 1083 ext = ".r"
1084 1084
1085 1085 optchar = "D"
1086 1086 dataOut = None
1087 1087
1088 1088
1089 1089 def __init__(self):
1090 1090 """
1091 1091 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
1092 1092
1093 1093 Input:
1094 1094 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
1095 1095 almacenar un perfil de datos cada vez que se haga un requerimiento
1096 1096 (getData). El perfil sera obtenido a partir del buffer de datos,
1097 1097 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1098 1098 bloque de datos.
1099 1099 Si este parametro no es pasado se creara uno internamente.
1100 1100
1101 1101 Variables afectadas:
1102 1102 self.dataOut
1103 1103
1104 1104 Return:
1105 1105 None
1106 1106 """
1107 1107
1108 1108 self.isConfig = False
1109 1109
1110 1110 self.datablock = None
1111 1111
1112 1112 self.utc = 0
1113 1113
1114 1114 self.ext = ".r"
1115 1115
1116 1116 self.optchar = "D"
1117 1117
1118 1118 self.basicHeaderObj = BasicHeader()
1119 1119
1120 1120 self.systemHeaderObj = SystemHeader()
1121 1121
1122 1122 self.radarControllerHeaderObj = RadarControllerHeader()
1123 1123
1124 1124 self.processingHeaderObj = ProcessingHeader()
1125 1125
1126 1126 self.online = 0
1127 1127
1128 1128 self.fp = None
1129 1129
1130 1130 self.idFile = None
1131 1131
1132 1132 self.dtype = None
1133 1133
1134 1134 self.fileSizeByHeader = None
1135 1135
1136 1136 self.filenameList = []
1137 1137
1138 1138 self.filename = None
1139 1139
1140 1140 self.fileSize = None
1141 1141
1142 1142 self.firstHeaderSize = 0
1143 1143
1144 1144 self.basicHeaderSize = 24
1145 1145
1146 1146 self.pathList = []
1147 1147
1148 1148 self.filenameList = []
1149 1149
1150 1150 self.lastUTTime = 0
1151 1151
1152 1152 self.maxTimeStep = 30
1153 1153
1154 1154 self.flagNoMoreFiles = 0
1155 1155
1156 1156 self.set = 0
1157 1157
1158 1158 self.path = None
1159 1159
1160 1160 self.profileIndex = 9999
1161 1161
1162 1162 self.delay = 3 #seconds
1163 1163
1164 1164 self.nTries = 3 #quantity tries
1165 1165
1166 1166 self.nFiles = 3 #number of files for searching
1167 1167
1168 1168 self.nReadBlocks = 0
1169 1169
1170 1170 self.flagIsNewFile = 1
1171 1171
1172 1172 self.ippSeconds = 0
1173 1173
1174 1174 self.flagTimeBlock = 0
1175 1175
1176 1176 self.flagIsNewBlock = 0
1177 1177
1178 1178 self.nTotalBlocks = 0
1179 1179
1180 1180 self.blocksize = 0
1181 1181
1182 dataOut = self.createObjByDefault()
1182 self.dataOut = self.createObjByDefault()
1183 1183
1184 1184 def createObjByDefault(self):
1185 1185
1186 1186 dataObj = Voltage()
1187 1187
1188 1188 return dataObj
1189 1189
1190 1190 def __hasNotDataInBuffer(self):
1191 1191 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1192 1192 return 1
1193 1193 return 0
1194 1194
1195 1195
1196 1196 def getBlockDimension(self):
1197 1197 """
1198 1198 Obtiene la cantidad de puntos a leer por cada bloque de datos
1199 1199
1200 1200 Affected:
1201 1201 self.blocksize
1202 1202
1203 1203 Return:
1204 1204 None
1205 1205 """
1206 1206 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
1207 1207 self.blocksize = pts2read
1208 1208
1209 1209
1210 1210 def readBlock(self):
1211 1211 """
1212 1212 readBlock lee el bloque de datos desde la posicion actual del puntero del archivo
1213 1213 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1214 1214 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1215 1215 es seteado a 0
1216 1216
1217 1217 Inputs:
1218 1218 None
1219 1219
1220 1220 Return:
1221 1221 None
1222 1222
1223 1223 Affected:
1224 1224 self.profileIndex
1225 1225 self.datablock
1226 1226 self.flagIsNewFile
1227 1227 self.flagIsNewBlock
1228 1228 self.nTotalBlocks
1229 1229
1230 1230 Exceptions:
1231 1231 Si un bloque leido no es un bloque valido
1232 1232 """
1233 1233
1234 1234 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
1235 1235
1236 1236 try:
1237 1237 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
1238 1238 except:
1239 1239 print "The read block (%3d) has not enough data" %self.nReadBlocks
1240 1240 return 0
1241 1241
1242 1242 junk = numpy.transpose(junk, (2,0,1))
1243 1243 self.datablock = junk['real'] + junk['imag']*1j
1244 1244
1245 1245 self.profileIndex = 0
1246 1246
1247 1247 self.flagIsNewFile = 0
1248 1248 self.flagIsNewBlock = 1
1249 1249
1250 1250 self.nTotalBlocks += 1
1251 1251 self.nReadBlocks += 1
1252 1252
1253 1253 return 1
1254 1254
1255 1255
1256 1256 def getData(self):
1257 1257 """
1258 1258 getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage"
1259 1259 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1260 1260 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1261 1261
1262 1262 Ademas incrementa el contador del buffer en 1.
1263 1263
1264 1264 Return:
1265 1265 data : retorna un perfil de voltages (alturas * canales) copiados desde el
1266 1266 buffer. Si no hay mas archivos a leer retorna None.
1267 1267
1268 1268 Variables afectadas:
1269 1269 self.dataOut
1270 1270 self.profileIndex
1271 1271
1272 1272 Affected:
1273 1273 self.dataOut
1274 1274 self.profileIndex
1275 1275 self.flagTimeBlock
1276 1276 self.flagIsNewBlock
1277 1277 """
1278 1278 if self.flagNoMoreFiles: return 0
1279 1279
1280 1280 self.flagTimeBlock = 0
1281 1281 self.flagIsNewBlock = 0
1282 1282
1283 1283 if self.__hasNotDataInBuffer():
1284 1284
1285 1285 if not( self.readNextBlock() ):
1286 1286 return 0
1287 1287
1288 1288 # self.updateDataHeader()
1289 1289
1290 1290 if self.flagNoMoreFiles == 1:
1291 1291 print 'Process finished'
1292 1292 return 0
1293 1293
1294 1294 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1295 1295
1296 1296 if self.datablock == None:
1297 1297 self.dataOut.flagNoData = True
1298 1298 return 0
1299 1299
1300 1300 self.dataOut.data = self.datablock[:,self.profileIndex,:]
1301 1301
1302 1302 self.dataOut.dtype = self.dtype
1303 1303
1304 1304 self.dataOut.nChannels = self.systemHeaderObj.nChannels
1305 1305
1306 1306 self.dataOut.nHeights = self.processingHeaderObj.nHeights
1307 1307
1308 1308 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1309 1309
1310 1310 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1311 1311
1312 1312 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1313 1313
1314 1314 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1315 1315
1316 1316 self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1317 1317
1318 1318 self.dataOut.flagTimeBlock = self.flagTimeBlock
1319 1319
1320 1320 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds
1321 1321
1322 1322 self.dataOut.ippSeconds = self.ippSeconds
1323 1323
1324 1324 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt
1325 1325
1326 1326 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
1327 1327
1328 1328 self.dataOut.flagShiftFFT = False
1329 1329
1330 1330 if self.processingHeaderObj.code != None:
1331 1331 self.dataOut.nCode = self.processingHeaderObj.nCode
1332 1332
1333 1333 self.dataOut.nBaud = self.processingHeaderObj.nBaud
1334 1334
1335 1335 self.dataOut.code = self.processingHeaderObj.code
1336 1336
1337 1337 self.profileIndex += 1
1338 1338
1339 1339 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1340 1340
1341 1341 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1342 1342
1343 1343 self.dataOut.flagNoData = False
1344 1344
1345 1345 # print self.profileIndex, self.dataOut.utctime
1346 1346 # if self.profileIndex == 800:
1347 1347 # a=1
1348 1348
1349 1349 return self.dataOut.data
1350 1350
1351 1351
1352 1352 class VoltageWriter(JRODataWriter):
1353 1353 """
1354 1354 Esta clase permite escribir datos de voltajes a archivos procesados (.r). La escritura
1355 1355 de los datos siempre se realiza por bloques.
1356 1356 """
1357 1357
1358 1358 ext = ".r"
1359 1359
1360 1360 optchar = "D"
1361 1361
1362 1362 shapeBuffer = None
1363 1363
1364 1364
1365 1365 def __init__(self):
1366 1366 """
1367 1367 Inicializador de la clase VoltageWriter para la escritura de datos de espectros.
1368 1368
1369 1369 Affected:
1370 1370 self.dataOut
1371 1371
1372 1372 Return: None
1373 1373 """
1374 1374
1375 1375 self.nTotalBlocks = 0
1376 1376
1377 1377 self.profileIndex = 0
1378 1378
1379 1379 self.isConfig = False
1380 1380
1381 1381 self.fp = None
1382 1382
1383 1383 self.flagIsNewFile = 1
1384 1384
1385 1385 self.nTotalBlocks = 0
1386 1386
1387 1387 self.flagIsNewBlock = 0
1388 1388
1389 1389 self.flagNoMoreFiles = 0
1390 1390
1391 1391 self.setFile = None
1392 1392
1393 1393 self.dtype = None
1394 1394
1395 1395 self.path = None
1396 1396
1397 1397 self.noMoreFiles = 0
1398 1398
1399 1399 self.filename = None
1400 1400
1401 1401 self.basicHeaderObj = BasicHeader()
1402 1402
1403 1403 self.systemHeaderObj = SystemHeader()
1404 1404
1405 1405 self.radarControllerHeaderObj = RadarControllerHeader()
1406 1406
1407 1407 self.processingHeaderObj = ProcessingHeader()
1408 1408
1409 1409 def hasAllDataInBuffer(self):
1410 1410 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
1411 1411 return 1
1412 1412 return 0
1413 1413
1414 1414
1415 1415 def setBlockDimension(self):
1416 1416 """
1417 1417 Obtiene las formas dimensionales del los subbloques de datos que componen un bloque
1418 1418
1419 1419 Affected:
1420 1420 self.shape_spc_Buffer
1421 1421 self.shape_cspc_Buffer
1422 1422 self.shape_dc_Buffer
1423 1423
1424 1424 Return: None
1425 1425 """
1426 1426 self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
1427 1427 self.processingHeaderObj.nHeights,
1428 1428 self.systemHeaderObj.nChannels)
1429 1429
1430 1430 self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
1431 1431 self.processingHeaderObj.profilesPerBlock,
1432 1432 self.processingHeaderObj.nHeights),
1433 1433 dtype=numpy.dtype('complex'))
1434 1434
1435 1435
1436 1436 def writeBlock(self):
1437 1437 """
1438 1438 Escribe el buffer en el file designado
1439 1439
1440 1440 Affected:
1441 1441 self.profileIndex
1442 1442 self.flagIsNewFile
1443 1443 self.flagIsNewBlock
1444 1444 self.nTotalBlocks
1445 1445 self.blockIndex
1446 1446
1447 1447 Return: None
1448 1448 """
1449 1449 data = numpy.zeros( self.shapeBuffer, self.dtype )
1450 1450
1451 1451 junk = numpy.transpose(self.datablock, (1,2,0))
1452 1452
1453 1453 data['real'] = junk.real
1454 1454 data['imag'] = junk.imag
1455 1455
1456 1456 data = data.reshape( (-1) )
1457 1457
1458 1458 data.tofile( self.fp )
1459 1459
1460 1460 self.datablock.fill(0)
1461 1461
1462 1462 self.profileIndex = 0
1463 1463 self.flagIsNewFile = 0
1464 1464 self.flagIsNewBlock = 1
1465 1465
1466 1466 self.blockIndex += 1
1467 1467 self.nTotalBlocks += 1
1468 1468
1469 1469 def putData(self):
1470 1470 """
1471 1471 Setea un bloque de datos y luego los escribe en un file
1472 1472
1473 1473 Affected:
1474 1474 self.flagIsNewBlock
1475 1475 self.profileIndex
1476 1476
1477 1477 Return:
1478 1478 0 : Si no hay data o no hay mas files que puedan escribirse
1479 1479 1 : Si se escribio la data de un bloque en un file
1480 1480 """
1481 1481 if self.dataOut.flagNoData:
1482 1482 return 0
1483 1483
1484 1484 self.flagIsNewBlock = 0
1485 1485
1486 1486 if self.dataOut.flagTimeBlock:
1487 1487
1488 1488 self.datablock.fill(0)
1489 1489 self.profileIndex = 0
1490 1490 self.setNextFile()
1491 1491
1492 1492 if self.profileIndex == 0:
1493 1493 self.getBasicHeader()
1494 1494
1495 1495 self.datablock[:,self.profileIndex,:] = self.dataOut.data
1496 1496
1497 1497 self.profileIndex += 1
1498 1498
1499 1499 if self.hasAllDataInBuffer():
1500 1500 #if self.flagIsNewFile:
1501 1501 self.writeNextBlock()
1502 1502 # self.getDataHeader()
1503 1503
1504 1504 if self.flagNoMoreFiles:
1505 1505 #print 'Process finished'
1506 1506 return 0
1507 1507
1508 1508 return 1
1509 1509
1510 1510 def __getProcessFlags(self):
1511 1511
1512 1512 processFlags = 0
1513 1513
1514 1514 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1515 1515 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1516 1516 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1517 1517 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1518 1518 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1519 1519 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1520 1520
1521 1521 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1522 1522
1523 1523
1524 1524
1525 1525 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
1526 1526 PROCFLAG.DATATYPE_SHORT,
1527 1527 PROCFLAG.DATATYPE_LONG,
1528 1528 PROCFLAG.DATATYPE_INT64,
1529 1529 PROCFLAG.DATATYPE_FLOAT,
1530 1530 PROCFLAG.DATATYPE_DOUBLE]
1531 1531
1532 1532
1533 1533 for index in range(len(dtypeList)):
1534 1534 if self.dataOut.dtype == dtypeList[index]:
1535 1535 dtypeValue = datatypeValueList[index]
1536 1536 break
1537 1537
1538 1538 processFlags += dtypeValue
1539 1539
1540 1540 if self.dataOut.flagDecodeData:
1541 1541 processFlags += PROCFLAG.DECODE_DATA
1542 1542
1543 1543 if self.dataOut.flagDeflipData:
1544 1544 processFlags += PROCFLAG.DEFLIP_DATA
1545 1545
1546 1546 if self.dataOut.code != None:
1547 1547 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
1548 1548
1549 1549 if self.dataOut.nCohInt > 1:
1550 1550 processFlags += PROCFLAG.COHERENT_INTEGRATION
1551 1551
1552 1552 return processFlags
1553 1553
1554 1554
1555 1555 def __getBlockSize(self):
1556 1556 '''
1557 1557 Este metodos determina el cantidad de bytes para un bloque de datos de tipo Voltage
1558 1558 '''
1559 1559
1560 1560 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
1561 1561 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
1562 1562 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
1563 1563 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
1564 1564 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
1565 1565 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
1566 1566
1567 1567 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
1568 1568 datatypeValueList = [1,2,4,8,4,8]
1569 1569 for index in range(len(dtypeList)):
1570 1570 if self.dataOut.dtype == dtypeList[index]:
1571 1571 datatypeValue = datatypeValueList[index]
1572 1572 break
1573 1573
1574 1574 blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.dataOut.nProfiles * datatypeValue * 2)
1575 1575
1576 1576 return blocksize
1577 1577
1578 1578 def getDataHeader(self):
1579 1579
1580 1580 """
1581 1581 Obtiene una copia del First Header
1582 1582
1583 1583 Affected:
1584 1584 self.systemHeaderObj
1585 1585 self.radarControllerHeaderObj
1586 1586 self.dtype
1587 1587
1588 1588 Return:
1589 1589 None
1590 1590 """
1591 1591
1592 1592 self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
1593 1593 self.systemHeaderObj.nChannels = self.dataOut.nChannels
1594 1594 self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()
1595 1595
1596 1596 self.getBasicHeader()
1597 1597
1598 1598 processingHeaderSize = 40 # bytes
1599 1599 self.processingHeaderObj.dtype = 0 # Voltage
1600 1600 self.processingHeaderObj.blockSize = self.__getBlockSize()
1601 1601 self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
1602 1602 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
1603 1603 self.processingHeaderObj.nWindows = 1 #podria ser 1 o self.dataOut.processingHeaderObj.nWindows
1604 1604 self.processingHeaderObj.processFlags = self.__getProcessFlags()
1605 1605 self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
1606 1606 self.processingHeaderObj.nIncohInt = 1 # Cuando la data de origen es de tipo Voltage
1607 1607 self.processingHeaderObj.totalSpectra = 0 # Cuando la data de origen es de tipo Voltage
1608 1608
1609 1609 if self.dataOut.code != None:
1610 1610 self.processingHeaderObj.code = self.dataOut.code
1611 1611 self.processingHeaderObj.nCode = self.dataOut.nCode
1612 1612 self.processingHeaderObj.nBaud = self.dataOut.nBaud
1613 1613 codesize = int(8 + 4 * self.dataOut.nCode * self.dataOut.nBaud)
1614 1614 processingHeaderSize += codesize
1615 1615
1616 1616 if self.processingHeaderObj.nWindows != 0:
1617 1617 self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
1618 1618 self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
1619 1619 self.processingHeaderObj.nHeights = self.dataOut.nHeights
1620 1620 self.processingHeaderObj.samplesWin = self.dataOut.nHeights
1621 1621 processingHeaderSize += 12
1622 1622
1623 1623 self.processingHeaderObj.size = processingHeaderSize
1624 1624
1625 1625 class SpectraReader(JRODataReader):
1626 1626 """
1627 1627 Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura
1628 1628 de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones)
1629 1629 son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel.
1630 1630
1631 1631 paresCanalesIguales * alturas * perfiles (Self Spectra)
1632 1632 paresCanalesDiferentes * alturas * perfiles (Cross Spectra)
1633 1633 canales * alturas (DC Channels)
1634 1634
1635 1635 Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader,
1636 1636 RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la
1637 1637 cabecera de datos (metadata), y el cuarto (Spectra) para obtener y almacenar un bloque de
1638 1638 datos desde el "buffer" cada vez que se ejecute el metodo "getData".
1639 1639
1640 1640 Example:
1641 1641 dpath = "/home/myuser/data"
1642 1642
1643 1643 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
1644 1644
1645 1645 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
1646 1646
1647 1647 readerObj = SpectraReader()
1648 1648
1649 1649 readerObj.setup(dpath, startTime, endTime)
1650 1650
1651 1651 while(True):
1652 1652
1653 1653 readerObj.getData()
1654 1654
1655 1655 print readerObj.data_spc
1656 1656
1657 1657 print readerObj.data_cspc
1658 1658
1659 1659 print readerObj.data_dc
1660 1660
1661 1661 if readerObj.flagNoMoreFiles:
1662 1662 break
1663 1663
1664 1664 """
1665 1665
1666 1666 pts2read_SelfSpectra = 0
1667 1667
1668 1668 pts2read_CrossSpectra = 0
1669 1669
1670 1670 pts2read_DCchannels = 0
1671 1671
1672 1672 ext = ".pdata"
1673 1673
1674 1674 optchar = "P"
1675 1675
1676 1676 dataOut = None
1677 1677
1678 1678 nRdChannels = None
1679 1679
1680 1680 nRdPairs = None
1681 1681
1682 1682 rdPairList = []
1683 1683
1684 1684
1685 1685 def __init__(self):
1686 1686 """
1687 1687 Inicializador de la clase SpectraReader para la lectura de datos de espectros.
1688 1688
1689 1689 Inputs:
1690 1690 dataOut : Objeto de la clase Spectra. Este objeto sera utilizado para
1691 1691 almacenar un perfil de datos cada vez que se haga un requerimiento
1692 1692 (getData). El perfil sera obtenido a partir del buffer de datos,
1693 1693 si el buffer esta vacio se hara un nuevo proceso de lectura de un
1694 1694 bloque de datos.
1695 1695 Si este parametro no es pasado se creara uno internamente.
1696 1696
1697 1697 Affected:
1698 1698 self.dataOut
1699 1699
1700 1700 Return : None
1701 1701 """
1702 1702
1703 1703 self.isConfig = False
1704 1704
1705 1705 self.pts2read_SelfSpectra = 0
1706 1706
1707 1707 self.pts2read_CrossSpectra = 0
1708 1708
1709 1709 self.pts2read_DCchannels = 0
1710 1710
1711 1711 self.datablock = None
1712 1712
1713 1713 self.utc = None
1714 1714
1715 1715 self.ext = ".pdata"
1716 1716
1717 1717 self.optchar = "P"
1718 1718
1719 1719 self.basicHeaderObj = BasicHeader()
1720 1720
1721 1721 self.systemHeaderObj = SystemHeader()
1722 1722
1723 1723 self.radarControllerHeaderObj = RadarControllerHeader()
1724 1724
1725 1725 self.processingHeaderObj = ProcessingHeader()
1726 1726
1727 1727 self.online = 0
1728 1728
1729 1729 self.fp = None
1730 1730
1731 1731 self.idFile = None
1732 1732
1733 1733 self.dtype = None
1734 1734
1735 1735 self.fileSizeByHeader = None
1736 1736
1737 1737 self.filenameList = []
1738 1738
1739 1739 self.filename = None
1740 1740
1741 1741 self.fileSize = None
1742 1742
1743 1743 self.firstHeaderSize = 0
1744 1744
1745 1745 self.basicHeaderSize = 24
1746 1746
1747 1747 self.pathList = []
1748 1748
1749 1749 self.lastUTTime = 0
1750 1750
1751 1751 self.maxTimeStep = 30
1752 1752
1753 1753 self.flagNoMoreFiles = 0
1754 1754
1755 1755 self.set = 0
1756 1756
1757 1757 self.path = None
1758 1758
1759 1759 self.delay = 3 #seconds
1760 1760
1761 1761 self.nTries = 3 #quantity tries
1762 1762
1763 1763 self.nFiles = 3 #number of files for searching
1764 1764
1765 1765 self.nReadBlocks = 0
1766 1766
1767 1767 self.flagIsNewFile = 1
1768 1768
1769 1769 self.ippSeconds = 0
1770 1770
1771 1771 self.flagTimeBlock = 0
1772 1772
1773 1773 self.flagIsNewBlock = 0
1774 1774
1775 1775 self.nTotalBlocks = 0
1776 1776
1777 1777 self.blocksize = 0
1778 1778
1779 dataOut = self.createObjByDefault()
1779 self.dataOut = self.createObjByDefault()
1780 1780
1781 1781
1782 1782 def createObjByDefault(self):
1783 1783
1784 1784 dataObj = Spectra()
1785 1785
1786 1786 return dataObj
1787 1787
1788 1788 def __hasNotDataInBuffer(self):
1789 1789 return 1
1790 1790
1791 1791
1792 1792 def getBlockDimension(self):
1793 1793 """
1794 1794 Obtiene la cantidad de puntos a leer por cada bloque de datos
1795 1795
1796 1796 Affected:
1797 1797 self.nRdChannels
1798 1798 self.nRdPairs
1799 1799 self.pts2read_SelfSpectra
1800 1800 self.pts2read_CrossSpectra
1801 1801 self.pts2read_DCchannels
1802 1802 self.blocksize
1803 1803 self.dataOut.nChannels
1804 1804 self.dataOut.nPairs
1805 1805
1806 1806 Return:
1807 1807 None
1808 1808 """
1809 1809 self.nRdChannels = 0
1810 1810 self.nRdPairs = 0
1811 1811 self.rdPairList = []
1812 1812
1813 1813 for i in range(0, self.processingHeaderObj.totalSpectra*2, 2):
1814 1814 if self.processingHeaderObj.spectraComb[i] == self.processingHeaderObj.spectraComb[i+1]:
1815 1815 self.nRdChannels = self.nRdChannels + 1 #par de canales iguales
1816 1816 else:
1817 1817 self.nRdPairs = self.nRdPairs + 1 #par de canales diferentes
1818 1818 self.rdPairList.append((self.processingHeaderObj.spectraComb[i], self.processingHeaderObj.spectraComb[i+1]))
1819 1819
1820 1820 pts2read = self.processingHeaderObj.nHeights * self.processingHeaderObj.profilesPerBlock
1821 1821
1822 1822 self.pts2read_SelfSpectra = int(self.nRdChannels * pts2read)
1823 1823 self.blocksize = self.pts2read_SelfSpectra
1824 1824
1825 1825 if self.processingHeaderObj.flag_cspc:
1826 1826 self.pts2read_CrossSpectra = int(self.nRdPairs * pts2read)
1827 1827 self.blocksize += self.pts2read_CrossSpectra
1828 1828
1829 1829 if self.processingHeaderObj.flag_dc:
1830 1830 self.pts2read_DCchannels = int(self.systemHeaderObj.nChannels * self.processingHeaderObj.nHeights)
1831 1831 self.blocksize += self.pts2read_DCchannels
1832 1832
1833 1833 # self.blocksize = self.pts2read_SelfSpectra + self.pts2read_CrossSpectra + self.pts2read_DCchannels
1834 1834
1835 1835
1836 1836 def readBlock(self):
1837 1837 """
1838 1838 Lee el bloque de datos desde la posicion actual del puntero del archivo
1839 1839 (self.fp) y actualiza todos los parametros relacionados al bloque de datos
1840 1840 (metadata + data). La data leida es almacenada en el buffer y el contador del buffer
1841 1841 es seteado a 0
1842 1842
1843 1843 Return: None
1844 1844
1845 1845 Variables afectadas:
1846 1846
1847 1847 self.flagIsNewFile
1848 1848 self.flagIsNewBlock
1849 1849 self.nTotalBlocks
1850 1850 self.data_spc
1851 1851 self.data_cspc
1852 1852 self.data_dc
1853 1853
1854 1854 Exceptions:
1855 1855 Si un bloque leido no es un bloque valido
1856 1856 """
1857 1857 blockOk_flag = False
1858 1858 fpointer = self.fp.tell()
1859 1859
1860 1860 spc = numpy.fromfile( self.fp, self.dtype[0], self.pts2read_SelfSpectra )
1861 1861 spc = spc.reshape( (self.nRdChannels, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1862 1862
1863 1863 if self.processingHeaderObj.flag_cspc:
1864 1864 cspc = numpy.fromfile( self.fp, self.dtype, self.pts2read_CrossSpectra )
1865 1865 cspc = cspc.reshape( (self.nRdPairs, self.processingHeaderObj.nHeights, self.processingHeaderObj.profilesPerBlock) ) #transforma a un arreglo 3D
1866 1866
1867 1867 if self.processingHeaderObj.flag_dc:
1868 1868 dc = numpy.fromfile( self.fp, self.dtype, self.pts2read_DCchannels ) #int(self.processingHeaderObj.nHeights*self.systemHeaderObj.nChannels) )
1869 1869 dc = dc.reshape( (self.systemHeaderObj.nChannels, self.processingHeaderObj.nHeights) ) #transforma a un arreglo 2D
1870 1870
1871 1871
1872 1872 if not(self.processingHeaderObj.shif_fft):
1873 1873 spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1874 1874
1875 1875 if self.processingHeaderObj.flag_cspc:
1876 1876 cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
1877 1877
1878 1878
1879 1879 spc = numpy.transpose( spc, (0,2,1) )
1880 1880 self.data_spc = spc
1881 1881
1882 1882 if self.processingHeaderObj.flag_cspc:
1883 1883 cspc = numpy.transpose( cspc, (0,2,1) )
1884 1884 self.data_cspc = cspc['real'] + cspc['imag']*1j
1885 1885 else:
1886 1886 self.data_cspc = None
1887 1887
1888 1888 if self.processingHeaderObj.flag_dc:
1889 1889 self.data_dc = dc['real'] + dc['imag']*1j
1890 1890 else:
1891 1891 self.data_dc = None
1892 1892
1893 1893 self.flagIsNewFile = 0
1894 1894 self.flagIsNewBlock = 1
1895 1895
1896 1896 self.nTotalBlocks += 1
1897 1897 self.nReadBlocks += 1
1898 1898
1899 1899 return 1
1900 1900
1901 1901
1902 1902 def getData(self):
1903 1903 """
1904 1904 Copia el buffer de lectura a la clase "Spectra",
1905 1905 con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de
1906 1906 lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock"
1907 1907
1908 1908 Return:
1909 1909 0 : Si no hay mas archivos disponibles
1910 1910 1 : Si hizo una buena copia del buffer
1911 1911
1912 1912 Affected:
1913 1913 self.dataOut
1914 1914
1915 1915 self.flagTimeBlock
1916 1916 self.flagIsNewBlock
1917 1917 """
1918 1918
1919 1919 if self.flagNoMoreFiles: return 0
1920 1920
1921 1921 self.flagTimeBlock = 0
1922 1922 self.flagIsNewBlock = 0
1923 1923
1924 1924 if self.__hasNotDataInBuffer():
1925 1925
1926 1926 if not( self.readNextBlock() ):
1927 1927 return 0
1928 1928
1929 1929 # self.updateDataHeader()
1930 1930
1931 1931 if self.flagNoMoreFiles == 1:
1932 1932 print 'Process finished'
1933 1933 return 0
1934 1934
1935 1935 #data es un numpy array de 3 dmensiones (perfiles, alturas y canales)
1936 1936
1937 1937 if self.data_dc == None:
1938 1938 self.dataOut.flagNoData = True
1939 1939 return 0
1940 1940
1941 1941
1942 1942 self.dataOut.data_spc = self.data_spc
1943 1943
1944 1944 self.dataOut.data_cspc = self.data_cspc
1945 1945
1946 1946 self.dataOut.data_dc = self.data_dc
1947 1947
1948 1948 self.dataOut.flagTimeBlock = self.flagTimeBlock
1949 1949
1950 1950 self.dataOut.flagNoData = False
1951 1951
1952 1952 self.dataOut.dtype = self.dtype
1953 1953
1954 1954 self.dataOut.nChannels = self.nRdChannels
1955 1955
1956 1956 self.dataOut.nPairs = self.nRdPairs
1957 1957
1958 1958 self.dataOut.pairsList = self.rdPairList
1959 1959
1960 1960 self.dataOut.nHeights = self.processingHeaderObj.nHeights
1961 1961
1962 1962 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
1963 1963
1964 1964 self.dataOut.nFFTPoints = self.processingHeaderObj.profilesPerBlock
1965 1965
1966 1966 self.dataOut.nIncohInt = self.processingHeaderObj.nIncohInt
1967 1967
1968 1968
1969 1969 xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight
1970 1970
1971 1971 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight)
1972 1972
1973 1973 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
1974 1974
1975 1975 self.dataOut.channelIndexList = range(self.systemHeaderObj.nChannels)
1976 1976
1977 1977 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000.#+ self.profileIndex * self.ippSeconds
1978 1978
1979 1979 self.dataOut.ippSeconds = self.ippSeconds
1980 1980
1981 1981 self.dataOut.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt * self.processingHeaderObj.nIncohInt * self.dataOut.nFFTPoints
1982 1982
1983 1983 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
1984 1984
1985 1985 # self.profileIndex += 1
1986 1986
1987 1987 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
1988 1988
1989 1989 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
1990 1990
1991 1991 return self.dataOut.data_spc
1992 1992
1993 1993
class SpectraWriter(JRODataWriter):

    """
    Writes spectra data to processed-data (.pdata) files. Data is always
    written one block at a time.
    """

    # output file extension
    ext = ".pdata"

    # file-type character used when building filenames
    optchar = "P"

    # shape of the self-spectra sub-block
    shape_spc_Buffer = None

    # shape of the cross-spectra sub-block
    shape_cspc_Buffer = None

    # shape of the DC-channels sub-block
    shape_dc_Buffer = None

    # self-spectra write buffer
    data_spc = None

    # cross-spectra write buffer
    data_cspc = None

    # DC-channels write buffer
    data_dc = None

#    dataOut = None
2018 2018
2019 2019 def __init__(self):
2020 2020 """
2021 2021 Inicializador de la clase SpectraWriter para la escritura de datos de espectros.
2022 2022
2023 2023 Affected:
2024 2024 self.dataOut
2025 2025 self.basicHeaderObj
2026 2026 self.systemHeaderObj
2027 2027 self.radarControllerHeaderObj
2028 2028 self.processingHeaderObj
2029 2029
2030 2030 Return: None
2031 2031 """
2032 2032
2033 2033 self.isConfig = False
2034 2034
2035 2035 self.nTotalBlocks = 0
2036 2036
2037 2037 self.data_spc = None
2038 2038
2039 2039 self.data_cspc = None
2040 2040
2041 2041 self.data_dc = None
2042 2042
2043 2043 self.fp = None
2044 2044
2045 2045 self.flagIsNewFile = 1
2046 2046
2047 2047 self.nTotalBlocks = 0
2048 2048
2049 2049 self.flagIsNewBlock = 0
2050 2050
2051 2051 self.flagNoMoreFiles = 0
2052 2052
2053 2053 self.setFile = None
2054 2054
2055 2055 self.dtype = None
2056 2056
2057 2057 self.path = None
2058 2058
2059 2059 self.noMoreFiles = 0
2060 2060
2061 2061 self.filename = None
2062 2062
2063 2063 self.basicHeaderObj = BasicHeader()
2064 2064
2065 2065 self.systemHeaderObj = SystemHeader()
2066 2066
2067 2067 self.radarControllerHeaderObj = RadarControllerHeader()
2068 2068
2069 2069 self.processingHeaderObj = ProcessingHeader()
2070 2070
2071 2071
    def hasAllDataInBuffer(self):
        # Spectra data arrives one complete block at a time, so the buffer
        # is always ready to be written.
        return 1
2074 2074
2075 2075
2076 2076 def setBlockDimension(self):
2077 2077 """
2078 2078 Obtiene las formas dimensionales del los subbloques de datos que componen un bloque
2079 2079
2080 2080 Affected:
2081 2081 self.shape_spc_Buffer
2082 2082 self.shape_cspc_Buffer
2083 2083 self.shape_dc_Buffer
2084 2084
2085 2085 Return: None
2086 2086 """
2087 2087 self.shape_spc_Buffer = (self.dataOut.nChannels,
2088 2088 self.processingHeaderObj.nHeights,
2089 2089 self.processingHeaderObj.profilesPerBlock)
2090 2090
2091 2091 self.shape_cspc_Buffer = (self.dataOut.nPairs,
2092 2092 self.processingHeaderObj.nHeights,
2093 2093 self.processingHeaderObj.profilesPerBlock)
2094 2094
2095 2095 self.shape_dc_Buffer = (self.dataOut.nChannels,
2096 2096 self.processingHeaderObj.nHeights)
2097 2097
2098 2098
2099 2099 def writeBlock(self):
2100 2100 """
2101 2101 Escribe el buffer en el file designado
2102 2102
2103 2103 Affected:
2104 2104 self.data_spc
2105 2105 self.data_cspc
2106 2106 self.data_dc
2107 2107 self.flagIsNewFile
2108 2108 self.flagIsNewBlock
2109 2109 self.nTotalBlocks
2110 2110 self.nWriteBlocks
2111 2111
2112 2112 Return: None
2113 2113 """
2114 2114
2115 2115 spc = numpy.transpose( self.data_spc, (0,2,1) )
2116 2116 if not( self.processingHeaderObj.shif_fft ):
2117 2117 spc = numpy.roll( spc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
2118 2118 data = spc.reshape((-1))
2119 2119 data.tofile(self.fp)
2120 2120
2121 2121 if self.data_cspc != None:
2122 2122 data = numpy.zeros( self.shape_cspc_Buffer, self.dtype )
2123 2123 cspc = numpy.transpose( self.data_cspc, (0,2,1) )
2124 2124 if not( self.processingHeaderObj.shif_fft ):
2125 2125 cspc = numpy.roll( cspc, self.processingHeaderObj.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones
2126 2126 data['real'] = cspc.real
2127 2127 data['imag'] = cspc.imag
2128 2128 data = data.reshape((-1))
2129 2129 data.tofile(self.fp)
2130 2130
2131 2131 if self.data_dc != None:
2132 2132 data = numpy.zeros( self.shape_dc_Buffer, self.dtype )
2133 2133 dc = self.data_dc
2134 2134 data['real'] = dc.real
2135 2135 data['imag'] = dc.imag
2136 2136 data = data.reshape((-1))
2137 2137 data.tofile(self.fp)
2138 2138
2139 2139 self.data_spc.fill(0)
2140 2140 self.data_dc.fill(0)
2141 2141 if self.data_cspc != None:
2142 2142 self.data_cspc.fill(0)
2143 2143
2144 2144 self.flagIsNewFile = 0
2145 2145 self.flagIsNewBlock = 1
2146 2146 self.nTotalBlocks += 1
2147 2147 self.nWriteBlocks += 1
2148 2148 self.blockIndex += 1
2149 2149
2150 2150
2151 2151 def putData(self):
2152 2152 """
2153 2153 Setea un bloque de datos y luego los escribe en un file
2154 2154
2155 2155 Affected:
2156 2156 self.data_spc
2157 2157 self.data_cspc
2158 2158 self.data_dc
2159 2159
2160 2160 Return:
2161 2161 0 : Si no hay data o no hay mas files que puedan escribirse
2162 2162 1 : Si se escribio la data de un bloque en un file
2163 2163 """
2164 2164
2165 2165 if self.dataOut.flagNoData:
2166 2166 return 0
2167 2167
2168 2168 self.flagIsNewBlock = 0
2169 2169
2170 2170 if self.dataOut.flagTimeBlock:
2171 2171 self.data_spc.fill(0)
2172 2172 self.data_cspc.fill(0)
2173 2173 self.data_dc.fill(0)
2174 2174 self.setNextFile()
2175 2175
2176 2176 if self.flagIsNewFile == 0:
2177 2177 self.getBasicHeader()
2178 2178
2179 2179 self.data_spc = self.dataOut.data_spc
2180 2180 self.data_cspc = self.dataOut.data_cspc
2181 2181 self.data_dc = self.dataOut.data_dc
2182 2182
2183 2183 # #self.processingHeaderObj.dataBlocksPerFile)
2184 2184 if self.hasAllDataInBuffer():
2185 2185 # self.getDataHeader()
2186 2186 self.writeNextBlock()
2187 2187
2188 2188 if self.flagNoMoreFiles:
2189 2189 #print 'Process finished'
2190 2190 return 0
2191 2191
2192 2192 return 1
2193 2193
2194 2194
2195 2195 def __getProcessFlags(self):
2196 2196
2197 2197 processFlags = 0
2198 2198
2199 2199 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
2200 2200 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
2201 2201 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
2202 2202 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
2203 2203 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
2204 2204 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
2205 2205
2206 2206 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
2207 2207
2208 2208
2209 2209
2210 2210 datatypeValueList = [PROCFLAG.DATATYPE_CHAR,
2211 2211 PROCFLAG.DATATYPE_SHORT,
2212 2212 PROCFLAG.DATATYPE_LONG,
2213 2213 PROCFLAG.DATATYPE_INT64,
2214 2214 PROCFLAG.DATATYPE_FLOAT,
2215 2215 PROCFLAG.DATATYPE_DOUBLE]
2216 2216
2217 2217
2218 2218 for index in range(len(dtypeList)):
2219 2219 if self.dataOut.dtype == dtypeList[index]:
2220 2220 dtypeValue = datatypeValueList[index]
2221 2221 break
2222 2222
2223 2223 processFlags += dtypeValue
2224 2224
2225 2225 if self.dataOut.flagDecodeData:
2226 2226 processFlags += PROCFLAG.DECODE_DATA
2227 2227
2228 2228 if self.dataOut.flagDeflipData:
2229 2229 processFlags += PROCFLAG.DEFLIP_DATA
2230 2230
2231 2231 if self.dataOut.code != None:
2232 2232 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
2233 2233
2234 2234 if self.dataOut.nIncohInt > 1:
2235 2235 processFlags += PROCFLAG.INCOHERENT_INTEGRATION
2236 2236
2237 2237 if self.dataOut.data_dc != None:
2238 2238 processFlags += PROCFLAG.SAVE_CHANNELS_DC
2239 2239
2240 2240 return processFlags
2241 2241
2242 2242
2243 2243 def __getBlockSize(self):
2244 2244 '''
2245 2245 Este metodos determina el cantidad de bytes para un bloque de datos de tipo Spectra
2246 2246 '''
2247 2247
2248 2248 dtype0 = numpy.dtype([('real','<i1'),('imag','<i1')])
2249 2249 dtype1 = numpy.dtype([('real','<i2'),('imag','<i2')])
2250 2250 dtype2 = numpy.dtype([('real','<i4'),('imag','<i4')])
2251 2251 dtype3 = numpy.dtype([('real','<i8'),('imag','<i8')])
2252 2252 dtype4 = numpy.dtype([('real','<f4'),('imag','<f4')])
2253 2253 dtype5 = numpy.dtype([('real','<f8'),('imag','<f8')])
2254 2254
2255 2255 dtypeList = [dtype0, dtype1, dtype2, dtype3, dtype4, dtype5]
2256 2256 datatypeValueList = [1,2,4,8,4,8]
2257 2257 for index in range(len(dtypeList)):
2258 2258 if self.dataOut.dtype == dtypeList[index]:
2259 2259 datatypeValue = datatypeValueList[index]
2260 2260 break
2261 2261
2262 2262
2263 2263 pts2write = self.dataOut.nHeights * self.dataOut.nFFTPoints
2264 2264
2265 2265 pts2write_SelfSpectra = int(self.dataOut.nChannels * pts2write)
2266 2266 blocksize = (pts2write_SelfSpectra*datatypeValue)
2267 2267
2268 2268 if self.dataOut.data_cspc != None:
2269 2269 pts2write_CrossSpectra = int(self.dataOut.nPairs * pts2write)
2270 2270 blocksize += (pts2write_CrossSpectra*datatypeValue*2)
2271 2271
2272 2272 if self.dataOut.data_dc != None:
2273 2273 pts2write_DCchannels = int(self.dataOut.nChannels * self.dataOut.nHeights)
2274 2274 blocksize += (pts2write_DCchannels*datatypeValue*2)
2275 2275
2276 2276 blocksize = blocksize #* datatypeValue * 2 #CORREGIR ESTO
2277 2277
2278 2278 return blocksize
2279 2279
2280 2280 def getDataHeader(self):
2281 2281
2282 2282 """
2283 2283 Obtiene una copia del First Header
2284 2284
2285 2285 Affected:
2286 2286 self.systemHeaderObj
2287 2287 self.radarControllerHeaderObj
2288 2288 self.dtype
2289 2289
2290 2290 Return:
2291 2291 None
2292 2292 """
2293 2293
2294 2294 self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
2295 2295 self.systemHeaderObj.nChannels = self.dataOut.nChannels
2296 2296 self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()
2297 2297
2298 2298 self.getBasicHeader()
2299 2299
2300 2300 processingHeaderSize = 40 # bytes
2301 2301 self.processingHeaderObj.dtype = 0 # Voltage
2302 2302 self.processingHeaderObj.blockSize = self.__getBlockSize()
2303 2303 self.processingHeaderObj.profilesPerBlock = self.dataOut.nFFTPoints
2304 2304 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
2305 2305 self.processingHeaderObj.nWindows = 1 #podria ser 1 o self.dataOut.processingHeaderObj.nWindows
2306 2306 self.processingHeaderObj.processFlags = self.__getProcessFlags()
2307 2307 self.processingHeaderObj.nCohInt = self.dataOut.nCohInt# Se requiere para determinar el valor de timeInterval
2308 2308 self.processingHeaderObj.nIncohInt = self.dataOut.nIncohInt
2309 2309 self.processingHeaderObj.totalSpectra = self.dataOut.nPairs + self.dataOut.nChannels
2310 2310
2311 2311 if self.processingHeaderObj.totalSpectra > 0:
2312 2312 channelList = []
2313 2313 for channel in range(self.dataOut.nChannels):
2314 2314 channelList.append(channel)
2315 2315 channelList.append(channel)
2316 2316
2317 2317 pairsList = []
2318 2318 for pair in self.dataOut.pairsList:
2319 2319 pairsList.append(pair[0])
2320 2320 pairsList.append(pair[1])
2321 2321 spectraComb = channelList + pairsList
2322 2322 spectraComb = numpy.array(spectraComb,dtype="u1")
2323 2323 self.processingHeaderObj.spectraComb = spectraComb
2324 2324 sizeOfSpcComb = len(spectraComb)
2325 2325 processingHeaderSize += sizeOfSpcComb
2326 2326
2327 2327 if self.dataOut.code != None:
2328 2328 self.processingHeaderObj.code = self.dataOut.code
2329 2329 self.processingHeaderObj.nCode = self.dataOut.nCode
2330 2330 self.processingHeaderObj.nBaud = self.dataOut.nBaud
2331 2331 nCodeSize = 4 # bytes
2332 2332 nBaudSize = 4 # bytes
2333 2333 codeSize = 4 # bytes
2334 2334 sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOut.nCode * self.dataOut.nBaud)
2335 2335 processingHeaderSize += sizeOfCode
2336 2336
2337 2337 if self.processingHeaderObj.nWindows != 0:
2338 2338 self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
2339 2339 self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
2340 2340 self.processingHeaderObj.nHeights = self.dataOut.nHeights
2341 2341 self.processingHeaderObj.samplesWin = self.dataOut.nHeights
2342 2342 sizeOfFirstHeight = 4
2343 2343 sizeOfdeltaHeight = 4
2344 2344 sizeOfnHeights = 4
2345 2345 sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights)*self.processingHeaderObj.nWindows
2346 2346 processingHeaderSize += sizeOfWindows
2347 2347
2348 2348 self.processingHeaderObj.size = processingHeaderSize
2349 2349
class SpectraHeisWriter():
    """
    Writes HEIS spectra data into FITS files, one file per processed
    block, using the FITS helper class defined in this module.
    """

    # legacy counter attribute, kept for backward compatibility
    i=0

    def __init__(self, dataOut):
        """
        dataOut : data holder whose spectra (data_spc) will be written.
        """
        self.wrObj = FITS()
        self.dataOut = dataOut

    @staticmethod
    def isNumber(str):
        """
        Check whether a string can be converted to a number.

        Input:
            str : string to analyze (name kept for backward compatibility,
                  although it shadows the builtin)

        Return:
            True : the string is numeric
            False : the string is not numeric
        """
        # NOTE: declared as @staticmethod; the original definition lacked
        # "self" and could not be called through an instance.
        try:
            float( str )
            return True
        except:
            return False

    def setup(self, wrpath,):
        """
        Define the output directory, creating it if it does not exist,
        and reset the per-run file counter.
        """
        if not(os.path.exists(wrpath)):
            os.mkdir(wrpath)

        self.wrpath = wrpath
        self.setFile = 0

    def putData(self):
        """
        Write the current 8-channel spectra (in dB) plus a frequency axis
        into a new FITS file under a D<year><doy> subfolder of self.wrpath.

        Return: 1 on success
        """
        name = time.localtime( self.dataOut.utctime)
        ext = ".fits"
        subfolder = 'D%4.4d%3.3d' % (name.tm_year,name.tm_yday)

        doypath = os.path.join( self.wrpath, subfolder )
        if not( os.path.exists(doypath) ):
            os.mkdir(doypath)
        self.setFile += 1
        # "filen" avoids shadowing the builtin "file"
        filen = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year,name.tm_yday,self.setFile,ext)

        filename = os.path.join(self.wrpath, subfolder, filen)

        # frequency axis; assumes nHeights equals nFFTPoints for HEIS
        # data -- TODO confirm against the caller
        freq = numpy.arange(-1*self.dataOut.nHeights/2., self.dataOut.nHeights/2.)/(2*self.dataOut.ippSeconds)

        fmt = str(self.dataOut.nFFTPoints)+'E'
        col1 = self.wrObj.setColF(name="freq", format=fmt, array=freq)
        # one power column (in dB) per channel, replacing 8 duplicated lines
        powerCols = [self.wrObj.writeData(name="P_Ch%d" % (i+1),
                                          format=fmt,
                                          data=10*numpy.log10(self.dataOut.data_spc[i,:]))
                     for i in range(8)]
        n = self.dataOut.data_spc[6,:]
        a = self.wrObj.cFImage(n)
        b = self.wrObj.Ctable(col1, *powerCols)
        self.wrObj.CFile(a,b)
        self.wrObj.wFile(filename)
        return 1
2421 2421
class FITS:
    """
    Thin wrapper around pyfits used to assemble and write a FITS file
    made of a primary image HDU plus one binary table of columns.
    """

    # last column name / format / payload handled by this wrapper
    name = None
    format = None
    array = None
    data = None
    # HDU list built by CFile and written out by wFile
    thdulist = None

    def __init__(self):
        # no eager state; attributes are filled in by the set*/write* calls
        pass

    def setColF(self, name, format, array):
        """Build a float32 column from *array* and remember it as col1."""
        self.name = name
        self.format = format
        self.array = array
        values = numpy.array([self.array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col1

    def writeHeader(self,):
        """Placeholder: header writing is not implemented."""
        pass

    def writeData(self, name, format, data):
        """Build a float32 column from *data* and remember it as col2."""
        self.name = name
        self.format = format
        self.data = data
        values = numpy.array([self.data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=self.name, format=self.format, array=values)
        return self.col2

    def cFImage(self, n):
        """Create and return the primary HDU holding the array *n*."""
        self.hdu = pyfits.PrimaryHDU(n)
        return self.hdu

    def Ctable(self, col1, col2, col3, col4, col5, col6, col7, col8, col9):
        """Create and return a binary-table HDU from the nine given columns."""
        self.cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
        self.tbhdu = pyfits.new_table(self.cols)
        return self.tbhdu

    def CFile(self, hdu, tbhdu):
        """Group the primary HDU and the table HDU into an HDU list."""
        self.thdulist = pyfits.HDUList([hdu, tbhdu])

    def wFile(self, filename):
        """Write the accumulated HDU list to *filename*."""
        self.thdulist.writeto(filename)
General Comments 0
You need to be logged in to leave comments. Login now