New reading option allows to read by blocks of different sizes
Julio Valdez -
r833:26a5421c91c8
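A minimal usage sketch of the new option (an assumption based on the blocksize/blocktime parameters added to JRODataReader.setup in this diff; the import path, data path and numeric values below are hypothetical):

    import datetime
    from schainpy.model.io.jroIO_voltage import VoltageReader   # import path assumed

    readerObj = VoltageReader()
    readerObj.setup(path='/data/jro',                    # hypothetical data directory
                    startDate=datetime.date(2014, 7, 2),
                    endDate=datetime.date(2014, 7, 2),
                    getblock=True,                       # deliver whole blocks instead of single profiles
                    blocksize=128,                       # new: requested block size (stored in selBlocksize)
                    blocktime=None)                      # new: alternative selection by block time (selBlocktime)
    while not readerObj.flagNoMoreFiles:
        readerObj.getData()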
@@ -1,1709 +1,1723
1 1 '''
2 2 Created on Jul 2, 2014
3 3
4 4 @author: roj-idl71
5 5 '''
6 6 import os
7 7 import sys
8 8 import glob
9 9 import time
10 10 import numpy
11 11 import fnmatch
12 12 import time, datetime
13 13 #import h5py
14 14 import traceback
15 15
16 16 try:
17 17 from gevent import sleep
18 18 except:
19 19 from time import sleep
20 20
21 21 from schainpy.model.data.jroheaderIO import PROCFLAG, BasicHeader, SystemHeader, RadarControllerHeader, ProcessingHeader
22 22 from schainpy.model.data.jroheaderIO import get_dtype_index, get_numpy_dtype, get_procflag_dtype, get_dtype_width
23 23
24 24 LOCALTIME = True
25 25
26 26 def isNumber(cad):
27 27 """
28 28 Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero.
29 29
30 30 Excepciones:
31 31 Si un determinado string no puede ser convertido a numero
32 32 Input:
33 33 str, string al cual se le analiza para determinar si convertible a un numero o no
34 34
35 35 Return:
36 36 True : si el string es uno numerico
37 37 False : no es un string numerico
38 38 """
39 39 try:
40 40 float( cad )
41 41 return True
42 42 except:
43 43 return False
44 44
45 45 def isFileInEpoch(filename, startUTSeconds, endUTSeconds):
46 46 """
47 47 Esta funcion determina si un archivo de datos se encuentra o no dentro del rango de fecha especificado.
48 48
49 49 Inputs:
50 50 filename : nombre completo del archivo de datos en formato Jicamarca (.r)
51 51
52 52 startUTSeconds : fecha inicial del rango seleccionado. La fecha esta dada en
53 53 segundos contados desde 01/01/1970.
54 54 endUTSeconds : fecha final del rango seleccionado. La fecha esta dada en
55 55 segundos contados desde 01/01/1970.
56 56
57 57 Return:
58 58 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
59 59 fecha especificado, de lo contrario retorna False.
60 60
61 61 Excepciones:
62 62 Si el archivo no existe o no puede ser abierto
63 63 Si la cabecera no puede ser leida.
64 64
65 65 """
66 66 basicHeaderObj = BasicHeader(LOCALTIME)
67 67
68 68 try:
69 69 fp = open(filename,'rb')
70 70 except IOError:
71 71 print "The file %s can't be opened" %(filename)
72 72 return 0
73 73
74 74 sts = basicHeaderObj.read(fp)
75 75 fp.close()
76 76
77 77 if not(sts):
78 78 print "Skipping the file %s because it has not a valid header" %(filename)
79 79 return 0
80 80
81 81 if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
82 82 return 0
83 83
84 84 return 1
85 85
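# Usage sketch for isFileInEpoch (hypothetical file name and epoch values):
#
# import calendar, datetime
# startUT = calendar.timegm(datetime.datetime(2014, 7, 2, 0, 0, 0).timetuple())
# endUT = calendar.timegm(datetime.datetime(2014, 7, 3, 0, 0, 0).timetuple())
# isFileInEpoch('/data/d2014183/D2014183000.r', startUT, endUT)  # -> 1 if the first basic header time falls in [startUT, endUT)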
86 86 def isTimeInRange(thisTime, startTime, endTime):
87 87
88 88 if endTime >= startTime:
89 89 if (thisTime < startTime) or (thisTime > endTime):
90 90 return 0
91 91
92 92 return 1
93 93 else:
94 94 if (thisTime < startTime) and (thisTime > endTime):
95 95 return 0
96 96
97 97 return 1
98 98
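# Usage sketch for isTimeInRange (illustrative values). When endTime < startTime the
# range is treated as wrapping past midnight:
#
# import datetime
# isTimeInRange(datetime.time(23, 30), datetime.time(22, 0), datetime.time(2, 0))  # -> 1 (inside 22:00-02:00)
# isTimeInRange(datetime.time(12, 0), datetime.time(22, 0), datetime.time(2, 0))   # -> 0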
99 99 def isFileInTimeRange(filename, startDate, endDate, startTime, endTime):
100 100 """
101 101 Retorna 1 si el archivo de datos se encuentra dentro del rango de horas especificado.
102 102
103 103 Inputs:
104 104 filename : nombre completo del archivo de datos en formato Jicamarca (.r)
105 105
106 106 startDate : fecha inicial del rango seleccionado en formato datetime.date
107 107
108 108 endDate : fecha final del rango seleccionado en formato datetime.date
109 109
110 110 startTime : tiempo inicial del rango seleccionado en formato datetime.time
111 111
112 112 endTime : tiempo final del rango seleccionado en formato datetime.time
113 113
114 114 Return:
115 115 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
116 116 fecha especificado, de lo contrario retorna False.
117 117
118 118 Excepciones:
119 119 Si el archivo no existe o no puede ser abierto
120 120 Si la cabecera no puede ser leida.
121 121
122 122 """
123 123
124 124
125 125 try:
126 126 fp = open(filename,'rb')
127 127 except IOError:
128 128 print "The file %s can't be opened" %(filename)
129 129 return None
130 130
131 131 firstBasicHeaderObj = BasicHeader(LOCALTIME)
132 132 systemHeaderObj = SystemHeader()
133 133 radarControllerHeaderObj = RadarControllerHeader()
134 134 processingHeaderObj = ProcessingHeader()
135 135
136 136 lastBasicHeaderObj = BasicHeader(LOCALTIME)
137 137
138 138 sts = firstBasicHeaderObj.read(fp)
139 139
140 140 if not(sts):
141 141 print "[Reading] Skipping the file %s because it has not a valid header" %(filename)
142 142 return None
143 143
144 144 if not systemHeaderObj.read(fp):
145 145 return None
146 146
147 147 if not radarControllerHeaderObj.read(fp):
148 148 return None
149 149
150 150 if not processingHeaderObj.read(fp):
151 151 return None
152 152
153 153 filesize = os.path.getsize(filename)
154 154
155 155 offset = processingHeaderObj.blockSize + 24 #header size
156 156
157 157 if filesize <= offset:
158 158 print "[Reading] %s: This file has not enough data" %filename
159 159 return None
160 160
161 161 fp.seek(-offset, 2)
162 162
163 163 sts = lastBasicHeaderObj.read(fp)
164 164
165 165 fp.close()
166 166
167 167 thisDatetime = lastBasicHeaderObj.datatime
168 168 thisTime_last_block = thisDatetime.time()
169 169
170 170 thisDatetime = firstBasicHeaderObj.datatime
171 171 thisDate = thisDatetime.date()
172 172 thisTime_first_block = thisDatetime.time()
173 173
174 174 #General case
175 175 # o>>>>>>>>>>>>>><<<<<<<<<<<<<<o
176 176 #-----------o----------------------------o-----------
177 177 # startTime endTime
178 178
179 179 if endTime >= startTime:
180 180 if (thisTime_last_block < startTime) or (thisTime_first_block > endTime):
181 181 return None
182 182
183 183 return thisDatetime
184 184
185 185 #If endTime < startTime then endTime belongs to the next day
186 186
187 187
188 188 #<<<<<<<<<<<o o>>>>>>>>>>>
189 189 #-----------o----------------------------o-----------
190 190 # endTime startTime
191 191
192 192 if (thisDate == startDate) and (thisTime_last_block < startTime):
193 193 return None
194 194
195 195 if (thisDate == endDate) and (thisTime_first_block > endTime):
196 196 return None
197 197
198 198 if (thisTime_last_block < startTime) and (thisTime_first_block > endTime):
199 199 return None
200 200
201 201 return thisDatetime
202 202
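# Usage sketch for isFileInTimeRange (hypothetical path and range). It returns the
# datetime of the first block when the file overlaps the requested window, else None:
#
# import datetime
# isFileInTimeRange('/data/d2015305/D2015305000.r',
#                   datetime.date(2015, 11, 1), datetime.date(2015, 11, 1),
#                   datetime.time(6, 0, 0), datetime.time(18, 0, 0))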
203 203 def isFolderInDateRange(folder, startDate=None, endDate=None):
204 204 """
205 205 Retorna 1 si el archivo de datos se encuentra dentro del rango de horas especificado.
206 206
207 207 Inputs:
208 208 folder : nombre completo del directorio.
209 209 Su formato deberia ser "/path_root/?YYYYDDD"
210 210
211 211 siendo:
212 212 YYYY : Anio (ejemplo 2015)
213 213 DDD : Dia del anio (ejemplo 305)
214 214
215 215 startDate : fecha inicial del rango seleccionado en formato datetime.date
216 216
217 217 endDate : fecha final del rango seleccionado en formato datetime.date
218 218
219 219 Return:
220 220 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
221 221 fecha especificado, de lo contrario retorna False.
222 222 Excepciones:
223 223 Si el directorio no tiene el formato adecuado
224 224 """
225 225
226 226 basename = os.path.basename(folder)
227 227
228 228 if not isRadarFolder(basename):
229 229 print "The folder %s has not the rigth format" %folder
230 230 return 0
231 231
232 232 if startDate and endDate:
233 233 thisDate = getDateFromRadarFolder(basename)
234 234
235 235 if thisDate < startDate:
236 236 return 0
237 237
238 238 if thisDate > endDate:
239 239 return 0
240 240
241 241 return 1
242 242
243 243 def isFileInDateRange(filename, startDate=None, endDate=None):
244 244 """
245 245 Retorna 1 si el archivo de datos se encuentra dentro del rango de horas especificado.
246 246
247 247 Inputs:
248 248 filename : nombre completo del archivo de datos en formato Jicamarca (.r)
249 249
250 250 Su formato deberia ser "?YYYYDDDsss"
251 251
252 252 siendo:
253 253 YYYY : Anio (ejemplo 2015)
254 254 DDD : Dia del anio (ejemplo 305)
255 255 sss : set
256 256
257 257 startDate : fecha inicial del rango seleccionado en formato datetime.date
258 258
259 259 endDate : fecha final del rango seleccionado en formato datetime.date
260 260
261 261 Return:
262 262 Boolean : Retorna True si el archivo de datos contiene datos en el rango de
263 263 fecha especificado, de lo contrario retorna False.
264 264 Excepciones:
265 265 Si el archivo no tiene el formato adecuado
266 266 """
267 267
268 268 basename = os.path.basename(filename)
269 269
270 270 if not isRadarFile(basename):
271 271 print "The filename %s has not the rigth format" %filename
272 272 return 0
273 273
274 274 if startDate and endDate:
275 275 thisDate = getDateFromRadarFile(basename)
276 276
277 277 if thisDate < startDate:
278 278 return 0
279 279
280 280 if thisDate > endDate:
281 281 return 0
282 282
283 283 return 1
284 284
285 285 def getFileFromSet(path, ext, set):
286 286 validFilelist = []
287 287 fileList = os.listdir(path)
288 288
289 289 # 0 1234 567 89A BCDE
290 290 # H YYYY DDD SSS .ext
291 291
292 292 for thisFile in fileList:
293 293 try:
294 294 year = int(thisFile[1:5])
295 295 doy = int(thisFile[5:8])
296 296 except:
297 297 continue
298 298
299 299 if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):
300 300 continue
301 301
302 302 validFilelist.append(thisFile)
303 303
304 304 myfile = fnmatch.filter(validFilelist,'*%4.4d%3.3d%3.3d*'%(year,doy,set))
305 305
306 306 if len(myfile)!= 0:
307 307 return myfile[0]
308 308 else:
309 309 filename = '*%4.4d%3.3d%3.3d%s'%(year,doy,set,ext.lower())
310 310 print 'the filename %s does not exist'%filename
311 311 print '...going to the last file: '
312 312
313 313 if validFilelist:
314 314 validFilelist = sorted( validFilelist, key=str.lower )
315 315 return validFilelist[-1]
316 316
317 317 return None
318 318
319 319 def getlastFileFromPath(path, ext):
320 320 """
321 321 Depura el fileList dejando solo los que cumplan el formato de "PYYYYDDDSSS.ext"
322 322 al final de la depuracion devuelve el ultimo file de la lista que quedo.
323 323
324 324 Input:
325 325 fileList : lista conteniendo todos los files (sin path) que componen una determinada carpeta
326 326 ext : extension de los files contenidos en una carpeta
327 327
328 328 Return:
329 329 El ultimo file de una determinada carpeta, no se considera el path.
330 330 """
331 331 validFilelist = []
332 332 fileList = os.listdir(path)
333 333
334 334 # 0 1234 567 89A BCDE
335 335 # H YYYY DDD SSS .ext
336 336
337 337 for thisFile in fileList:
338 338
339 339 year = thisFile[1:5]
340 340 if not isNumber(year):
341 341 continue
342 342
343 343 doy = thisFile[5:8]
344 344 if not isNumber(doy):
345 345 continue
346 346
347 347 year = int(year)
348 348 doy = int(doy)
349 349
350 350 if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):
351 351 continue
352 352
353 353 validFilelist.append(thisFile)
354 354
355 355 if validFilelist:
356 356 validFilelist = sorted( validFilelist, key=str.lower )
357 357 return validFilelist[-1]
358 358
359 359 return None
360 360
361 361 def checkForRealPath(path, foldercounter, year, doy, set, ext):
362 362 """
363 363 Por ser Linux Case Sensitive entonces checkForRealPath encuentra el nombre correcto de un path,
364 364 Prueba por varias combinaciones de nombres entre mayusculas y minusculas para determinar
365 365 el path exacto de un determinado file.
366 366
367 367 Example :
368 368 nombre correcto del file es .../.../D2009307/P2009307367.ext
369 369
370 370 Entonces la funcion prueba con las siguientes combinaciones
371 371 .../.../y2009307367.ext
372 372 .../.../Y2009307367.ext
373 373 .../.../x2009307/y2009307367.ext
374 374 .../.../x2009307/Y2009307367.ext
375 375 .../.../X2009307/y2009307367.ext
376 376 .../.../X2009307/Y2009307367.ext
377 377 siendo para este caso, la ultima combinacion de letras, identica al file buscado
378 378
379 379 Return:
380 380 Si encuentra la cobinacion adecuada devuelve el path completo y el nombre del file
381 381 caso contrario devuelve None como path y el la ultima combinacion de nombre en mayusculas
382 382 para el filename
383 383 """
384 384 fullfilename = None
385 385 find_flag = False
386 386 filename = None
387 387
388 388 prefixDirList = [None,'d','D']
389 389 if ext.lower() == ".r": #voltage
390 390 prefixFileList = ['d','D']
391 391 elif ext.lower() == ".pdata": #spectra
392 392 prefixFileList = ['p','P']
393 393 else:
394 394 return None, filename
395 395
396 396 #sweep over the possible combinations
397 397 for prefixDir in prefixDirList:
398 398 thispath = path
399 399 if prefixDir != None:
400 400 #build the directory name xYYYYDDD (x=d or x=D)
401 401 if foldercounter == 0:
402 402 thispath = os.path.join(path, "%s%04d%03d" % ( prefixDir, year, doy ))
403 403 else:
404 404 thispath = os.path.join(path, "%s%04d%03d_%02d" % ( prefixDir, year, doy , foldercounter))
405 405 for prefixFile in prefixFileList: #sweep over the two possible combinations of "D"
406 406 filename = "%s%04d%03d%03d%s" % ( prefixFile, year, doy, set, ext ) #build the file name xYYYYDDDSSS.ext
407 407 fullfilename = os.path.join( thispath, filename ) #build the full path
408 408
409 409 if os.path.exists( fullfilename ): #check that it exists
410 410 find_flag = True
411 411 break
412 412 if find_flag:
413 413 break
414 414
415 415 if not(find_flag):
416 416 return None, filename
417 417
418 418 return fullfilename, filename
419 419
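# Usage sketch for checkForRealPath (hypothetical arguments). For a voltage file
# (.r) it probes the d/D folder and file prefixes:
#
# fullfilename, filename = checkForRealPath('/data', 0, 2009, 307, 367, '.r')
# # e.g. fullfilename -> '/data/D2009307/D2009307367.r' if that spelling exists,
# # otherwise (None, 'D2009307367.r')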
420 420 def isRadarFolder(folder):
421 421 try:
422 422 year = int(folder[1:5])
423 423 doy = int(folder[5:8])
424 424 except:
425 425 return 0
426 426
427 427 return 1
428 428
429 429 def isRadarFile(file):
430 430 try:
431 431 year = int(file[1:5])
432 432 doy = int(file[5:8])
433 433 set = int(file[8:11])
434 434 except:
435 435 return 0
436 436
437 437 return 1
438 438
439 439 def getDateFromRadarFile(file):
440 440 try:
441 441 year = int(file[1:5])
442 442 doy = int(file[5:8])
443 443 set = int(file[8:11])
444 444 except:
445 445 return None
446 446
447 447 thisDate = datetime.date(year, 1, 1) + datetime.timedelta(doy-1)
448 448 return thisDate
449 449
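# Usage sketch for getDateFromRadarFile: the basename encodes year, day-of-year and set.
#
# getDateFromRadarFile('D2015305010.r')    # -> datetime.date(2015, 11, 1)
# getDateFromRadarFile('notaradarfile.txt') # -> None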
450 450 def getDateFromRadarFolder(folder):
451 451 try:
452 452 year = int(folder[1:5])
453 453 doy = int(folder[5:8])
454 454 except:
455 455 return None
456 456
457 457 thisDate = datetime.date(year, 1, 1) + datetime.timedelta(doy-1)
458 458 return thisDate
459 459
460 460 class JRODataIO:
461 461
462 462 c = 3E8
463 463
464 464 isConfig = False
465 465
466 466 basicHeaderObj = None
467 467
468 468 systemHeaderObj = None
469 469
470 470 radarControllerHeaderObj = None
471 471
472 472 processingHeaderObj = None
473 473
474 474 dtype = None
475 475
476 476 pathList = []
477 477
478 478 filenameList = []
479 479
480 480 filename = None
481 481
482 482 ext = None
483 483
484 484 flagIsNewFile = 1
485 485
486 486 flagDiscontinuousBlock = 0
487 487
488 488 flagIsNewBlock = 0
489 489
490 490 fp = None
491 491
492 492 firstHeaderSize = 0
493 493
494 494 basicHeaderSize = 24
495 495
496 496 versionFile = 1103
497 497
498 498 fileSize = None
499 499
500 500 # ippSeconds = None
501 501
502 502 fileSizeByHeader = None
503 503
504 504 fileIndex = None
505 505
506 506 profileIndex = None
507 507
508 508 blockIndex = None
509 509
510 510 nTotalBlocks = None
511 511
512 512 maxTimeStep = 30
513 513
514 514 lastUTTime = None
515 515
516 516 datablock = None
517 517
518 518 dataOut = None
519 519
520 520 blocksize = None
521 521
522 522 getByBlock = False
523 523
524 524 def __init__(self):
525 525
526 526 raise NotImplementedError
527 527
528 528 def run(self):
529 529
530 530 raise NotImplementedError
531 531
532 532 def getDtypeWidth(self):
533 533
534 534 dtype_index = get_dtype_index(self.dtype)
535 535 dtype_width = get_dtype_width(dtype_index)
536 536
537 537 return dtype_width
538 538
539 539 class JRODataReader(JRODataIO):
540 540
541 541
542 542 online = 0
543 543
544 544 realtime = 0
545 545
546 546 nReadBlocks = 0
547 547
548 548 delay = 10 #number of seconds to wait for a new file
549 549
550 550 nTries = 3 #number of tries
551 551
552 552 nFiles = 3 #number of files for searching
553 553
554 554 path = None
555 555
556 556 foldercounter = 0
557 557
558 558 flagNoMoreFiles = 0
559 559
560 560 datetimeList = []
561 561
562 562 __isFirstTimeOnline = 1
563 563
564 564 __printInfo = True
565 565
566 566 profileIndex = None
567 567
568 568 nTxs = 1
569 569
570 570 txIndex = None
571 571
572 #Added--------------------
573
574 selBlocksize = None
575
576 selBlocktime = None
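#selBlocksize/selBlocktime store the blocksize/blocktime values passed to setup();
#judging from the commit message, they select the size (number of profiles) or the
#time span of the blocks delivered when reading by blocks (interpretation, not
#documented in the original code)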
577
578
572 579 def __init__(self):
573 580
574 581 """
575 582 This class is used to find data files
576 583
577 584 Example:
578 585 reader = JRODataReader()
579 586 fileList = reader.findDataFiles()
580 587
581 588 """
582 589 pass
583 590
584 591
585 592 def createObjByDefault(self):
586 593 """
587 594
588 595 """
589 596 raise NotImplementedError
590 597
591 598 def getBlockDimension(self):
592 599
593 600 raise NotImplementedError
594 601
595 602 def __searchFilesOffLine(self,
596 603 path,
597 604 startDate=None,
598 605 endDate=None,
599 606 startTime=datetime.time(0,0,0),
600 607 endTime=datetime.time(23,59,59),
601 608 set=None,
602 609 expLabel='',
603 610 ext='.r',
604 611 walk=True):
605 612
606 613 self.filenameList = []
607 614 self.datetimeList = []
608 615
609 616 pathList = []
610 617
611 618 dateList, pathList = self.findDatafiles(path, startDate, endDate, expLabel, ext, walk, include_path=True)
612 619
613 620 if dateList == []:
614 621 # print "[Reading] Date range selected invalid [%s - %s]: No *%s files in %s)" %(startDate, endDate, ext, path)
615 622 return None, None
616 623
617 624 if len(dateList) > 1:
618 625 print "[Reading] Data found for date range [%s - %s]: total days = %d" %(startDate, endDate, len(dateList))
619 626 else:
620 627 print "[Reading] Data found for date range [%s - %s]: date = %s" %(startDate, endDate, dateList[0])
621 628
622 629 filenameList = []
623 630 datetimeList = []
624 631
625 632 for thisPath in pathList:
626 633 # thisPath = pathList[pathDict[file]]
627 634
628 635 fileList = glob.glob1(thisPath, "*%s" %ext)
629 636 fileList.sort()
630 637
631 638 for file in fileList:
632 639
633 640 filename = os.path.join(thisPath,file)
634 641
635 642 if not isFileInDateRange(filename, startDate, endDate):
636 643 continue
637 644
638 645 thisDatetime = isFileInTimeRange(filename, startDate, endDate, startTime, endTime)
639 646
640 647 if not(thisDatetime):
641 648 continue
642 649
643 650 filenameList.append(filename)
644 651 datetimeList.append(thisDatetime)
645 652
646 653 if not(filenameList):
647 654 print "[Reading] Time range selected invalid [%s - %s]: No *%s files in %s" %(startTime, endTime, ext, path)
648 655 return None, None
649 656
650 657 print "[Reading] %d file(s) found in time range: %s - %s" %(len(filenameList), startTime, endTime)
651 658 print
652 659
653 660 for i in range(len(filenameList)):
654 661 print "[Reading] %s -> [%s]" %(filenameList[i], datetimeList[i].ctime())
655 662
656 663 self.filenameList = filenameList
657 664 self.datetimeList = datetimeList
658 665
659 666 return pathList, filenameList
660 667
661 668 def __searchFilesOnLine(self, path, expLabel = "", ext = None, walk=True, set=None):
662 669
663 670 """
664 671 Busca el ultimo archivo de la ultima carpeta (determinada o no por startDateTime) y
665 672 devuelve el archivo encontrado ademas de otros datos.
666 673
667 674 Input:
668 675 path : carpeta donde estan contenidos los files que contiene data
669 676
670 677 expLabel : Nombre del subexperimento (subfolder)
671 678
672 679 ext : extension de los files
673 680
674 681 walk : Si es habilitado no realiza busquedas dentro de los ubdirectorios (doypath)
675 682
676 683 Return:
677 684 directory : eL directorio donde esta el file encontrado
678 685 filename : el ultimo file de una determinada carpeta
679 686 year : el anho
680 687 doy : el numero de dia del anho
681 688 set : el set del archivo
682 689
683 690
684 691 """
685 692 if not os.path.isdir(path):
686 693 return None, None, None, None, None, None
687 694
688 695 dirList = []
689 696
690 697 if not walk:
691 698 fullpath = path
692 699 foldercounter = 0
693 700 else:
694 701 #Keep only the directories
695 702 for thisPath in os.listdir(path):
696 703 if not os.path.isdir(os.path.join(path,thisPath)):
697 704 continue
698 705 if not isRadarFolder(thisPath):
699 706 continue
700 707
701 708 dirList.append(thisPath)
702 709
703 710 if not(dirList):
704 711 return None, None, None, None, None, None
705 712
706 713 dirList = sorted( dirList, key=str.lower )
707 714
708 715 doypath = dirList[-1]
709 716 foldercounter = int(doypath.split('_')[1]) if len(doypath.split('_'))>1 else 0
710 717 fullpath = os.path.join(path, doypath, expLabel)
711 718
712 719
713 720 print "[Reading] %s folder was found: " %(fullpath )
714 721
715 722 if set == None:
716 723 filename = getlastFileFromPath(fullpath, ext)
717 724 else:
718 725 filename = getFileFromSet(fullpath, ext, set)
719 726
720 727 if not(filename):
721 728 return None, None, None, None, None, None
722 729
723 730 print "[Reading] %s file was found" %(filename)
724 731
725 732 if not(self.__verifyFile(os.path.join(fullpath, filename))):
726 733 return None, None, None, None, None, None
727 734
728 735 year = int( filename[1:5] )
729 736 doy = int( filename[5:8] )
730 737 set = int( filename[8:11] )
731 738
732 739 return fullpath, foldercounter, filename, year, doy, set
733 740
734 741 def __setNextFileOffline(self):
735 742
736 743 idFile = self.fileIndex
737 744
738 745 while (True):
739 746 idFile += 1
740 747 if not(idFile < len(self.filenameList)):
741 748 self.flagNoMoreFiles = 1
742 749 # print "[Reading] No more Files"
743 750 return 0
744 751
745 752 filename = self.filenameList[idFile]
746 753
747 754 if not(self.__verifyFile(filename)):
748 755 continue
749 756
750 757 fileSize = os.path.getsize(filename)
751 758 fp = open(filename,'rb')
752 759 break
753 760
754 761 self.flagIsNewFile = 1
755 762 self.fileIndex = idFile
756 763 self.filename = filename
757 764 self.fileSize = fileSize
758 765 self.fp = fp
759 766
760 767 # print "[Reading] Setting the file: %s"%self.filename
761 768
762 769 return 1
763 770
764 771 def __setNextFileOnline(self):
765 772 """
766 773 Busca el siguiente file que tenga suficiente data para ser leida, dentro de un folder especifico, si
767 774 no encuentra un file valido espera un tiempo determinado y luego busca en los posibles n files
768 775 siguientes.
769 776
770 777 Affected:
771 778 self.flagIsNewFile
772 779 self.filename
773 780 self.fileSize
774 781 self.fp
775 782 self.set
776 783 self.flagNoMoreFiles
777 784
778 785 Return:
779 786 0 : si luego de una busqueda del siguiente file valido este no pudo ser encontrado
780 787 1 : si el file fue abierto con exito y esta listo a ser leido
781 788
782 789 Excepciones:
783 790 Si un determinado file no puede ser abierto
784 791 """
785 792 nFiles = 0
786 793 fileOk_flag = False
787 794 firstTime_flag = True
788 795
789 796 self.set += 1
790 797
791 798 if self.set > 999:
792 799 self.set = 0
793 800 self.foldercounter += 1
794 801
795 802 #look for the first available file
796 803 fullfilename, filename = checkForRealPath( self.path, self.foldercounter, self.year, self.doy, self.set, self.ext )
797 804 if fullfilename:
798 805 if self.__verifyFile(fullfilename, False):
799 806 fileOk_flag = True
800 807
801 808 #if no file is found, wait and search again
802 809 if not(fileOk_flag):
803 810 for nFiles in range(self.nFiles+1): #search the next self.nFiles+1 possible files
804 811
805 812 if firstTime_flag: #on the first pass, loop self.nTries times
806 813 tries = self.nTries
807 814 else:
808 815 tries = 1 #on later passes, try only once
809 816
810 817 for nTries in range( tries ):
811 818 if firstTime_flag:
812 819 print "\t[Reading] Waiting %0.2f sec for the next file: \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 )
813 820 sleep( self.delay )
814 821 else:
815 822 print "\t[Reading] Searching the next \"%s%04d%03d%03d%s\" file ..." % (self.optchar, self.year, self.doy, self.set, self.ext)
816 823
817 824 fullfilename, filename = checkForRealPath( self.path, self.foldercounter, self.year, self.doy, self.set, self.ext )
818 825 if fullfilename:
819 826 if self.__verifyFile(fullfilename):
820 827 fileOk_flag = True
821 828 break
822 829
823 830 if fileOk_flag:
824 831 break
825 832
826 833 firstTime_flag = False
827 834
828 835 print "\t[Reading] Skipping the file \"%s\" because this file doesn't exist" % filename
829 836 self.set += 1
830 837
831 838 if nFiles == (self.nFiles-1): #if the searched file is not found, move on and search in the next folder
832 839 self.set = 0
833 840 self.doy += 1
834 841 self.foldercounter = 0
835 842
836 843 if fileOk_flag:
837 844 self.fileSize = os.path.getsize( fullfilename )
838 845 self.filename = fullfilename
839 846 self.flagIsNewFile = 1
840 847 if self.fp != None: self.fp.close()
841 848 self.fp = open(fullfilename, 'rb')
842 849 self.flagNoMoreFiles = 0
843 850 # print '[Reading] Setting the file: %s' % fullfilename
844 851 else:
845 852 self.fileSize = 0
846 853 self.filename = None
847 854 self.flagIsNewFile = 0
848 855 self.fp = None
849 856 self.flagNoMoreFiles = 1
850 857 # print '[Reading] No more files to read'
851 858
852 859 return fileOk_flag
853 860
854 861 def setNextFile(self):
855 862 if self.fp != None:
856 863 self.fp.close()
857 864
858 865 if self.online:
859 866 newFile = self.__setNextFileOnline()
860 867 else:
861 868 newFile = self.__setNextFileOffline()
862 869
863 870 if not(newFile):
864 871 print '[Reading] No more files to read'
865 872 return 0
866 873
867 874 print '[Reading] Setting the file: %s' % self.filename
868 875
869 876 self.__readFirstHeader()
870 877 self.nReadBlocks = 0
871 878 return 1
872 879
873 880 def __waitNewBlock(self):
874 881 """
875 882 Return 1 si se encontro un nuevo bloque de datos, 0 de otra forma.
876 883
877 884 Si el modo de lectura es OffLine siempre retorn 0
878 885 """
879 886 if not self.online:
880 887 return 0
881 888
882 889 if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):
883 890 return 0
884 891
885 892 currentPointer = self.fp.tell()
886 893
887 894 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
888 895
889 896 for nTries in range( self.nTries ):
890 897
891 898 self.fp.close()
892 899 self.fp = open( self.filename, 'rb' )
893 900 self.fp.seek( currentPointer )
894 901
895 902 self.fileSize = os.path.getsize( self.filename )
896 903 currentSize = self.fileSize - currentPointer
897 904
898 905 if ( currentSize >= neededSize ):
899 906 self.basicHeaderObj.read(self.fp)
900 907 return 1
901 908
902 909 if self.fileSize == self.fileSizeByHeader:
903 910 # self.flagEoF = True
904 911 return 0
905 912
906 913 print "[Reading] Waiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries+1)
907 914 sleep( self.delay )
908 915
909 916
910 917 return 0
911 918
912 919 def waitDataBlock(self,pointer_location):
913 920
914 921 currentPointer = pointer_location
915 922
916 923 neededSize = self.processingHeaderObj.blockSize #+ self.basicHeaderSize
917 924
918 925 for nTries in range( self.nTries ):
919 926 self.fp.close()
920 927 self.fp = open( self.filename, 'rb' )
921 928 self.fp.seek( currentPointer )
922 929
923 930 self.fileSize = os.path.getsize( self.filename )
924 931 currentSize = self.fileSize - currentPointer
925 932
926 933 if ( currentSize >= neededSize ):
927 934 return 1
928 935
929 936 print "[Reading] Waiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries+1)
930 937 sleep( self.delay )
931 938
932 939 return 0
933 940
934 941 def __jumpToLastBlock(self):
935 942
936 943 if not(self.__isFirstTimeOnline):
937 944 return
938 945
939 946 csize = self.fileSize - self.fp.tell()
940 947 blocksize = self.processingHeaderObj.blockSize
941 948
942 949 #skip the first data block
943 950 if csize > self.processingHeaderObj.blockSize:
944 951 self.fp.seek(self.fp.tell() + blocksize)
945 952 else:
946 953 return
947 954
948 955 csize = self.fileSize - self.fp.tell()
949 956 neededsize = self.processingHeaderObj.blockSize + self.basicHeaderSize
950 957 while True:
951 958
952 959 if self.fp.tell()<self.fileSize:
953 960 self.fp.seek(self.fp.tell() + neededsize)
954 961 else:
955 962 self.fp.seek(self.fp.tell() - neededsize)
956 963 break
957 964
958 965 # csize = self.fileSize - self.fp.tell()
959 966 # neededsize = self.processingHeaderObj.blockSize + self.basicHeaderSize
960 967 # factor = int(csize/neededsize)
961 968 # if factor > 0:
962 969 # self.fp.seek(self.fp.tell() + factor*neededsize)
963 970
964 971 self.flagIsNewFile = 0
965 972 self.__isFirstTimeOnline = 0
966 973
967 974 def __setNewBlock(self):
968 975
969 976 if self.fp == None:
970 977 return 0
971 978
972 979 # if self.online:
973 980 # self.__jumpToLastBlock()
974 981
975 982 if self.flagIsNewFile:
976 983 self.lastUTTime = self.basicHeaderObj.utc
977 984 return 1
978 985
979 986 if self.realtime:
980 987 self.flagDiscontinuousBlock = 1
981 988 if not(self.setNextFile()):
982 989 return 0
983 990 else:
984 991 return 1
985 992
986 993 currentSize = self.fileSize - self.fp.tell()
987 994 neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize
988 995
989 996 if (currentSize >= neededSize):
990 997 self.basicHeaderObj.read(self.fp)
991 998 self.lastUTTime = self.basicHeaderObj.utc
992 999 return 1
993 1000
994 1001 if self.__waitNewBlock():
995 1002 self.lastUTTime = self.basicHeaderObj.utc
996 1003 return 1
997 1004
998 1005 if not(self.setNextFile()):
999 1006 return 0
1000 1007
1001 1008 deltaTime = self.basicHeaderObj.utc - self.lastUTTime #
1002 1009 self.lastUTTime = self.basicHeaderObj.utc
1003 1010
1004 1011 self.flagDiscontinuousBlock = 0
1005 1012
1006 1013 if deltaTime > self.maxTimeStep:
1007 1014 self.flagDiscontinuousBlock = 1
1008 1015
1009 1016 return 1
1010 1017
1011 1018 def readNextBlock(self):
1012 1019
1013 1020 #Skip blocks outside startTime and endTime
1014 1021 while True:
1015 1022 if not(self.__setNewBlock()):
1016 1023 return 0
1017 1024
1018 1025 if not(self.readBlock()):
1019 1026 return 0
1020 1027
1021 1028 self.getBasicHeader()
1022 1029
1023 1030 if not isTimeInRange(self.dataOut.datatime.time(), self.startTime, self.endTime):
1024 1031
1025 1032 print "[Reading] Block No. %d/%d -> %s [Skipping]" %(self.nReadBlocks,
1026 1033 self.processingHeaderObj.dataBlocksPerFile,
1027 1034 self.dataOut.datatime.ctime())
1028 1035 continue
1029 1036
1030 1037 break
1031 1038
1032 1039 print "[Reading] Block No. %d/%d -> %s" %(self.nReadBlocks,
1033 1040 self.processingHeaderObj.dataBlocksPerFile,
1034 1041 self.dataOut.datatime.ctime())
1035 1042 return 1
1036 1043
1037 1044 def __readFirstHeader(self):
1038 1045
1039 1046 self.basicHeaderObj.read(self.fp)
1040 1047 self.systemHeaderObj.read(self.fp)
1041 1048 self.radarControllerHeaderObj.read(self.fp)
1042 1049 self.processingHeaderObj.read(self.fp)
1043 1050
1044 1051 self.firstHeaderSize = self.basicHeaderObj.size
1045 1052
1046 1053 datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR))
1047 1054 if datatype == 0:
1048 1055 datatype_str = numpy.dtype([('real','<i1'),('imag','<i1')])
1049 1056 elif datatype == 1:
1050 1057 datatype_str = numpy.dtype([('real','<i2'),('imag','<i2')])
1051 1058 elif datatype == 2:
1052 1059 datatype_str = numpy.dtype([('real','<i4'),('imag','<i4')])
1053 1060 elif datatype == 3:
1054 1061 datatype_str = numpy.dtype([('real','<i8'),('imag','<i8')])
1055 1062 elif datatype == 4:
1056 1063 datatype_str = numpy.dtype([('real','<f4'),('imag','<f4')])
1057 1064 elif datatype == 5:
1058 1065 datatype_str = numpy.dtype([('real','<f8'),('imag','<f8')])
1059 1066 else:
1060 1067 raise ValueError, 'Data type was not defined'
1061 1068
1062 1069 self.dtype = datatype_str
1063 1070 #self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
1064 1071 self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.processingHeaderObj.dataBlocksPerFile - 1)
1065 1072 # self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
1066 1073 # self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
1067 1074 self.getBlockDimension()
1068 1075
1069 1076 def __verifyFile(self, filename, msgFlag=True):
1070 1077
1071 1078 msg = None
1072 1079
1073 1080 try:
1074 1081 fp = open(filename, 'rb')
1075 1082 except IOError:
1076 1083
1077 1084 if msgFlag:
1078 1085 print "[Reading] File %s can't be opened" % (filename)
1079 1086
1080 1087 return False
1081 1088
1082 1089 currentPosition = fp.tell()
1083 1090 neededSize = self.processingHeaderObj.blockSize + self.firstHeaderSize
1084 1091
1085 1092 if neededSize == 0:
1086 1093 basicHeaderObj = BasicHeader(LOCALTIME)
1087 1094 systemHeaderObj = SystemHeader()
1088 1095 radarControllerHeaderObj = RadarControllerHeader()
1089 1096 processingHeaderObj = ProcessingHeader()
1090 1097
1091 1098 if not( basicHeaderObj.read(fp) ):
1092 1099 fp.close()
1093 1100 return False
1094 1101
1095 1102 if not( systemHeaderObj.read(fp) ):
1096 1103 fp.close()
1097 1104 return False
1098 1105
1099 1106 if not( radarControllerHeaderObj.read(fp) ):
1100 1107 fp.close()
1101 1108 return False
1102 1109
1103 1110 if not( processingHeaderObj.read(fp) ):
1104 1111 fp.close()
1105 1112 return False
1106 1113
1107 1114 neededSize = processingHeaderObj.blockSize + basicHeaderObj.size
1108 1115 else:
1109 1116 msg = "[Reading] Skipping the file %s because it doesn't have enough data" %filename
1110 1117
1111 1118 fp.close()
1112 1119
1113 1120 fileSize = os.path.getsize(filename)
1114 1121 currentSize = fileSize - currentPosition
1115 1122
1116 1123 if currentSize < neededSize:
1117 1124 if msgFlag and (msg != None):
1118 1125 print msg
1119 1126 return False
1120 1127
1121 1128 return True
1122 1129
1123 1130 def findDatafiles(self, path, startDate=None, endDate=None, expLabel='', ext='.r', walk=True, include_path=False):
1124 1131
1125 1132 path_empty = True
1126 1133
1127 1134 dateList = []
1128 1135 pathList = []
1129 1136
1130 1137 multi_path = path.split(',')
1131 1138
1132 1139 if not walk:
1133 1140
1134 1141 for single_path in multi_path:
1135 1142
1136 1143 if not os.path.isdir(single_path):
1137 1144 continue
1138 1145
1139 1146 fileList = glob.glob1(single_path, "*"+ext)
1140 1147
1141 1148 if not fileList:
1142 1149 continue
1143 1150
1144 1151 path_empty = False
1145 1152
1146 1153 fileList.sort()
1147 1154
1148 1155 for thisFile in fileList:
1149 1156
1150 1157 if not os.path.isfile(os.path.join(single_path, thisFile)):
1151 1158 continue
1152 1159
1153 1160 if not isRadarFile(thisFile):
1154 1161 continue
1155 1162
1156 1163 if not isFileInDateRange(thisFile, startDate, endDate):
1157 1164 continue
1158 1165
1159 1166 thisDate = getDateFromRadarFile(thisFile)
1160 1167
1161 1168 if thisDate in dateList:
1162 1169 continue
1163 1170
1164 1171 dateList.append(thisDate)
1165 1172 pathList.append(single_path)
1166 1173
1167 1174 else:
1168 1175 for single_path in multi_path:
1169 1176
1170 1177 if not os.path.isdir(single_path):
1171 1178 continue
1172 1179
1173 1180 dirList = []
1174 1181
1175 1182 for thisPath in os.listdir(single_path):
1176 1183
1177 1184 if not os.path.isdir(os.path.join(single_path,thisPath)):
1178 1185 continue
1179 1186
1180 1187 if not isRadarFolder(thisPath):
1181 1188 continue
1182 1189
1183 1190 if not isFolderInDateRange(thisPath, startDate, endDate):
1184 1191 continue
1185 1192
1186 1193 dirList.append(thisPath)
1187 1194
1188 1195 if not dirList:
1189 1196 continue
1190 1197
1191 1198 dirList.sort()
1192 1199
1193 1200 for thisDir in dirList:
1194 1201
1195 1202 datapath = os.path.join(single_path, thisDir, expLabel)
1196 1203 fileList = glob.glob1(datapath, "*"+ext)
1197 1204
1198 1205 if not fileList:
1199 1206 continue
1200 1207
1201 1208 path_empty = False
1202 1209
1203 1210 thisDate = getDateFromRadarFolder(thisDir)
1204 1211
1205 1212 pathList.append(datapath)
1206 1213 dateList.append(thisDate)
1207 1214
1208 1215 dateList.sort()
1209 1216
1210 1217 if walk:
1211 1218 pattern_path = os.path.join(multi_path[0], "[dYYYYDDD]", expLabel)
1212 1219 else:
1213 1220 pattern_path = multi_path[0]
1214 1221
1215 1222 if path_empty:
1216 1223 print "[Reading] No *%s files in %s for %s to %s" %(ext, pattern_path, startDate, endDate)
1217 1224 else:
1218 1225 if not dateList:
1219 1226 print "[Reading] Date range selected invalid [%s - %s]: No *%s files in %s" %(startDate, endDate, ext, path)
1220 1227
1221 1228 if include_path:
1222 1229 return dateList, pathList
1223 1230
1224 1231 return dateList
1225 1232
1226 1233 def setup(self,
1227 1234 path=None,
1228 1235 startDate=None,
1229 1236 endDate=None,
1230 1237 startTime=datetime.time(0,0,0),
1231 1238 endTime=datetime.time(23,59,59),
1232 1239 set=None,
1233 1240 expLabel = "",
1234 1241 ext = None,
1235 1242 online = False,
1236 1243 delay = 60,
1237 1244 walk = True,
1238 1245 getblock = False,
1239 1246 nTxs = 1,
1240 realtime=False):
1247 realtime=False,
1248 blocksize=None,
1249 blocktime=None):
1241 1250
1242 1251 if path == None:
1243 1252 raise ValueError, "[Reading] The path is not valid"
1244 1253
1245 1254 if ext == None:
1246 1255 ext = self.ext
1247 1256
1248 1257 if online:
1249 1258 print "[Reading] Searching files in online mode..."
1250 1259
1251 1260 for nTries in range( self.nTries ):
1252 1261 fullpath, foldercounter, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext, walk=walk, set=set)
1253 1262
1254 1263 if fullpath:
1255 1264 break
1256 1265
1257 1266 print '[Reading] Waiting %0.2f sec for a valid file in %s: try %02d ...' % (self.delay, path, nTries+1)
1258 1267 sleep( self.delay )
1259 1268
1260 1269 if not(fullpath):
1261 1270 print "[Reading] There isn't any valid file in %s" % path
1262 1271 return
1263 1272
1264 1273 self.year = year
1265 1274 self.doy = doy
1266 1275 self.set = set - 1
1267 1276 self.path = path
1268 1277 self.foldercounter = foldercounter
1269 1278 last_set = None
1270 1279
1271 1280 else:
1272 1281 print "[Reading] Searching files in offline mode ..."
1273 1282 pathList, filenameList = self.__searchFilesOffLine(path, startDate=startDate, endDate=endDate,
1274 1283 startTime=startTime, endTime=endTime,
1275 1284 set=set, expLabel=expLabel, ext=ext,
1276 1285 walk=walk)
1277 1286
1278 1287 if not(pathList):
1279 1288 # print "[Reading] No *%s files in %s (%s - %s)"%(ext, path,
1280 1289 # datetime.datetime.combine(startDate,startTime).ctime(),
1281 1290 # datetime.datetime.combine(endDate,endTime).ctime())
1282 1291
1283 1292 # sys.exit(-1)
1284 1293
1285 1294 self.fileIndex = -1
1286 1295 self.pathList = []
1287 1296 self.filenameList = []
1288 1297 return
1289 1298
1290 1299 self.fileIndex = -1
1291 1300 self.pathList = pathList
1292 1301 self.filenameList = filenameList
1293 1302 file_name = os.path.basename(filenameList[-1])
1294 1303 basename, ext = os.path.splitext(file_name)
1295 1304 last_set = int(basename[-3:])
1296 1305
1297 1306 self.online = online
1298 1307 self.realtime = realtime
1299 1308 self.delay = delay
1300 1309 ext = ext.lower()
1301 1310 self.ext = ext
1302 1311 self.getByBlock = getblock
1303 1312 self.nTxs = nTxs
1304 1313 self.startTime = startTime
1305 1314 self.endTime = endTime
1306 1315
1316 #Added-----------------
1317 self.selBlocksize = blocksize
1318 self.selBlocktime = blocktime
1319
1320
1307 1321 if not(self.setNextFile()):
1308 1322 if (startDate!=None) and (endDate!=None):
1309 1323 print "[Reading] No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime())
1310 1324 elif startDate != None:
1311 1325 print "[Reading] No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime())
1312 1326 else:
1313 1327 print "[Reading] No files"
1314 1328
1315 1329 self.fileIndex = -1
1316 1330 self.pathList = []
1317 1331 self.filenameList = []
1318 1332 return
1319 1333
1320 1334 # self.getBasicHeader()
1321 1335
1322 1336 if last_set != None:
1323 1337 self.dataOut.last_block = last_set * self.processingHeaderObj.dataBlocksPerFile + self.basicHeaderObj.dataBlock
1324 1338 return
1325 1339
1326 1340 def getBasicHeader(self):
1327 1341
1328 1342 self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.radarControllerHeaderObj.ippSeconds
1329 1343
1330 1344 self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock
1331 1345
1332 1346 self.dataOut.timeZone = self.basicHeaderObj.timeZone
1333 1347
1334 1348 self.dataOut.dstFlag = self.basicHeaderObj.dstFlag
1335 1349
1336 1350 self.dataOut.errorCount = self.basicHeaderObj.errorCount
1337 1351
1338 1352 self.dataOut.useLocalTime = self.basicHeaderObj.useLocalTime
1339 1353
1340 1354 self.dataOut.ippSeconds = self.radarControllerHeaderObj.ippSeconds/self.nTxs
1341 1355
1342 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock*self.nTxs
1356 # self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock*self.nTxs
1343 1357
1344 1358
1345 1359 def getFirstHeader(self):
1346 1360
1347 1361 raise NotImplementedError
1348 1362
1349 1363 def getData(self):
1350 1364
1351 1365 raise NotImplementedError
1352 1366
1353 1367 def hasNotDataInBuffer(self):
1354 1368
1355 1369 raise NotImplementedError
1356 1370
1357 1371 def readBlock(self):
1358 1372
1359 1373 raise NotImplementedError
1360 1374
1361 1375 def isEndProcess(self):
1362 1376
1363 1377 return self.flagNoMoreFiles
1364 1378
1365 1379 def printReadBlocks(self):
1366 1380
1367 1381 print "[Reading] Number of read blocks per file %04d" %self.nReadBlocks
1368 1382
1369 1383 def printTotalBlocks(self):
1370 1384
1371 1385 print "[Reading] Number of read blocks %04d" %self.nTotalBlocks
1372 1386
1373 1387 def printNumberOfBlock(self):
1374 1388
1375 1389 if self.flagIsNewBlock:
1376 1390 print "[Reading] Block No. %d/%d -> %s" %(self.nReadBlocks,
1377 1391 self.processingHeaderObj.dataBlocksPerFile,
1378 1392 self.dataOut.datatime.ctime())
1379 1393
1380 1394 def printInfo(self):
1381 1395
1382 1396 if self.__printInfo == False:
1383 1397 return
1384 1398
1385 1399 self.basicHeaderObj.printInfo()
1386 1400 self.systemHeaderObj.printInfo()
1387 1401 self.radarControllerHeaderObj.printInfo()
1388 1402 self.processingHeaderObj.printInfo()
1389 1403
1390 1404 self.__printInfo = False
1391 1405
1392 1406
1393 1407 def run(self, **kwargs):
1394 1408
1395 1409 if not(self.isConfig):
1396 1410
1397 1411 # self.dataOut = dataOut
1398 1412 self.setup(**kwargs)
1399 1413 self.isConfig = True
1400 1414
1401 1415 self.getData()
1402 1416
1403 1417 class JRODataWriter(JRODataIO):
1404 1418
1405 1419 """
1406 1420 This class writes data to processed files (.r or .pdata). Data is always
1407 1421 written in blocks.
1408 1422 """
1409 1423
1410 1424 blockIndex = 0
1411 1425
1412 1426 path = None
1413 1427
1414 1428 setFile = None
1415 1429
1416 1430 profilesPerBlock = None
1417 1431
1418 1432 blocksPerFile = None
1419 1433
1420 1434 nWriteBlocks = 0
1421 1435
1422 1436 fileDate = None
1423 1437
1424 1438 def __init__(self, dataOut=None):
1425 1439 raise NotImplementedError
1426 1440
1427 1441
1428 1442 def hasAllDataInBuffer(self):
1429 1443 raise NotImplementedError
1430 1444
1431 1445
1432 1446 def setBlockDimension(self):
1433 1447 raise NotImplementedError
1434 1448
1435 1449
1436 1450 def writeBlock(self):
1437 1451 raise NotImplementedError
1438 1452
1439 1453
1440 1454 def putData(self):
1441 1455 raise NotImplementedError
1442 1456
1443 1457
1444 1458 def getProcessFlags(self):
1445 1459
1446 1460 processFlags = 0
1447 1461
1448 1462 dtype_index = get_dtype_index(self.dtype)
1449 1463 procflag_dtype = get_procflag_dtype(dtype_index)
1450 1464
1451 1465 processFlags += procflag_dtype
1452 1466
1453 1467 if self.dataOut.flagDecodeData:
1454 1468 processFlags += PROCFLAG.DECODE_DATA
1455 1469
1456 1470 if self.dataOut.flagDeflipData:
1457 1471 processFlags += PROCFLAG.DEFLIP_DATA
1458 1472
1459 1473 if self.dataOut.code is not None:
1460 1474 processFlags += PROCFLAG.DEFINE_PROCESS_CODE
1461 1475
1462 1476 if self.dataOut.nCohInt > 1:
1463 1477 processFlags += PROCFLAG.COHERENT_INTEGRATION
1464 1478
1465 1479 if self.dataOut.type == "Spectra":
1466 1480 if self.dataOut.nIncohInt > 1:
1467 1481 processFlags += PROCFLAG.INCOHERENT_INTEGRATION
1468 1482
1469 1483 if self.dataOut.data_dc is not None:
1470 1484 processFlags += PROCFLAG.SAVE_CHANNELS_DC
1471 1485
1472 1486 if self.dataOut.flagShiftFFT:
1473 1487 processFlags += PROCFLAG.SHIFT_FFT_DATA
1474 1488
1475 1489 return processFlags
1476 1490
1477 1491 def setBasicHeader(self):
1478 1492
1479 1493 self.basicHeaderObj.size = self.basicHeaderSize #bytes
1480 1494 self.basicHeaderObj.version = self.versionFile
1481 1495 self.basicHeaderObj.dataBlock = self.nTotalBlocks
1482 1496
1483 1497 utc = numpy.floor(self.dataOut.utctime)
1484 1498 milisecond = (self.dataOut.utctime - utc)* 1000.0
1485 1499
1486 1500 self.basicHeaderObj.utc = utc
1487 1501 self.basicHeaderObj.miliSecond = milisecond
1488 1502 self.basicHeaderObj.timeZone = self.dataOut.timeZone
1489 1503 self.basicHeaderObj.dstFlag = self.dataOut.dstFlag
1490 1504 self.basicHeaderObj.errorCount = self.dataOut.errorCount
1491 1505
1492 1506 def setFirstHeader(self):
1493 1507 """
1494 1508 Obtiene una copia del First Header
1495 1509
1496 1510 Affected:
1497 1511
1498 1512 self.basicHeaderObj
1499 1513 self.systemHeaderObj
1500 1514 self.radarControllerHeaderObj
1501 1515 self.processingHeaderObj self.
1502 1516
1503 1517 Return:
1504 1518 None
1505 1519 """
1506 1520
1507 1521 raise NotImplementedError
1508 1522
1509 1523 def __writeFirstHeader(self):
1510 1524 """
1511 1525 Escribe el primer header del file es decir el Basic header y el Long header (SystemHeader, RadarControllerHeader, ProcessingHeader)
1512 1526
1513 1527 Affected:
1514 1528 __dataType
1515 1529
1516 1530 Return:
1517 1531 None
1518 1532 """
1519 1533
1520 1534 # COMPUTE PARAMETERS
1521 1535
1522 1536 sizeLongHeader = self.systemHeaderObj.size + self.radarControllerHeaderObj.size + self.processingHeaderObj.size
1523 1537 self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader
1524 1538
1525 1539 self.basicHeaderObj.write(self.fp)
1526 1540 self.systemHeaderObj.write(self.fp)
1527 1541 self.radarControllerHeaderObj.write(self.fp)
1528 1542 self.processingHeaderObj.write(self.fp)
1529 1543
1530 1544 def __setNewBlock(self):
1531 1545 """
1532 1546 Si es un nuevo file escribe el First Header caso contrario escribe solo el Basic Header
1533 1547
1534 1548 Return:
1535 1549 0 : si no pudo escribir nada
1536 1550 1 : Si escribio el Basic el First Header
1537 1551 """
1538 1552 if self.fp == None:
1539 1553 self.setNextFile()
1540 1554
1541 1555 if self.flagIsNewFile:
1542 1556 return 1
1543 1557
1544 1558 if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
1545 1559 self.basicHeaderObj.write(self.fp)
1546 1560 return 1
1547 1561
1548 1562 if not( self.setNextFile() ):
1549 1563 return 0
1550 1564
1551 1565 return 1
1552 1566
1553 1567
1554 1568 def writeNextBlock(self):
1555 1569 """
1556 1570 Selecciona el bloque siguiente de datos y los escribe en un file
1557 1571
1558 1572 Return:
1559 1573 0 : Si no hizo pudo escribir el bloque de datos
1560 1574 1 : Si no pudo escribir el bloque de datos
1561 1575 """
1562 1576 if not( self.__setNewBlock() ):
1563 1577 return 0
1564 1578
1565 1579 self.writeBlock()
1566 1580
1567 1581 print "[Writing] Block No. %d/%d" %(self.blockIndex,
1568 1582 self.processingHeaderObj.dataBlocksPerFile)
1569 1583
1570 1584 return 1
1571 1585
1572 1586 def setNextFile(self):
1573 1587 """
1574 1588 Determina el siguiente file que sera escrito
1575 1589
1576 1590 Affected:
1577 1591 self.filename
1578 1592 self.subfolder
1579 1593 self.fp
1580 1594 self.setFile
1581 1595 self.flagIsNewFile
1582 1596
1583 1597 Return:
1584 1598 0 : Si el archivo no puede ser escrito
1585 1599 1 : Si el archivo esta listo para ser escrito
1586 1600 """
1587 1601 ext = self.ext
1588 1602 path = self.path
1589 1603
1590 1604 if self.fp != None:
1591 1605 self.fp.close()
1592 1606
1593 1607 timeTuple = time.localtime( self.dataOut.utctime)
1594 1608 subfolder = 'd%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)
1595 1609
1596 1610 fullpath = os.path.join( path, subfolder )
1597 1611 setFile = self.setFile
1598 1612
1599 1613 if not( os.path.exists(fullpath) ):
1600 1614 os.mkdir(fullpath)
1601 1615 setFile = -1 #initialize the set counter
1602 1616 else:
1603 1617 filesList = os.listdir( fullpath )
1604 1618 if len( filesList ) > 0:
1605 1619 filesList = sorted( filesList, key=str.lower )
1606 1620 filen = filesList[-1]
1607 1621 # the filename must have the following format
1608 1622 # 0 1234 567 89A BCDE (hex)
1609 1623 # x YYYY DDD SSS .ext
1610 1624 if isNumber( filen[8:11] ):
1611 1625 setFile = int( filen[8:11] ) #initialize the set counter to the set of the last file
1612 1626 else:
1613 1627 setFile = -1
1614 1628 else:
1615 1629 setFile = -1 #initialize the set counter
1616 1630
1617 1631 setFile += 1
1618 1632
1619 1633 #If this is a new day it resets some values
1620 1634 if self.dataOut.datatime.date() > self.fileDate:
1621 1635 setFile = 0
1622 1636 self.nTotalBlocks = 0
1623 1637
1624 1638 filen = '%s%4.4d%3.3d%3.3d%s' % (self.optchar, timeTuple.tm_year, timeTuple.tm_yday, setFile, ext )
1625 1639
1626 1640 filename = os.path.join( path, subfolder, filen )
1627 1641
1628 1642 fp = open( filename,'wb' )
1629 1643
1630 1644 self.blockIndex = 0
1631 1645
1632 1646 #saving attributes
1633 1647 self.filename = filename
1634 1648 self.subfolder = subfolder
1635 1649 self.fp = fp
1636 1650 self.setFile = setFile
1637 1651 self.flagIsNewFile = 1
1638 1652 self.fileDate = self.dataOut.datatime.date()
1639 1653
1640 1654 self.setFirstHeader()
1641 1655
1642 1656 print '[Writing] Opening file: %s'%self.filename
1643 1657
1644 1658 self.__writeFirstHeader()
1645 1659
1646 1660 return 1
1647 1661
1648 1662 def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=64, set=None, ext=None, datatype=4):
1649 1663 """
1650 1664 Sets the format in which the data will be saved and writes the First Header
1651 1665
1652 1666 Inputs:
1653 1667 path : directory where data will be saved
1654 1668 profilesPerBlock : number of profiles per block
1655 1669 set : initial file set
1656 1670 datatype : An integer number that defines data type:
1657 1671 0 : int8 (1 byte)
1658 1672 1 : int16 (2 bytes)
1659 1673 2 : int32 (4 bytes)
1660 1674 3 : int64 (8 bytes)
1661 1675 4 : float32 (4 bytes)
1662 1676 5 : double64 (8 bytes)
1663 1677
1664 1678 Return:
1665 1679 0 : if the setup was not successful
1666 1680 1 : if the setup was successful
1667 1681 """
1668 1682
1669 1683 if ext == None:
1670 1684 ext = self.ext
1671 1685
1672 1686 self.ext = ext.lower()
1673 1687
1674 1688 self.path = path
1675 1689
1676 1690 if set is None:
1677 1691 self.setFile = -1
1678 1692 else:
1679 1693 self.setFile = set - 1
1680 1694
1681 1695 self.blocksPerFile = blocksPerFile
1682 1696
1683 1697 self.profilesPerBlock = profilesPerBlock
1684 1698
1685 1699 self.dataOut = dataOut
1686 1700 self.fileDate = self.dataOut.datatime.date()
1687 1701 #By default
1688 1702 self.dtype = self.dataOut.dtype
1689 1703
1690 1704 if datatype is not None:
1691 1705 self.dtype = get_numpy_dtype(datatype)
1692 1706
1693 1707 if not(self.setNextFile()):
1694 1708 print "[Writing] There isn't a next file"
1695 1709 return 0
1696 1710
1697 1711 self.setBlockDimension()
1698 1712
1699 1713 return 1
1700 1714
1701 1715 def run(self, dataOut, **kwargs):
1702 1716
1703 1717 if not(self.isConfig):
1704 1718
1705 1719 self.setup(dataOut, **kwargs)
1706 1720 self.isConfig = True
1707 1721
1708 1722 self.putData()
1709 1723
@@ -1,596 +1,621
1 1 '''
2 2 Created on Jul 2, 2014
3 3
4 4 @author: roj-idl71
5 5 '''
6 6
7 7 import numpy
8 8
9 9 from jroIO_base import LOCALTIME, JRODataReader, JRODataWriter
10 10 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation
11 11 from schainpy.model.data.jroheaderIO import PROCFLAG, BasicHeader, SystemHeader, RadarControllerHeader, ProcessingHeader
12 12 from schainpy.model.data.jrodata import Voltage
13 13
14 14 class VoltageReader(JRODataReader, ProcessingUnit):
15 15 """
16 16 This class reads voltage data from files in rawdata format (.r). The data is
17 17 always read in blocks. The data read (a 3-dimensional array:
18 18 profiles*heights*channels) is stored in the "buffer" variable.
19 19
20 20 profiles * heights * channels
21 21
22 22 This class contains instances (objects) of the BasicHeader, SystemHeader,
23 23 RadarControllerHeader and Voltage classes. The first three are used to store the data
24 24 header information (metadata), and the fourth (Voltage) to obtain and store one profile of
25 25 data from the "buffer" each time the "getData" method is called.
26 26
27 27 Example:
28 28
29 29 dpath = "/home/myuser/data"
30 30
31 31 startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0)
32 32
33 33 endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0)
34 34
35 35 readerObj = VoltageReader()
36 36
37 37 readerObj.setup(dpath, startTime, endTime)
38 38
39 39 while(True):
40 40
41 41 #to get one profile
42 42 profile = readerObj.getData()
43 43
44 44 #print the profile
45 45 print profile
46 46
47 47 #If you want to see all datablock
48 48 print readerObj.datablock
49 49
50 50 if readerObj.flagNoMoreFiles:
51 51 break
52 52
53 53 """
54 54
55 55 ext = ".r"
56 56
57 57 optchar = "D"
58 58 dataOut = None
59 59
60
61 60 def __init__(self):
62 61 """
63 62 Inicializador de la clase VoltageReader para la lectura de datos de voltage.
64 63
65 64 Input:
66 65 dataOut : Objeto de la clase Voltage. Este objeto sera utilizado para
67 66 almacenar un perfil de datos cada vez que se haga un requerimiento
68 67 (getData). El perfil sera obtenido a partir del buffer de datos,
69 68 si el buffer esta vacio se hara un nuevo proceso de lectura de un
70 69 bloque de datos.
71 70 Si este parametro no es pasado se creara uno internamente.
72 71
73 72 Variables afectadas:
74 73 self.dataOut
75 74
76 75 Return:
77 76 None
78 77 """
79 78
80 79 ProcessingUnit.__init__(self)
81 80
82 81 self.isConfig = False
83 82
84 83 self.datablock = None
85 84
86 85 self.utc = 0
87 86
88 87 self.ext = ".r"
89 88
90 89 self.optchar = "D"
91 90
92 91 self.basicHeaderObj = BasicHeader(LOCALTIME)
93 92
94 93 self.systemHeaderObj = SystemHeader()
95 94
96 95 self.radarControllerHeaderObj = RadarControllerHeader()
97 96
98 97 self.processingHeaderObj = ProcessingHeader()
99 98
100 99 self.online = 0
101 100
102 101 self.fp = None
103 102
104 103 self.idFile = None
105 104
106 105 self.dtype = None
107 106
108 107 self.fileSizeByHeader = None
109 108
110 109 self.filenameList = []
111 110
112 111 self.filename = None
113 112
114 113 self.fileSize = None
115 114
116 115 self.firstHeaderSize = 0
117 116
118 117 self.basicHeaderSize = 24
119 118
120 119 self.pathList = []
121 120
122 121 self.filenameList = []
123 122
124 123 self.lastUTTime = 0
125 124
126 125 self.maxTimeStep = 30
127 126
128 127 self.flagNoMoreFiles = 0
129 128
130 129 self.set = 0
131 130
132 131 self.path = None
133 132
134 133 self.profileIndex = 2**32-1
135 134
136 135 self.delay = 3 #seconds
137 136
138 137 self.nTries = 3 #number of tries
139 138
140 139 self.nFiles = 3 #number of files for searching
141 140
142 141 self.nReadBlocks = 0
143 142
144 143 self.flagIsNewFile = 1
145 144
146 145 self.__isFirstTimeOnline = 1
147 146
148 147 # self.ippSeconds = 0
149 148
150 149 self.flagDiscontinuousBlock = 0
151 150
152 151 self.flagIsNewBlock = 0
153 152
154 153 self.nTotalBlocks = 0
155 154
156 155 self.blocksize = 0
157 156
158 157 self.dataOut = self.createObjByDefault()
159 158
160 159 self.nTxs = 1
161 160
162 161 self.txIndex = 0
163 162
164 163 def createObjByDefault(self):
165 164
166 165 dataObj = Voltage()
167 166
168 167 return dataObj
169 168
170 169 def __hasNotDataInBuffer(self):
171 170
172 171 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock*self.nTxs:
173 172 return 1
174 173
175 174 return 0
176 175
177 176
178 177 def getBlockDimension(self):
179 178 """
180 179 Obtiene la cantidad de puntos a leer por cada bloque de datos
181 180
182 181 Affected:
183 182 self.blocksize
184 183
185 184 Return:
186 185 None
187 186 """
188 187 pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels
189 188 self.blocksize = pts2read
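# Worked example with hypothetical header values: 128 profiles per block,
# 1000 heights and 4 channels give blocksize = 128*1000*4 = 512000 complex
# samples to read per block.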
190 189
191 190
192 191 def readBlock(self):
193 192 """
194 193 readBlock reads a data block starting at the current file pointer position
195 194 (self.fp) and updates all the parameters related to that data block
196 195 (metadata + data). The data read is stored in the buffer and the buffer
197 196 counter is reset to 0.
198 197
199 198 Inputs:
200 199 None
201 200
202 201 Return:
203 202 None
204 203
205 204 Affected:
206 205 self.profileIndex
207 206 self.datablock
208 207 self.flagIsNewFile
209 208 self.flagIsNewBlock
210 209 self.nTotalBlocks
211 210
212 211 Exceptions:
213 212 Raised when a block that was read is not a valid block
214 213 """
215 214 current_pointer_location = self.fp.tell()
216 215 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
217 216
218 217 try:
219 218 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
220 219 except:
221 220 #print "The read block (%3d) has not enough data" %self.nReadBlocks
222 221
223 222 if self.waitDataBlock(pointer_location=current_pointer_location):
224 223 junk = numpy.fromfile( self.fp, self.dtype, self.blocksize )
225 224 junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) )
226 225 # return 0
227 226
228 227 #Dimensions : nChannels, nProfiles, nSamples
229 228
230 229 junk = numpy.transpose(junk, (2,0,1))
231 230 self.datablock = junk['real'] + junk['imag']*1j
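# The raw samples use a structured dtype with 'real' and 'imag' fields; the
# transpose above turns (profiles, heights, channels) into
# (channels, profiles, heights) before building the complex datablock.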
232 231
233 232 self.profileIndex = 0
234 233
235 234 self.flagIsNewFile = 0
236 235 self.flagIsNewBlock = 1
237 236
238 237 self.nTotalBlocks += 1
239 238 self.nReadBlocks += 1
240 239
241 240 return 1
242 241
243 242 def getFirstHeader(self):
244 243
245 244 self.getBasicHeader()
246 245
247 246 self.dataOut.systemHeaderObj = self.systemHeaderObj.copy()
248 247
249 248 self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy()
250 249
251 250 if self.nTxs > 1:
252 251 self.dataOut.radarControllerHeaderObj.ippSeconds = self.radarControllerHeaderObj.ippSeconds/self.nTxs
253 252
254 253 #Time interval and code are properties of dataOut. Their values depend on radarControllerHeaderObj.
255 254
256 255 # self.dataOut.timeInterval = self.radarControllerHeaderObj.ippSeconds * self.processingHeaderObj.nCohInt
257 256 #
258 257 # if self.radarControllerHeaderObj.code is not None:
259 258 #
260 259 # self.dataOut.nCode = self.radarControllerHeaderObj.nCode
261 260 #
262 261 # self.dataOut.nBaud = self.radarControllerHeaderObj.nBaud
263 262 #
264 263 # self.dataOut.code = self.radarControllerHeaderObj.code
265 264
266 265 self.dataOut.dtype = self.dtype
267 266
268 267 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock
269 268
270 269 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.nHeights) *self.processingHeaderObj.deltaHeight + self.processingHeaderObj.firstHeight
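# Illustrative example (hypothetical header values): firstHeight = 80.0,
# deltaHeight = 0.15 and nHeights = 4 give heightList = [80.0, 80.15, 80.30, 80.45].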
271 270
272 271 self.dataOut.channelList = range(self.systemHeaderObj.nChannels)
273 272
274 273 self.dataOut.nCohInt = self.processingHeaderObj.nCohInt
275 274
276 275 self.dataOut.flagDecodeData = self.processingHeaderObj.flag_decode #assume the data has not been decoded
277 276
278 277 self.dataOut.flagDeflipData = self.processingHeaderObj.flag_deflip #assume the data has not been deflipped
279 278
280 279 self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft
281 280
282 281 def reshapeData(self):
283 282
284 283 if self.nTxs < 0:
285 284 return
286 285
287 286 if self.nTxs == 1:
288 287 return
289 288
290 289 if self.nTxs < 1 and self.processingHeaderObj.profilesPerBlock % (1./self.nTxs) != 0:
291 290 raise ValueError, "nProfiles (=%d) should be a multiple of 1./nTxs (=%f)" %(self.processingHeaderObj.profilesPerBlock, 1./self.nTxs)
292 291
293 292 if self.nTxs > 1 and self.processingHeaderObj.nHeights % self.nTxs != 0:
294 293 raise ValueError, "nHeights (=%d) should be a multiple of nTxs (=%d)" %(self.processingHeaderObj.nHeights, self.nTxs)
295 294
296 295 self.datablock = self.datablock.reshape((self.systemHeaderObj.nChannels, self.processingHeaderObj.profilesPerBlock*self.nTxs, self.processingHeaderObj.nHeights/self.nTxs))
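# Sketch with hypothetical numbers: a (4 channels, 100 profiles, 200 heights) block
# with nTxs = 2 is reshaped to (4, 200, 100): twice as many profiles, each spanning
# half as many heights.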
297 296
298 297 self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock*self.nTxs
299 298 self.dataOut.heightList = numpy.arange(self.processingHeaderObj.nHeights/self.nTxs) *self.processingHeaderObj.deltaHeight + self.processingHeaderObj.firstHeight
300 299 self.dataOut.radarControllerHeaderObj.ippSeconds = self.radarControllerHeaderObj.ippSeconds/self.nTxs
301 300
302 301 return
303 302
304 303 def getData(self):
305 304 """
306 305 getData takes one unit of data (one profile) from the read buffer and copies it to the
307 306 self.dataOut object of type "Voltage" with all its associated parameters (metadata).
308 307 When there is no data left in the read buffer, a new data block must be read using
309 308 "readNextBlock"
310 309
311 310 It also increments the buffer counter "self.profileIndex" by 1.
312 311
313 312 Return:
314 313
315 314 If the flag self.getByBlock has been set, the whole block is copied to self.dataOut and self.profileIndex
316 315 ends up equal to the total number of profiles read from the file.
317 316
318 317 If self.getByBlock == False:
319 318
320 319 self.dataOut.data = buffer[:, thisProfile, :]
321 320
322 321 shape = [nChannels, nHeis]
323 322
324 323 If self.getByBlock == True:
325 324
326 325 self.dataOut.data = buffer[:, :, :]
327 326
328 327 shape = [nChannels, nProfiles, nHeis]
329 328
334 333 Affected:
335 334 self.dataOut
336 335 self.profileIndex
337 336 self.flagDiscontinuousBlock
338 337 self.flagIsNewBlock
339 338 """
340 339
341 340 if self.flagNoMoreFiles:
342 341 self.dataOut.flagNoData = True
343 342 print 'Process finished'
344 343 return 0
345 344
346 345 self.flagDiscontinuousBlock = 0
347 346 self.flagIsNewBlock = 0
348 347
349 348 if self.__hasNotDataInBuffer():
350 349
351 350 if not( self.readNextBlock() ):
352 351 return 0
353 352
354 353 self.getFirstHeader()
355 354
356 355 self.reshapeData()
357 356
358 357 if self.datablock is None:
359 358 self.dataOut.flagNoData = True
360 359 return 0
361 360
362 361 if not self.getByBlock:
363 362
364 363 """
365 364 Return profile by profile
366 365
367 366 If nTxs > 1 then one profile is divided by nTxs and number of total
368 367 blocks is increased by nTxs (nProfiles *= nTxs)
369 368 """
370 369 self.dataOut.flagDataAsBlock = False
371 370 self.dataOut.data = self.datablock[:,self.profileIndex,:]
372 371 self.dataOut.profileIndex = self.profileIndex
373 372
374 373 self.profileIndex += 1
375 374
375 # elif self.selBlocksize==None or self.selBlocksize==self.dataOut.nProfiles:
376 # """
377 # Return all block
378 # """
379 # self.dataOut.flagDataAsBlock = True
380 # self.dataOut.data = self.datablock
381 # self.dataOut.profileIndex = self.dataOut.nProfiles - 1
382 #
383 # self.profileIndex = self.dataOut.nProfiles
384
376 385 else:
377 386 """
378 Return all block
387 Return a block
379 388 """
380 self.dataOut.flagDataAsBlock = True
381 self.dataOut.data = self.datablock
382 self.dataOut.profileIndex = self.dataOut.nProfiles - 1
389 if self.selBlocksize is None: self.selBlocksize = self.dataOut.nProfiles
390 if self.selBlocktime is not None: self.selBlocksize = int(self.dataOut.nProfiles*round(self.selBlocktime/(self.dataOut.ippSeconds*self.dataOut.nProfiles)))
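# Sketch with hypothetical values: selBlocktime = 0.8 s, ippSeconds = 0.0004 s and
# nProfiles = 1000 give 0.4 s per file block, so round(0.8/0.4) = 2 blocks are
# grouped and selBlocksize = 2000 profiles.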
391
392 self.dataOut.data = self.datablock[:,self.profileIndex:self.profileIndex+self.selBlocksize,:]
393 self.profileIndex += self.selBlocksize
394
395 while self.dataOut.data.shape[1] < self.selBlocksize: #Not enough profiles to fill the block
396 if not( self.readNextBlock() ):
397 return 0
398 self.getFirstHeader()
399 self.reshapeData()
400 if self.datablock is None:
401 self.dataOut.flagNoData = True
402 return 0
403 #stack data
404 indMax = self.selBlocksize - self.dataOut.data.shape[1]
405 self.dataOut.data = numpy.hstack((self.dataOut.data,self.datablock[:,:indMax,:]))
406 self.profileIndex = indMax
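# profileIndex now points at the first unused profile of the last block read,
# so the next call to getData continues from there.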
383 407
384 self.profileIndex = self.dataOut.nProfiles
408 self.dataOut.flagDataAsBlock = True
409 self.dataOut.nProfiles = self.selBlocksize
385 410
386 411 self.dataOut.flagNoData = False
387 412
388 413 self.getBasicHeader()
389 414
390 415 self.dataOut.realtime = self.online
391 416
392 417 return self.dataOut.data
393 418
394 419 class VoltageWriter(JRODataWriter, Operation):
395 420 """
396 421 This class writes voltage data to processed files (.r). The data is always
397 422 written in blocks.
398 423 """
399 424
400 425 ext = ".r"
401 426
402 427 optchar = "D"
403 428
404 429 shapeBuffer = None
405 430
406 431
407 432 def __init__(self):
408 433 """
409 434 Initializer of the VoltageWriter class for writing voltage data.
410 435
411 436 Affected:
412 437 self.dataOut
413 438
414 439 Return: None
415 440 """
416 441 Operation.__init__(self)
417 442
418 443 self.nTotalBlocks = 0
419 444
420 445 self.profileIndex = 0
421 446
422 447 self.isConfig = False
423 448
424 449 self.fp = None
425 450
426 451 self.flagIsNewFile = 1
427 452
428 453 self.blockIndex = 0
429 454
430 455 self.flagIsNewBlock = 0
431 456
432 457 self.setFile = None
433 458
434 459 self.dtype = None
435 460
436 461 self.path = None
437 462
438 463 self.filename = None
439 464
440 465 self.basicHeaderObj = BasicHeader(LOCALTIME)
441 466
442 467 self.systemHeaderObj = SystemHeader()
443 468
444 469 self.radarControllerHeaderObj = RadarControllerHeader()
445 470
446 471 self.processingHeaderObj = ProcessingHeader()
447 472
448 473 def hasAllDataInBuffer(self):
449 474 if self.profileIndex >= self.processingHeaderObj.profilesPerBlock:
450 475 return 1
451 476 return 0
452 477
453 478
454 479 def setBlockDimension(self):
455 480 """
456 481 Sets the dimensional shapes of the data buffers that make up a block
457 482
458 483 Affected:
459 484 self.shapeBuffer
460 485 self.datablock
462 487
463 488 Return: None
464 489 """
465 490 self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock,
466 491 self.processingHeaderObj.nHeights,
467 492 self.systemHeaderObj.nChannels)
468 493
469 494 self.datablock = numpy.zeros((self.systemHeaderObj.nChannels,
470 495 self.processingHeaderObj.profilesPerBlock,
471 496 self.processingHeaderObj.nHeights),
472 497 dtype=numpy.dtype('complex64'))
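# shapeBuffer follows the on-disk layout (profiles, heights, channels), while the
# in-memory datablock keeps the processing layout (channels, profiles, heights);
# writeBlock transposes between the two.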
473 498
474 499 def writeBlock(self):
475 500 """
476 501 Writes the buffer to the designated file
477 502
478 503 Affected:
479 504 self.profileIndex
480 505 self.flagIsNewFile
481 506 self.flagIsNewBlock
482 507 self.nTotalBlocks
483 508 self.blockIndex
484 509
485 510 Return: None
486 511 """
487 512 data = numpy.zeros( self.shapeBuffer, self.dtype )
488 513
489 514 junk = numpy.transpose(self.datablock, (1,2,0))
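# The transpose reorders (channels, profiles, heights) into the on-disk order
# (profiles, heights, channels) before filling the 'real' and 'imag' fields.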
490 515
491 516 data['real'] = junk.real
492 517 data['imag'] = junk.imag
493 518
494 519 data = data.reshape( (-1) )
495 520
496 521 data.tofile( self.fp )
497 522
498 523 self.datablock.fill(0)
499 524
500 525 self.profileIndex = 0
501 526 self.flagIsNewFile = 0
502 527 self.flagIsNewBlock = 1
503 528
504 529 self.blockIndex += 1
505 530 self.nTotalBlocks += 1
506 531
507 532 # print "[Writing] Block = %04d" %self.blockIndex
508 533
509 534 def putData(self):
510 535 """
511 536 Fills a data block and then writes it to a file
512 537
513 538 Affected:
514 539 self.flagIsNewBlock
515 540 self.profileIndex
516 541
517 542 Return:
518 543 0 : If there is no data or there are no more files that can be written
519 544 1 : If a block of data was written to a file
520 545 """
521 546 if self.dataOut.flagNoData:
522 547 return 0
523 548
524 549 self.flagIsNewBlock = 0
525 550
526 551 if self.dataOut.flagDiscontinuousBlock:
527 552 self.datablock.fill(0)
528 553 self.profileIndex = 0
529 554 self.setNextFile()
530 555
531 556 if self.profileIndex == 0:
532 557 self.setBasicHeader()
533 558
534 559 self.datablock[:,self.profileIndex,:] = self.dataOut.data
535 560
536 561 self.profileIndex += 1
537 562
538 563 if self.hasAllDataInBuffer():
539 564 #if self.flagIsNewFile:
540 565 self.writeNextBlock()
541 566 # self.setFirstHeader()
542 567
543 568 return 1
544 569
545 570 def __getBlockSize(self):
546 571 '''
547 572 This method computes the number of bytes of a Voltage data block
548 573 '''
549 574
550 575 dtype_width = self.getDtypeWidth()
551 576
552 577 blocksize = int(self.dataOut.nHeights * self.dataOut.nChannels * self.profilesPerBlock * dtype_width * 2)
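# The trailing factor 2 covers the real and imaginary components. Worked example
# with hypothetical values: nHeights = 1000, nChannels = 4, profilesPerBlock = 128
# and a 2-byte dtype give 1000*4*128*2*2 = 2048000 bytes per block.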
553 578
554 579 return blocksize
555 580
556 581 def setFirstHeader(self):
557 582
558 583 """
559 584 Gets a copy of the First Header
560 585
561 586 Affected:
562 587 self.systemHeaderObj
563 588 self.radarControllerHeaderObj
564 589 self.dtype
565 590
566 591 Return:
567 592 None
568 593 """
569 594
570 595 self.systemHeaderObj = self.dataOut.systemHeaderObj.copy()
571 596 self.systemHeaderObj.nChannels = self.dataOut.nChannels
572 597 self.radarControllerHeaderObj = self.dataOut.radarControllerHeaderObj.copy()
573 598
574 599 self.processingHeaderObj.dtype = 0 # Voltage
575 600 self.processingHeaderObj.blockSize = self.__getBlockSize()
576 601 self.processingHeaderObj.profilesPerBlock = self.profilesPerBlock
577 602 self.processingHeaderObj.dataBlocksPerFile = self.blocksPerFile
578 603 self.processingHeaderObj.nWindows = 1 #could be 1 or self.dataOut.processingHeaderObj.nWindows
579 604 self.processingHeaderObj.nCohInt = self.dataOut.nCohInt
580 605 self.processingHeaderObj.nIncohInt = 1 # When the source data is of Voltage type
581 606 self.processingHeaderObj.totalSpectra = 0 # When the source data is of Voltage type
582 607
583 608 if self.dataOut.code is not None:
584 609 self.processingHeaderObj.code = self.dataOut.code
585 610 self.processingHeaderObj.nCode = self.dataOut.nCode
586 611 self.processingHeaderObj.nBaud = self.dataOut.nBaud
587 612
588 613 if self.processingHeaderObj.nWindows != 0:
589 614 self.processingHeaderObj.firstHeight = self.dataOut.heightList[0]
590 615 self.processingHeaderObj.deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]
591 616 self.processingHeaderObj.nHeights = self.dataOut.nHeights
592 617 self.processingHeaderObj.samplesWin = self.dataOut.nHeights
593 618
594 619 self.processingHeaderObj.processFlags = self.getProcessFlags()
595 620
596 621 self.setBasicHeader()
\ No newline at end of file