This diff has been collapsed as it changes many lines (1,123 lines changed).
@@ -21,6 +21,27 from DataIO import DataWriter | |||
|
21 | 21 | |
|
22 | 22 | from Model.Spectra import Spectra |
|
23 | 23 | |
|
24 | def isNumber( str ): | |
|
25 | """ | |
|
26 | Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero. | |
|
27 | ||
|
28 | Excepciones: | |
|
29 | Si un determinado string no puede ser convertido a numero | |
|
30 | Input: | |
|
31 | str, string al cual se le analiza para determinar si convertible a un numero o no | |
|
32 | ||
|
33 | Return: | |
|
34 | True : si el string es uno numerico | |
|
35 | False : no es un string numerico | |
|
36 | """ | |
|
37 | try: | |
|
38 | float( str ) | |
|
39 | return True | |
|
40 | except: | |
|
41 | return False | |
|
42 | ||
|
43 | ||
|
44 | ||
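The isNumber helper added above relies on float() raising an exception for non-numeric strings. A minimal standalone sketch of the same check (the test strings below are illustrative only):

    def isNumber(s):
        """Return True if the string s can be converted to a float."""
        try:
            float(s)
            return True
        except ValueError:
            return False

    print(isNumber("2009"))    # True,  the year field of a filename
    print(isNumber("307"))     # True,  the day-of-year field
    print(isNumber(".pdata"))  # False, an extension is not numeric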
|
24 | 45 | def isThisFileinRange(filename, startUTSeconds, endUTSeconds): |
|
25 | 46 | """ |
|
26 | 47 | Esta funcion determina si un archivo de datos en formato Jicamarca(.r) se encuentra |
@@ -31,6 +52,7 def isThisFileinRange(filename, startUTSeconds, endUTSeconds): | |||
|
31 | 52 | |
|
32 | 53 | startUTSeconds : fecha inicial del rango seleccionado. La fecha esta dada en |
|
33 | 54 | segundos contados desde 01/01/1970. |
|
55 | ||
|
34 | 56 | endUTSeconds : fecha final del rango seleccionado. La fecha esta dada en |
|
35 | 57 | segundos contados desde 01/01/1970. |
|
36 | 58 | |
@@ -41,12 +63,11 def isThisFileinRange(filename, startUTSeconds, endUTSeconds): | |||
|
41 | 63 | Excepciones: |
|
42 | 64 | Si el archivo no existe o no puede ser abierto |
|
43 | 65 | Si la cabecera no puede ser leida. |
|
44 | ||
|
45 | 66 | """ |
|
46 | 67 | m_BasicHeader = BasicHeader() |
|
47 | 68 | |
|
48 | 69 | try: |
|
49 | fp = open(filename,'rb') | |
|
70 | fp = open( filename,'rb' ) #lectura binaria | |
|
50 | 71 | except: |
|
51 | 72 | raise IOError, "The file %s can't be opened" %(filename) |
|
52 | 73 | |
@@ -64,8 +85,8 def isThisFileinRange(filename, startUTSeconds, endUTSeconds): | |||
|
64 | 85 | class SpectraReader(DataReader): |
|
65 | 86 | """ |
|
66 | 87 | Esta clase permite leer datos de espectros desde archivos procesados (.pdata). La lectura |
|
67 | de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones | |
|
|
68 | perfiless*alturas*canales) son almacenados en la variable "buffer". | |
|
88 | de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones) | |
|
89 | son almacenados en tres buffer's para el Self Spectra, el Cross Spectra y el DC Channel. | |
|
69 | 90 | |
|
70 | 91 | Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader, |
|
71 | 92 | RadarControllerHeader y Spectra. Los tres primeros se usan para almacenar informacion de la |
@@ -73,7 +94,6 class SpectraReader(DataReader): | |||
|
73 | 94 | datos desde el "buffer" cada vez que se ejecute el metodo "getData". |
|
74 | 95 | |
|
75 | 96 | Example: |
|
76 | ||
|
77 | 97 | dpath = "/home/myuser/data" |
|
78 | 98 | |
|
79 | 99 | startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0) |
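The Example block of this docstring is cut off by the collapsed hunk. A hedged sketch of how the reader is typically driven, following the attributes and return codes defined in this class (the import path is an assumption, adjust it to the project layout):

    import datetime
    from IO.SpectraIO import SpectraReader   # assumed module path

    dpath = "/home/myuser/data"
    startTime = datetime.datetime(2010, 1, 20, 0, 0, 0)
    endTime = datetime.datetime(2010, 1, 21, 0, 0, 0)

    readerObj = SpectraReader()
    readerObj.setup(dpath, startTime, endTime)

    while readerObj.getData():    # getData() returns 0 once there are no more files
        # readerObj.m_Spectra.data_spc, data_cspc and data_dc now hold the current block
        pass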
@@ -98,11 +118,12 class SpectraReader(DataReader): | |||
|
98 | 118 | #speed of light |
|
99 | 119 | __c = 3E8 |
|
100 | 120 | |
|
121 | ||
|
101 | 122 | def __init__( self, m_Spectra = None ): |
|
102 | 123 | """ |
|
103 | 124 | Inicializador de la clase SpectraReader para la lectura de datos de espectros. |
|
104 | 125 | |
|
105 | Input: | |
|
126 | Inputs: | |
|
106 | 127 | m_Spectra : Objeto de la clase Spectra. Este objeto sera utilizado para |
|
107 | 128 | almacenar un perfil de datos cada vez que se haga un requerimiento |
|
108 | 129 | (getData). El perfil sera obtenido a partir del buffer de datos, |
@@ -110,17 +131,14 class SpectraReader(DataReader): | |||
|
110 | 131 | bloque de datos. |
|
111 | 132 | Si este parametro no es pasado se creara uno internamente. |
|
112 | 133 | |
|
113 |
|
|
|
134 | Affected: | |
|
114 | 135 | self.m_Spectra |
|
115 | 136 | self.m_BasicHeader |
|
116 | 137 | self.m_SystemHeader |
|
117 | 138 | self.m_RadarControllerHeader |
|
118 | 139 | self.m_ProcessingHeader |
|
119 | 140 | |
|
120 | ||
|
121 | Return: | |
|
122 | Void | |
|
123 | ||
|
141 | Return : None | |
|
124 | 142 | """ |
|
125 | 143 | if m_Spectra == None: |
|
126 | 144 | m_Spectra = Spectra() |
@@ -178,11 +196,19 class SpectraReader(DataReader): | |||
|
178 | 196 | |
|
179 | 197 | self.fileSize = None |
|
180 | 198 | |
|
181 |
self.__ |
|
|
182 |
self.__ |
|
|
183 |
self.__ |
|
|
199 | self.__data_spc = None | |
|
200 | self.__data_cspc = None | |
|
201 | self.__data_dc = None | |
|
184 | 202 | |
|
185 |
self. |
|
|
203 | self.nChannels = 0 | |
|
204 | self.nPairs = 0 | |
|
205 | ||
|
206 | self.__pts2read_SelfSpectra = 0 | |
|
207 | self.__pts2read_CrossSpectra = 0 | |
|
208 | self.__pts2read_DCchannels = 0 | |
|
209 | self.__blocksize = 0 | |
|
210 | ||
|
211 | self.__datablockIndex = 0 | |
|
186 | 212 | |
|
187 | 213 | self.__ippSeconds = 0 |
|
188 | 214 | |
@@ -190,40 +216,111 class SpectraReader(DataReader): | |||
|
190 | 216 | |
|
191 | 217 | self.nCrossPairs = 0 |
|
192 | 218 | |
|
193 | self.nChannels = 0 | |
|
194 | ||
|
195 | self.__path = None | |
|
196 |
self.__ |
|
|
197 |
self.__ |
|
|
198 |
self.__ |
|
|
199 | self.__set = None | |
|
219 | self.__delay = 7 #seconds | |
|
220 | self.__nTries = 3 #quantity tries | |
|
221 | self.__nFiles = 3 #number of files for searching | |
|
222 | self.__year = 0 | |
|
223 | self.__doy = 0 | |
|
224 | self.__set = 0 | |
|
200 | 225 | self.__ext = None |
|
226 | self.__path = None | |
|
227 | self.datablock_id = 9999 | |
|
228 | ||
|
201 | 229 | |
|
202 | 230 | def __rdSystemHeader( self, fp=None ): |
|
231 | """ | |
|
232 | Lectura del System Header | |
|
233 | ||
|
234 | Inputs: | |
|
235 | fp : file pointer | |
|
236 | ||
|
237 | Affected: | |
|
238 | self.m_SystemHeader | |
|
239 | ||
|
240 | Return: None | |
|
241 | """ | |
|
203 | 242 | if fp == None: |
|
204 | 243 | fp = self.__fp |
|
205 | 244 | |
|
206 | 245 | self.m_SystemHeader.read( fp ) |
|
207 | 246 | |
|
247 | ||
|
248 | ||
|
208 | 249 | def __rdRadarControllerHeader( self, fp=None ): |
|
250 | """ | |
|
251 | Lectura del Radar Controller Header | |
|
252 | ||
|
253 | Inputs: | |
|
254 | fp : file pointer | |
|
255 | ||
|
256 | Affected: | |
|
257 | self.m_RadarControllerHeader | |
|
258 | ||
|
259 | Return: None | |
|
260 | """ | |
|
209 | 261 | if fp == None: |
|
210 | 262 | fp = self.__fp |
|
211 | 263 | |
|
212 | 264 | self.m_RadarControllerHeader.read(fp) |
|
213 | 265 | |
|
266 | ||
|
214 | 267 | def __rdProcessingHeader( self,fp=None ): |
|
268 | """ | |
|
269 | Lectura del Processing Header | |
|
270 | ||
|
271 | Inputs: | |
|
272 | fp : file pointer | |
|
273 | ||
|
274 | Affected: | |
|
275 | self.m_ProcessingHeader | |
|
276 | ||
|
277 | Return: None | |
|
278 | """ | |
|
215 | 279 | if fp == None: |
|
216 | 280 | fp = self.__fp |
|
217 | 281 | |
|
218 | 282 | self.m_ProcessingHeader.read(fp) |
|
219 | 283 | |
|
284 | ||
|
220 | 285 | def __rdBasicHeader( self, fp=None ): |
|
286 | """ | |
|
287 | Lectura del Basic Header | |
|
288 | ||
|
289 | Inputs: | |
|
290 | fp : file pointer | |
|
291 | ||
|
292 | Affected: | |
|
293 | self.m_BasicHeader | |
|
294 | ||
|
295 | Return: None | |
|
296 | """ | |
|
221 | 297 | if fp == None: |
|
222 | 298 | fp = self.__fp |
|
223 | 299 | |
|
224 | 300 | self.m_BasicHeader.read(fp) |
|
225 | 301 | |
|
302 | ||
|
226 | 303 | def __readFirstHeader( self ): |
|
304 | """ | |
|
305 | Lectura del First Header, es decir el Basic Header y el Long Header | |
|
306 | ||
|
307 | Affected: | |
|
308 | self.m_BasicHeader | |
|
309 | self.m_SystemHeader | |
|
310 | self.m_RadarControllerHeader | |
|
311 | self.m_ProcessingHeader | |
|
312 | self.firstHeaderSize | |
|
313 | self.__heights | |
|
314 | self.__dataType | |
|
315 | self.__fileSizeByHeader | |
|
316 | self.__ippSeconds | |
|
317 | self.nChannels | |
|
318 | self.nPairs | |
|
319 | self.__pts2read_SelfSpectra | |
|
320 | self.__pts2read_CrossSpectra | |
|
321 | ||
|
322 | Return: None | |
|
323 | """ | |
|
227 | 324 | self.__rdBasicHeader() |
|
228 | 325 | self.__rdSystemHeader() |
|
229 | 326 | self.__rdRadarControllerHeader() |
@@ -257,73 +354,223 class SpectraReader(DataReader): | |||
|
257 | 354 |
xf |
|
258 | 355 | |
|
259 | 356 | self.__heights |
|
357 | ||
|
260 | 358 | self.__dataType |
|
261 | 359 | self.__fileSizeByHeader = self.m_ProcessingHeader.dataBlocksPerFile * self.m_ProcessingHeader.blockSize + self.firstHeaderSize + self.basicHeaderSize*(self.m_ProcessingHeader.dataBlocksPerFile - 1) |
|
262 | 360 | self.__ippSeconds |
|
263 | 361 | |
|
264 | def __setNextFileOnline(self, delay = 60 ): | |
|
362 | self.nChannels = 0 | |
|
363 | self.nPairs = 0 | |
|
364 | ||
|
365 | for i in range( 0, self.m_ProcessingHeader.totalSpectra*2, 2 ): | |
|
366 | if self.m_ProcessingHeader.spectraComb[i] == self.m_ProcessingHeader.spectraComb[i+1]: | |
|
367 | self.nChannels = self.nChannels + 1 | |
|
368 | else: | |
|
369 | self.nPairs = self.nPairs + 1 | |
|
370 | ||
|
371 | pts2read = self.m_ProcessingHeader.profilesPerBlock * self.m_ProcessingHeader.numHeights | |
|
372 | self.__pts2read_SelfSpectra = int( pts2read * self.nChannels ) | |
|
373 | self.__pts2read_CrossSpectra = int( pts2read * self.nPairs ) | |
|
374 | self.__pts2read_DCchannels = int( self.m_ProcessingHeader.numHeights * self.m_SystemHeader.numChannels ) | |
|
375 | ||
|
376 | self.__blocksize = self.__pts2read_SelfSpectra + self.__pts2read_CrossSpectra + self.__pts2read_DCchannels | |
|
377 | ||
|
378 | self.m_Spectra.nChannels = self.nChannels | |
|
379 | self.m_Spectra.nPairs = self.nPairs | |
|
380 | ||
|
381 | ||
|
382 | def __checkForRealPath( self ): | |
|
383 | """ | |
|
384 | Prueba por varias combinaciones de nombres entre mayusculas y minusculas para determinar | |
|
385 | el path exacto de un determinado file. | |
|
386 | ||
|
387 | Example : | |
|
388 | nombre correcto del file es ../RAWDATA/D2009307/P2009307367 | |
|
389 | ||
|
390 | Entonces la funcion prueba con las siguientes combinaciones | |
|
391 | ../RAWDATA/d2009307/p2009307367 | |
|
392 | ../RAWDATA/d2009307/P2009307367 | |
|
393 | ../RAWDATA/D2009307/p2009307367 | |
|
394 | ../RAWDATA/D2009307/P2009307367 | |
|
395 | siendo para este caso, la ultima combinacion de letras, identica al file buscado | |
|
396 | ||
|
397 | Return: | |
|
398 | Si encuentra la cobinacion adecuada devuelve el path completo y el nombre del file | |
|
399 | caso contrario devuelve None | |
|
265 | 400 |
|
|
401 | filepath = None | |
|
402 | find_flag = False | |
|
403 | filename = None | |
|
404 | ||
|
405 | for dir in "dD": #barrido por las dos combinaciones posibles de "D" | |
|
406 | for fil in "pP": #barrido por las dos combinaciones posibles de "D" | |
|
407 | doypath = "%s%04d%03d" % ( dir, self.__year, self.__doy ) #formo el nombre del directorio xYYYYDDD (x=d o x=D) | |
|
408 | filename = "%s%04d%03d%03d%s" % ( fil, self.__year, self.__doy, self.__set, self.__ext ) #formo el nombre del file xYYYYDDDSSS.ext (p=d o p=D) | |
|
409 | filepath = os.path.join( self.__path, doypath, filename ) #formo el path completo | |
|
410 | if os.path.exists( filepath ): #verifico que exista | |
|
411 | find_flag = True | |
|
412 | break | |
|
413 | if find_flag: | |
|
414 | break | |
|
266 | 415 | |
|
416 | if not(find_flag): | |
|
417 | return None, filename | |
|
267 | 418 | |
|
268 | return: | |
|
269 | bool | |
|
419 | return filepath, filename | |
|
270 | 420 |
|
|
421 | ||
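The docstring above describes probing every upper/lower-case combination of the day folder and file name. A self-contained sketch of that search, mirroring the loop added in this hunk (path, year, doy, fileset and ext values are hypothetical):

    import os

    def check_for_real_path(path, year, doy, fileset, ext):
        """Try the dD x pP name combinations; return (filepath, filename) or (None, filename)."""
        filename = None
        for dir_letter in "dD":                  # folder prefix: d2009307 or D2009307
            for file_letter in "pP":             # file prefix: p2009307367.ext or P2009307367.ext
                doypath = "%s%04d%03d" % (dir_letter, year, doy)
                filename = "%s%04d%03d%03d%s" % (file_letter, year, doy, fileset, ext)
                filepath = os.path.join(path, doypath, filename)
                if os.path.exists(filepath):     # the first combination found on disk wins
                    return filepath, filename
        return None, filename

    # e.g. check_for_real_path("../RAWDATA", 2009, 307, 367, ".pdata")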
|
422 | def __setNextFileOnline( self ): | |
|
271 | 423 |
|
|
272 | nFiles = 3 | |
|
273 | nTries = 3 | |
|
424 | Busca el siguiente file que tenga suficiente data para ser leida, dentro de un folder especifico, si | |
|
425 | no encuentra un file valido espera un tiempo determinado y luego busca en los posibles n files | |
|
426 | siguientes. | |
|
427 | ||
|
428 | Affected: | |
|
429 | self.__flagIsNewFile | |
|
430 | self.filename | |
|
431 | self.fileSize | |
|
432 | self.__fp | |
|
433 | self.__set | |
|
434 | self.noMoreFiles | |
|
435 | ||
|
436 | Return: | |
|
437 | 0 : si luego de una busqueda del siguiente file valido este no pudo ser encontrado | |
|
438 | 1 : si el file fue abierto con exito y esta listo a ser leido | |
|
274 | 439 |
|
|
440 | Excepciones: | |
|
441 | Si un determinado file no puede ser abierto | |
|
442 | """ | |
|
275 | 443 | countFiles = 0 |
|
276 | 444 | countTries = 0 |
|
277 | 445 | |
|
278 | fileStatus = | |
|
|
446 | fileStatus = 0 | |
|
447 | notFirstTime_flag = False | |
|
448 | bChangeDir = False | |
|
449 | ||
|
450 | fileSize = 0 | |
|
451 | fp = None | |
|
452 | ||
|
453 | self.__flagIsNewFile = 0 | |
|
454 | ||
|
455 | while(True): #este loop permite llevar la cuenta de intentos, de files y carpetas, | |
|
456 | #si no encuentra alguno sale del bucle | |
|
279 | 457 | |
|
280 | while(True): | |
|
281 | 458 | countFiles += 1 |
|
282 | 459 | |
|
283 | if countFiles > nFiles+1: | |
|
460 | if countFiles > (self.__nFiles + 1): | |
|
284 | 461 | break |
|
285 | 462 | |
|
286 | self.set += 1 | |
|
463 | self.__set += 1 | |
|
287 | 464 | |
|
288 | if countFiles > nFiles: | |
|
289 |
self. |
|
|
290 |
self. |
|
|
465 | if countFiles > self.__nFiles: #si no encuentro el file buscado cambio de carpeta y busco en la siguiente carpeta | |
|
466 | self.__set = 0 | |
|
467 | self.__doy += 1 | |
|
468 | bChangeDir = True | |
|
291 | 469 | |
|
292 | doypath = "D%04d%04d" %(self.year, self.doy) | |
|
293 | filename = "D%04d%04d%03d%s" %(self.year, self.doy, self.set, self.__ext) | |
|
294 | file = os.path.join(self.filepath, doypath, filename) | |
|
295 | fileSize = os.path.getsize(file) | |
|
470 | file = None | |
|
471 | filename = None | |
|
296 | 472 | |
|
297 |
|
|
|
298 | fp = open(file) | |
|
299 | except: | |
|
300 | raise IOError, "The file %s can't be opened" %file | |
|
473 | countTries = 0 | |
|
474 | ||
|
475 | #espero hasta encontrar el 1er file disponible | |
|
476 | while( True ): | |
|
477 | ||
|
478 | countTries += 1 | |
|
479 | if( countTries >= self.__nTries ): #checkeo que no haya ido mas alla de la cantidad de intentos | |
|
480 | break | |
|
481 | ||
|
482 | file, filename = self.__checkForRealPath() | |
|
483 | if file != None: | |
|
484 | break | |
|
301 | 485 | |
|
486 | if notFirstTime_flag: #este flag me sirve solo para esperar por el 1er file, en lo siguientes no espera solo checkea si existe o no | |
|
487 | countTries = self.__nTries | |
|
488 | print "\tsearching next \"%s\" file ..." % filename | |
|
489 | break | |
|
490 | ||
|
491 | print "\twaiting new \"%s\" file ..." % filename | |
|
492 | time.sleep( self.__delay ) | |
|
493 | ||
|
494 | if countTries >= self.__nTries: #se realizaron n intentos y no hubo un file nuevo | |
|
495 | notFirstTime_flag = True | |
|
496 | continue #vuelvo al inico del while principal | |
|
497 | ||
|
498 | countTries = 0 | |
|
499 | ||
|
500 | #una vez que se obtuvo el 1er file valido se procede a checkear su contenido, y se espera una cierta cantidad | |
|
501 | #de tiempo por una cierta cantidad de veces hasta que el contenido del file sea un contenido valido | |
|
302 | 502 | while(True): |
|
303 | 503 | countTries += 1 |
|
304 | if countTries > nTries: | |
|
504 | if countTries > self.__nTries: | |
|
505 | break | |
|
506 | ||
|
507 | try: | |
|
508 | fp = open(file) | |
|
509 | except: | |
|
510 | print "The file \"%s\" can't be opened" % file | |
|
305 | 511 | break |
|
306 | 512 | |
|
513 | fileSize = os.path.getsize( file ) | |
|
307 | 514 | currentSize = fileSize - fp.tell() |
|
308 | 515 | neededSize = self.m_ProcessingHeader.blockSize + self.firstHeaderSize |
|
309 | 516 | |
|
310 |
if |
|
|
311 |
|
|
|
312 | time.sleep(delay) | |
|
313 | else: | |
|
314 | fileStatus = True | |
|
517 | if currentSize > neededSize: | |
|
518 | fileStatus = 1 | |
|
315 | 519 | break |
|
316 | 520 | |
|
317 |
|
|
|
521 | fp.close() | |
|
522 | ||
|
523 | if bChangeDir: #si al buscar un file cambie de directorio ya no espero y salgo del bucle while | |
|
524 | print "\tsearching next \"%s\" file ..." % filename | |
|
318 | 525 | break |
|
319 | 526 | |
|
320 | print "Skipping the file %s due to it hasn't enough data" %filename | |
|
321 | fp.close() | |
|
527 | print "\twaiting for block of \"%s\" file ..." % filename | |
|
528 | time.sleep( self.__delay ) | |
|
529 | ||
|
530 | if fileStatus == 1: | |
|
531 | break | |
|
532 | ||
|
533 | print "Skipping the file \"%s\" due to this files is empty" % filename | |
|
534 | countFiles = 0 | |
|
535 | ||
|
536 | ||
|
537 | if fileStatus == 1: | |
|
538 | self.fileSize = fileSize | |
|
539 | self.filename = file#name | |
|
540 | self.__flagIsNewFile = 1 | |
|
541 | self.__fp = fp | |
|
542 | self.noMoreFiles = 0 | |
|
543 | print 'Setting the file: %s' % file #name | |
|
544 | else: | |
|
545 | self.fileSize = 0 | |
|
546 | self.filename = None | |
|
547 | self.__fp = None | |
|
548 | self.noMoreFiles = 1 | |
|
549 | print 'No more Files' | |
|
322 | 550 | |
|
323 | 551 | return fileStatus |
|
324 | 552 | |
|
553 | ||
|
325 | 554 | def __setNextFileOffline( self ): |
|
555 | """ | |
|
556 | Busca el siguiente file dentro de un folder que tenga suficiente data para ser leida | |
|
557 | ||
|
558 | Affected: | |
|
559 | self.__flagIsNewFile | |
|
560 | self.__idFile | |
|
561 | self.filename | |
|
562 | self.fileSize | |
|
563 | self.__fp | |
|
564 | ||
|
565 | Return: | |
|
566 | 0 : si un determinado file no puede ser abierto | |
|
567 | 1 : si el file fue abierto con exito | |
|
568 | ||
|
569 | Excepciones: | |
|
570 | Si un determinado file no puede ser abierto | |
|
571 | """ | |
|
326 | 572 | idFile = self.__idFile |
|
573 | self.__flagIsNewFile = 0 | |
|
327 | 574 | |
|
328 | 575 | while(True): |
|
329 | 576 | idFile += 1 |
@@ -349,7 +596,6 class SpectraReader(DataReader): | |||
|
349 | 596 | continue |
|
350 | 597 | |
|
351 | 598 | break |
|
352 | ||
|
353 | 599 | self.__flagIsNewFile = 1 |
|
354 | 600 | self.__idFile = idFile |
|
355 | 601 | self.filename = filename |
@@ -360,8 +606,22 class SpectraReader(DataReader): | |||
|
360 | 606 | |
|
361 | 607 | return 1 |
|
362 | 608 | |
|
609 | ||
|
363 | 610 | def __setNextFile(self): |
|
611 | """ | |
|
612 | Determina el siguiente file a leer y si hay uno disponible lee el First Header | |
|
364 | 613 | |
|
614 | Affected: | |
|
615 | self.m_BasicHeader | |
|
616 | self.m_SystemHeader | |
|
617 | self.m_RadarControllerHeader | |
|
618 | self.m_ProcessingHeader | |
|
619 | self.firstHeaderSize | |
|
620 | ||
|
621 | Return: | |
|
622 | 0 : Si no hay files disponibles | |
|
623 | 1 : Si hay mas files disponibles | |
|
624 | """ | |
|
365 | 625 | if self.__fp != None: |
|
366 | 626 | self.__fp.close() |
|
367 | 627 | |
@@ -377,7 +637,20 class SpectraReader(DataReader): | |||
|
377 | 637 | |
|
378 | 638 | return 1 |
|
379 | 639 | |
|
640 | ||
|
380 | 641 | def __setNewBlock( self ): |
|
642 | """ | |
|
643 | Lee el Basic Header y posiciona le file pointer en la posicion inicial del bloque a leer | |
|
644 | ||
|
645 | Affected: | |
|
646 | self.m_BasicHeader | |
|
647 | self.flagResetProcessing | |
|
648 | self.ns | |
|
649 | ||
|
650 | Return: | |
|
651 | 0 : Si el file no tiene un Basic Header que pueda ser leido | |
|
652 | 1 : Si se pudo leer el Basic Header | |
|
653 | """ | |
|
381 | 654 | if self.__fp == None: |
|
382 | 655 | return 0 |
|
383 | 656 | |
@@ -407,107 +680,163 class SpectraReader(DataReader): | |||
|
407 | 680 | return 1 |
|
408 | 681 | |
|
409 | 682 | |
|
410 | ||
|
411 | 683 | def __readBlock(self): |
|
412 | 684 | """ |
|
413 |
|
|
|
685 | Lee el bloque de datos desde la posicion actual del puntero del archivo | |
|
414 | 686 | (self.__fp) y actualiza todos los parametros relacionados al bloque de datos |
|
415 | 687 | (metadata + data). La data leida es almacenada en el buffer y el contador del buffer |
|
416 | 688 | es seteado a 0 |
|
417 | 689 | |
|
418 | Inputs: | |
|
419 | None | |
|
420 | ||
|
421 | Return: | |
|
422 | None | |
|
690 | Return: None | |
|
423 | 691 | |
|
424 | 692 | Variables afectadas: |
|
425 | ||
|
426 | self.__buffer_id | |
|
427 | ||
|
428 | self.__buffer_sspc | |
|
429 | ||
|
693 | self.__datablockIndex | |
|
430 | 694 | self.__flagIsNewFile |
|
431 | ||
|
432 | 695 | self.flagIsNewBlock |
|
433 | ||
|
434 | 696 | self.nReadBlocks |
|
435 | ||
|
697 | self.__data_spc | |
|
698 | self.__data_cspc | |
|
699 | self.__data_dc | |
|
436 | 700 | """ |
|
437 | Npair_SelfSpectra = 0 | |
|
438 | Npair_CrossSpectra = 0 | |
|
439 | ||
|
440 | for i in range( 0,self.m_ProcessingHeader.totalSpectra*2,2 ): | |
|
441 | if self.m_ProcessingHeader.spectraComb[i] == self.m_ProcessingHeader.spectraComb[i+1]: | |
|
442 | Npair_SelfSpectra = Npair_SelfSpectra + 1 | |
|
443 | else: | |
|
444 | Npair_CrossSpectra = Npair_CrossSpectra + 1 | |
|
445 | ||
|
446 | # self.__buffer_sspc = numpy.concatenate( (data_sspc,data_cspc,data_dcc), axis=0 ) | |
|
447 | ||
|
448 | self.__buffer_id = 0 | |
|
701 | self.datablock_id = 0 | |
|
449 | 702 | self.__flagIsNewFile = 0 |
|
450 | 703 | self.flagIsNewBlock = 1 |
|
451 | 704 | |
|
452 | pts2read = self.m_ProcessingHeader.profilesPerBlock*self.m_ProcessingHeader.numHeights | |
|
705 | spc = numpy.fromfile( self.__fp, self.__dataType[0], self.__pts2read_SelfSpectra ) | |
|
706 | cspc = numpy.fromfile( self.__fp, self.__dataType, self.__pts2read_CrossSpectra ) | |
|
707 | dc = numpy.fromfile( self.__fp, self.__dataType, self.__pts2read_DCchannels ) #int(self.m_ProcessingHeader.numHeights*self.m_SystemHeader.numChannels) ) | |
|
453 | 708 | |
|
454 | spc = numpy.fromfile(self.__fp, self.__dataType[0], int(pts2read*Npair_SelfSpectra)) | |
|
455 | cspc = numpy.fromfile(self.__fp, self.__dataType, int(pts2read*Npair_CrossSpectra)) | |
|
456 | dc = numpy.fromfile(self.__fp, self.__dataType, int(self.m_ProcessingHeader.numHeights*self.m_SystemHeader.numChannels) ) | |
|
709 | spc = spc.reshape( (self.nChannels, self.m_ProcessingHeader.numHeights, self.m_ProcessingHeader.profilesPerBlock) ) #transforma a un arreglo 3D | |
|
457 | 710 | |
|
458 | spc = spc.reshape(( | |
|
|
459 | cspc = cspc.reshape((Npair_CrossSpectra, self.m_ProcessingHeader.numHeights, self.m_ProcessingHeader.profilesPerBlock)) | |
|
460 | dc = dc.reshape((self.m_SystemHeader.numChannels, self.m_ProcessingHeader.numHeights)) | |
|
711 | cspc = cspc.reshape( (self.nPairs, self.m_ProcessingHeader.numHeights, self.m_ProcessingHeader.profilesPerBlock) ) #transforma a un arreglo 3D | |
|
712 | dc = dc.reshape( (self.m_SystemHeader.numChannels, self.m_ProcessingHeader.numHeights) ) #transforma a un arreglo 2D | |
|
461 | 713 | |
|
462 | 714 | if not(self.m_ProcessingHeader.shif_fft): |
|
463 | spc = numpy.roll(spc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2) | |
|
464 | cspc = numpy.roll(cspc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2) | |
|
715 | spc = numpy.roll( spc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones | |
|
716 | cspc = numpy.roll( cspc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones | |
|
465 | 717 | |
|
466 | 718 | spc = numpy.transpose(spc, (0,2,1)) |
|
467 | 719 | cspc = numpy.transpose(cspc, (0,2,1)) |
|
468 | 720 | #dc = numpy.transpose(dc, (0,2,1)) |
|
469 | 721 | |
|
470 | data_spc = spc | |
|
471 | data_cspc = cspc['real'] + cspc['imag']*1j | |
|
472 | data_dc = dc['real'] + dc['imag']*1j | |
|
473 | ||
|
474 | self.__buffer_spc = data_spc | |
|
475 | self.__buffer_cspc = data_cspc | |
|
476 | self.__buffer_dc = data_dc | |
|
722 | self.__data_spc = spc | |
|
723 | self.__data_cspc = cspc['real'] + cspc['imag']*1j | |
|
724 | self.__data_dc = dc['real'] + dc['imag']*1j | |
|
477 | 725 | |
|
478 | 726 | self.__flagIsNewFile = 0 |
|
479 | 727 | |
|
480 | 728 | self.flagIsNewBlock = 1 |
|
481 | 729 | |
|
482 | 730 | self.nReadBlocks += 1 |
|
731 | self.datablock_id = 0 | |
|
732 | ||
|
483 | 733 | |
|
484 | 734 | |
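The reshape/roll/transpose sequence in __readBlock above turns the flat samples returned by numpy.fromfile into (channel, profile, height) blocks and undoes the missing FFT shift. A minimal numpy sketch of the same steps with made-up dimensions (nChannels, numHeights and profilesPerBlock stand in for the header values):

    import numpy

    nChannels, numHeights, profilesPerBlock = 4, 100, 128
    flat = numpy.arange(nChannels * numHeights * profilesPerBlock, dtype="f4")  # stand-in for fromfile()

    spc = flat.reshape(nChannels, numHeights, profilesPerBlock)   # 1D buffer -> 3D block
    spc = numpy.roll(spc, profilesPerBlock // 2, axis=2)          # only done when shif_fft is not set
    spc = numpy.transpose(spc, (0, 2, 1))                         # -> (channel, profile, height)

    print(spc.shape)   # (4, 128, 100)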
|
485 | 735 | def __hasNotDataInBuffer(self): |
|
736 | #if self.datablock_id >= self.m_ProcessingHeader.profilesPerBlock: | |
|
486 | 737 | return 1 |
|
487 | 738 | |
|
488 | def __searchFilesOnline(self, path, startDateTime, expLabel = "", ext = ".pdata"): | |
|
739 | ||
|
740 | def __getlastFileFromPath( self, pathList, ext ): | |
|
489 | 741 | """ |
|
742 | Depura el pathList dejando solo los que cumplan el formato de "PYYYYDDDSSS.ext" | |
|
743 | al final de la depuracion devuelve el ultimo file de la lista que quedo. | |
|
490 | 744 | |
|
745 | Input: | |
|
746 | pathList : lista conteniendo todos los filename completos que componen una determinada carpeta | |
|
747 | ext : extension de los files contenidos en una carpeta | |
|
491 | 748 | |
|
492 | 749 | Return: |
|
750 | El ultimo file de una determinada carpeta | |
|
751 | """ | |
|
493 | 752 | |
|
494 |
|
|
|
753 | filesList = [] | |
|
754 | filename = None | |
|
495 | 755 | |
|
496 | filename | |
|
756 | # 0123456789ABCDE | |
|
757 | # PYYYYDDDSSS.ext | |
|
497 | 758 |
|
|
498 | year | |
|
759 | for filename in pathList: | |
|
760 | year = filename[1:5] | |
|
761 | doy = filename[5:8] | |
|
762 | leng = len( ext ) | |
|
499 | 763 | |
|
500 | doy | |
|
764 | if ( filename[-leng:].upper() != ext.upper() ) : continue | |
|
765 | if not( isNumber( year ) ) : continue | |
|
766 | if not( isNumber( doy ) ) : continue | |
|
501 | 767 | |
|
502 | set | |
|
768 | filesList.append(filename) | |
|
769 | ||
|
770 | if len( filesList ) > 0: | |
|
771 | #filesList.sort() | |
|
772 | filesList = sorted( filesList, key=str.lower ) | |
|
773 | filename = filesList[-1] | |
|
774 | ||
|
775 | return filename | |
|
776 | ||
|
777 | ||
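A short sketch of the "PYYYYDDDSSS.ext" filtering performed by __getlastFileFromPath, applied to a hypothetical folder listing; only names with a numeric year and day-of-year and the right extension survive, and the case-insensitive sort leaves the newest set last:

    def last_file_from_list(file_list, ext=".pdata"):
        good = []
        for filename in file_list:
            year = filename[1:5]    # PYYYYDDDSSS.ext -> YYYY
            doy = filename[5:8]     #                 -> DDD
            if filename[-len(ext):].upper() != ext.upper():
                continue
            if not (year.isdigit() and doy.isdigit()):
                continue
            good.append(filename)
        if not good:
            return None
        return sorted(good, key=str.lower)[-1]

    print(last_file_from_list(["P2009307000.pdata", "P2009307001.pdata", "notes.txt"]))
    # -> P2009307001.pdata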
|
778 | def __searchFilesOnLine( self, path, startDateTime=None, ext = ".pdata" ): | |
|
779 | """ | |
|
780 | Busca el ultimo archivo de la ultima carpeta (determinada o no por startDateTime) y | |
|
781 | devuelve el archivo encontrado ademas de otros datos. | |
|
782 | ||
|
783 | Input: | |
|
784 | path : carpeta donde estan contenidos los files que contiene data | |
|
785 | startDateTime : punto especifico en el tiempo del cual se requiere la data | |
|
786 | ext : extension de los files | |
|
503 | 787 | |
|
788 | Return: | |
|
789 | year : el anho | |
|
790 | doy : el numero de dia del anho | |
|
791 | set : el set del archivo | |
|
792 | filename : el ultimo file de una determinada carpeta | |
|
793 | directory : eL directorio donde esta el file encontrado | |
|
504 | 794 | """ |
|
505 | 795 | |
|
506 | pass | |
|
796 | print "Searching files ..." | |
|
797 | ||
|
798 | dirList = [] | |
|
799 | directory = None | |
|
800 | ||
|
801 | if startDateTime == None: | |
|
802 | for thisPath in os.listdir(path): | |
|
803 | if os.path.isdir( os.path.join(path,thisPath) ): | |
|
804 | dirList.append( thisPath ) | |
|
805 | ||
|
806 | dirList = sorted( dirList, key=str.lower ) #para que quede ordenado al margen de si el nombre esta en mayusculas o minusculas, utilizo la funcion sorted | |
|
807 | if len(dirList) > 0 : | |
|
808 | directory = dirList[-1] | |
|
809 | else: | |
|
810 | year = startDateTime.timetuple().tm_year | |
|
811 | doy = startDateTime.timetuple().tm_yday | |
|
812 | ||
|
813 | doyPath = "D%04d%03d" % (year,doy) #caso del nombre en mayusculas | |
|
814 | if os.path.isdir( os.path.join(path,doyPath) ): | |
|
815 | directory = doyPath | |
|
816 | ||
|
817 | doyPath = doyPath.lower() #caso del nombre en minusculas | |
|
818 | if os.path.isdir( os.path.join(path,doyPath) ): | |
|
819 | directory = doyPath | |
|
820 | ||
|
821 | if directory == None: | |
|
822 | return 0, 0, 0, None, None | |
|
823 | ||
|
824 | filename = self.__getlastFileFromPath( os.listdir( os.path.join(path,directory) ), ext ) | |
|
825 | ||
|
826 | if filename == None: | |
|
827 | return 0, 0, 0, None, None | |
|
828 | ||
|
829 | year = int( directory[-7:-3] ) | |
|
830 | doy = int( directory[-3:] ) | |
|
831 | ln = len( ext ) | |
|
832 | set = int( filename[-ln-3:-ln] ) | |
|
507 | 833 | |
|
508 | def __searchFilesOffline(self, path, startDateTime, endDateTime, set=None, expLabel = "", ext = ".pdata"): | |
|
834 | return year, doy, set, filename, directory | |
|
835 | ||
|
836 | ||
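__searchFilesOnLine recovers year, doy and set by slicing the folder and file names; a small sketch of that slicing with a hypothetical D2009307/P2009307367.pdata pair:

    directory = "D2009307"
    filename = "P2009307367.pdata"
    ext = ".pdata"

    year = int(directory[-7:-3])           # 2009
    doy = int(directory[-3:])              # 307
    ln = len(ext)
    fileset = int(filename[-ln - 3:-ln])   # 367, the SSS field just before the extension

    print((year, doy, fileset))            # (2009, 307, 367)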
|
837 | def __searchFilesOffLine( self, path, startDateTime, endDateTime, set=None, expLabel = "", ext = ".pdata" ): | |
|
509 | 838 | """ |
|
510 |
|
|
|
839 | Realiza una busqueda de los archivos que coincidan con los parametros | |
|
511 | 840 | especificados y se encuentren ubicados en el path indicado. Para realizar una busqueda |
|
512 | 841 | correcta la estructura de directorios debe ser la siguiente: |
|
513 | 842 | |
@@ -548,9 +877,6 class SpectraReader(DataReader): | |||
|
548 | 877 | como fuente para leer los bloque de datos, si se termina |
|
549 | 878 | de leer todos los bloques de datos de un determinado |
|
550 | 879 | archivo se pasa al siguiente archivo de la lista. |
|
551 | ||
|
552 | Excepciones: | |
|
553 | ||
|
554 | 880 | """ |
|
555 | 881 | |
|
556 | 882 | print "Searching files ..." |
@@ -593,16 +919,56 class SpectraReader(DataReader): | |||
|
593 | 919 | return pathList, filenameList |
|
594 | 920 | |
|
595 | 921 | |
|
596 | def __searchFiles(self, path, startDateTime, endDateTime, set, expLabel, ext, online): | |
|
922 | def __initFilesOnline( self, path, dirfilename, filename ): | |
|
923 | """ | |
|
924 | Verifica que el primer file tenga una data valida, para ello leo el 1er bloque | |
|
925 | del file, si no es un file valido espera una cierta cantidad de tiempo a que | |
|
926 | lo sea, si transcurrido el tiempo no logra validar el file entonces el metodo | |
|
927 | devuelve 0 caso contrario devuelve 1 | |
|
597 | 928 |
|
|
598 |
|
|
|
599 | pathList, filenameList = self.__searchFilesOnline(path, startDateTime, endDateTime, set, expLabel, ext) | |
|
600 | else: | |
|
601 | pathList, filenameList = self.__searchFilesOffline(path, startDateTime, endDateTime, set, expLabel, ext) | |
|
929 | Affected: | |
|
930 | m_BasicHeader | |
|
931 | ||
|
932 | Return: | |
|
933 | 0 : file no valido para ser leido | |
|
934 | 1 : file valido para ser leido | |
|
935 | """ | |
|
936 | m_BasicHeader = BasicHeader() | |
|
937 | ||
|
938 | file = os.path.join( path, dirfilename, filename ) | |
|
939 | ||
|
940 | nTries = 0 | |
|
941 | while(True): | |
|
942 | ||
|
943 | nTries += 1 | |
|
944 | if nTries > self.__nTries: | |
|
945 | break | |
|
946 | ||
|
947 | try: | |
|
948 | fp = open( file,'rb' ) #lectura binaria | |
|
949 | except: | |
|
950 | raise IOError, "The file %s can't be opened" %(file) | |
|
951 | ||
|
952 | try: | |
|
953 | m_BasicHeader.read(fp) | |
|
954 | except: | |
|
955 | print "The file %s is empty" % filename | |
|
956 | ||
|
957 | fp.close() | |
|
958 | ||
|
959 | if m_BasicHeader.size > 24: | |
|
960 | break | |
|
961 | ||
|
962 | print 'waiting for new block: try %02d' % ( nTries ) | |
|
963 | time.sleep( self.__delay) | |
|
964 | ||
|
965 | if m_BasicHeader.size <= 24: | |
|
966 | return 0 | |
|
602 | 967 | |
|
603 | 968 | return 1 |
|
604 | 969 | |
|
605 | def setup( self, path, startDateTime, endDateTime=None, set=None, expLabel = "", ext = ".pdata", online = 0 ): | |
|
970 | ||
|
971 | def setup( self, path, startDateTime=None, endDateTime=None, set=None, expLabel = "", ext = ".pdata", online = 0 ): | |
|
606 | 972 | """ |
|
607 | 973 | setup configura los parametros de lectura de la clase SpectraReader. |
|
608 | 974 | |
@@ -626,59 +992,114 class SpectraReader(DataReader): | |||
|
626 | 992 | |
|
627 | 993 | expLabel : Nombre del subdirectorio de datos. Por defecto "" |
|
628 | 994 | |
|
629 |
ext : Extension de los archivos a leer. Por defecto . |
|
|
995 | ext : Extension de los archivos a leer. Por defecto .pdata | |
|
630 | 996 | |
|
631 | online : | |
|
997 | online : Si es == a 0 entonces busca files que cumplan con las condiciones dadas | |
|
632 | 998 | |
|
633 | 999 | Return: |
|
1000 | 0 : Si no encuentra files que cumplan con las condiciones dadas | |
|
1001 | 1 : Si encuentra files que cumplan con las condiciones dadas | |
|
634 | 1002 | |
|
635 | 1003 | Affected: |
|
1004 | self.startUTCSeconds | |
|
1005 | self.endUTCSeconds | |
|
1006 | self.startYear | |
|
1007 | self.endYear | |
|
1008 | self.startDoy | |
|
1009 | self.endDoy | |
|
1010 | self.__pathList | |
|
1011 | self.filenameList | |
|
1012 | self.online | |
|
1013 | """ | |
|
636 | 1014 | |
|
637 |
|
|
|
1015 | if online: | |
|
1016 | nTries = 0 | |
|
1017 | while( nTries < self.__nTries ): | |
|
1018 | nTries += 1 | |
|
1019 | subfolder = "D%04d%03d" % ( startDateTime.timetuple().tm_year,startDateTime.timetuple().tm_yday ) | |
|
1020 | file = os.path.join( path, subfolder ) | |
|
1021 | print "Searching first file in \"%s\", try %03d ..." % ( file, nTries ) | |
|
1022 | year, doy, set, filename, dirfilename = self.__searchFilesOnLine( path, startDateTime, ext ) | |
|
1023 | if filename == None: | |
|
1024 | time.sleep( self.__delay ) | |
|
1025 | else: | |
|
1026 | break | |
|
638 | 1027 | |
|
639 | Example: | |
|
1028 | if filename == None: | |
|
1029 | print "No files On Line" | |
|
1030 | return 0 | |
|
640 | 1031 | |
|
641 | """ | |
|
642 | if online == 0: | |
|
643 | pathList, filenameList = self.__searchFilesOffline(path, startDateTime, endDateTime, set, expLabel, ext) | |
|
644 | self.__idFile = -1 | |
|
1032 | if self.__initFilesOnline( path, dirfilename, filename ) == 0: | |
|
1033 | print "The file %s hasn't enough data" | |
|
1034 | return 0 | |
|
1035 | ||
|
1036 | self.__year = year | |
|
1037 | self.__doy = doy | |
|
1038 | self.__set = set - 1 | |
|
1039 | self.__path = path | |
|
645 | 1040 | |
|
646 | 1041 | else: |
|
647 | filepath, filename, year, doy, set = self.__searchFilesOnline() | |
|
648 |
se |
|
|
1042 | pathList, filenameList = self.__searchFilesOffLine( path, startDateTime, endDateTime, set, expLabel, ext ) | |
|
1043 | self.__idFile = -1 | |
|
1044 | self.__pathList = pathList | |
|
1045 | self.filenameList = filenameList | |
|
1046 | ||
|
1047 | self.online = online | |
|
1048 | self.__ext = ext | |
|
649 | 1049 |
|
|
650 | 1050 | if not(self.__setNextFile()): |
|
1051 | if (startDateTime != None) and (endDateTime != None): | |
|
651 | 1052 | print "No files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime()) |
|
1053 | elif startDateTime != None: | |
|
1054 | print "No files in : %s" % startDateTime.ctime() | |
|
1055 | else: | |
|
1056 | print "No files" | |
|
652 | 1057 | return 0 |
|
653 | 1058 | |
|
1059 | if startDateTime != None: | |
|
654 | 1060 | self.startUTCSeconds = time.mktime(startDateTime.timetuple()) |
|
655 | self.endUTCSeconds = time.mktime(endDateTime.timetuple()) | |
|
656 | ||
|
657 | 1061 | self.startYear = startDateTime.timetuple().tm_year |
|
658 | self.endYear = endDateTime.timetuple().tm_year | |
|
659 | ||
|
660 | 1062 | self.startDoy = startDateTime.timetuple().tm_yday |
|
1063 | ||
|
1064 | if endDateTime != None: | |
|
1065 | self.endUTCSeconds = time.mktime(endDateTime.timetuple()) | |
|
1066 | self.endYear = endDateTime.timetuple().tm_year | |
|
661 | 1067 | self.endDoy = endDateTime.timetuple().tm_yday |
|
662 | 1068 | #call fillHeaderValues() - to Data Object |
|
663 | 1069 |
|
|
664 | self.__pathList = pathList | |
|
665 | self.filenameList = filenameList | |
|
666 | self.online = online | |
|
667 | ||
|
668 | self.__startDateTime = startDateTime | |
|
1070 | self.m_Spectra.m_BasicHeader = self.m_BasicHeader.copy() | |
|
1071 | self.m_Spectra.m_ProcessingHeader = self.m_ProcessingHeader.copy() | |
|
1072 | self.m_Spectra.m_RadarControllerHeader = self.m_RadarControllerHeader.copy() | |
|
1073 | self.m_Spectra.m_SystemHeader = self.m_SystemHeader.copy() | |
|
1074 | self.m_Spectra.dataType = self.__dataType | |
|
669 | 1075 | |
|
670 | 1076 | return 1 |
|
671 | 1077 |
|
|
1078 | ||
|
672 | 1079 | def readNextBlock(self): |
|
673 | 1080 | """ |
|
674 |
|
|
|
1081 | Establece un nuevo bloque de datos a leer y los lee, si es que no existiese | |
|
675 | 1082 | mas bloques disponibles en el archivo actual salta al siguiente. |
|
676 | 1083 | |
|
1084 | Affected: | |
|
1085 | self.__lastUTTime | |
|
1086 | ||
|
1087 | Return: None | |
|
677 | 1088 | """ |
|
678 | 1089 | |
|
679 | 1090 | if not( self.__setNewBlock() ): |
|
680 | 1091 | return 0 |
|
681 | 1092 | |
|
1093 | if self.online: | |
|
1094 | nTries = 0 | |
|
1095 | while( nTries < self.__nTries ): | |
|
1096 | nTries += 1 | |
|
1097 | if self.__readBlock() == 0: | |
|
1098 | print "Waiting for the next block ..." | |
|
1099 | time.sleep( self.__delay ) | |
|
1100 | else: | |
|
1101 | break | |
|
1102 | else: | |
|
682 | 1103 | self.__readBlock() |
|
683 | 1104 | |
|
684 | 1105 | self.__lastUTTime = self.m_BasicHeader.utc |
@@ -688,23 +1109,19 class SpectraReader(DataReader): | |||
|
688 | 1109 | |
|
689 | 1110 | def getData(self): |
|
690 | 1111 | """ |
|
691 |
|
|
|
1112 | Copia el buffer de lectura a la clase "Spectra", | |
|
692 | 1113 | con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de |
|
693 | 1114 | lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock" |
|
694 | 1115 | |
|
695 | Inputs: | |
|
696 | None | |
|
697 | ||
|
698 | 1116 | Return: |
|
699 | data : retorna un bloque de datos (nFFTs * alturas * canales) copiados desde el | |
|
700 | buffer. Si no hay mas archivos a leer retorna None. | |
|
1117 | 0 : Si no hay mas archivos disponibles | |
|
1118 | 1 : Si hizo una buena copia del buffer | |
|
701 | 1119 | |
|
702 | 1120 | Variables afectadas: |
|
703 | 1121 | self.m_Spectra |
|
704 |
self.__ |
|
|
705 | ||
|
706 | Excepciones: | |
|
707 | ||
|
1122 | self.__datablockIndex | |
|
1123 | self.flagResetProcessing | |
|
1124 | self.flagIsNewBlock | |
|
708 | 1125 | """ |
|
709 | 1126 | |
|
710 | 1127 | self.flagResetProcessing = 0 |
@@ -725,44 +1142,66 class SpectraReader(DataReader): | |||
|
725 | 1142 | return 0 |
|
726 | 1143 | |
|
727 | 1144 | #data es un numpy array de 3 dmensiones (perfiles, alturas y canales) |
|
728 | #print type(self.__buffer_sspc) | |
|
729 | 1145 | |
|
730 | time = self.m_BasicHeader.utc + self.__buffer_id*self.__ippSeconds | |
|
1146 | self.m_Spectra.flagNoData = False | |
|
1147 | self.m_Spectra.flagResetProcessing = self.flagResetProcessing | |
|
731 | 1148 | |
|
732 |
self.m_Spectra. |
|
|
733 |
self.m_Spectra.data_spc = self.__ |
|
|
734 |
self.m_Spectra.data_ |
|
|
735 | self.m_Spectra.data_dc = self.__buffer_dc | |
|
1149 | self.m_Spectra.data_spc = self.__data_spc | |
|
1150 | self.m_Spectra.data_cspc = self.__data_cspc | |
|
1151 | self.m_Spectra.data_dc = self.__data_dc | |
|
736 | 1152 | |
|
737 | 1153 | #call setData - to Data Object |
|
1154 | #self.datablock_id += 1 | |
|
1155 | #self.idProfile += 1 | |
|
738 | 1156 | |
|
739 | 1157 | return 1 |
|
740 | 1158 | |
|
741 | 1159 | |
|
742 | 1160 | class SpectraWriter(DataWriter): |
|
1161 | """ | |
|
1162 | Esta clase permite escribir datos de espectros a archivos procesados (.pdata). La escritura | |
|
1163 | de los datos siempre se realiza por bloques. | |
|
1164 | """ | |
|
1165 | ||
|
1166 | def __init__( self, m_Spectra = None ): | |
|
1167 | """ | |
|
1168 | Inicializador de la clase SpectraWriter para la escritura de datos de espectros. | |
|
1169 | ||
|
1170 | Affected: | |
|
1171 | self.m_Spectra | |
|
1172 | self.m_BasicHeader | |
|
1173 | self.m_SystemHeader | |
|
1174 | self.m_RadarControllerHeader | |
|
1175 | self.m_ProcessingHeader | |
|
1176 | ||
|
1177 | Return: None | |
|
1178 | """ | |
|
743 | 1179 | |
|
744 | def __init__(self): | |
|
745 | 1180 | if m_Spectra == None: |
|
746 | 1181 | m_Spectra = Spectra() |
|
747 | 1182 | |
|
748 | 1183 | self.m_Spectra = m_Spectra |
|
749 | 1184 | |
|
1185 | self.__path = None | |
|
1186 | ||
|
750 | 1187 | self.__fp = None |
|
751 | 1188 | |
|
1189 | self.__format = None | |
|
1190 | ||
|
752 | 1191 | self.__blocksCounter = 0 |
|
753 | 1192 | |
|
754 | 1193 | self.__setFile = None |
|
755 | 1194 | |
|
756 |
self.__flagIsNewFile = |
|
|
757 | ||
|
758 | self.__buffer_sspc = 0 | |
|
759 | ||
|
760 | self.__buffer_id = 0 | |
|
1195 | self.__flagIsNewFile = 1 | |
|
761 | 1196 | |
|
762 | 1197 | self.__dataType = None |
|
763 | 1198 | |
|
764 | 1199 | self.__ext = None |
|
765 | 1200 | |
|
1201 | self.__shape_spc_Buffer = None | |
|
1202 | self.__shape_cspc_Buffer = None | |
|
1203 | self.__shape_dc_Buffer = None | |
|
1204 | ||
|
766 | 1205 | self.nWriteBlocks = 0 |
|
767 | 1206 | |
|
768 | 1207 | self.flagIsNewBlock = 0 |
@@ -779,27 +1218,129 class SpectraWriter(DataWriter): | |||
|
779 | 1218 | |
|
780 | 1219 | self.m_ProcessingHeader = ProcessingHeader() |
|
781 | 1220 | |
|
1221 | self.__data_spc = None | |
|
1222 | self.__data_cspc = None | |
|
1223 | self.__data_dc = None | |
|
1224 | ||
|
1225 | def __writeFirstHeader( self ): | |
|
1226 | """ | |
|
1227 | Escribe el primer header del file es decir el Basic header y el Long header (SystemHeader, RadarControllerHeader, ProcessingHeader) | |
|
1228 | ||
|
1229 | Affected: | |
|
1230 | __dataType | |
|
1231 | ||
|
1232 | Return: | |
|
1233 | None | |
|
1234 | """ | |
|
1235 | self.__writeBasicHeader() | |
|
1236 | self.__wrSystemHeader() | |
|
1237 | self.__wrRadarControllerHeader() | |
|
1238 | self.__wrProcessingHeader() | |
|
1239 | self.__dataType = self.m_Spectra.dataType | |
|
1240 | ||
|
1241 | def __writeBasicHeader( self, fp=None ): | |
|
1242 | """ | |
|
1243 | Escribe solo el Basic header en el file creado | |
|
1244 | ||
|
1245 | Return: | |
|
1246 | None | |
|
1247 | """ | |
|
1248 | if fp == None: | |
|
1249 | fp = self.__fp | |
|
1250 | ||
|
1251 | self.m_BasicHeader.write(fp) | |
|
1252 | ||
|
1253 | def __wrSystemHeader( self, fp=None ): | |
|
1254 | """ | |
|
1255 | Escribe solo el System header en el file creado | |
|
1256 | ||
|
1257 | Return: | |
|
1258 | None | |
|
1259 | """ | |
|
1260 | if fp == None: | |
|
1261 | fp = self.__fp | |
|
1262 | ||
|
1263 | self.m_SystemHeader.write(fp) | |
|
1264 | ||
|
1265 | def __wrRadarControllerHeader( self, fp=None ): | |
|
1266 | """ | |
|
1267 | Escribe solo el RadarController header en el file creado | |
|
1268 | ||
|
1269 | Return: | |
|
1270 | None | |
|
1271 | """ | |
|
1272 | if fp == None: | |
|
1273 | fp = self.__fp | |
|
1274 | ||
|
1275 | self.m_RadarControllerHeader.write(fp) | |
|
1276 | ||
|
1277 | def __wrProcessingHeader( self, fp=None ): | |
|
1278 | """ | |
|
1279 | Escribe solo el Processing header en el file creado | |
|
1280 | ||
|
1281 | Return: | |
|
1282 | None | |
|
1283 | """ | |
|
1284 | if fp == None: | |
|
1285 | fp = self.__fp | |
|
1286 | ||
|
1287 | self.m_ProcessingHeader.write(fp) | |
|
1288 | ||
|
782 | 1289 | def __setNextFile(self): |
|
783 | setFile = self.__setFile | |
|
1290 | """ | |
|
1291 | Determina el siguiente file que sera escrito | |
|
1292 | ||
|
1293 | Affected: | |
|
1294 | self.filename | |
|
1295 | self.__subfolder | |
|
1296 | self.__fp | |
|
1297 | self.__setFile | |
|
1298 | self.__flagIsNewFile | |
|
1299 | ||
|
1300 | Return: | |
|
1301 | 0 : Si el archivo no puede ser escrito | |
|
1302 | 1 : Si el archivo esta listo para ser escrito | |
|
1303 | """ | |
|
784 | 1304 | ext = self.__ext |
|
785 | 1305 | path = self.__path |
|
786 | 1306 | |
|
787 | setFile += 1 | |
|
788 | ||
|
789 | if not(self.__blocksCounter >= self.m_ProcessingHeader.dataBlocksPerFile): | |
|
1307 | if self.__fp != None: | |
|
790 | 1308 | self.__fp.close() |
|
791 | return 0 | |
|
1309 | ||
|
1310 | if self.m_BasicHeader.size <= 24: return 0 #no existe la suficiente data para ser escrita | |
|
792 | 1311 | |
|
793 | 1312 | timeTuple = time.localtime(self.m_Spectra.m_BasicHeader.utc) # utc from m_Spectra |
|
794 |
|
|
|
795 | subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_doy) | |
|
1313 | subfolder = 'D%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday) | |
|
1314 | ||
|
796 | 1315 | tmp = os.path.join(path,subfolder) |
|
797 | 1316 | if not(os.path.exists(tmp)): |
|
798 | 1317 | os.mkdir(tmp) |
|
1318 | self.__setFile = -1 #inicializo mi contador de seteo | |
|
1319 | else: | |
|
1320 | filesList = os.listdir( tmp ) | |
|
1321 | if len( filesList ) > 0: | |
|
1322 | filesList = sorted( filesList, key=str.lower ) | |
|
1323 | filen = filesList[-1] | |
|
1324 | # el filename debera tener el siguiente formato | |
|
1325 | # 0 1234 567 89A BCDE (hex) | |
|
1326 | # P YYYY DDD SSS .ext | |
|
1327 | if isNumber( filen[8:11] ): | |
|
1328 | self.__setFile = int( filen[8:11] ) #inicializo mi contador de seteo al seteo del ultimo file | |
|
1329 | else: | |
|
1330 | self.__setFile = -1 | |
|
1331 | else: | |
|
1332 | self.__setFile = -1 #inicializo mi contador de seteo | |
|
1333 | ||
|
1334 | setFile = self.__setFile | |
|
1335 | setFile += 1 | |
|
1336 | file = 'P%4.4d%3.3d%3.3d%s' % ( timeTuple.tm_year, timeTuple.tm_yday, setFile, ext ) | |
|
799 | 1337 | |
|
800 | 1338 | filename = os.path.join(path,subfolder,file) |
|
1339 | ||
|
801 | 1340 | fp = open(filename,'wb') |
|
802 | 1341 | |
|
1342 | self.__blocksCounter = 0 | |
|
1343 | ||
|
803 | 1344 | #guardando atributos |
|
804 | 1345 | self.filename = filename |
|
805 | 1346 | self.__subfolder = subfolder |
@@ -809,36 +1350,73 class SpectraWriter(DataWriter): | |||
|
809 | 1350 | |
|
810 | 1351 | print 'Writing the file: %s'%self.filename |
|
811 | 1352 | |
|
812 | return 1 | |
|
813 | ||
|
1353 | self.__writeFirstHeader() | |
|
814 | 1354 | |
|
1355 | return 1 | |
|
815 | 1356 | |
|
816 | 1357 | def __setNewBlock(self): |
|
1358 | """ | |
|
1359 | Si es un nuevo file escribe el First Header caso contrario escribe solo el Basic Header | |
|
1360 | ||
|
1361 | Return: | |
|
1362 | 0 : si no pudo escribir nada | |
|
1363 | 1 : Si escribio el Basic el First Header | |
|
1364 | """ | |
|
817 | 1365 | if self.__fp == None: |
|
818 | return 0 | |
|
1366 | self.__setNextFile() | |
|
819 | 1367 | |
|
820 | 1368 | if self.__flagIsNewFile: |
|
821 | 1369 | return 1 |
|
822 | 1370 | |
|
823 | #Bloques completados? | |
|
824 | if self.__blocksCounter < self.m_ProcessingHeader.profilesPerBlock: | |
|
1371 | if self.__blocksCounter < self.m_ProcessingHeader.dataBlocksPerFile: | |
|
825 | 1372 | self.__writeBasicHeader() |
|
826 | 1373 | return 1 |
|
827 | 1374 | |
|
828 | 1375 | if not(self.__setNextFile()): |
|
829 | 1376 | return 0 |
|
830 | 1377 | |
|
831 | self.__writeFirstHeader() | |
|
832 | ||
|
833 | 1378 | return 1 |
|
834 | 1379 | |
|
835 | 1380 | def __writeBlock(self): |
|
1381 | """ | |
|
1382 | Escribe el buffer en el file designado | |
|
836 | 1383 | |
|
837 | numpy.save(self.__fp,self.__buffer_sspc) | |
|
1384 | Affected: | |
|
1385 | self.__data_spc | |
|
1386 | self.__data_cspc | |
|
1387 | self.__data_dc | |
|
1388 | self.__flagIsNewFile | |
|
1389 | self.flagIsNewBlock | |
|
1390 | self.nWriteBlocks | |
|
1391 | self.__blocksCounter | |
|
1392 | ||
|
1393 | Return: None | |
|
1394 | """ | |
|
1395 | spc = numpy.transpose( self.__data_spc, (0,2,1) ) | |
|
1396 | if not( self.m_ProcessingHeader.shif_fft ): | |
|
1397 | spc = numpy.roll( spc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones | |
|
1398 | data = spc.reshape((-1)) | |
|
1399 | data.tofile(self.__fp) | |
|
838 | 1400 | |
|
839 | self.__buffer_sspc = numpy.array([],self.__dataType) | |
|
1401 | data = numpy.zeros( self.__shape_cspc_Buffer, self.__dataType ) | |
|
1402 | cspc = numpy.transpose( self.__data_cspc, (0,2,1) ) | |
|
1403 | if not( self.m_ProcessingHeader.shif_fft ): | |
|
1404 | cspc = numpy.roll( cspc, self.m_ProcessingHeader.profilesPerBlock/2, axis=2 ) #desplaza a la derecha en el eje 2 determinadas posiciones | |
|
1405 | data['real'] = cspc.real | |
|
1406 | data['imag'] = cspc.imag | |
|
1407 | data = data.reshape((-1)) | |
|
1408 | data.tofile(self.__fp) | |
|
1409 | ||
|
1410 | data = numpy.zeros( self.__shape_dc_Buffer, self.__dataType ) | |
|
1411 | dc = self.__data_dc | |
|
1412 | data['real'] = dc.real | |
|
1413 | data['imag'] = dc.imag | |
|
1414 | data = data.reshape((-1)) | |
|
1415 | data.tofile(self.__fp) | |
|
840 | 1416 | |
|
841 |
self.__ |
|
|
1417 | self.__data_spc.fill(0) | |
|
1418 | self.__data_cspc.fill(0) | |
|
1419 | self.__data_dc.fill(0) | |
|
842 | 1420 | |
|
843 | 1421 | self.__flagIsNewFile = 0 |
|
844 | 1422 | |
@@ -848,7 +1426,15 class SpectraWriter(DataWriter): | |||
|
848 | 1426 | |
|
849 | 1427 | self.__blocksCounter += 1 |
|
850 | 1428 | |
|
1429 | ||
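__writeBlock above serializes the complex cross-spectra and DC buffers through a structured array with 'real'/'imag' fields before writing them out. A compact numpy sketch of that round trip; the little-endian int32 pair is an assumed sample layout, the real one comes from the headers:

    import numpy

    dtype_pair = numpy.dtype([("real", "<i4"), ("imag", "<i4")])   # assumed on-disk sample layout

    cspc = numpy.array([[1 + 2j, 3 - 4j]], dtype=numpy.complex64)  # tiny stand-in for __data_cspc

    out = numpy.zeros(cspc.shape, dtype_pair)
    out["real"] = cspc.real              # split the complex block into the two stored fields
    out["imag"] = cspc.imag
    raw = out.reshape(-1).tobytes()      # __writeBlock streams this with data.tofile(self.__fp)

    back = numpy.frombuffer(raw, dtype_pair)
    restored = back["real"] + 1j * back["imag"]   # the inverse step done by __readBlock
    print(restored)   # [1.+2.j  3.-4.j]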
|
851 | 1430 | def writeNextBlock(self): |
|
1431 | """ | |
|
1432 | Selecciona el bloque siguiente de datos y los escribe en un file | |
|
1433 | ||
|
1434 | Return: | |
|
1435 | 0 : Si no hizo pudo escribir el bloque de datos | |
|
1436 | 1 : Si no pudo escribir el bloque de datos | |
|
1437 | """ | |
|
852 | 1438 | if not(self.__setNewBlock()): |
|
853 | 1439 | return 0 |
|
854 | 1440 | |
@@ -856,70 +1442,201 class SpectraWriter(DataWriter): | |||
|
856 | 1442 | |
|
857 | 1443 | return 1 |
|
858 | 1444 | |
|
1445 | ||
|
859 | 1446 | def __hasAllDataInBuffer(self): |
|
860 | if self.__buffer_id >= self.m_ProcessingHeader.profilesPerBlock: | |
|
861 | 1447 | return 1 |
|
862 | 1448 | |
|
863 | return 0 | |
|
864 | 1449 | |
|
865 | 1450 | def putData(self): |
|
866 | self.flagIsNewBlock = 0 | |
|
1451 | """ | |
|
1452 | Setea un bloque de datos y luego los escribe en un file | |
|
867 | 1453 | |
|
868 | if self.m_Spectra.noData: | |
|
869 | return None | |
|
1454 | Affected: | |
|
1455 | self.__data_spc | |
|
1456 | self.__data_cspc | |
|
1457 | self.__data_dc | |
|
870 | 1458 | |
|
871 | shape = self.m_Spectra.data.shape | |
|
872 | data = numpy.zeros(shape,self.__dataType) | |
|
873 | data['real'] = self.m_Spectra.data.real | |
|
874 | data['imag'] = self.m_Spectra.data.imag | |
|
875 | data = data.reshape((-1)) | |
|
1459 | Return: | |
|
1460 | 0 : Si no hay data o no hay mas files que puedan escribirse | |
|
1461 | 1 : Si se escribio la data de un bloque en un file | |
|
1462 | """ | |
|
1463 | self.flagIsNewBlock = 0 | |
|
1464 | ||
|
1465 | if self.m_Spectra.flagNoData: | |
|
1466 | return 0 | |
|
876 | 1467 | |
|
877 | self.__buffer_sspc = numpy.hstack((self.__buffer_sspc,data)) | |
|
1468 | if self.m_Spectra.flagResetProcessing: | |
|
1469 | self.__data_spc.fill(0) | |
|
1470 | self.__data_cspc.fill(0) | |
|
1471 | self.__data_dc.fill(0) | |
|
1472 | self.__setNextFile() | |
|
878 | 1473 | |
|
879 | self.__buffer_id += 1 | |
|
1474 | self.__data_spc = self.m_Spectra.data_spc | |
|
1475 | self.__data_cspc = self.m_Spectra.data_cspc | |
|
1476 | self.__data_dc = self.m_Spectra.data_dc | |
|
880 | 1477 | |
|
881 | if __hasAllDataInBuffer(): | |
|
1478 | if True: | |
|
1479 | time.sleep( 3 ) | |
|
1480 | self.__getHeader() | |
|
882 | 1481 | self.writeNextBlock() |
|
883 | 1482 | |
|
884 | ||
|
885 | 1483 | if self.noMoreFiles: |
|
886 | print 'Process finished' | |
|
887 |
return |
|
|
1484 | #print 'Process finished' | |
|
1485 | return 0 | |
|
888 | 1486 | |
|
889 | 1487 | return 1 |
|
890 | 1488 | |
|
1489 | def __getHeader( self ): | |
|
1490 | """ | |
|
1491 | Obtiene una copia del First Header | |
|
891 | 1492 | |
|
892 | def setup(self,path,set=None,format=None): | |
|
1493 | Affected: | |
|
1494 | self.m_BasicHeader | |
|
1495 | self.m_SystemHeader | |
|
1496 | self.m_RadarControllerHeader | |
|
1497 | self.m_ProcessingHeader | |
|
1498 | self.__dataType | |
|
1499 | ||
|
1500 | Return: | |
|
1501 | None | |
|
1502 | """ | |
|
1503 | self.m_BasicHeader = self.m_Spectra.m_BasicHeader.copy() | |
|
1504 | self.m_SystemHeader = self.m_Spectra.m_SystemHeader.copy() | |
|
1505 | self.m_RadarControllerHeader = self.m_Spectra.m_RadarControllerHeader.copy() | |
|
1506 | self.m_ProcessingHeader = self.m_Spectra.m_ProcessingHeader.copy() | |
|
1507 | self.__dataType = self.m_Spectra.dataType | |
|
1508 | ||
|
1509 | def __setHeaderByFile(self): | |
|
1510 | ||
|
1511 | format = self.__format | |
|
1512 | header = ['Basic','System','RadarController','Processing'] | |
|
1513 | ||
|
1514 | fmtFromFile = None | |
|
1515 | headerFromFile = None | |
|
1516 | ||
|
1517 | ||
|
1518 | fileTable = self.__configHeaderFile | |
|
1519 | ||
|
1520 | if os.access(fileTable, os.R_OK): | |
|
1521 | import re, string | |
|
1522 | ||
|
1523 | f = open(fileTable,'r') | |
|
1524 | lines = f.read() | |
|
1525 | f.close() | |
|
1526 | ||
|
1527 | #Delete comments into expConfig | |
|
1528 | while 1: | |
|
1529 | ||
|
1530 | startComment = string.find(lines.lower(),'#') | |
|
1531 | if startComment == -1: | |
|
1532 | break | |
|
1533 | endComment = string.find(lines.lower(),'\n',startComment) | |
|
1534 | lines = string.replace(lines,lines[startComment:endComment+1],'', 1) | |
|
1535 | ||
|
1536 | while expFromFile == None: | |
|
1537 | ||
|
1538 | currFmt = string.find(lines.lower(),'format="%s"' %(expName)) | |
|
1539 | nextFmt = string.find(lines.lower(),'format',currFmt+10) | |
|
1540 | ||
|
1541 | if currFmt == -1: | |
|
1542 | break | |
|
1543 | if nextFmt == -1: | |
|
1544 | nextFmt = len(lines)-1 | |
|
1545 | ||
|
1546 | fmtTable = lines[currFmt:nextFmt] | |
|
1547 | lines = lines[nextFmt:] | |
|
1548 | ||
|
1549 | fmtRead = self.__getValueFromArg(fmtTable,'format') | |
|
1550 | if fmtRead != format: | |
|
1551 | continue | |
|
1552 | fmtFromFile = fmtRead | |
|
1553 | ||
|
1554 | lines2 = fmtTable | |
|
1555 | ||
|
1556 | while headerFromFile == None: | |
|
1557 | ||
|
1558 | currHeader = string.find(lines2.lower(),'header="%s"' %(header)) | |
|
1559 | nextHeader = string.find(lines2.lower(),'header',currHeader+10) | |
|
1560 | ||
|
1561 | if currHeader == -1: | |
|
1562 | break | |
|
1563 | if nextHeader == -1: | |
|
1564 | nextHeader = len(lines2)-1 | |
|
1565 | ||
|
1566 | headerTable = lines2[currHeader:nextHeader] | |
|
1567 | lines2 = lines2[nextHeader:] | |
|
1568 | ||
|
1569 | headerRead = self.__getValueFromArg(headerTable,'site') | |
|
1570 | if not(headerRead in header): | |
|
1571 | continue | |
|
1572 | headerFromFile = headerRead | |
|
1573 | ||
|
1574 | if headerRead == 'Basic': | |
|
1575 | self.m_BasicHeader.size = self.__getValueFromArg(headerTable,'size',lower=False) | |
|
1576 | self.m_BasicHeader.version = self.__getValueFromArg(headerTable,'version',lower=False) | |
|
1577 | self.m_BasicHeader.dataBlock = self.__getValueFromArg(headerTable,'dataBlock',lower=False) | |
|
1578 | self.m_BasicHeader.utc = self.__getValueFromArg(headerTable,'utc',lower=False) | |
|
1579 | self.m_BasicHeader.miliSecond = self.__getValueFromArg(headerTable,'miliSecond',lower=False) | |
|
1580 | self.m_BasicHeader.timeZone = self.__getValueFromArg(headerTable,'timeZone',lower=False) | |
|
1581 | self.m_BasicHeader.dstFlag = self.__getValueFromArg(headerTable,'dstFlag',lower=False) | |
|
1582 | self.m_BasicHeader.errorCount = self.__getValueFromArg(headerTable,'errorCount',lower=False) | |
|
893 | 1583 | |
|
894 | if set == None: | |
|
895 | set = -1 | |
|
896 | 1584 | else: |
|
897 | set -= 1 | |
|
1585 | print "file access denied:%s"%fileTable | |
|
1586 | sys.exit(0) | |
|
1587 | ||
|
1588 | def setup( self, path, format='pdata' ): | |
|
1589 | """ | |
|
1590 | Setea el tipo de formato en la cual sera guardada la data y escribe el First Header | |
|
1591 | ||
|
1592 | Inputs: | |
|
1593 | path : el path destino en el cual se escribiran los files a crear | |
|
1594 | format : formato en el cual sera salvado un file | |
|
898 | 1595 | |
|
1596 | Return: | |
|
1597 | 0 : Si no realizo un buen seteo | |
|
1598 | 1 : Si realizo un buen seteo | |
|
1599 | """ | |
|
899 | 1600 | if format == 'hdf5': |
|
900 | 1601 | ext = '.hdf5' |
|
1602 | format = 'hdf5' | |
|
901 | 1603 | print 'call hdf5 library' |
|
902 | 1604 | return 0 |
|
903 | 1605 | |
|
904 | 1606 | if format == 'rawdata': |
|
905 | 1607 | ext = '.r' |
|
1608 | format = 'Jicamarca' | |
|
906 | 1609 | |
|
907 | #call to config_headers | |
|
1610 | if format == 'pdata': | |
|
1611 | ext = '.pdata' | |
|
1612 | format = 'pdata' | |
|
908 | 1613 | |
|
909 | self.__setFile = set | |
|
1614 | #call to config_headers | |
|
1615 | #self.__setHeaderByFile() | |
|
910 | 1616 | |
|
911 | if not(self.__setNextFile()): | |
|
912 | print "zzzzzzzzzzzz" | |
|
913 | return 0 | |
|
1617 | self.__path = path | |
|
1618 | self.__setFile = -1 | |
|
1619 | self.__ext = ext | |
|
1620 | self.__format = format | |
|
914 | 1621 | |
|
915 | self.__writeFirstHeader() # dentro de esta funcion se debe setear e __dataType | |
|
1622 | self.__getHeader() | |
|
916 | 1623 | |
|
917 | self.__buffer_sspc = numpy.array([],self.__dataType) | |
|
1624 | self.__shape_spc_Buffer = ( self.m_Spectra.nChannels, | |
|
1625 | self.m_ProcessingHeader.numHeights, | |
|
1626 | self.m_ProcessingHeader.profilesPerBlock | |
|
1627 | ) | |
|
918 | 1628 | |
|
1629 | self.__shape_cspc_Buffer = ( self.m_Spectra.nPairs, | |
|
1630 | self.m_ProcessingHeader.numHeights, | |
|
1631 | self.m_ProcessingHeader.profilesPerBlock | |
|
1632 | ) | |
|
919 | 1633 | |
|
1634 | self.__shape_dc_Buffer = ( self.m_SystemHeader.numChannels, | |
|
1635 | self.m_ProcessingHeader.numHeights | |
|
1636 | ) | |
|
920 | 1637 | |
|
921 | def __writeBasicHeader(self): | |
|
922 | pass | |
|
1638 | if not( self.__setNextFile() ): | |
|
1639 | print "There isn't a next file" #"zzzzzzzzzzzz" | |
|
1640 | return 0 | |
|
923 | 1641 | |
|
924 | def __writeFirstHeader(self): | |
|
925 | pass | |
|
1642 | return 1 No newline at end of file |
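Taken together, the writer is driven much like the reader: setup() fixes the output format, copies the headers from the attached Spectra object and prepares the first "PYYYYDDDSSS.pdata" file, and each putData() call writes one block. A hedged end-to-end sketch (class and method names are the ones in this diff, the module path and directories are assumptions):

    import datetime
    from IO.SpectraIO import SpectraReader, SpectraWriter   # assumed module path

    startTime = datetime.datetime(2010, 1, 20, 0, 0, 0)
    endTime = datetime.datetime(2010, 1, 21, 0, 0, 0)

    readerObj = SpectraReader()
    readerObj.setup("/home/myuser/data", startTime, endTime)

    writerObj = SpectraWriter(readerObj.m_Spectra)     # the writer consumes the reader's Spectra object
    writerObj.setup("/home/myuser/output", format="pdata")

    while readerObj.getData():    # 0 once no more files are available
        writerObj.putData()       # copies the current block and writes it, rolling files as they fill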