jroIO_base.py
1579 lines
| 47.0 KiB
| text/x-python
|
PythonLexer
r1251 | """ | ||
|
r568 | Created on Jul 2, 2014 | |
|
r487 | ||
|
r568 | @author: roj-idl71 | |
r1251 | """ | ||
|
r487 | import os | |
import sys | |||
import glob | |||
import time | |||
import numpy | |||
import fnmatch | |||
|
r944 | import inspect | |
|
r1074 | import time | |
import datetime | |||
|
r963 | import zmq | |
|
r487 | ||
|
r1287 | from schainpy.model.proc.jroproc_base import Operation | |
|
r568 | from schainpy.model.data.jroheaderIO import PROCFLAG, BasicHeader, SystemHeader, RadarControllerHeader, ProcessingHeader | |
|
r616 | from schainpy.model.data.jroheaderIO import get_dtype_index, get_numpy_dtype, get_procflag_dtype, get_dtype_width | |
|
r1112 | from schainpy.utils import log | |
r1150 | import schainpy.admin | ||
|
r487 | ||
|
# When True, file timestamps in basic headers are interpreted as local time
# (passed to BasicHeader(LOCALTIME) throughout this module).
LOCALTIME = True

# Number of characters each strftime directive occupies in a file/folder
# name; used by parse_format() to slice fixed-width date fields out of
# names such as "d2015305001.r".
DT_DIRECTIVES = {
    '%Y': 4,
    '%y': 2,
    '%m': 2,
    '%d': 2,
    '%j': 3,
    '%H': 2,
    '%M': 2,
    '%S': 2,
    '%f': 6
}
|
r487 | ||
|
r1074 | ||
|
def isNumber(cad):
    """
    Check whether a string can be converted to a (float) number.

    Input:
        cad : string (or any object accepted by float()) to analyze

    Return:
        True  : the value represents a number
        False : it does not
    """
    try:
        float(cad)
        return True
    except (ValueError, TypeError):
        # Narrowed from a bare `except`: only conversion failures mean
        # "not a number"; anything else (e.g. KeyboardInterrupt) propagates.
        return False
|
r1074 | ||
|
def isFileInEpoch(filename, startUTSeconds, endUTSeconds):
    """
    Determine whether a Jicamarca data file holds data inside the given
    epoch range, by reading only its first basic header.

    Inputs:
        filename       : full path of a Jicamarca-format data file (.r)
        startUTSeconds : range start, in seconds since 01/01/1970
        endUTSeconds   : range end, in seconds since 01/01/1970

    Return:
        1 if the first block's UTC timestamp falls inside the range,
        0 if it does not, if the file cannot be opened, or if the
        header is not valid.
    """
    basicHeaderObj = BasicHeader(LOCALTIME)

    try:
        fp = open(filename, 'rb')
    except IOError:
        print("The file %s can't be opened" % (filename))
        return 0

    sts = basicHeaderObj.read(fp)
    fp.close()

    if not(sts):
        print("Skipping the file %s because it has not a valid header" % (filename))
        return 0

    # Half-open interval check on the first block only: start <= utc < end.
    # NOTE(review): later blocks in the file are not inspected here.
    if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)):
        return 0

    return 1
|
r1074 | ||
|
def isTimeInRange(thisTime, startTime, endTime):
    """Return 1 when *thisTime* falls inside [startTime, endTime], 0 otherwise.

    When endTime < startTime the window wraps past midnight, so the
    excluded zone is the open gap between endTime and startTime.
    """
    if endTime >= startTime:
        # Same-day window
        inside = startTime <= thisTime <= endTime
    else:
        # Overnight window: only the gap (endTime, startTime) is outside
        inside = not (endTime < thisTime < startTime)
    return 1 if inside else 0
r892 | |||
|
r1074 | ||
|
def isFileInTimeRange(filename, startDate, endDate, startTime, endTime):
    """
    Check whether a Jicamarca data file overlaps the selected time window.

    Reads the first basic header and (by seeking to the last block) the
    last basic header, then compares their times against the window.

    Inputs:
        filename  : full path of a Jicamarca-format data file (.r)
        startDate : range start, datetime.date
        endDate   : range end, datetime.date
        startTime : range start, datetime.time
        endTime   : range end, datetime.time (may be < startTime, meaning
                    the window wraps past midnight)

    Return:
        The first block's datetime when the file overlaps the window,
        None otherwise (also on open/parse errors).

    NOTE(review): the early `return None` paths below leave `fp` open;
    the handle is only closed on the full read path.
    """
    try:
        fp = open(filename, 'rb')
    except IOError:
        print("The file %s can't be opened" % (filename))
        return None

    firstBasicHeaderObj = BasicHeader(LOCALTIME)
    systemHeaderObj = SystemHeader()
    radarControllerHeaderObj = RadarControllerHeader()
    processingHeaderObj = ProcessingHeader()

    lastBasicHeaderObj = BasicHeader(LOCALTIME)

    sts = firstBasicHeaderObj.read(fp)

    if not(sts):
        print("[Reading] Skipping the file %s because it has not a valid header" % (filename))
        return None

    if not systemHeaderObj.read(fp):
        return None

    if not radarControllerHeaderObj.read(fp):
        return None

    if not processingHeaderObj.read(fp):
        return None

    filesize = os.path.getsize(filename)

    # One data block plus its basic header (24 bytes)
    offset = processingHeaderObj.blockSize + 24  # header size

    if filesize <= offset:
        print("[Reading] %s: This file has not enough data" % filename)
        return None

    # Jump to the basic header of the last block
    fp.seek(-offset, 2)

    sts = lastBasicHeaderObj.read(fp)

    fp.close()

    thisDatetime = lastBasicHeaderObj.datatime
    thisTime_last_block = thisDatetime.time()

    thisDatetime = firstBasicHeaderObj.datatime
    thisDate = thisDatetime.date()
    thisTime_first_block = thisDatetime.time()

    # General case
    # o>>>>>>>>>>>>>><<<<<<<<<<<<<<o
    #-----------o----------------------------o-----------
    #         startTime                     endTime

    if endTime >= startTime:
        # No overlap when the file ends before the window starts or
        # begins after the window ends.
        if (thisTime_last_block < startTime) or (thisTime_first_block > endTime):
            return None

        return thisDatetime

    # If endTime < startTime then endTime belongs to the next day

    #<<<<<<<<<<<o                            o>>>>>>>>>>>
    #-----------o----------------------------o-----------
    #          endTime                    startTime

    if (thisDate == startDate) and (thisTime_last_block < startTime):
        return None

    if (thisDate == endDate) and (thisTime_first_block > endTime):
        return None

    if (thisTime_last_block < startTime) and (thisTime_first_block > endTime):
        return None

    return thisDatetime
|
r1074 | ||
|
def isFolderInDateRange(folder, startDate=None, endDate=None):
    """Return 1 when *folder* lies inside [startDate, endDate], 0 otherwise.

    The folder's basename must look like "?YYYYDDD" (one prefix character,
    4-digit year, 3-digit day of year).  When either bound is missing only
    the name format is validated.
    """
    name = os.path.basename(folder)

    if not isRadarFolder(name):
        print("The folder %s has not the rigth format" % folder)
        return 0

    if startDate and endDate:
        folderDate = getDateFromRadarFolder(name)
        if folderDate < startDate or folderDate > endDate:
            return 0

    return 1
|
r1074 | ||
|
def isFileInDateRange(filename, startDate=None, endDate=None):
    """Return 1 when *filename* lies inside [startDate, endDate], 0 otherwise.

    The file's basename must look like "?YYYYDDDsss" (prefix character,
    4-digit year, 3-digit day of year, 3-digit set).  When either bound is
    missing only the name format is validated.
    """
    name = os.path.basename(filename)

    if not isRadarFile(name):
        print("The filename %s has not the rigth format" % filename)
        return 0

    if startDate and endDate:
        fileDate = getDateFromRadarFile(name)
        if fileDate < startDate or fileDate > endDate:
            return 0

    return 1
|
r1074 | ||
|
def getFileFromSet(path, ext, set):
    """Return the file in *path* whose name matches the given set number.

    File names follow the layout:
        # 0 1234 567 89A BCDE
        # H YYYY DDD SSS .ext
    The year/doy used in the match pattern are taken from the last valid
    file found in the folder.

    Inputs:
        path : folder to scan
        ext  : extension to accept (case-insensitive), e.g. ".r"
        set  : set number to look for (note: parameter shadows builtin set)

    Return:
        The matching filename; when no file matches the set, the last
        valid file (case-insensitive order); None when the folder holds
        no file with the expected layout.
    """
    validFilelist = []
    fileList = os.listdir(path)

    for thisFile in fileList:
        try:
            year = int(thisFile[1:5])
            doy = int(thisFile[5:8])
        except ValueError:
            # Name does not carry numeric year/doy fields; skip it
            continue

        if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):
            continue

        validFilelist.append(thisFile)

    if not validFilelist:
        # Fix: previously `year`/`doy` were referenced here without ever
        # being assigned, raising UnboundLocalError on an empty folder.
        return None

    myfile = fnmatch.filter(
        validFilelist, '*%4.4d%3.3d%3.3d*' % (year, doy, set))

    if len(myfile) != 0:
        return myfile[0]

    filename = '*%4.4d%3.3d%3.3d%s' % (year, doy, set, ext.lower())
    print('the filename %s does not exist' % filename)
    print('...going to the last file: ')

    validFilelist = sorted(validFilelist, key=str.lower)
    return validFilelist[-1]
|
r1074 | ||
|
def getlastFileFromPath(path, ext):
    """Return the last file in *path* matching the "PYYYYDDDSSS.ext" layout.

    Files whose name does not carry a numeric year (chars 1-4) and day of
    year (chars 5-7), or whose extension differs from *ext*, are ignored.
    "Last" means last in case-insensitive lexicographic order; None is
    returned when no file qualifies.
    """
    candidates = []

    for entry in os.listdir(path):
        # chars 1-4 must be the year
        if not isNumber(entry[1:5]):
            continue
        # chars 5-7 must be the day of year
        if not isNumber(entry[5:8]):
            continue

        # Kept from the original: int() re-validates the fields (and, as
        # before, raises on float-looking strings that isNumber accepts).
        int(entry[1:5])
        int(entry[5:8])

        if os.path.splitext(entry)[-1].lower() != ext.lower():
            continue

        candidates.append(entry)

    if not candidates:
        return None

    return sorted(candidates, key=str.lower)[-1]
|
r1074 | ||
|
def isRadarFolder(folder):
    """Return 1 when *folder* looks like a radar day folder ("xYYYYDDD...").

    Chars 1-4 must parse as the year and chars 5-7 as the day of year.
    """
    try:
        int(folder[1:5])  # year
        int(folder[5:8])  # day of year
    except (ValueError, TypeError):
        # Narrowed from a bare `except`: malformed names (or non-string
        # input) yield 0; unrelated errors now propagate.
        return 0

    return 1
|
r1074 | ||
|
def isRadarFile(file):
    """Return 1 when *file* looks like a radar data file ("xYYYYDDDSSS.ext").

    Chars 1-4 must parse as the year, 5-7 as the day of year and 8-10 as
    the set number.  (Parameter name `file` kept for compatibility.)
    """
    try:
        int(file[1:5])   # year
        int(file[5:8])   # day of year
        int(file[8:11])  # set
    except (ValueError, TypeError):
        # Narrowed from a bare `except`: malformed names yield 0
        return 0

    return 1
r892 | |||
|
r589 | ||
def getDateFromRadarFile(file):
    """Extract the date encoded in a radar file name ("xYYYYDDDSSS.ext").

    Chars 1-4 are the year, 5-7 the day of year; chars 8-10 (the set) are
    validated but not used in the result.

    Return:
        datetime.date for that year/doy, or None when the name does not
        follow the expected layout.
    """
    try:
        year = int(file[1:5])
        doy = int(file[5:8])
        set = int(file[8:11])  # kept: validates that the set field is numeric
    except (ValueError, TypeError):
        # Narrowed from a bare `except`: only malformed names yield None
        return None

    return datetime.date(year, 1, 1) + datetime.timedelta(doy - 1)
|
r1074 | ||
|
def getDateFromRadarFolder(folder):
    """Extract the date encoded in a radar day-folder name ("xYYYYDDD").

    Chars 1-4 are the year and chars 5-7 the day of year.

    Return:
        datetime.date for that year/doy, or None when the name does not
        follow the expected layout.
    """
    try:
        year = int(folder[1:5])
        doy = int(folder[5:8])
    except (ValueError, TypeError):
        # Narrowed from a bare `except`: only malformed names yield None
        return None

    return datetime.date(year, 1, 1) + datetime.timedelta(doy - 1)
r892 | |||
def parse_format(s, fmt):
    """Expand each strftime directive in *fmt* with the characters at the
    same positions in *s*, producing a concrete glob pattern (e.g. turning
    '*%Y%j' plus 'd2015305' into '*2015305').  Directive widths come from
    DT_DIRECTIVES."""
    for _ in range(fmt.count('%')):
        pos = fmt.index('%')
        directive = fmt[pos:pos + 2]
        width = DT_DIRECTIVES[directive]
        # str.replace substitutes every occurrence of this directive,
        # exactly as the original implementation did
        fmt = fmt.replace(directive, s[pos:pos + width])
    return fmt
|
r1074 | ||
class Reader(object):
    """Base class for schainpy file readers.

    Holds the configuration attributes shared by every reader and the
    generic file-search / file-advance machinery for both offline and
    online (files still being written) modes.  Subclasses implement
    `checkForRealPath`, `readFirstHeader` and usually `verifyFile`.

    NOTE(review): the attributes below are class-level defaults; the
    mutable ones (lists) would be shared across instances if mutated in
    place instead of reassigned.
    """

    c = 3E8                      # speed of light [m/s]
    isConfig = False             # True once the reader has been configured
    dtype = None                 # numpy dtype of the samples being read
    pathList = []
    filenameList = []
    datetimeList = []
    filename = None              # file currently opened
    ext = None                   # data file extension, e.g. ".r" or ".pdata"
    flagIsNewFile = 1            # 1 right after a new file is opened
    flagDiscontinuousBlock = 0   # 1 when a time gap was detected between blocks
    flagIsNewBlock = 0
    flagNoMoreFiles = 0          # 1 when the file list is exhausted
    fp = None                    # file object of the current file
    firstHeaderSize = 0
    basicHeaderSize = 24         # fixed size in bytes of a basic header
    versionFile = 1103
    fileSize = None
    fileSizeByHeader = None
    fileIndex = -1               # index of the current file (-1: none read yet)
    profileIndex = None
    blockIndex = 0
    nTotalBlocks = 0
    maxTimeStep = 30             # seconds; larger gaps flag a discontinuous block
    lastUTTime = None
    datablock = None
    dataOut = None
    getByBlock = False
    path = None                  # data path (may be comma-separated)
    startDate = None
    endDate = None
    startTime = datetime.time(0, 0, 0)
    endTime = datetime.time(23, 59, 59)
    set = None                   # current set number (shadows builtin `set`)
    expLabel = ""                # sub-experiment (subfolder) name
    online = False               # True -> poll for files written in real time
    delay = 60                   # seconds between online retries
    nTries = 3  # quantity tries
    nFiles = 3  # number of files for searching
    walk = True                  # True -> descend into xYYYYDDD day folders
    getblock = False
    nTxs = 1
    realtime = False
    blocksize = 0
    blocktime = None
    warnings = True
    verbose = True
    server = None
    format = None
    oneDDict = None
    twoDDict = None
    independentParam = None
    filefmt = None               # strftime-like file-name pattern (see parse_format)
    folderfmt = None             # strftime-like folder-name pattern
    open_file = open             # hook so subclasses can substitute e.g. gzip.open
    open_mode = 'rb'

    def run(self):
        """Entry point of the reader; must be provided by subclasses."""

        raise NotImplementedError

    def getAllowedArgs(self):
        """Return the argument names accepted by `run`, or the explicit
        `__attrs__` list when the subclass declares one."""

        if hasattr(self, '__attrs__'):
            return self.__attrs__
        else:
            # NOTE(review): inspect.getargspec was removed in Python 3.11;
            # inspect.getfullargspec is the modern replacement
            return inspect.getargspec(self.run).args

    def set_kwargs(self, **kwargs):
        """Copy every keyword argument onto the instance as an attribute."""

        for key, value in kwargs.items():
            setattr(self, key, value)

    def find_folders(self, path, startDate, endDate, folderfmt, last=False):
        """Yield day folders under *path* whose date (parsed from the name
        via parse_format/strptime with *folderfmt*) lies in
        [startDate, endDate].  With last=True only the newest folder is
        considered.

        NOTE(review): entries come from listing each comma-separated
        component of *path*, but the yielded value joins the folder with
        the full *path* string — verify behavior with multi-path input.
        """

        folders = [x for f in path.split(',')
                   for x in os.listdir(f) if os.path.isdir(os.path.join(f, x))]
        folders.sort()

        if last:
            folders = [folders[-1]]

        for folder in folders:
            try:
                dt = datetime.datetime.strptime(parse_format(folder, folderfmt), folderfmt).date()
                if dt >= startDate and dt <= endDate:
                    yield os.path.join(path, folder)
                else:
                    log.log('Skiping folder {}'.format(folder), self.name)
            except Exception as e:
                # Unparseable folder names are skipped, not fatal
                log.log('Skiping folder {}'.format(folder), self.name)
                continue
        return

    def find_files(self, folders, ext, filefmt, startDate=None, endDate=None,
                   expLabel='', last=False):
        """Yield full paths of data files with extension *ext* inside each
        folder, keeping only those whose date (parsed from the name with
        *filefmt*) lies in [startDate, endDate].  With last=True only the
        newest file of the first non-empty folder is yielded."""

        for path in folders:
            files = glob.glob1(path, '*{}'.format(ext))
            files.sort()
            if last:
                if files:
                    fo = files[-1]
                    try:
                        dt = datetime.datetime.strptime(parse_format(fo, filefmt), filefmt).date()
                        yield os.path.join(path, expLabel, fo)
                    except Exception as e:
                        pass
                    return
                else:
                    return

            for fo in files:
                try:
                    dt = datetime.datetime.strptime(parse_format(fo, filefmt), filefmt).date()
                    if dt >= startDate and dt <= endDate:
                        yield os.path.join(path, expLabel, fo)
                    else:
                        log.log('Skiping file {}'.format(fo), self.name)
                except Exception as e:
                    log.log('Skiping file {}'.format(fo), self.name)
                    continue

    def searchFilesOffLine(self, path, startDate, endDate,
                           expLabel, ext, walk,
                           filefmt, folderfmt):
        """Search files in offline mode for the given arguments

        Return:
            Generator of files
        """
        if walk:
            folders = self.find_folders(
                path, startDate, endDate, folderfmt)
        else:
            folders = path.split(',')

        return self.find_files(
            folders, ext, filefmt, startDate, endDate, expLabel)

    def searchFilesOnLine(self, path, startDate, endDate,
                          expLabel, ext, walk,
                          filefmt, folderfmt):
        """Search for the last file of the last folder

        Arguments:
            path     : folder that holds the data files
            expLabel : sub-experiment (subfolder) name
            ext      : data file extension
            walk     : when enabled, descend into day subfolders (doypath)

        Return:
            generator with the full path of last filename
        """

        if walk:
            folders = self.find_folders(
                path, startDate, endDate, folderfmt, last=True)
        else:
            folders = path.split(',')

        return self.find_files(
            folders, ext, filefmt, startDate, endDate, expLabel, last=True)

    def setNextFile(self):
        """Set the next file to be readed open it and parse de file header"""

        # Keep advancing until a file passes verifyFile(); raises a
        # schainpy error/warning when no further file can be obtained.
        while True:
            if self.fp != None:
                self.fp.close()

            if self.online:
                newFile = self.setNextFileOnline()
            else:
                newFile = self.setNextFileOffline()

            if not(newFile):
                if self.online:
                    raise schainpy.admin.SchainError('Time to wait for new files reach')
                else:
                    if self.fileIndex == -1:
                        raise schainpy.admin.SchainWarning('No files found in the given path')
                    else:
                        raise schainpy.admin.SchainWarning('No more files to read')

            if self.verifyFile(self.filename):
                break

        log.log('Opening file: %s' % self.filename, self.name)

        self.readFirstHeader()
        self.nReadBlocks = 0

    def setNextFileOnline(self):
        """Check for the next file to be readed in online mode.

        Polls checkForRealPath() up to nTries times per candidate file,
        sleeping `delay` seconds between attempts, and falls over to the
        next day after nFiles unsuccessful files.

        Set:
            self.filename
            self.fp
            self.filesize

        Return:
            boolean
        """
        nextFile = True
        nextDay = False

        # NOTE(review): the loop variables nFiles/nTries shadow the
        # attributes of the same name, and self.nTries is mutated below.
        for nFiles in range(self.nFiles+1):
            for nTries in range(self.nTries):
                fullfilename, filename = self.checkForRealPath(nextFile, nextDay)
                if fullfilename is not None:
                    break
                log.warning(
                    "Waiting %0.2f sec for the next file: \"%s\" , try %02d ..." % (self.delay, filename, nTries + 1),
                    self.name)
                time.sleep(self.delay)
                nextFile = False
                continue

            if fullfilename is not None:
                break
            self.nTries = 1
            nextFile = True

            if nFiles == (self.nFiles - 1):
                log.log('Trying with next day...', self.name)
                nextDay = True
                self.nTries = 3

        if fullfilename:
            self.fileSize = os.path.getsize(fullfilename)
            self.filename = fullfilename
            self.flagIsNewFile = 1
            if self.fp != None:
                self.fp.close()
            self.fp = self.open_file(fullfilename, self.open_mode)
            self.flagNoMoreFiles = 0
            self.fileIndex += 1
            return 1
        else:
            return 0

    def setNextFileOffline(self):
        """Open the next file to be readed in offline mode"""

        # self.filenameList is consumed as an iterator/generator here
        try:
            filename = next(self.filenameList)
            self.fileIndex +=1
        except StopIteration:
            self.flagNoMoreFiles = 1
            return 0

        self.filename = filename
        self.fileSize = os.path.getsize(filename)
        self.fp = self.open_file(filename, self.open_mode)
        self.flagIsNewFile = 1

        return 1

    @staticmethod
    def isDateTimeInRange(dt, startDate, endDate, startTime, endTime):
        """Check if the given datetime is in range"""
        # Both the date and the time-of-day must fall inside their bounds
        if startDate <= dt.date() <= endDate:
            if startTime <= dt.time() <= endTime:
                return True
        return False

    def verifyFile(self, filename):
        """Check for a valid file

        Arguments:
            filename -- full path filename

        Return:
            boolean
        """

        # Base implementation accepts everything; subclasses override
        return True

    def checkForRealPath(self, nextFile, nextDay):
        """Check if the next file to be readed exists"""

        raise NotImplementedError

    def readFirstHeader(self):
        """Parse the file header"""

        pass
class JRODataReader(Reader):
    """Reader for Jicamarca raw data files named like "xYYYYDDDSSS.ext"."""

    utc = 0                      # UTC timestamp of the current block
    nReadBlocks = 0              # blocks read so far from the current file
    foldercounter = 0            # suffix counter for duplicated day folders
    firstHeaderSize = 0
    basicHeaderSize = 24         # fixed size in bytes of a basic header
    __isFirstTimeOnline = 1
    filefmt = "*%Y%j***"         # file name: prefix + year + doy + set
    folderfmt = "*%Y%j"          # day folder: prefix + year + doy

    # Explicit argument list reported by getAllowedArgs()
    __attrs__ = ['path', 'startDate', 'endDate', 'startTime', 'endTime', 'online', 'delay', 'walk']
r487 | ||
r1251 | def getDtypeWidth(self): | ||
dtype_index = get_dtype_index(self.dtype) | |||
dtype_width = get_dtype_width(dtype_index) | |||
return dtype_width | |||
    def checkForRealPath(self, nextFile, nextDay):
        """Check if the next file to be readed exists.

        Example:
            the correct file name is .../.../D2009307/P2009307367.ext
            so the function tries the following combinations
                .../.../y2009307367.ext
                .../.../Y2009307367.ext
                .../.../x2009307/y2009307367.ext
                .../.../x2009307/Y2009307367.ext
                .../.../X2009307/y2009307367.ext
                .../.../X2009307/Y2009307367.ext
            the last letter combination being, in this case, identical to
            the file searched for.

        Side effects: advances self.set (and self.doy when nextDay).

        Return:
            str -- fullpath of the file
        """
        if nextFile:
            self.set += 1
        if nextDay:
            self.set = 0
            self.doy += 1
        foldercounter = 0
        prefixDirList = [None, 'd', 'D']
        if self.ext.lower() == ".r":  # voltage
            prefixFileList = ['d', 'D']
        elif self.ext.lower() == ".pdata":  # spectra
            prefixFileList = ['p', 'P']
        # NOTE(review): any other extension leaves prefixFileList unbound
        # and the loop below would raise NameError — confirm callers only
        # use ".r"/".pdata".

        # sweep every possible prefix combination
        for prefixDir in prefixDirList:
            thispath = self.path
            if prefixDir != None:
                # build the folder name xYYYYDDD (x=d or x=D)
                if foldercounter == 0:
                    thispath = os.path.join(self.path, "%s%04d%03d" %
                                            (prefixDir, self.year, self.doy))
                else:
                    thispath = os.path.join(self.path, "%s%04d%03d_%02d" % (
                        prefixDir, self.year, self.doy, foldercounter))
            for prefixFile in prefixFileList:  # sweep both letter cases
                # build the file name xYYYYDDDSSS.ext
                filename = "%s%04d%03d%03d%s" % (prefixFile, self.year, self.doy, self.set, self.ext)
                fullfilename = os.path.join(
                    thispath, filename)

                if os.path.exists(fullfilename):
                    return fullfilename, filename

        return None, filename
|
    def __waitNewBlock(self):
        """
        Return 1 if a new data block was found, 0 otherwise.

        Always returns 0 in offline mode.  In online mode it re-opens the
        current file up to nTries times (sleeping `delay` seconds between
        attempts) waiting for a full block to be appended, and reads the
        new block's basic header when one shows up.
        """
        if not self.online:
            return 0

        # Nothing to wait for once the file already holds all its blocks
        if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):
            return 0

        currentPointer = self.fp.tell()

        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        for nTries in range(self.nTries):

            # Re-open to refresh the OS view of a file still being written
            self.fp.close()
            self.fp = open(self.filename, 'rb')
            self.fp.seek(currentPointer)

            self.fileSize = os.path.getsize(self.filename)
            currentSize = self.fileSize - currentPointer

            if (currentSize >= neededSize):
                self.basicHeaderObj.read(self.fp)
                return 1

            # File already reached its header-declared size: end of file
            if self.fileSize == self.fileSizeByHeader:
                # self.flagEoF = True
                return 0

            print("[Reading] Waiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries + 1))
            time.sleep(self.delay)

        return 0
|
r487 | ||
|
    def waitDataBlock(self, pointer_location, blocksize=None):
        """Wait until a whole data block is available at *pointer_location*.

        Re-opens the current file up to nTries times (sleeping `delay`
        seconds between attempts) until at least `blocksize` bytes — or
        the processing header's blockSize when *blocksize* is None — are
        available past the given offset.

        Return:
            1 when the block is available, 0 after exhausting the retries.
        """
        currentPointer = pointer_location
        if blocksize is None:
            neededSize = self.processingHeaderObj.blockSize  # + self.basicHeaderSize
        else:
            neededSize = blocksize

        for nTries in range(self.nTries):
            # Re-open to refresh the OS view of a file still being written
            self.fp.close()
            self.fp = open(self.filename, 'rb')
            self.fp.seek(currentPointer)

            self.fileSize = os.path.getsize(self.filename)
            currentSize = self.fileSize - currentPointer

            if (currentSize >= neededSize):
                return 1

            log.warning(
                "Waiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries + 1),
                self.name
            )
            time.sleep(self.delay)

        return 0
    def __setNewBlock(self):
        """Position the reader at the next block's data, reading its basic
        header, advancing to the next file when the current one is
        exhausted.  Sets flagDiscontinuousBlock when a time gap larger
        than maxTimeStep (or a realtime file switch) is detected.

        Return:
            1 when a block is ready to be read, 0 otherwise.
        """
        if self.fp == None:
            return 0

        # A freshly opened file already has its headers parsed
        if self.flagIsNewFile:
            self.lastUTTime = self.basicHeaderObj.utc
            return 1

        if self.realtime:
            self.flagDiscontinuousBlock = 1
            if not(self.setNextFile()):
                return 0
            else:
                return 1

        currentSize = self.fileSize - self.fp.tell()
        neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize

        # Enough bytes left in this file: read the next basic header
        if (currentSize >= neededSize):
            self.basicHeaderObj.read(self.fp)
            self.lastUTTime = self.basicHeaderObj.utc
            return 1

        # Online mode: maybe the block just hasn't been written yet
        if self.__waitNewBlock():
            self.lastUTTime = self.basicHeaderObj.utc
            return 1

        # Otherwise move on to the next file
        if not(self.setNextFile()):
            return 0

        deltaTime = self.basicHeaderObj.utc - self.lastUTTime
        self.lastUTTime = self.basicHeaderObj.utc

        self.flagDiscontinuousBlock = 0

        if deltaTime > self.maxTimeStep:
            self.flagDiscontinuousBlock = 1

        return 1
    def readNextBlock(self):
        """Read blocks until one falls inside the configured date/time
        window, skipping (with a message) those outside it.

        Return:
            1 when a block was read into self.dataOut, 0 on failure.
        """
        while True:
            self.__setNewBlock()

            if not(self.readBlock()):
                return 0

            self.getBasicHeader()

            if not self.isDateTimeInRange(self.dataOut.datatime, self.startDate, self.endDate, self.startTime, self.endTime):
                print("[Reading] Block No. %d/%d -> %s [Skipping]" % (self.nReadBlocks,
                                                                      self.processingHeaderObj.dataBlocksPerFile,
                                                                      self.dataOut.datatime.ctime()))
                continue

            break

        if self.verbose:
            print("[Reading] Block No. %d/%d -> %s" % (self.nReadBlocks,
                                                       self.processingHeaderObj.dataBlocksPerFile,
                                                       self.dataOut.datatime.ctime()))
        return 1
    def readFirstHeader(self):
        """Parse all four headers at the start of the current file and
        derive the sample dtype, the expected file size and the block
        dimensions.

        The data type code is decoded from the processing header's
        processFlags (DATATYPE_MASK relative to DATATYPE_CHAR) and mapped
        to a little-endian complex (real, imag) numpy dtype.

        Raises:
            ValueError when the data type code is not one of 0..5.
        """
        self.basicHeaderObj.read(self.fp)
        self.systemHeaderObj.read(self.fp)
        self.radarControllerHeaderObj.read(self.fp)
        self.processingHeaderObj.read(self.fp)
        self.firstHeaderSize = self.basicHeaderObj.size

        datatype = int(numpy.log2((self.processingHeaderObj.processFlags &
                                   PROCFLAG.DATATYPE_MASK)) - numpy.log2(PROCFLAG.DATATYPE_CHAR))
        if datatype == 0:
            datatype_str = numpy.dtype([('real', '<i1'), ('imag', '<i1')])
        elif datatype == 1:
            datatype_str = numpy.dtype([('real', '<i2'), ('imag', '<i2')])
        elif datatype == 2:
            datatype_str = numpy.dtype([('real', '<i4'), ('imag', '<i4')])
        elif datatype == 3:
            datatype_str = numpy.dtype([('real', '<i8'), ('imag', '<i8')])
        elif datatype == 4:
            datatype_str = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
        elif datatype == 5:
            datatype_str = numpy.dtype([('real', '<f8'), ('imag', '<f8')])
        else:
            raise ValueError('Data type was not defined')

        self.dtype = datatype_str
        #self.ippSeconds = 2 * 1000 * self.radarControllerHeaderObj.ipp / self.c
        # Expected total size: one full header plus dataBlocksPerFile
        # blocks, each later block preceded by a basic header
        self.fileSizeByHeader = self.processingHeaderObj.dataBlocksPerFile * self.processingHeaderObj.blockSize + \
            self.firstHeaderSize + self.basicHeaderSize * \
            (self.processingHeaderObj.dataBlocksPerFile - 1)
        # self.dataOut.channelList = numpy.arange(self.systemHeaderObj.numChannels)
        # self.dataOut.channelIndexList = numpy.arange(self.systemHeaderObj.numChannels)
        self.getBlockDimension()
r892 | |||
r1251 | def verifyFile(self, filename, msgFlag=True): | ||
r892 | |||
|
r487 | msg = None | |
r892 | |||
|
r487 | try: | |
fp = open(filename, 'rb') | |||
except IOError: | |||
r892 | |||
|
r487 | if msgFlag: | |
|
r1167 | print("[Reading] File %s can't be opened" % (filename)) | |
r892 | |||
|
r487 | return False | |
r1270 | |||
if self.waitDataBlock(0): | |||
|
r487 | basicHeaderObj = BasicHeader(LOCALTIME) | |
systemHeaderObj = SystemHeader() | |||
radarControllerHeaderObj = RadarControllerHeader() | |||
processingHeaderObj = ProcessingHeader() | |||
r892 | |||
|
r1074 | if not(basicHeaderObj.read(fp)): | |
|
r684 | fp.close() | |
return False | |||
r892 | |||
|
r1074 | if not(systemHeaderObj.read(fp)): | |
|
r487 | fp.close() | |
return False | |||
r892 | |||
|
r1074 | if not(radarControllerHeaderObj.read(fp)): | |
|
r684 | fp.close() | |
return False | |||
r892 | |||
|
r1074 | if not(processingHeaderObj.read(fp)): | |
|
r684 | fp.close() | |
r1270 | return False | ||
if not self.online: | |||
dt1 = basicHeaderObj.datatime | |||
fp.seek(self.fileSize-processingHeaderObj.blockSize-24) | |||
if not(basicHeaderObj.read(fp)): | |||
fp.close() | |||
return False | |||
dt2 = basicHeaderObj.datatime | |||
if not self.isDateTimeInRange(dt1, self.startDate, self.endDate, self.startTime, self.endTime) and not \ | |||
self.isDateTimeInRange(dt2, self.startDate, self.endDate, self.startTime, self.endTime): | |||
return False | |||
|
r487 | ||
fp.close() | |||
r1270 | |||
|
r487 | return True | |
|
    def findDatafiles(self, path, startDate=None, endDate=None, expLabel='', ext='.r', walk=True, include_path=False):
        """List the dates for which radar data files exist under ``path``.

        Parameters:
            path        : one or several search directories, comma separated.
            startDate   : first date to accept (passed to the range helpers).
            endDate     : last date to accept.
            expLabel    : experiment sub-folder appended to each day folder
                          (only used when ``walk`` is True).
            ext         : file extension to match, e.g. '.r'.
            walk        : True  -> expect one 'dYYYYDDD' folder per day,
                          False -> files live directly inside each path.
            include_path: when True, also return the folder holding each date.

        Returns:
            Sorted ``dateList``; ``(dateList, pathList)`` when
            ``include_path`` is True.

        Raises:
            schainpy.admin.SchainError: when no matching file is found, or
                the selected date range matches nothing.
        """

        path_empty = True

        dateList = []
        pathList = []

        multi_path = path.split(',')

        if not walk:

            # Flat layout: data files sit directly inside each path.
            for single_path in multi_path:

                if not os.path.isdir(single_path):
                    continue

                fileList = glob.glob1(single_path, "*" + ext)

                if not fileList:
                    continue

                path_empty = False

                fileList.sort()

                for thisFile in fileList:

                    if not os.path.isfile(os.path.join(single_path, thisFile)):
                        continue

                    if not isRadarFile(thisFile):
                        continue

                    if not isFileInDateRange(thisFile, startDate, endDate):
                        continue

                    thisDate = getDateFromRadarFile(thisFile)

                    # report each date / source folder only once
                    if thisDate in dateList or single_path in pathList:
                        continue

                    dateList.append(thisDate)
                    pathList.append(single_path)

        else:
            # Day-folder layout: path/dYYYYDDD/expLabel/*.ext
            for single_path in multi_path:

                if not os.path.isdir(single_path):
                    continue

                dirList = []

                for thisPath in os.listdir(single_path):

                    if not os.path.isdir(os.path.join(single_path, thisPath)):
                        continue

                    if not isRadarFolder(thisPath):
                        continue

                    if not isFolderInDateRange(thisPath, startDate, endDate):
                        continue

                    dirList.append(thisPath)

                if not dirList:
                    continue

                dirList.sort()

                for thisDir in dirList:

                    datapath = os.path.join(single_path, thisDir, expLabel)
                    fileList = glob.glob1(datapath, "*" + ext)

                    # a day folder with no matching files contributes nothing
                    if not fileList:
                        continue

                    path_empty = False

                    thisDate = getDateFromRadarFolder(thisDir)

                    pathList.append(datapath)
                    dateList.append(thisDate)

        dateList.sort()

        # Build a human-readable pattern only for the error messages below.
        if walk:
            pattern_path = os.path.join(multi_path[0], "[dYYYYDDD]", expLabel)
        else:
            pattern_path = multi_path[0]

        if path_empty:
            raise schainpy.admin.SchainError("[Reading] No *%s files in %s for %s to %s" % (ext, pattern_path, startDate, endDate))
        else:
            if not dateList:
                raise schainpy.admin.SchainError("[Reading] Date range selected invalid [%s - %s]: No *%s files in %s)" % (startDate, endDate, ext, path))

        if include_path:
            return dateList, pathList

        return dateList
r1251 | def setup(self, **kwargs): | ||
self.set_kwargs(**kwargs) | |||
if not self.ext.startswith('.'): | |||
self.ext = '.{}'.format(self.ext) | |||
r1241 | |||
r1251 | if self.server is not None: | ||
if 'tcp://' in self.server: | |||
|
r1052 | address = server | |
else: | |||
r1251 | address = 'ipc:///tmp/%s' % self.server | ||
|
r1052 | self.server = address | |
self.context = zmq.Context() | |||
self.receiver = self.context.socket(zmq.PULL) | |||
self.receiver.connect(self.server) | |||
time.sleep(0.5) | |||
|
r1167 | print('[Starting] ReceiverData from {}'.format(self.server)) | |
|
r1074 | else: | |
|
r1052 | self.server = None | |
r1251 | if self.path == None: | ||
|
r1167 | raise ValueError("[Reading] The path is not valid") | |
|
r487 | ||
r1251 | if self.online: | ||
log.log("[Reading] Searching files in online mode...", self.name) | |||
|
r1052 | ||
|
r1074 | for nTries in range(self.nTries): | |
r1251 | fullpath = self.searchFilesOnLine(self.path, self.startDate, | ||
self.endDate, self.expLabel, self.ext, self.walk, | |||
self.filefmt, self.folderfmt) | |||
try: | |||
fullpath = next(fullpath) | |||
except: | |||
fullpath = None | |||
|
r1052 | if fullpath: | |
break | |||
r1251 | log.warning( | ||
'Waiting {} sec for a valid file in {}: try {} ...'.format( | |||
self.delay, self.path, nTries + 1), | |||
self.name) | |||
r1241 | time.sleep(self.delay) | ||
|
r1052 | ||
|
r1171 | if not(fullpath): | |
r1251 | raise schainpy.admin.SchainError( | ||
'There isn\'t any valid file in {}'.format(self.path)) | |||
|
r1052 | ||
r1251 | pathname, filename = os.path.split(fullpath) | ||
self.year = int(filename[1:5]) | |||
self.doy = int(filename[5:8]) | |||
self.set = int(filename[8:11]) - 1 | |||
else: | |||
log.log("Searching files in {}".format(self.path), self.name) | |||
self.filenameList = self.searchFilesOffLine(self.path, self.startDate, | |||
self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) | |||
r1241 | |||
r1251 | self.setNextFile() | ||
|
r1052 | ||
|
r487 | return | |
def getBasicHeader(self): | |||
r892 | |||
|
r1074 | self.dataOut.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond / \ | |
1000. + self.profileIndex * self.radarControllerHeaderObj.ippSeconds | |||
r892 | |||
|
r568 | self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock | |
r892 | |||
|
r487 | self.dataOut.timeZone = self.basicHeaderObj.timeZone | |
r892 | |||
|
r487 | self.dataOut.dstFlag = self.basicHeaderObj.dstFlag | |
r892 | |||
|
r487 | self.dataOut.errorCount = self.basicHeaderObj.errorCount | |
r892 | |||
|
r487 | self.dataOut.useLocalTime = self.basicHeaderObj.useLocalTime | |
r892 | |||
|
r1074 | self.dataOut.ippSeconds = self.radarControllerHeaderObj.ippSeconds / self.nTxs | |
|
r1052 | ||
r1251 | # self.dataOut.nProfiles = self.processingHeaderObj.profilesPerBlock*self.nTxs | ||
|
r1052 | ||
|
    def getFirstHeader(self):
        """Subclass hook; presumably copies first-header fields into dataOut -- not implemented here."""
        raise NotImplementedError
    def getData(self):
        """Subclass hook; presumably delivers the next unit of data via dataOut -- not implemented here."""
        raise NotImplementedError
    def hasNotDataInBuffer(self):
        """Subclass hook; presumably reports whether the read buffer is exhausted -- not implemented here."""
        raise NotImplementedError
    def readBlock(self):
        """Subclass hook; presumably reads one data block from the current file -- not implemented here."""
        raise NotImplementedError
    def isEndProcess(self):
        """Return the no-more-files flag set by the reading loop."""
        return self.flagNoMoreFiles
    def printReadBlocks(self):
        """Print how many blocks have been read from the current file."""
        print("[Reading] Number of read blocks per file %04d" % self.nReadBlocks)
    def printTotalBlocks(self):
        """Print the total number of blocks read across all files."""
        print("[Reading] Number of read blocks %04d" % self.nTotalBlocks)
r1251 | def run(self, **kwargs): | ||
""" | |||
Arguments: | |||
path : | |||
startDate : | |||
endDate : | |||
startTime : | |||
endTime : | |||
set : | |||
expLabel : | |||
ext : | |||
online : | |||
delay : | |||
walk : | |||
getblock : | |||
nTxs : | |||
realtime : | |||
blocksize : | |||
blocktime : | |||
skip : | |||
cursor : | |||
warnings : | |||
server : | |||
verbose : | |||
format : | |||
oneDDict : | |||
twoDDict : | |||
independentParam : | |||
""" | |||
r892 | |||
|
r487 | if not(self.isConfig): | |
r1251 | self.setup(**kwargs) | ||
|
r487 | self.isConfig = True | |
r1251 | if self.server is None: | ||
|
r975 | self.getData() | |
|
r1074 | else: | |
|
r975 | self.getFromServer() | |
|
r487 | ||
|
r1074 | ||
class JRODataWriter(Reader):
    """
    Base writer for processed JRO data files (.r or .pdata).

    Data is always written block by block: subclasses buffer incoming
    profiles and call ``writeNextBlock`` when a full block is available,
    rolling over to a new file after ``dataBlocksPerFile`` blocks.
    """

    setFile = None          # running 3-digit set number used in the filename
    profilesPerBlock = None
    blocksPerFile = None
    nWriteBlocks = 0
    fileDate = None         # date of the currently open file (day rollover)

    def __init__(self, dataOut=None):
        raise NotImplementedError

    def hasAllDataInBuffer(self):
        """Subclass hook; presumably True when a full block is buffered -- not implemented here."""
        raise NotImplementedError

    def setBlockDimension(self):
        """Subclass hook; defines the shape of the block buffers -- not implemented here."""
        raise NotImplementedError

    def writeBlock(self):
        """Subclass hook; writes one buffered block to self.fp -- not implemented here."""
        raise NotImplementedError

    def putData(self):
        """Subclass hook; buffers incoming data and triggers writes -- not implemented here."""
        raise NotImplementedError

    def getDtypeWidth(self):
        """Return the sample width for ``self.dtype`` as reported by get_dtype_width."""
        dtype_index = get_dtype_index(self.dtype)
        dtype_width = get_dtype_width(dtype_index)

        return dtype_width

    def getProcessFlags(self):
        """Build the PROCFLAG bitmask describing how dataOut was processed."""
        processFlags = 0

        dtype_index = get_dtype_index(self.dtype)
        procflag_dtype = get_procflag_dtype(dtype_index)

        # the data-type flag is always present
        processFlags += procflag_dtype

        if self.dataOut.flagDecodeData:
            processFlags += PROCFLAG.DECODE_DATA

        if self.dataOut.flagDeflipData:
            processFlags += PROCFLAG.DEFLIP_DATA

        if self.dataOut.code is not None:
            processFlags += PROCFLAG.DEFINE_PROCESS_CODE

        if self.dataOut.nCohInt > 1:
            processFlags += PROCFLAG.COHERENT_INTEGRATION

        if self.dataOut.type == "Spectra":
            if self.dataOut.nIncohInt > 1:
                processFlags += PROCFLAG.INCOHERENT_INTEGRATION

            if self.dataOut.data_dc is not None:
                processFlags += PROCFLAG.SAVE_CHANNELS_DC

            if self.dataOut.flagShiftFFT:
                processFlags += PROCFLAG.SHIFT_FFT_DATA

        return processFlags

    def setBasicHeader(self):
        """Refresh the basic header with the current block's timestamp."""
        self.basicHeaderObj.size = self.basicHeaderSize  # bytes
        self.basicHeaderObj.version = self.versionFile
        self.basicHeaderObj.dataBlock = self.nTotalBlocks

        # split utctime into integer seconds + milliseconds
        utc = numpy.floor(self.dataOut.utctime)
        milisecond = (self.dataOut.utctime - utc) * 1000.0

        self.basicHeaderObj.utc = utc
        self.basicHeaderObj.miliSecond = milisecond
        self.basicHeaderObj.timeZone = self.dataOut.timeZone
        self.basicHeaderObj.dstFlag = self.dataOut.dstFlag
        self.basicHeaderObj.errorCount = self.dataOut.errorCount

    def setFirstHeader(self):
        """
        Build the first-header objects for a new file (subclass hook).

        Affected:
            self.basicHeaderObj
            self.systemHeaderObj
            self.radarControllerHeaderObj
            self.processingHeaderObj

        Return:
            None
        """
        raise NotImplementedError

    def __writeFirstHeader(self):
        """
        Write the long first header of the file: the basic header followed
        by the system, radar-controller and processing headers.

        Affected:
            self.basicHeaderObj.size

        Return:
            None
        """
        # the first basic header announces the size of the whole long header
        sizeLongHeader = self.systemHeaderObj.size + \
            self.radarControllerHeaderObj.size + self.processingHeaderObj.size
        self.basicHeaderObj.size = self.basicHeaderSize + sizeLongHeader

        self.basicHeaderObj.write(self.fp)
        self.systemHeaderObj.write(self.fp)
        self.radarControllerHeaderObj.write(self.fp)
        self.processingHeaderObj.write(self.fp)

    def __setNewBlock(self):
        """
        Prepare the output for the next block: open a new file and write its
        first header, or write only a basic header when the current file
        still has room.

        Return:
            0 : nothing could be written
            1 : a basic or first header was written
        """
        if self.fp is None:
            self.setNextFile()

        if self.flagIsNewFile:
            # setNextFile already wrote the first header
            return 1

        if self.blockIndex < self.processingHeaderObj.dataBlocksPerFile:
            self.basicHeaderObj.write(self.fp)
            return 1

        # current file is full -> roll over
        if not self.setNextFile():
            return 0

        return 1

    def writeNextBlock(self):
        """
        Write the next buffered data block to the output file.

        Return:
            0 : the block could not be written
            1 : the block was written successfully
        """
        if not self.__setNewBlock():
            return 0

        self.writeBlock()

        print("[Writing] Block No. %d/%d" % (self.blockIndex,
                                             self.processingHeaderObj.dataBlocksPerFile))

        return 1

    def setNextFile(self):
        """Open the next output file for writing.

        Creates the 'dYYYYDDD' day folder if needed, picks the next set
        number (resuming from the last file on disk, resetting on a new
        day), opens the file and writes its first header.

        Affected:
            self.filename
            self.subfolder
            self.fp
            self.setFile
            self.flagIsNewFile
            self.fileDate
            self.blockIndex

        Return:
            0 : the file could not be opened for writing
            1 : the file is ready to be written
        """
        ext = self.ext
        path = self.path

        if self.fp is not None:
            self.fp.close()

        if not os.path.exists(path):
            os.mkdir(path)

        timeTuple = time.localtime(self.dataOut.utctime)
        subfolder = 'd%4.4d%3.3d' % (timeTuple.tm_year, timeTuple.tm_yday)

        fullpath = os.path.join(path, subfolder)
        setFile = self.setFile

        if not os.path.exists(fullpath):
            os.mkdir(fullpath)
            setFile = -1  # initialize the set counter
        else:
            filesList = os.listdir(fullpath)
            if len(filesList) > 0:
                filesList = sorted(filesList, key=str.lower)
                filen = filesList[-1]
                # the filename must have the following format
                # 0 1234 567 89A BCDE (hex)
                # x YYYY DDD SSS .ext
                if isNumber(filen[8:11]):
                    # resume from the set number of the last file on disk
                    setFile = int(filen[8:11])
                else:
                    setFile = -1
            else:
                setFile = -1  # initialize the set counter

        setFile += 1

        # If this is a new day it resets some values
        if self.dataOut.datatime.date() > self.fileDate:
            setFile = 0
            self.nTotalBlocks = 0

        filen = '{}{:04d}{:03d}{:03d}{}'.format(
            self.optchar, timeTuple.tm_year, timeTuple.tm_yday, setFile, ext)

        filename = os.path.join(path, subfolder, filen)

        fp = open(filename, 'wb')

        self.blockIndex = 0
        self.filename = filename
        self.subfolder = subfolder
        self.fp = fp
        self.setFile = setFile
        self.flagIsNewFile = 1
        self.fileDate = self.dataOut.datatime.date()

        self.setFirstHeader()

        print('[Writing] Opening file: %s' % self.filename)

        self.__writeFirstHeader()

        return 1

    def setup(self, dataOut, path, blocksPerFile, profilesPerBlock=64, set=None, ext=None, datatype=4):
        """
        Configure the output format and open the first file.

        Inputs:
            path             : directory where data will be saved
            blocksPerFile    : number of blocks per file
            profilesPerBlock : number of profiles per block
            set              : initial file set (None -> start at 0)
            datatype         : integer code that defines the sample type:
                0 : int8  (1 byte)
                1 : int16 (2 bytes)
                2 : int32 (4 bytes)
                3 : int64 (8 bytes)
                4 : float32 (4 bytes)
                5 : double64 (8 bytes)

        Return:
            0 : setup failed (no file could be opened)
            1 : setup succeeded
        """
        if ext is None:
            ext = self.ext

        self.ext = ext.lower()

        self.path = path

        if set is None:
            self.setFile = -1
        else:
            # setNextFile increments before use, so store set - 1
            self.setFile = set - 1

        self.blocksPerFile = blocksPerFile
        self.profilesPerBlock = profilesPerBlock
        self.dataOut = dataOut
        self.fileDate = self.dataOut.datatime.date()
        self.dtype = self.dataOut.dtype

        if datatype is not None:
            self.dtype = get_numpy_dtype(datatype)

        if not self.setNextFile():
            print("[Writing] There isn't a next file")
            return 0

        self.setBlockDimension()

        return 1

    def run(self, dataOut, path, blocksPerFile=100, profilesPerBlock=64, set=None, ext=None, datatype=4, **kwargs):
        """Operation entry point: configure on the first call, then write."""
        if not self.isConfig:

            self.setup(dataOut, path, blocksPerFile, profilesPerBlock=profilesPerBlock,
                       set=set, ext=ext, datatype=datatype, **kwargs)
            self.isConfig = True

        self.dataOut = dataOut
        self.putData()
        return self.dataOut
r1287 | ||
class printInfo(Operation):
    """One-shot operation that prints the header objects attached to dataOut.

    The headers are printed only for the first dataOut that passes through;
    subsequent calls return the data untouched.
    """

    def __init__(self):
        Operation.__init__(self)
        # flips to False after the first dataOut has been reported
        self.__printInfo = True

    def run(self, dataOut, headers=None):
        """Print each named header of ``dataOut`` once, then pass data through.

        Parameters:
            dataOut : the data container whose headers are inspected.
            headers : attribute names to report; defaults to the system,
                radar-controller and processing header objects.

        Returns:
            dataOut, unchanged.
        """
        if not self.__printInfo:
            return dataOut

        # None-sentinel instead of a mutable default list argument
        if headers is None:
            headers = ['systemHeaderObj', 'radarControllerHeaderObj', 'processingHeaderObj']

        for header in headers:
            if hasattr(dataOut, header):
                obj = getattr(dataOut, header)
                if hasattr(obj, 'printInfo'):
                    obj.printInfo()
                else:
                    print(obj)
            else:
                log.warning('Header {} Not found in object'.format(header))

        self.__printInfo = False
        return dataOut