diff --git a/schainpy/Controller/controller.py b/schainpy/Controller/controller.py new file mode 100644 index 0000000..a5e9e06 --- /dev/null +++ b/schainpy/Controller/controller.py @@ -0,0 +1,370 @@ +''' +Created on September, 2012 +@author: +''' +from xml.etree.ElementTree import Element, SubElement, ElementTree +from element import prettify +from xml.etree import ElementTree as ET +import sys + + +#def save(a, b): +# +# nameP = "Alexnder" +# descripcion = self.projectWindow.Text() +# id = 1 +# x = self.data.projectWindow.cmbbox.value() +# +# projectObj = Project(id, name, description) +# +# projectObj.setup(id, name, description) + +class Project(): + + id = None + name = None + description = None + readBranchObjList = None + procBranchObjList = None + + def __init__(self): + +# self.id = id +# self.name = name +# self.description = description + + self.readBranchObjList = [] + self.procBranchObjList = [] + + def setParms(self, id, name, description): + + self.id = id + self.name = name + self.description = description + + def addReadBranch(self, dpath, dataformat, readMode, startDate='', endDate='', startTime='', endTime=''): + + id = len(self.readBranchObjList) + 1 + + readBranchObj = ReadBranch(id, dpath, dataformat, readMode, startDate, endDate, startTime, endTime) + + self.readBranchObjList.append(readBranchObj) + + return readBranchObj + + def addProcBranch(self, name): + + id = len(self.procBranchObjList) + 1 + + procBranchObj = ProcBranch(id, name) + + self.procBranchObjList.append(procBranchObj) + + return procBranchObj + + def makeXml(self): + + projectElement = Element('Project') + projectElement.set('id', str(self.id)) + projectElement.set('name', self.name) + #projectElement.set('description', self.description) + + se = SubElement(projectElement, 'description',description=self.description)#this was the last thing worked on + #se.text = self.description #last modification, to remove the description SubElement text + + for readBranchObj in self.readBranchObjList: + readBranchObj.makeXml(projectElement) + + for procBranchObj in self.procBranchObjList: + procBranchObj.makeXml(projectElement) + + self.projectElement = projectElement + + def writeXml(self, filename): + + self.makeXml() + ElementTree(self.projectElement).write(filename, method='xml') + print prettify(self.projectElement) + + def readXml(self,workspace): + print "Reading the XML file" + tree=ET.parse(workspace) + root=tree.getroot() + self.project=root.tag + self.idProyect= root.attrib.get('id') + self.nameProyect= root.attrib.get('name') + for description in root.findall('description'): + description = description.get('description') + + self.description= description + + for readBranch in root.findall('readBranch'): + id = readBranch.get('id') + self.idrb=id + + for procBranch in root.findall('procBranch'): + id = procBranch.get('id') + name = procBranch.get('name') + self.idpb=id + self.nameBranch=name +# +# + print self.project + print self.idProyect + print self.nameProyect + print self.description + print self.idrb + print self.idpb + print self.nameBranch +# +####THIS MIDDLE BLOCK WAS COMMENTED OUT +# print root.tag , root.attrib +# +# print root.attrib.get('id') +# print root.attrib.get('name') + + +# for description in root.findall('description'): +# description = root.find('description').text +# name = root.get('name') +# print name, description + +# description=root.find('description').text +# print description +# THE CODE BELOW WAS WORKING + print "Another way " + root=tree.getroot() + print root.tag , root.attrib + for child
in root: + print child.tag ,child.attrib + for child in child: + print child.tag ,child.attrib + for child in child: + print child.tag ,child.attrib + for child in child: + print child.tag ,child.attrib +# +class ReadBranch(): + + id = None + dpath = None + dataformat = None + readMode = None + startDate = None + endDate = None + startTime = None + endTime = None + + def __init__(self, id, dpath, dataformat, readMode, startDate, endDate, startTime, endTime): + + self.id = id + self.dpath = dpath + self.dataformat = dataformat + self.readMode = readMode + self.startDate = startDate + self.endDate = endDate + self.startTime = startTime + self.endTime = endTime + + def makeXml(self, projectElement): + + readBranchElement = SubElement(projectElement, 'readBranch') + readBranchElement.set('id', str(self.id)) + +# readBranchElement.set('dpath', self.dpath) +# readBranchElement.set('dataformat', self.dataformat) +# readBranchElement.set('startDate', self.startDate) +# readBranchElement.set('endDate', self.endDate) +# readBranchElement.set('startTime', self.startTime) +# readBranchElement.set('endTime', self.endTime) +# readBranchElement.set('readMode', str(self.readMode)) + +# se = SubElement(readBranchElement, 'dpath')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.dpath +# +# se = SubElement(readBranchElement, 'dataformat')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.dataformat +# +# se = SubElement(readBranchElement, 'startDate')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.startDate +# +# se = SubElement(readBranchElement, 'endDate')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.endDate +# +# se = SubElement(readBranchElement, 'startTime')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.startTime +# +# se = SubElement(readBranchElement, 'endTime')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.endTime +# +# se = SubElement(readBranchElement, 'readMode')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = str(self.readMode) + + ########################################################################## + se = SubElement(readBranchElement, 'parameter', name='dpath' , value=self.dpath) + se = SubElement(readBranchElement, 'parameter', name='dataformat', value=self.dataformat) + se = SubElement(readBranchElement, 'parameter', name='startDate' , value=self.startDate) + se = SubElement(readBranchElement, 'parameter', name='endDate' , value=self.endDate) + se = SubElement(readBranchElement, 'parameter', name='startTime' , value=self.startTime) + se = SubElement(readBranchElement, 'parameter', name='endTime' , value=self.endTime) + se = SubElement(readBranchElement, 'parameter', name='readMode' , value=str(self.readMode)) + + +class ProcBranch(): + + id = None + name = None + + upObjList = None + + def __init__(self, id, name): + + self.id = id + self.name = name + + self.upObjList = [] + + def addUP(self, name, type): + + id = len(self.upObjList) + 1 + + upObj = UP(id, name, type) + + self.upObjList.append(upObj) + + return upObj + + def makeXml(self, projectElement): + + procBranchElement = SubElement(projectElement, 'procBranch') + procBranchElement.set('id', str(self.id)) + procBranchElement.set('name', self.name) + + for upObj in self.upObjList: + upObj.makeXml(procBranchElement) + +class UP(): + + id = None + name = None + type = None + + opObjList = [] + + def __init__(self, id, name, type): + + self.id = id + self.name = name + self.type = type + + self.opObjList = [] + + def addOperation(self, name, priority): + + id = len(self.opObjList) + 1 + + opObj = Operation(id, 
name, priority) + + self.opObjList.append(opObj) + + return opObj + + def makeXml(self, procBranchElement): + + upElement = SubElement(procBranchElement, 'UP') + upElement.set('id', str(self.id)) + upElement.set('name', self.name) + upElement.set('type', self.type) + + for opObj in self.opObjList: + opObj.makeXml(upElement) + +class Operation(): + + id = 0 + name = None + priority = None + parmObjList = [] + + def __init__(self, id, name, priority): + + self.id = id + self.name = name + self.priority = priority + + self.parmObjList = [] + + def addParameter(self, name, value): + + id = len(self.parmObjList) + 1 + + parmObj = Parameter(id, name, value) + + self.parmObjList.append(parmObj) + + return parmObj + + def makeXml(self, upElement): + + opElement = SubElement(upElement, 'Operation') + opElement.set('id', str(self.id)) + opElement.set('name', self.name) + opElement.set('priority', str(self.priority)) + + for parmObj in self.parmObjList: + parmObj.makeXml(opElement) + +class Parameter(): + + id = None + name = None + value = None + + def __init__(self, id, name, value): + + self.id = id + self.name = name + self.value = value + + def makeXml(self, opElement): + + parmElement = SubElement(opElement, 'Parameter') + parmElement.set('name', self.name) + parmElement.set('value', self.value) + +# se = SubElement(parmElement, 'value')#ESTO ES LO ULTIMO QUE SE TRABAJO +# se.text = self.value + +if __name__ == '__main__': + + desc = "Este es un test" + filename = "test.xml" + + workspace=str("C:\\Users\\alex\\workspace\\GUIV2.0\\test.xml") + + projectObj = Project() + + projectObj.setParms(id = '11', name='test01', description=desc) + + readBranchObj = projectObj.addReadBranch(dpath='mydata', dataformat='rawdata', readMode=0, startDate='1', endDate='3', startTime='4', endTime='5') + + procBranchObj = projectObj.addProcBranch(name='Branch1') + + procBranchObj1 = projectObj.addProcBranch(name='Branch2') + upObj1 = procBranchObj.addUP(name='UP1', type='Voltage') + upObj2 = procBranchObj.addUP(name='UP2', type='Voltage') + + opObj11 = upObj1.addOperation(name='removeDC', priority=1) + opObj11.addParameter(name='type', value='1') + + + opObj12 = upObj1.addOperation(name='decodification', priority=2) + opObj12.addParameter(name='ncode', value='2') + opObj12.addParameter(name='nbauds', value='8') + opObj12.addParameter(name='code1', value='001110011') + opObj12.addParameter(name='code2', value='001110011') + + projectObj.writeXml(filename) + + projectObj.readXml(workspace) + \ No newline at end of file diff --git a/schainpy/Controller/controller1.py b/schainpy/Controller/controller1.py new file mode 100644 index 0000000..54a7176 --- /dev/null +++ b/schainpy/Controller/controller1.py @@ -0,0 +1,358 @@ +''' +Created on September , 2012 +@author: +''' +from xml.etree.ElementTree import Element, SubElement, ElementTree +from element import prettify +from xml.etree import ElementTree as ET +import sys + +class Project(): + + id = None + name = None + description = None + readBranchObjList = None + procBranchObjList = None + + def __init__(self): + +# self.id = id +# self.name = name +# self.description = description + + self.readBranchObjList = [] + self.procBranchObjList = [] + + def setParms(self, id, name, description): + + self.id = id + self.name = name + self.description = description + + def addReadBranch(self,id, dpath, dataformat, opMode,readMode, startDate='', endDate='', startTime='', endTime=''): + + #id = len(self.readBranchObjList) + 1 + + readBranchObj = ReadBranch(id, dpath, dataformat, 
opMode , readMode, startDate, endDate, startTime, endTime) + + self.readBranchObjList.append(readBranchObj) + + return readBranchObj + + def addProcBranch(self, id,name): + + # id = len(self.procBranchObjList) + 1 + + procBranchObj = ProcBranch(id, name) + + self.procBranchObjList.append(procBranchObj) + + return procBranchObj + + def makeXml(self): + + projectElement = Element('Project') + projectElement.set('id', str(self.id)) + projectElement.set('name', self.name) + #projectElement.set('description', self.description) + + se = SubElement(projectElement, 'description',description=self.description)#ESTO ES LO ULTIMO QUE SE TRABAJO + #se.text = self.description #ULTIMA MODIFICACION PARA SACAR UN SUB ELEMENT + + for readBranchObj in self.readBranchObjList: + readBranchObj.makeXml(projectElement) + + for branchObj in self.procBranchObjList: + branchObj.makeXml(projectElement) + + self.projectElement = projectElement + + def writeXml(self, filename): + + self.makeXml() + ElementTree(self.projectElement).write(filename, method='xml') + #print prettify(self.projectElement) + +class ReadBranch(): + + id = None + dpath = None + dataformat = None + opMode =None + readMode = None + startDate = None + endDate = None + startTime = None + endTime = None + + def __init__(self, id, dpath, dataformat,opMode, readMode, startDate, endDate, startTime, endTime): + + self.id = id + self.dpath = dpath + self.dataformat = dataformat + self.opMode = opMode + self.readMode = readMode + self.startDate = startDate + self.endDate = endDate + self.startTime = startTime + self.endTime = endTime + + def makeXml(self, projectElement): + + readBranchElement = SubElement(projectElement, 'readBranch') + readBranchElement.set('id', str(self.id)) + + ########################################################################## + se = SubElement(readBranchElement, 'parameter', name='dpath' , value=self.dpath) + se = SubElement(readBranchElement, 'parameter', name='dataformat', value=self.dataformat) + se = SubElement(readBranchElement, 'parameter', name='opMode' , value=self.opMode) + se = SubElement(readBranchElement, 'parameter', name='startDate' , value=self.startDate) + se = SubElement(readBranchElement, 'parameter', name='endDate' , value=self.endDate) + se = SubElement(readBranchElement, 'parameter', name='startTime' , value=self.startTime) + se = SubElement(readBranchElement, 'parameter', name='endTime' , value=self.endTime) + se = SubElement(readBranchElement, 'parameter', name='readMode' , value=str(self.readMode)) + +class ProcBranch(): + + id = None + name = None + + upObjList = None + upsubObjList=None + + def __init__(self, id, name): + + self.id = id + self.name = name + + self.upObjList = [] + self.upsubObjList = [] + + def addUP(self,id, name, type): + + #id = len(self.upObjList) + 1 + + upObj = UP(id, name, type) + + self.upObjList.append(upObj) + + return upObj + + def addUPSUB(self,id, name, type): + + # id = len(self.upsubObjList) + 1 + + upsubObj = UPSUB(id, name, type) + + self.upsubObjList.append(upsubObj) + + return upsubObj + + def makeXml(self, projectElement): + + procBranchElement = SubElement(projectElement, 'procBranch') + procBranchElement.set('id', str(self.id)) + procBranchElement.set('name', self.name) + + for upObj in self.upObjList: + upObj.makeXml(procBranchElement) + + for upsubObj in self.upsubObjList: + upsubObj.makeXml(procBranchElement) + +class UP(): + + id = None + name = None + type = None + upsubObjList=None + opObjList = None + + def __init__(self, id, name, type): + + self.id = id 
+ self.name = name + self.type = type + self.upsubObjList=[] + self.up2subObjList=[] + self.opObjList = [] + + def addOperation(self,id, name, priority): + + #id = len(self.opObjList) + 1 + + opObj = Operation(id, name, priority) + + self.opObjList.append(opObj) + + return opObj + + def addUPSUB(self,id, name, type): + +# id = len(self.upsubObjList) + 1 + + upsubObj = UPSUB(id, name, type) + + self.upsubObjList.append(upsubObj) + + return upsubObj + + def addUP2SUB(self,id, name, type): + +# id = len(self.upsubObjList) + 1 + + up2subObj = UP2SUB(id, name, type) + + self.up2subObjList.append(up2subObj) + + return up2subObj + + def makeXml(self, procBranchElement): + + upElement = SubElement(procBranchElement, 'UP') + upElement.set('id', str(self.id)) + upElement.set('name', self.name) + upElement.set('type', self.type) + + for opObj in self.opObjList: + opObj.makeXml(upElement) + + for upsubObj in self.upsubObjList: + upsubObj.makeXml(upElement) + +class UPSUB(): + + id = None + name = None + type = None + opObjList = None + up2subObjList=None + + + def __init__(self, id, name, type): + + self.id = id + self.name = name + self.type = type + self.up2subObjList = [] + self.opObjList = [] + + def addOperation(self, name, priority): + + id = len(self.opObjList) + 1 + + opObj = Operation(id, name, priority) + + self.opObjList.append(opObj) + + return opObj + + + def addUP2SUB(self,id, name, type): +# +# id = len(self.opObjList) + 1 + up2subObj = UP2SUB(id, name, type) + + self.up2subObjList.append(up2subObj) + + return up2subObj + + def makeXml(self, upElement): + + upsubElement = SubElement(upElement, 'UPSUB') + upsubElement.set('id', str(self.id)) + upsubElement.set('name', self.name) + upsubElement.set('type', self.type) + + for opObj in self.opObjList: + opObj.makeXml(upsubElement) + + for up2subObj in self.up2subObjList: + up2subObj.makeXml(upsubElement) + +class UP2SUB(): + + id = None + name = None + type = None + opObjList = None + + def __init__(self, id, name, type): + + self.id = id + self.name = name + self.type = type + self.opObjList = [] + + def addOperation(self, name, priority): + + id = len(self.opObjList) + 1 + + opObj = Operation(id, name, priority) + + self.opObjList.append(opObj) + + return opObj + + def makeXml(self,upsubElement): + up2subElement = SubElement(upsubElement, 'UPD2SUB') + up2subElement.set('id', str(self.id)) + up2subElement.set('name', self.name) + up2subElement.set('type', self.type) + + for opObj in self.opObjList: + opObj.makeXml(up2subElement) + +class Operation(): + + id = 0 + name = None + priority = None + parmObjList = [] + + def __init__(self, id, name, priority): + + self.id = id + self.name = name + self.priority = priority + + self.parmObjList = [] + + def addParameter(self, name, value): + + id = len(self.parmObjList) + 1 + + parmObj = Parameter(id, name, value) + + self.parmObjList.append(parmObj) + + return parmObj + + def makeXml(self, upElement): + + opElement = SubElement(upElement, 'Operation') + opElement.set('id', str(self.id)) + opElement.set('name', self.name) + opElement.set('priority', str(self.priority)) + + for parmObj in self.parmObjList: + parmObj.makeXml(opElement) + +class Parameter(): + + id = None + name = None + value = None + + def __init__(self, id, name, value): + + self.id = id + self.name = name + self.value = value + + def makeXml(self, opElement): + + parmElement = SubElement(opElement, 'Parameter') + parmElement.set('name', self.name) + parmElement.set('value', self.value) \ No newline at end of file diff --git 
a/schainpy/Model/JROData.py b/schainpy/Model/JROData.py new file mode 100644 index 0000000..8189cb6 --- /dev/null +++ b/schainpy/Model/JROData.py @@ -0,0 +1,241 @@ +''' + +$Author: murco $ +$Id: JROData.py 173 2012-11-20 15:06:21Z murco $ +''' + +import os, sys +import copy +import numpy + +from JROHeaderIO import SystemHeader, RadarControllerHeader + +class JROData: + +# m_BasicHeader = BasicHeader() +# m_ProcessingHeader = ProcessingHeader() + + systemHeaderObj = SystemHeader() + + radarControllerHeaderObj = RadarControllerHeader() + +# data = None + + type = None + + dtype = None + + nChannels = None + + nHeights = None + + nProfiles = None + + heightList = None + + channelList = None + + channelIndexList = None + + flagNoData = True + + flagTimeBlock = False + + utctime = None + + blocksize = None + + nCode = None + + nBaud = None + + code = None + + flagDecodeData = True #asumo q la data esta decodificada + + flagDeflipData = True #asumo q la data esta sin flip + + flagShiftFFT = False + + ippSeconds = None + + timeInterval = None + + def __init__(self): + + raise ValueError, "This class has not been implemented" + + def copy(self, inputObj=None): + + if inputObj == None: + return copy.deepcopy(self) + + for key in inputObj.__dict__.keys(): + self.__dict__[key] = inputObj.__dict__[key] + + def deepcopy(self): + + return copy.deepcopy(self) + +class Voltage(JROData): + + nCohInt = None + + #data es un numpy array de 2 dmensiones (canales, alturas) + data = None + + def __init__(self): + ''' + Constructor + ''' + + self.radarControllerHeaderObj = RadarControllerHeader() + + self.systemHeaderObj = SystemHeader() + + self.type = "Voltage" + + self.data = None + + self.dtype = None + + self.nChannels = 0 + + self.nHeights = 0 + + self.nProfiles = None + + self.heightList = None + + self.channelList = None + + self.channelIndexList = None + + self.flagNoData = True + + self.flagTimeBlock = False + + self.utctime = None + + self.nCohInt = None + + self.blocksize = None + +class Spectra(JROData): + + #data es un numpy array de 2 dmensiones (canales, perfiles, alturas) + data_spc = None + + #data es un numpy array de 2 dmensiones (canales, pares, alturas) + data_cspc = None + + #data es un numpy array de 2 dmensiones (canales, alturas) + data_dc = None + + nFFTPoints = None + + nPairs = None + + pairsList = None + + nIncohInt = None + + wavelength = None #Necesario para cacular el rango de velocidad desde la frecuencia + + nCohInt = None #se requiere para determinar el valor de timeInterval + + def __init__(self): + ''' + Constructor + ''' + + self.radarControllerHeaderObj = RadarControllerHeader() + + self.systemHeaderObj = SystemHeader() + + self.type = "Spectra" + +# self.data = None + + self.dtype = None + + self.nChannels = 0 + + self.nHeights = 0 + + self.nProfiles = None + + self.heightList = None + + self.channelList = None + + self.channelIndexList = None + + self.flagNoData = True + + self.flagTimeBlock = False + + self.utctime = None + + self.nIncohInt = None + + self.blocksize = None + + self.nFFTPoints = None + + self.wavelength = None + + def getFrequencies(self): + + xrange = numpy.arange(self.nFFTPoints) + xrange = xrange + return None + + +class SpectraHeis(JROData): + + data_spc = None + + data_cspc = None + + data_dc = None + + nFFTPoints = None + + nPairs = None + + pairsList = None + + nIncohInt = None + + def __init__(self): + + self.radarControllerHeaderObj = RadarControllerHeader() + + self.systemHeaderObj = SystemHeader() + + self.type = "SpectraHeis" + + self.dtype = 
None + + self.nChannels = 0 + + self.nHeights = 0 + + self.nProfiles = None + + self.heightList = None + + self.channelList = None + + self.channelIndexList = None + + self.flagNoData = True + + self.flagTimeBlock = False + + self.nPairs = 0 + + self.utctime = None + + self.blocksize = None diff --git a/schainpy/Model/JRODataIO.py b/schainpy/Model/JRODataIO.py new file mode 100644 index 0000000..0253ffc --- /dev/null +++ b/schainpy/Model/JRODataIO.py @@ -0,0 +1,2461 @@ +''' + +$Author: murco $ +$Id: JRODataIO.py 169 2012-11-19 21:57:03Z murco $ +''' + +import os, sys +import glob +import time +import numpy +import fnmatch +import time, datetime + +from Data.JROData import * +from JROHeaderIO import * + +def isNumber(str): + """ + Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero. + + Excepciones: + Si un determinado string no puede ser convertido a numero + Input: + str, string al cual se le analiza para determinar si convertible a un numero o no + + Return: + True : si el string es uno numerico + False : no es un string numerico + """ + try: + float( str ) + return True + except: + return False + +def isThisFileinRange(filename, startUTSeconds, endUTSeconds): + """ + Esta funcion determina si un archivo de datos se encuentra o no dentro del rango de fecha especificado. + + Inputs: + filename : nombre completo del archivo de datos en formato Jicamarca (.r) + + startUTSeconds : fecha inicial del rango seleccionado. La fecha esta dada en + segundos contados desde 01/01/1970. + endUTSeconds : fecha final del rango seleccionado. La fecha esta dada en + segundos contados desde 01/01/1970. + + Return: + Boolean : Retorna True si el archivo de datos contiene datos en el rango de + fecha especificado, de lo contrario retorna False. + + Excepciones: + Si el archivo no existe o no puede ser abierto + Si la cabecera no puede ser leida. + + """ + basicHeaderObj = BasicHeader() + + try: + fp = open(filename,'rb') + except: + raise IOError, "The file %s can't be opened" %(filename) + + sts = basicHeaderObj.read(fp) + fp.close() + + if not(sts): + print "Skipping the file %s because it has not a valid header" %(filename) + return 0 + + if not ((startUTSeconds <= basicHeaderObj.utc) and (endUTSeconds > basicHeaderObj.utc)): + return 0 + + return 1 + +def getlastFileFromPath(path, ext): + """ + Depura el fileList dejando solo los que cumplan el formato de "PYYYYDDDSSS.ext" + al final de la depuracion devuelve el ultimo file de la lista que quedo. + + Input: + fileList : lista conteniendo todos los files (sin path) que componen una determinada carpeta + ext : extension de los files contenidos en una carpeta + + Return: + El ultimo file de una determinada carpeta, no se considera el path. + """ + validFilelist = [] + fileList = os.listdir(path) + + # 0 1234 567 89A BCDE + # H YYYY DDD SSS .ext + + for file in fileList: + try: + year = int(file[1:5]) + doy = int(file[5:8]) + + if (os.path.splitext(file)[-1].upper() != ext.upper()) : continue + except: + continue + + validFilelist.append(file) + + if validFilelist: + validFilelist = sorted( validFilelist, key=str.lower ) + return validFilelist[-1] + + return None + +def checkForRealPath(path, year, doy, set, ext): + """ + Por ser Linux Case Sensitive entonces checkForRealPath encuentra el nombre correcto de un path, + Prueba por varias combinaciones de nombres entre mayusculas y minusculas para determinar + el path exacto de un determinado file. 
+ + Example : + nombre correcto del file es .../.../D2009307/P2009307367.ext + + Entonces la funcion prueba con las siguientes combinaciones + .../.../x2009307/y2009307367.ext + .../.../x2009307/Y2009307367.ext + .../.../X2009307/y2009307367.ext + .../.../X2009307/Y2009307367.ext + siendo para este caso, la ultima combinacion de letras, identica al file buscado + + Return: + Si encuentra la cobinacion adecuada devuelve el path completo y el nombre del file + caso contrario devuelve None como path y el la ultima combinacion de nombre en mayusculas + para el filename + """ + filepath = None + find_flag = False + filename = None + + if ext.lower() == ".r": #voltage + header1 = "dD" + header2 = "dD" + elif ext.lower() == ".pdata": #spectra + header1 = "dD" + header2 = "pP" + else: + return None, filename + + for dir in header1: #barrido por las dos combinaciones posibles de "D" + for fil in header2: #barrido por las dos combinaciones posibles de "D" + doypath = "%s%04d%03d" % ( dir, year, doy ) #formo el nombre del directorio xYYYYDDD (x=d o x=D) + filename = "%s%04d%03d%03d%s" % ( fil, year, doy, set, ext ) #formo el nombre del file xYYYYDDDSSS.ext + filepath = os.path.join( path, doypath, filename ) #formo el path completo + if os.path.exists( filepath ): #verifico que exista + find_flag = True + break + if find_flag: + break + + if not(find_flag): + return None, filename + + return filepath, filename + +class JRODataIO: + + c = 3E8 + + basicHeaderObj = BasicHeader() + + systemHeaderObj = SystemHeader() + + radarControllerHeaderObj = RadarControllerHeader() + + processingHeaderObj = ProcessingHeader() + + online = 0 + + dtype = None + + pathList = [] + + filenameList = [] + + filename = None + + ext = None + + flagNoMoreFiles = 0 + + flagIsNewFile = 1 + + flagTimeBlock = 0 + + flagIsNewBlock = 0 + + fp = None + + firstHeaderSize = 0 + + basicHeaderSize = 24 + + versionFile = 1103 + + fileSize = None + + ippSeconds = None + + fileSizeByHeader = None + + fileIndex = None + + profileIndex = None + + blockIndex = None + + nTotalBlocks = None + + maxTimeStep = 30 + + lastUTTime = None + + datablock = None + + dataOutObj = None + + blocksize = None + + def __init__(self): + pass + +class JRODataReader(JRODataIO): + + nReadBlocks = 0 + + delay = 60 #number of seconds waiting a new file + + nTries = 3 #quantity tries + + nFiles = 3 #number of files for searching + + + def __init__(self): + + """ + + """ + + raise ValueError, "This method has not been implemented" + + + def createObjByDefault(self): + """ + + """ + raise ValueError, "This method has not been implemented" + + def getBlockDimension(self): + + raise ValueError, "No implemented" + + def __searchFilesOffLine(self, + path, + startDate, + endDate, + startTime=datetime.time(0,0,0), + endTime=datetime.time(23,59,59), + set=None, + expLabel="", + ext=".r"): + dirList = [] + for thisPath in os.listdir(path): + if os.path.isdir(os.path.join(path,thisPath)): + dirList.append(thisPath) + + if not(dirList): + return None, None + + pathList = [] + dateList = [] + + thisDate = startDate + + while(thisDate <= endDate): + year = thisDate.timetuple().tm_year + doy = thisDate.timetuple().tm_yday + + match = fnmatch.filter(dirList, '?' 
+ '%4.4d%3.3d' % (year,doy)) + if len(match) == 0: + thisDate += datetime.timedelta(1) + continue + + pathList.append(os.path.join(path,match[0],expLabel)) + dateList.append(thisDate) + thisDate += datetime.timedelta(1) + + filenameList = [] + for index in range(len(pathList)): + + thisPath = pathList[index] + fileList = glob.glob1(thisPath, "*%s" %ext) + fileList.sort() + + #Busqueda de datos en el rango de horas indicados + thisDate = dateList[index] + startDT = datetime.datetime.combine(thisDate, startTime) + endDT = datetime.datetime.combine(thisDate, endTime) + + startUtSeconds = time.mktime(startDT.timetuple()) + endUtSeconds = time.mktime(endDT.timetuple()) + + for file in fileList: + + filename = os.path.join(thisPath,file) + + if isThisFileinRange(filename, startUtSeconds, endUtSeconds): + filenameList.append(filename) + + if not(filenameList): + return None, None + + self.filenameList = filenameList + + return pathList, filenameList + + def __searchFilesOnLine(self, path, startDate=None, endDate=None, startTime=None, endTime=None, expLabel = "", ext = None): + + """ + Busca el ultimo archivo de la ultima carpeta (determinada o no por startDateTime) y + devuelve el archivo encontrado ademas de otros datos. + + Input: + path : carpeta donde estan contenidos los files que contiene data + + startDate : Fecha inicial. Rechaza todos los directorios donde + file end time < startDate (obejto datetime.date) + + endDate : Fecha final. Rechaza todos los directorios donde + file start time > endDate (obejto datetime.date) + + startTime : Tiempo inicial. Rechaza todos los archivos donde + file end time < startTime (obejto datetime.time) + + endTime : Tiempo final. Rechaza todos los archivos donde + file start time > endTime (obejto datetime.time) + + expLabel : Nombre del subexperimento (subfolder) + + ext : extension de los files + + Return: + directory : eL directorio donde esta el file encontrado + filename : el ultimo file de una determinada carpeta + year : el anho + doy : el numero de dia del anho + set : el set del archivo + + + """ + dirList = [] + pathList = [] + directory = None + + #Filtra solo los directorios + for thisPath in os.listdir(path): + if os.path.isdir(os.path.join(path, thisPath)): + dirList.append(thisPath) + + if not(dirList): + return None, None, None, None, None + + dirList = sorted( dirList, key=str.lower ) + + if startDate: + startDateTime = datetime.datetime.combine(startDate, startTime) + thisDateTime = startDateTime + if endDate == None: endDateTime = startDateTime + else: endDateTime = datetime.datetime.combine(endDate, endTime) + + while(thisDateTime <= endDateTime): + year = thisDateTime.timetuple().tm_year + doy = thisDateTime.timetuple().tm_yday + + match = fnmatch.filter(dirList, '?' 
+ '%4.4d%3.3d' % (year,doy)) + if len(match) == 0: + thisDateTime += datetime.timedelta(1) + continue + + pathList.append(os.path.join(path,match[0], expLabel)) + thisDateTime += datetime.timedelta(1) + + if not(pathList): + print "\tNo files in range: %s - %s" %(startDateTime.ctime(), endDateTime.ctime()) + return None, None, None, None, None + + directory = pathList[0] + + else: + directory = dirList[-1] + directory = os.path.join(path,directory) + + filename = getlastFileFromPath(directory, ext) + + if not(filename): + return None, None, None, None, None + + if not(self.__verifyFile(os.path.join(directory, filename))): + return None, None, None, None, None + + year = int( filename[1:5] ) + doy = int( filename[5:8] ) + set = int( filename[8:11] ) + + return directory, filename, year, doy, set + + def setup(self,dataOutObj=None, + path=None, + startDate=None, + endDate=None, + startTime=datetime.time(0,0,0), + endTime=datetime.time(23,59,59), + set=0, + expLabel = "", + ext = None, + online = False, + delay = 60): + + if path == None: + raise ValueError, "The path is not valid" + + if ext == None: + ext = self.ext + + if dataOutObj == None: + dataOutObj = self.createObjByDefault() + + self.dataOutObj = dataOutObj + + if online: + print "Searching files in online mode..." + doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext) + + if not(doypath): + for nTries in range( self.nTries ): + print '\tWaiting %0.2f sec for a valid file in %s: try %02d ...' % (self.delay, path, nTries+1) + time.sleep( self.delay ) + doypath, file, year, doy, set = self.__searchFilesOnLine(path=path, expLabel=expLabel, ext=ext) + if doypath: + break + + if not(doypath): + print "There are no valid files in %s" % path + return None + + self.year = year + self.doy = doy + self.set = set - 1 + self.path = path + + else: + print "Searching files in offline mode ..."
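+ #offline mode: build the list of day folders and data files whose basic headers fall inside the requested date/time range (see __searchFilesOffLine above)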
+ pathList, filenameList = self.__searchFilesOffLine(path, startDate, endDate, startTime, endTime, set, expLabel, ext) + + if not(pathList): + print "No *%s files into the folder %s \nfor the range: %s - %s"%(ext, path, + datetime.datetime.combine(startDate,startTime).ctime(), + datetime.datetime.combine(endDate,endTime).ctime()) + + sys.exit(-1) + + + self.fileIndex = -1 + self.pathList = pathList + self.filenameList = filenameList + + self.online = online + self.delay = delay + ext = ext.lower() + self.ext = ext + + if not(self.setNextFile()): + if (startDate!=None) and (endDate!=None): + print "No files in range: %s - %s" %(datetime.datetime.combine(startDate,startTime).ctime(), datetime.datetime.combine(endDate,endTime).ctime()) + elif startDate != None: + print "No files in range: %s" %(datetime.datetime.combine(startDate,startTime).ctime()) + else: + print "No files" + + sys.exit(-1) + +# self.updateDataHeader() + + return self.dataOutObj + + def __setNextFileOffline(self): + + idFile = self.fileIndex + + while (True): + idFile += 1 + if not(idFile < len(self.filenameList)): + self.flagNoMoreFiles = 1 + print "No more Files" + return 0 + + filename = self.filenameList[idFile] + + if not(self.__verifyFile(filename)): + continue + + fileSize = os.path.getsize(filename) + fp = open(filename,'rb') + break + + self.flagIsNewFile = 1 + self.fileIndex = idFile + self.filename = filename + self.fileSize = fileSize + self.fp = fp + + print "Setting the file: %s"%self.filename + + return 1 + + def __setNextFileOnline(self): + """ + Busca el siguiente file que tenga suficiente data para ser leida, dentro de un folder especifico, si + no encuentra un file valido espera un tiempo determinado y luego busca en los posibles n files + siguientes. + + Affected: + self.flagIsNewFile + self.filename + self.fileSize + self.fp + self.set + self.flagNoMoreFiles + + Return: + 0 : si luego de una busqueda del siguiente file valido este no pudo ser encontrado + 1 : si el file fue abierto con exito y esta listo a ser leido + + Excepciones: + Si un determinado file no puede ser abierto + """ + nFiles = 0 + fileOk_flag = False + firstTime_flag = True + + self.set += 1 + + #busca el 1er file disponible + file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext ) + if file: + if self.__verifyFile(file, False): + fileOk_flag = True + + #si no encuentra un file entonces espera y vuelve a buscar + if not(fileOk_flag): + for nFiles in range(self.nFiles+1): #busco en los siguientes self.nFiles+1 files posibles + + if firstTime_flag: #si es la 1era vez entonces hace el for self.nTries veces + tries = self.nTries + else: + tries = 1 #si no es la 1era vez entonces solo lo hace una vez + + for nTries in range( tries ): + if firstTime_flag: + print "\tWaiting %0.2f sec for the file \"%s\" , try %03d ..." % ( self.delay, filename, nTries+1 ) + time.sleep( self.delay ) + else: + print "\tSearching next \"%s%04d%03d%03d%s\" file ..." 
% (self.optchar, self.year, self.doy, self.set, self.ext) + + file, filename = checkForRealPath( self.path, self.year, self.doy, self.set, self.ext ) + if file: + if self.__verifyFile(file): + fileOk_flag = True + break + + if fileOk_flag: + break + + firstTime_flag = False + + print "\tSkipping the file \"%s\" due to this file doesn't exist" % filename + self.set += 1 + + if nFiles == (self.nFiles-1): #si no encuentro el file buscado cambio de carpeta y busco en la siguiente carpeta + self.set = 0 + self.doy += 1 + + if fileOk_flag: + self.fileSize = os.path.getsize( file ) + self.filename = file + self.flagIsNewFile = 1 + if self.fp != None: self.fp.close() + self.fp = open(file) + self.flagNoMoreFiles = 0 + print 'Setting the file: %s' % file + else: + self.fileSize = 0 + self.filename = None + self.flagIsNewFile = 0 + self.fp = None + self.flagNoMoreFiles = 1 + print 'No more Files' + + return fileOk_flag + + + def setNextFile(self): + if self.fp != None: + self.fp.close() + + if self.online: + newFile = self.__setNextFileOnline() + else: + newFile = self.__setNextFileOffline() + + if not(newFile): + return 0 + + self.__readFirstHeader() + self.nReadBlocks = 0 + return 1 + + def __setNewBlock(self): + if self.fp == None: + return 0 + + if self.flagIsNewFile: + return 1 + + self.lastUTTime = self.basicHeaderObj.utc + currentSize = self.fileSize - self.fp.tell() + neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize + + if (currentSize >= neededSize): + self.__rdBasicHeader() + return 1 + + if not(self.setNextFile()): + return 0 + + deltaTime = self.basicHeaderObj.utc - self.lastUTTime # + + self.flagTimeBlock = 0 + + if deltaTime > self.maxTimeStep: + self.flagTimeBlock = 1 + + return 1 + + + def readNextBlock(self): + if not(self.__setNewBlock()): + return 0 + + if not(self.readBlock()): + return 0 + + return 1 + + def __rdProcessingHeader(self, fp=None): + if fp == None: + fp = self.fp + + self.processingHeaderObj.read(fp) + + def __rdRadarControllerHeader(self, fp=None): + if fp == None: + fp = self.fp + + self.radarControllerHeaderObj.read(fp) + + def __rdSystemHeader(self, fp=None): + if fp == None: + fp = self.fp + + self.systemHeaderObj.read(fp) + + def __rdBasicHeader(self, fp=None): + if fp == None: + fp = self.fp + + self.basicHeaderObj.read(fp) + + + def __readFirstHeader(self): + self.__rdBasicHeader() + self.__rdSystemHeader() + self.__rdRadarControllerHeader() + self.__rdProcessingHeader() + + self.firstHeaderSize = self.basicHeaderObj.size + + datatype = int(numpy.log2((self.processingHeaderObj.processFlags & PROCFLAG.DATATYPE_MASK))-numpy.log2(PROCFLAG.DATATYPE_CHAR)) + if datatype == 0: + datatype_str = numpy.dtype([('real',' 0: + filesList = sorted( filesList, key=str.lower ) + filen = filesList[-1] + # el filename debera tener el siguiente formato + # 0 1234 567 89A BCDE (hex) + # x YYYY DDD SSS .ext + if isNumber( filen[8:11] ): + self.setFile = int( filen[8:11] ) #inicializo mi contador de seteo al seteo del ultimo file + else: + self.setFile = -1 + else: + self.setFile = -1 #inicializo mi contador de seteo + + setFile = self.setFile + setFile += 1 + + file = '%s%4.4d%3.3d%3.3d%s' % (self.optchar, + timeTuple.tm_year, + timeTuple.tm_yday, + setFile, + ext ) + + filename = os.path.join( path, subfolder, file ) + + fp = open( filename,'wb' ) + + self.blockIndex = 0 + + #guardando atributos + self.filename = filename + self.subfolder = subfolder + self.fp = fp + self.setFile = setFile + self.flagIsNewFile = 1 + + self.getDataHeader() + + print 
'Writing the file: %s'%self.filename + + self.__writeFirstHeader() + + return 1 + + def setup(self, path, blocksPerFile, profilesPerBlock=None, set=0, ext=None): + """ + Setea el tipo de formato en la cual sera guardada la data y escribe el First Header + + Inputs: + path : el path destino en el cual se escribiran los files a crear + format : formato en el cual sera salvado un file + set : el setebo del file + + Return: + 0 : Si no realizo un buen seteo + 1 : Si realizo un buen seteo + """ + + if ext == None: + ext = self.ext + + ext = ext.lower() + + self.ext = ext + + self.path = path + + self.setFile = set - 1 + + self.blocksPerFile = blocksPerFile + + self.profilesPerBlock = profilesPerBlock + + if not(self.setNextFile()): + print "There isn't a next file" + return 0 + + self.setBlockDimension() + + return 1 + + def run(self, dataOut, **kwargs): + + if not(self.isConfig): + + self.dataOutObj = dataOut + self.setup(**kwargs) + + self.putData() + +class VoltageReader(JRODataReader): + """ + Esta clase permite leer datos de voltage desde archivos en formato rawdata (.r). La lectura + de los datos siempre se realiza por bloques. Los datos leidos (array de 3 dimensiones: + perfiles*alturas*canales) son almacenados en la variable "buffer". + + perfiles * alturas * canales + + Esta clase contiene instancias (objetos) de las clases BasicHeader, SystemHeader, + RadarControllerHeader y Voltage. Los tres primeros se usan para almacenar informacion de la + cabecera de datos (metadata), y el cuarto (Voltage) para obtener y almacenar un perfil de + datos desde el "buffer" cada vez que se ejecute el metodo "getData". + + Example: + + dpath = "/home/myuser/data" + + startTime = datetime.datetime(2010,1,20,0,0,0,0,0,0) + + endTime = datetime.datetime(2010,1,21,23,59,59,0,0,0) + + readerObj = VoltageReader() + + readerObj.setup(dpath, startTime, endTime) + + while(True): + + #to get one profile + profile = readerObj.getData() + + #print the profile + print profile + + #If you want to see all datablock + print readerObj.datablock + + if readerObj.flagNoMoreFiles: + break + + """ + + ext = ".r" + + optchar = "D" + dataOutObj = None + + + def __init__(self, dataOutObj=None): + """ + Inicializador de la clase VoltageReader para la lectura de datos de voltage. + + Input: + dataOutObj : Objeto de la clase Voltage. Este objeto sera utilizado para + almacenar un perfil de datos cada vez que se haga un requerimiento + (getData). El perfil sera obtenido a partir del buffer de datos, + si el buffer esta vacio se hara un nuevo proceso de lectura de un + bloque de datos. + Si este parametro no es pasado se creara uno internamente. 
+ + Variables afectadas: + self.dataOutObj + + Return: + None + """ + + self.datablock = None + + self.utc = 0 + + self.ext = ".r" + + self.optchar = "D" + + self.basicHeaderObj = BasicHeader() + + self.systemHeaderObj = SystemHeader() + + self.radarControllerHeaderObj = RadarControllerHeader() + + self.processingHeaderObj = ProcessingHeader() + + self.online = 0 + + self.fp = None + + self.idFile = None + + self.dtype = None + + self.fileSizeByHeader = None + + self.filenameList = [] + + self.filename = None + + self.fileSize = None + + self.firstHeaderSize = 0 + + self.basicHeaderSize = 24 + + self.pathList = [] + + self.filenameList = [] + + self.lastUTTime = 0 + + self.maxTimeStep = 30 + + self.flagNoMoreFiles = 0 + + self.set = 0 + + self.path = None + + self.profileIndex = 9999 + + self.delay = 3 #seconds + + self.nTries = 3 #quantity tries + + self.nFiles = 3 #number of files for searching + + self.nReadBlocks = 0 + + self.flagIsNewFile = 1 + + self.ippSeconds = 0 + + self.flagTimeBlock = 0 + + self.flagIsNewBlock = 0 + + self.nTotalBlocks = 0 + + self.blocksize = 0 + + def createObjByDefault(self): + + dataObj = Voltage() + + return dataObj + + def __hasNotDataInBuffer(self): + if self.profileIndex >= self.processingHeaderObj.profilesPerBlock: + return 1 + return 0 + + + def getBlockDimension(self): + """ + Obtiene la cantidad de puntos a leer por cada bloque de datos + + Affected: + self.blocksize + + Return: + None + """ + pts2read = self.processingHeaderObj.profilesPerBlock * self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels + self.blocksize = pts2read + + + def readBlock(self): + """ + readBlock lee el bloque de datos desde la posicion actual del puntero del archivo + (self.fp) y actualiza todos los parametros relacionados al bloque de datos + (metadata + data). La data leida es almacenada en el buffer y el contador del buffer + es seteado a 0 + + Inputs: + None + + Return: + None + + Affected: + self.profileIndex + self.datablock + self.flagIsNewFile + self.flagIsNewBlock + self.nTotalBlocks + + Exceptions: + Si un bloque leido no es un bloque valido + """ + + junk = numpy.fromfile( self.fp, self.dtype, self.blocksize ) + + try: + junk = junk.reshape( (self.processingHeaderObj.profilesPerBlock, self.processingHeaderObj.nHeights, self.systemHeaderObj.nChannels) ) + except: + print "The read block (%3d) has not enough data" %self.nReadBlocks + return 0 + + junk = numpy.transpose(junk, (2,0,1)) + self.datablock = junk['real'] + junk['imag']*1j + + self.profileIndex = 0 + + self.flagIsNewFile = 0 + self.flagIsNewBlock = 1 + + self.nTotalBlocks += 1 + self.nReadBlocks += 1 + + return 1 + + + def getData(self): + """ + getData obtiene una unidad de datos del buffer de lectura y la copia a la clase "Voltage" + con todos los parametros asociados a este (metadata). cuando no hay datos en el buffer de + lectura es necesario hacer una nueva lectura de los bloques de datos usando "readNextBlock" + + Ademas incrementa el contador del buffer en 1. + + Return: + data : retorna un perfil de voltages (alturas * canales) copiados desde el + buffer. Si no hay mas archivos a leer retorna None. 
+ + Variables afectadas: + self.dataOutObj + self.profileIndex + + Affected: + self.dataOutObj + self.profileIndex + self.flagTimeBlock + self.flagIsNewBlock + """ + if self.flagNoMoreFiles: return 0 + + self.flagTimeBlock = 0 + self.flagIsNewBlock = 0 + + if self.__hasNotDataInBuffer(): + + if not( self.readNextBlock() ): + return 0 + +# self.updateDataHeader() + + if self.flagNoMoreFiles == 1: + print 'Process finished' + return 0 + + #data es un numpy array de 3 dmensiones (perfiles, alturas y canales) + + if self.datablock == None: + self.dataOutObj.flagNoData = True + return 0 + + self.dataOutObj.data = self.datablock[:,self.profileIndex,:] + + self.dataOutObj.dtype = self.dtype + + self.dataOutObj.nChannels = self.systemHeaderObj.nChannels + + self.dataOutObj.nHeights = self.processingHeaderObj.nHeights + + self.dataOutObj.nProfiles = self.processingHeaderObj.profilesPerBlock + + xf = self.processingHeaderObj.firstHeight + self.processingHeaderObj.nHeights*self.processingHeaderObj.deltaHeight + + self.dataOutObj.heightList = numpy.arange(self.processingHeaderObj.firstHeight, xf, self.processingHeaderObj.deltaHeight) + + self.dataOutObj.channelList = range(self.systemHeaderObj.nChannels) + + self.dataOutObj.channelIndexList = range(self.systemHeaderObj.nChannels) + + self.dataOutObj.flagTimeBlock = self.flagTimeBlock + + self.dataOutObj.utctime = self.basicHeaderObj.utc + self.basicHeaderObj.miliSecond/1000. + self.profileIndex * self.ippSeconds + + self.dataOutObj.ippSeconds = self.ippSeconds + + self.dataOutObj.timeInterval = self.ippSeconds * self.processingHeaderObj.nCohInt + + self.dataOutObj.nCohInt = self.processingHeaderObj.nCohInt + + self.dataOutObj.flagShiftFFT = False + + if self.processingHeaderObj.code != None: + self.dataOutObj.nCode = self.processingHeaderObj.nCode + + self.dataOutObj.nBaud = self.processingHeaderObj.nBaud + + self.dataOutObj.code = self.processingHeaderObj.code + + self.profileIndex += 1 + + self.dataOutObj.systemHeaderObj = self.systemHeaderObj.copy() + + self.dataOutObj.radarControllerHeaderObj = self.radarControllerHeaderObj.copy() + + self.dataOutObj.flagNoData = False + +# print self.profileIndex, self.dataOutObj.utctime +# if self.profileIndex == 800: +# a=1 + + return self.dataOutObj.data + + +class VoltageWriter(JRODataWriter): + """ + Esta clase permite escribir datos de voltajes a archivos procesados (.r). La escritura + de los datos siempre se realiza por bloques. + """ + + ext = ".r" + + optchar = "D" + + shapeBuffer = None + + + def __init__(self, dataOutObj=None): + """ + Inicializador de la clase VoltageWriter para la escritura de datos de espectros. 
+ + Affected: + self.dataOutObj + + Return: None + """ + if dataOutObj == None: + dataOutObj = Voltage() + + if not( isinstance(dataOutObj, Voltage) ): + raise ValueError, "in VoltageReader, dataOutObj must be an Spectra class object" + + self.dataOutObj = dataOutObj + + self.nTotalBlocks = 0 + + self.profileIndex = 0 + + self.isConfig = False + + self.fp = None + + self.flagIsNewFile = 1 + + self.nTotalBlocks = 0 + + self.flagIsNewBlock = 0 + + self.flagNoMoreFiles = 0 + + self.setFile = None + + self.dtype = None + + self.path = None + + self.noMoreFiles = 0 + + self.filename = None + + self.basicHeaderObj = BasicHeader() + + self.systemHeaderObj = SystemHeader() + + self.radarControllerHeaderObj = RadarControllerHeader() + + self.processingHeaderObj = ProcessingHeader() + + def hasAllDataInBuffer(self): + if self.profileIndex >= self.processingHeaderObj.profilesPerBlock: + return 1 + return 0 + + + def setBlockDimension(self): + """ + Obtiene las formas dimensionales del los subbloques de datos que componen un bloque + + Affected: + self.shape_spc_Buffer + self.shape_cspc_Buffer + self.shape_dc_Buffer + + Return: None + """ + self.shapeBuffer = (self.processingHeaderObj.profilesPerBlock, + self.processingHeaderObj.nHeights, + self.systemHeaderObj.nChannels) + + self.datablock = numpy.zeros((self.systemHeaderObj.nChannels, + self.processingHeaderObj.profilesPerBlock, + self.processingHeaderObj.nHeights), + dtype=numpy.dtype('complex')) + + + def writeBlock(self): + """ + Escribe el buffer en el file designado + + Affected: + self.profileIndex + self.flagIsNewFile + self.flagIsNewBlock + self.nTotalBlocks + self.blockIndex + + Return: None + """ + data = numpy.zeros( self.shapeBuffer, self.dtype ) + + junk = numpy.transpose(self.datablock, (1,2,0)) + + data['real'] = junk.real + data['imag'] = junk.imag + + data = data.reshape( (-1) ) + + data.tofile( self.fp ) + + self.datablock.fill(0) + + self.profileIndex = 0 + self.flagIsNewFile = 0 + self.flagIsNewBlock = 1 + + self.blockIndex += 1 + self.nTotalBlocks += 1 + + def putData(self): + """ + Setea un bloque de datos y luego los escribe en un file + + Affected: + self.flagIsNewBlock + self.profileIndex + + Return: + 0 : Si no hay data o no hay mas files que puedan escribirse + 1 : Si se escribio la data de un bloque en un file + """ + if self.dataOutObj.flagNoData: + return 0 + + self.flagIsNewBlock = 0 + + if self.dataOutObj.flagTimeBlock: + + self.datablock.fill(0) + self.profileIndex = 0 + self.setNextFile() + + if self.profileIndex == 0: + self.getBasicHeader() + + self.datablock[:,self.profileIndex,:] = self.dataOutObj.data + + self.profileIndex += 1 + + if self.hasAllDataInBuffer(): + #if self.flagIsNewFile: + self.writeNextBlock() +# self.getDataHeader() + + if self.flagNoMoreFiles: + #print 'Process finished' + return 0 + + return 1 + + def __getProcessFlags(self): + + processFlags = 0 + + dtype0 = numpy.dtype([('real',' 1: + processFlags += PROCFLAG.COHERENT_INTEGRATION + + return processFlags + + + def __getBlockSize(self): + ''' + Este metodos determina el cantidad de bytes para un bloque de datos de tipo Voltage + ''' + + dtype0 = numpy.dtype([('real',' 1: + processFlags += PROCFLAG.INCOHERENT_INTEGRATION + + if self.dataOutObj.data_dc != None: + processFlags += PROCFLAG.SAVE_CHANNELS_DC + + return processFlags + + + def __getBlockSize(self): + ''' + Este metodos determina el cantidad de bytes para un bloque de datos de tipo Spectra + ''' + + dtype0 = numpy.dtype([('real',' 0: + channelList = [] + for channel in 
range(self.dataOutObj.nChannels): + channelList.append(channel) + channelList.append(channel) + + pairsList = [] + for pair in self.dataOutObj.pairsList: + pairsList.append(pair[0]) + pairsList.append(pair[1]) + spectraComb = channelList + pairsList + spectraComb = numpy.array(spectraComb,dtype="u1") + self.processingHeaderObj.spectraComb = spectraComb + sizeOfSpcComb = len(spectraComb) + processingHeaderSize += sizeOfSpcComb + + if self.dataOutObj.code != None: + self.processingHeaderObj.code = self.dataOutObj.code + self.processingHeaderObj.nCode = self.dataOutObj.nCode + self.processingHeaderObj.nBaud = self.dataOutObj.nBaud + nCodeSize = 4 # bytes + nBaudSize = 4 # bytes + codeSize = 4 # bytes + sizeOfCode = int(nCodeSize + nBaudSize + codeSize * self.dataOutObj.nCode * self.dataOutObj.nBaud) + processingHeaderSize += sizeOfCode + + if self.processingHeaderObj.nWindows != 0: + self.processingHeaderObj.firstHeight = self.dataOutObj.heightList[0] + self.processingHeaderObj.deltaHeight = self.dataOutObj.heightList[1] - self.dataOutObj.heightList[0] + self.processingHeaderObj.nHeights = self.dataOutObj.nHeights + self.processingHeaderObj.samplesWin = self.dataOutObj.nHeights + sizeOfFirstHeight = 4 + sizeOfdeltaHeight = 4 + sizeOfnHeights = 4 + sizeOfWindows = (sizeOfFirstHeight + sizeOfdeltaHeight + sizeOfnHeights)*self.processingHeaderObj.nWindows + processingHeaderSize += sizeOfWindows + + self.processingHeaderObj.size = processingHeaderSize + +class SpectraHeisWriter(): + + i=0 + + def __init__(self, dataOutObj): + + self.wrObj = FITS() + self.dataOutObj = dataOutObj + + def isNumber(str): + """ + Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero. + + Excepciones: + Si un determinado string no puede ser convertido a numero + Input: + str, string al cual se le analiza para determinar si convertible a un numero o no + + Return: + True : si el string es uno numerico + False : no es un string numerico + """ + try: + float( str ) + return True + except: + return False + + def setup(self, wrpath,): + + if not(os.path.exists(wrpath)): + os.mkdir(wrpath) + + self.wrpath = wrpath + self.setFile = 0 + + def putData(self): + # self.wrObj.writeHeader(nChannels=self.dataOutObj.nChannels, nFFTPoints=self.dataOutObj.nFFTPoints) + #name = self.dataOutObj.utctime + name= time.localtime( self.dataOutObj.utctime) + ext=".fits" + #folder='D%4.4d%3.3d'%(name.tm_year,name.tm_yday) + subfolder = 'D%4.4d%3.3d' % (name.tm_year,name.tm_yday) + + doypath = os.path.join( self.wrpath, subfolder ) + if not( os.path.exists(doypath) ): + os.mkdir(doypath) + self.setFile += 1 + file = 'D%4.4d%3.3d%3.3d%s' % (name.tm_year,name.tm_yday,self.setFile,ext) + + filename = os.path.join(self.wrpath,subfolder, file) + + # print self.dataOutObj.ippSeconds + freq=numpy.arange(-1*self.dataOutObj.nHeights/2.,self.dataOutObj.nHeights/2.)/(2*self.dataOutObj.ippSeconds) + + col1=self.wrObj.setColF(name="freq", format=str(self.dataOutObj.nFFTPoints)+'E', array=freq) + col2=self.wrObj.writeData(name="P_Ch1",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[0,:])) + col3=self.wrObj.writeData(name="P_Ch2",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[1,:])) + col4=self.wrObj.writeData(name="P_Ch3",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[2,:])) + 
col5=self.wrObj.writeData(name="P_Ch4",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[3,:])) + col6=self.wrObj.writeData(name="P_Ch5",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[4,:])) + col7=self.wrObj.writeData(name="P_Ch6",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[5,:])) + col8=self.wrObj.writeData(name="P_Ch7",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[6,:])) + col9=self.wrObj.writeData(name="P_Ch8",format=str(self.dataOutObj.nFFTPoints)+'E',data=10*numpy.log10(self.dataOutObj.data_spc[7,:])) + #n=numpy.arange((100)) + n=self.dataOutObj.data_spc[6,:] + a=self.wrObj.cFImage(n) + b=self.wrObj.Ctable(col1,col2,col3,col4,col5,col6,col7,col8,col9) + self.wrObj.CFile(a,b) + self.wrObj.wFile(filename) + return 1 + +class FITS: + + name=None + format=None + array =None + data =None + thdulist=None + + def __init__(self): + + pass + + def setColF(self,name,format,array): + self.name=name + self.format=format + self.array=array + a1=numpy.array([self.array],dtype=numpy.float32) + self.col1 = pyfits.Column(name=self.name, format=self.format, array=a1) + return self.col1 + +# def setColP(self,name,format,data): +# self.name=name +# self.format=format +# self.data=data +# a2=numpy.array([self.data],dtype=numpy.float32) +# self.col2 = pyfits.Column(name=self.name, format=self.format, array=a2) +# return self.col2 + + def writeHeader(self,): + pass + + def writeData(self,name,format,data): + self.name=name + self.format=format + self.data=data + a2=numpy.array([self.data],dtype=numpy.float32) + self.col2 = pyfits.Column(name=self.name, format=self.format, array=a2) + return self.col2 + + def cFImage(self,n): + self.hdu= pyfits.PrimaryHDU(n) + return self.hdu + + def Ctable(self,col1,col2,col3,col4,col5,col6,col7,col8,col9): + self.cols=pyfits.ColDefs( [col1,col2,col3,col4,col5,col6,col7,col8,col9]) + self.tbhdu = pyfits.new_table(self.cols) + return self.tbhdu + + def CFile(self,hdu,tbhdu): + self.thdulist=pyfits.HDUList([hdu,tbhdu]) + + def wFile(self,filename): + self.thdulist.writeto(filename) \ No newline at end of file diff --git a/schainpy/Model/JROHeaderIO.py b/schainpy/Model/JROHeaderIO.py new file mode 100644 index 0000000..c2f31c5 --- /dev/null +++ b/schainpy/Model/JROHeaderIO.py @@ -0,0 +1,505 @@ +''' + +$Author: murco $ +$Id: JROHeaderIO.py 151 2012-10-31 19:00:51Z murco $ +''' + +import numpy +import copy + +class Header: + + def __init__(self): + raise + + def copy(self): + return copy.deepcopy(self) + + def read(): + pass + + def write(): + pass + +class BasicHeader(Header): + + size = None + version = None + dataBlock = None + utc = None + miliSecond = None + timeZone = None + dstFlag = None + errorCount = None + struct = None + + def __init__(self): + + self.size = 0 + self.version = 0 + self.dataBlock = 0 + self.utc = 0 + self.miliSecond = 0 + self.timeZone = 0 + self.dstFlag = 0 + self.errorCount = 0 + self.struct = numpy.dtype([ + ('nSize',' 0: + fp.seek(jumpFp) + + except: + return 0 + + return 1 + + def write(self, fp): + headerTuple = (self.size, + self.expType, + self.nTx, + self.ipp, + self.txA, + self.txB, + self.nWindows, + self.numTaus, + self.codeType, + self.line6Function, + self.line5Function, + self.fClock, + self.prePulseBefore, + self.prePulserAfter, + self.rangeIpp, + self.rangeTxA, + self.rangeTxB) + + header = numpy.array(headerTuple,self.struct) + header.tofile(fp) + + dynamic = 
self.dynamic + dynamic.tofile(fp) + + return 1 + + + +class ProcessingHeader(Header): + + size = None + dtype = None + blockSize = None + profilesPerBlock = None + dataBlocksPerFile = None + nWindows = None + processFlags = None + nCohInt = None + nIncohInt = None + totalSpectra = None + struct = None + flag_dc = None + flag_cspc = None + + def __init__(self): + self.size = 0 + self.dtype = 0 + self.blockSize = 0 + self.profilesPerBlock = 0 + self.dataBlocksPerFile = 0 + self.nWindows = 0 + self.processFlags = 0 + self.nCohInt = 0 + self.nIncohInt = 0 + self.totalSpectra = 0 + self.struct = numpy.dtype([ + ('nSize',' 0: + self.flag_cspc = True + + except: + return 0 + + return 1 + + def write(self, fp): + headerTuple = (self.size, + self.dtype, + self.blockSize, + self.profilesPerBlock, + self.dataBlocksPerFile, + self.nWindows, + self.processFlags, + self.nCohInt, + self.nIncohInt, + self.totalSpectra) + + header = numpy.array(headerTuple,self.struct) + header.tofile(fp) + + if self.nWindows != 0: + sampleWindowTuple = (self.firstHeight,self.deltaHeight,self.samplesWin) + samplingWindow = numpy.array(sampleWindowTuple,self.structSamplingWindow) + samplingWindow.tofile(fp) + + + if self.totalSpectra != 0: + spectraComb = numpy.array([],numpy.dtype('u1')) + spectraComb = self.spectraComb + spectraComb.tofile(fp) + + + if self.processFlags & PROCFLAG.DEFINE_PROCESS_CODE == PROCFLAG.DEFINE_PROCESS_CODE: + nCode = self.nCode #Probar con un dato que almacene codigo, hasta el momento no se hizo la prueba + nCode.tofile(fp) + + nBaud = self.nBaud + nBaud.tofile(fp) + + code = self.code.reshape(nCode*nBaud) + code.tofile(fp) + + return 1 + +class RCfunction: + NONE=0 + FLIP=1 + CODE=2 + SAMPLING=3 + LIN6DIV256=4 + SYNCHRO=5 + +class nCodeType: + NONE=0 + USERDEFINE=1 + BARKER2=2 + BARKER3=3 + BARKER4=4 + BARKER5=5 + BARKER7=6 + BARKER11=7 + BARKER13=8 + AC128=9 + COMPLEMENTARYCODE2=10 + COMPLEMENTARYCODE4=11 + COMPLEMENTARYCODE8=12 + COMPLEMENTARYCODE16=13 + COMPLEMENTARYCODE32=14 + COMPLEMENTARYCODE64=15 + COMPLEMENTARYCODE128=16 + CODE_BINARY28=17 + +class PROCFLAG: + COHERENT_INTEGRATION = numpy.uint32(0x00000001) + DECODE_DATA = numpy.uint32(0x00000002) + SPECTRA_CALC = numpy.uint32(0x00000004) + INCOHERENT_INTEGRATION = numpy.uint32(0x00000008) + POST_COHERENT_INTEGRATION = numpy.uint32(0x00000010) + SHIFT_FFT_DATA = numpy.uint32(0x00000020) + + DATATYPE_CHAR = numpy.uint32(0x00000040) + DATATYPE_SHORT = numpy.uint32(0x00000080) + DATATYPE_LONG = numpy.uint32(0x00000100) + DATATYPE_INT64 = numpy.uint32(0x00000200) + DATATYPE_FLOAT = numpy.uint32(0x00000400) + DATATYPE_DOUBLE = numpy.uint32(0x00000800) + + DATAARRANGE_CONTIGUOUS_CH = numpy.uint32(0x00001000) + DATAARRANGE_CONTIGUOUS_H = numpy.uint32(0x00002000) + DATAARRANGE_CONTIGUOUS_P = numpy.uint32(0x00004000) + + SAVE_CHANNELS_DC = numpy.uint32(0x00008000) + DEFLIP_DATA = numpy.uint32(0x00010000) + DEFINE_PROCESS_CODE = numpy.uint32(0x00020000) + + ACQ_SYS_NATALIA = numpy.uint32(0x00040000) + ACQ_SYS_ECHOTEK = numpy.uint32(0x00080000) + ACQ_SYS_ADRXD = numpy.uint32(0x000C0000) + ACQ_SYS_JULIA = numpy.uint32(0x00100000) + ACQ_SYS_XXXXXX = numpy.uint32(0x00140000) + + EXP_NAME_ESP = numpy.uint32(0x00200000) + CHANNEL_NAMES_ESP = numpy.uint32(0x00400000) + + OPERATION_MASK = numpy.uint32(0x0000003F) + DATATYPE_MASK = numpy.uint32(0x00000FC0) + DATAARRANGE_MASK = numpy.uint32(0x00007000) + ACQ_SYS_MASK = numpy.uint32(0x001C0000) \ No newline at end of file diff --git a/schainpy/Model/JROPlot.py b/schainpy/Model/JROPlot.py new file mode 100644 
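The PROCFLAG constants defined at the end of JROHeaderIO.py are bit fields packed into the 32-bit processFlags word of the processing header: each operation is a single bit, while the *_MASK values isolate multi-bit fields such as the data type and the acquisition system. A minimal sketch of how such a word can be decoded, assuming JROHeaderIO.py is importable from the working directory (the helper name decode_process_flags is illustrative only):

import numpy
from JROHeaderIO import PROCFLAG   # assumes the module added in this patch is on sys.path

def decode_process_flags(processFlags):
    flags = numpy.uint32(processFlags)
    # single-bit operations are tested the way ProcessingHeader.write tests DEFINE_PROCESS_CODE
    decoded = (flags & PROCFLAG.DECODE_DATA) == PROCFLAG.DECODE_DATA
    spectra = (flags & PROCFLAG.SPECTRA_CALC) == PROCFLAG.SPECTRA_CALC
    # multi-bit fields are isolated with their mask before comparing against a value
    datatype_bits = flags & PROCFLAG.DATATYPE_MASK   # e.g. equals PROCFLAG.DATATYPE_FLOAT
    acq_sys_bits = flags & PROCFLAG.ACQ_SYS_MASK     # e.g. equals PROCFLAG.ACQ_SYS_JULIA
    return decoded, spectra, datatype_bits, acq_sys_bits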
index 0000000..e69de29 --- /dev/null +++ b/schainpy/Model/JROPlot.py diff --git a/schainpy/Model/JROProcessing.py b/schainpy/Model/JROProcessing.py new file mode 100644 index 0000000..284eaa4 --- /dev/null +++ b/schainpy/Model/JROProcessing.py @@ -0,0 +1,423 @@ +''' + +$Author: dsuarez $ +$Id: Processor.py 1 2012-11-12 18:56:07Z dsuarez $ +''' +import os +import numpy +import datetime +import time + +from JROData import * +from JRODataIO import * +from JROPlot import * + +class ProcessingUnit: + + """ + Esta es la clase base para el procesamiento de datos. + + Contiene el metodo "call" para llamar operaciones. Las operaciones pueden ser: + - Metodos internos (callMethod) + - Objetos del tipo Operation (callObject). Antes de ser llamados, estos objetos + tienen que ser agreagados con el metodo "add". + + """ + # objeto de datos de entrada (Voltage, Spectra o Correlation) + dataIn = None + + # objeto de datos de entrada (Voltage, Spectra o Correlation) + dataOut = None + + + objectDict = None + + def __init__(self): + + self.objectDict = {} + + def addOperation(self, object, objId): + + """ + Agrega el objeto "object" a la lista de objetos "self.objectList" y retorna el + identificador asociado a este objeto. + + Input: + + object : objeto de la clase "Operation" + + Return: + + objId : identificador del objeto, necesario para ejecutar la operacion + """ + + self.object[objId] = object + + return objId + + def operation(self, **kwargs): + + """ + Operacion directa sobre la data (dataout.data). Es necesario actualizar los valores de los + atributos del objeto dataOut + + Input: + + **kwargs : Diccionario de argumentos de la funcion a ejecutar + """ + + if self.dataIn.isEmpty(): + return None + + raise ValueError, "ImplementedError" + + def callMethod(self, name, **kwargs): + + """ + Ejecuta el metodo con el nombre "name" y con argumentos **kwargs de la propia clase. + + Input: + name : nombre del metodo a ejecutar + + **kwargs : diccionario con los nombres y valores de la funcion a ejecutar. + + """ + + if self.dataIn.isEmpty(): + return None + + methodToCall = getattr(self, name) + + methodToCall(**kwargs) + + def callObject(self, objId, **kwargs): + + """ + Ejecuta la operacion asociada al identificador del objeto "objId" + + Input: + + objId : identificador del objeto a ejecutar + + **kwargs : diccionario con los nombres y valores de la funcion a ejecutar. + + Return: + + None + """ + + if self.dataIn.isEmpty(): + return None + + object = self.objectList[objId] + + object.run(self.dataOut, **kwargs) + + def call(self, operation, **kwargs): + + """ + Ejecuta la operacion "operation" con los argumentos "**kwargs". La operacion puede + ser de dos tipos: + + 1. Un metodo propio de esta clase: + + operation.type = "self" + + 2. El metodo "run" de un objeto del tipo Operation o de un derivado de ella: + operation.type = "other". + + Este objeto de tipo Operation debe de haber sido agregado antes con el metodo: + "addOperation" e identificado con el operation.id + + + con el id de la operacion. + """ + if self.dataIn.isEmpty(): + return None + + if operation.type == 'self': + self.callMethod(operation.name, **kwargs) + return + + if operation.type == 'other': + self.callObject(operation.id, **kwargs) + return + +class Operation(): + + """ + Clase base para definir las operaciones adicionales que se pueden agregar a la clase ProcessingUnit + y necesiten acumular información previa de los datos a procesar. 
De preferencia usar un buffer de + acumulacion dentro de esta clase + + Ejemplo: Integraciones coherentes, necesita la información previa de los n perfiles anteriores (bufffer) + + """ + + __buffer = None + + def __init__(self): + + pass + + def run(self, dataIn, **kwargs): + + """ + Realiza las operaciones necesarias sobre la dataIn.data y actualiza los atributos del objeto dataIn. + + Input: + + dataIn : objeto del tipo JROData + + Return: + + None + + Affected: + __buffer : buffer de recepcion de datos. + + """ + + raise ValueError, "ImplementedError" + +class VoltageProc(ProcessingUnit): + + + def __init__(self): + + pass + + def setup(self, dataInObj=None, dataOutObj=None): + + self.dataInObj = dataInObj + + if self.dataOutObj == None: + dataOutObj = Voltage() + + self.dataOutObj = dataOutObj + + return self.dataOutObj + + def init(self): + + if self.dataInObj.isEmpty(): + return 0 + + self.dataOutObj.copy(self.dataInObj) + # No necesita copiar en cada init() los atributos de dataInObj + # la copia deberia hacerse por cada nuevo bloque de datos + + def selectChannels(self, channelList): + + if self.dataInObj.isEmpty(): + return 0 + + self.selectChannelsByIndex(channelList) + + def selectChannelsByIndex(self, channelIndexList): + """ + Selecciona un bloque de datos en base a canales segun el channelIndexList + + Input: + channelIndexList : lista sencilla de canales a seleccionar por ej. [2,3,7] + + Affected: + self.dataOutObj.data + self.dataOutObj.channelIndexList + self.dataOutObj.nChannels + self.dataOutObj.m_ProcessingHeader.totalSpectra + self.dataOutObj.systemHeaderObj.numChannels + self.dataOutObj.m_ProcessingHeader.blockSize + + Return: + None + """ + + for channel in channelIndexList: + if channel not in self.dataOutObj.channelIndexList: + raise ValueError, "The value %d in channelIndexList is not valid" %channel + + nChannels = len(channelIndexList) + + data = self.dataOutObj.data[channelIndexList,:] + + self.dataOutObj.data = data + self.dataOutObj.channelIndexList = channelIndexList + self.dataOutObj.channelList = [self.dataOutObj.channelList[i] for i in channelIndexList] + self.dataOutObj.nChannels = nChannels + + return 1 + +class CohInt(Operation): + + __profIndex = 0 + __withOverapping = False + + __byTime = False + __initime = None + __integrationtime = None + + __buffer = None + + __dataReady = False + + nCohInt = None + + + def __init__(self): + + pass + + def setup(self, nCohInt=None, timeInterval=None, overlapping=False): + """ + Set the parameters of the integration class. + + Inputs: + + nCohInt : Number of coherent integrations + timeInterval : Time of integration. If the parameter "nCohInt" is selected this one does not work + overlapping : + + """ + + self.__initime = None + self.__buffer = None + self.__dataReady = False + + + if nCohInt == None and timeInterval == None: + raise ValueError, "nCohInt or timeInterval should be specified ..." + + if nCohInt != None: + self.nCohInt = nCohInt + self.__byTime = False + else: + self.__integrationtime = timeInterval * 60. 
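At this point setup() switches to time-bounded integration: timeInterval appears to be given in minutes (hence the * 60 conversion to seconds), and nCohInt is set to the 9999 sentinel just below because the number of summed profiles is then decided by elapsed data time rather than by a fixed count. A minimal usage sketch of the two configurations, assuming CohInt can be imported from this module:

from JROProcessing import CohInt   # assumes this module is on sys.path

# integrate a fixed number of profiles, with no overlapping buffer
byCountObj = CohInt()
byCountObj.setup(nCohInt=100, overlapping=False)

# integrate everything received within 0.5 minutes (30 s) of data time
byTimeObj = CohInt()
byTimeObj.setup(timeInterval=0.5)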
#if (type(timeInterval)!=integer) -> change this line + self.nCohInt = 9999 + self.__byTime = True + + if overlapping: + self.__withOverapping = True + self.__buffer = None + else: + self.__withOverapping = False + self.__buffer = 0 + + self.__profIndex = 0 + + def putData(self, data): + + """ + Add a profile to the __buffer and increase in one the __profileIndex + + """ + if self.__initime == None: + self.__initime = datatime + + if not self.__withOverapping: + self.__buffer += data + self.__profIndex += 1 + return + + #Overlapping data + nChannels, nHeis = data.shape + data = numpy.reshape(data, (1, nChannels, nHeis)) + + #If the buffer is empty then it takes the data value + if self.__buffer == None: + self.__buffer = data + self.__profIndex += 1 + return + + #If the buffer length is lower than nCohInt then stakcing the data value + if self.__profIndex < self.nCohInt: + self.__buffer = numpy.vstack((self.__buffer, data)) + self.__profIndex += 1 + return + + #If the buffer length is equal to nCohInt then replacing the last buffer value with the data value + self.__buffer = numpy.roll(self.__buffer, -1, axis=0) + self.__buffer[self.nCohInt-1] = data + self.__profIndex = self.nCohInt + return + + + def pushData(self): + """ + Return the sum of the last profiles and the profiles used in the sum. + + Affected: + + self.__profileIndex + + """ + + self.__initime = None + + if not self.__withOverapping: + data = self.__buffer + nCohInt = self.__profIndex + + self.__buffer = 0 + self.__profIndex = 0 + + return data, nCohInt + + #Integration with Overlapping + data = numpy.sum(self.__buffer, axis=0) + nCohInt = self.__profIndex + + return data, nCohInt + + def byProfiles(self, data): + + self.__dataReady = False + avg_data = None + + self.putData(data) + + if self.__profIndex == self.nCohInt: + + avgdata, nCohInt = self.pushData() + self.__dataReady = True + + return avgdata, nCohInt + + def byTime(self, data, datatime): + + self.__dataReady = False + avg_data = None + + self.putData(data) + + if (datatime - self.__initime) >= self.__integrationtime: + avgdata, nCohInt = self.pushData() + self.nCohInt = nCohInt + self.__dataReady = True + + return avgdata, nCohInt + + def integrate(self, data, datatime=None): + + if not self.__byTime: + avg_data = self.byProfiles(data) + else: + avg_data = self.byTime(data, datatime) + + self.data = avg_data + + + def run(self, dataOut, nCohInt=None, timeInterval=None, overlapping=False): + + +# self.dataOutObj.timeInterval *= nCohInt + self.dataOutObj.flagNoData = True + + if myCohIntObj.__dataReady: + self.dataOutObj.data = myCohIntObj.data + self.dataOutObj.timeInterval *= myCohIntObj.nCohInt + self.dataOutObj.nCohInt = myCohIntObj.nCohInt * self.dataInObj.nCohInt + self.dataOutObj.utctime = myCohIntObj.firstdatatime + self.dataOutObj.flagNoData = False + + return avg_data \ No newline at end of file diff --git a/schainpy/TestSpectraHeis.py b/schainpy/TestSpectraHeis.py new file mode 100644 index 0000000..bbff1f8 --- /dev/null +++ b/schainpy/TestSpectraHeis.py @@ -0,0 +1,104 @@ +''' +Created on Jul 31, 2012 + +@author $Author$ +@version $Id$ +''' + +import os, sys +import time, datetime +#import pylab as pl + +from Data.JROData import Voltage +from IO.VoltageIO import * + +from Data.JROData import SpectraHeis +from IO.SpectraIO import * + +from Processing.VoltageProcessor import * +from Processing.SpectraProcessor import * + +#from Graphics.BaseGraph_mpl import LinearPlot + +class TestHeis(): + i=None + def __init__(self): + self.setValues() + 
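Looking back at CohInt above: in overlapping mode the class keeps the last nCohInt profiles in a (nCohInt, nChannels, nHeis) stack. Profiles are appended with numpy.vstack until the stack is full, after which numpy.roll shifts the oldest profile out, the newest one overwrites the last slot, and pushData() returns the sum along the profile axis. A minimal self-contained sketch of that ring-buffer accumulation (the function names are illustrative only):

import numpy

def push_profile(stack, profile, nCohInt):
    # profile has shape (nChannels, nHeis); the stack holds at most nCohInt profiles
    if stack is None:
        return profile[numpy.newaxis]                         # first profile starts the stack
    if stack.shape[0] < nCohInt:
        return numpy.vstack((stack, profile[numpy.newaxis]))  # still filling up
    stack = numpy.roll(stack, -1, axis=0)                     # full: shift the oldest profile out...
    stack[-1] = profile                                       # ...and store the newest in the last slot
    return stack

def coherent_sum(stack):
    # coherent integration: sum of the stored profiles along the profile axis
    return numpy.sum(stack, axis=0)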
self.createObjects() + self.testSChain() + self.i=0 + + def setValues( self ): + + self.path="/home/roj-idl71/data" + self.path = "/Data/Data/RAWDATA/ASTRONOMIA" + + #self.path = "" + self.startDate = datetime.date(2012,4,1) + self.endDate = datetime.date(2012,12,30) + + self.startTime = datetime.time(0,0,0) + self.endTime = datetime.time(23,0,0) + + + def createObjects( self ): + + self.readerObj = VoltageReader() + self.specProcObj = SpectraHeisProcessor() + + self.voltObj1 = self.readerObj.setup( + path = self.path, + startDate = self.startDate, + endDate = self.endDate, + startTime = self.startTime, + endTime = self.endTime, + expLabel = '', + online = 1) + + if not(self.voltObj1): + sys.exit(0) + + self.specObj1 = self.specProcObj.setup(dataInObj = self.voltObj1,nFFTPoints=self.voltObj1.nHeights) + + +# + +# + + def testSChain( self ): + + ini = time.time() + counter = 0 + while(True): + self.readerObj.getData() + self.specProcObj.init() + + self.specProcObj.integrator(N=32) ## return self.dataOutObj + + + + + self.specProcObj.plotScope(idfigure=1, + wintitle='test plot library', + driver='plplot', + minvalue = 30000.0, + maxvalue = 5000000.0, + save=False, + gpath="/home/roj-idl71/PlotImage") + + + if self.readerObj.flagNoMoreFiles: + break + + + + if self.readerObj.flagIsNewBlock: + print 'Block No %04d, Time: %s' %(self.readerObj.nTotalBlocks, + datetime.datetime.fromtimestamp(self.readerObj.basicHeaderObj.utc),) + + + +if __name__ == '__main__': + TestHeis() + + \ No newline at end of file diff --git a/schainpy/testSchainExp.py b/schainpy/testSchainExp.py new file mode 100644 index 0000000..9afae63 --- /dev/null +++ b/schainpy/testSchainExp.py @@ -0,0 +1,120 @@ +''' + +$Author: murco $ +$Id: testSchainExp.py 158 2012-11-08 21:31:03Z murco $ +''' +import os, sys +import time, datetime + +path = os.path.split(os.getcwd())[0] +sys.path.append(path) + +from Data.JROData import Voltage +from IO.VoltageIO import * + +from Processing.VoltageProcessor import * +from Processing.SpectraProcessor import * + +class TestSChain(): + + def __init__(self): + self.setValues() + self.createObjects() + self.testSChain() + + def setValues(self): + self.path = "/home/roj-idl71/Data/RAWDATA/Meteors" + self.path = "/remote/puma/2012_06/Meteors" + + self.startDate = datetime.date(2012,06,19) + self.endDate = datetime.date(2012,12,30) + + self.startTime = datetime.time(11,0,0) + self.endTime = datetime.time(23,59,59) + + self.nFFTPoints = 32 + + self.wrpath = "/home/roj-idl71/tmp/results" + self.profilesPerBlock = 40 + self.blocksPerFile = 50 + + def createObjects(self): + + self.readerObj = VoltageReader() + self.voltProcObj = VoltageProcessor() + self.specProcObj = SpectraProcessor() + + self.voltObj1 = self.readerObj.setup( + path = self.path, + startDate = self.startDate, + endDate = self.endDate, + startTime = self.startTime, + endTime = self.endTime, + expLabel = '', + online = True) + + self.voltObj2 = self.voltProcObj.setup(dataInObj = self.voltObj1) + self.specObj1 = self.specProcObj.setup(dataInObj = self.voltObj2, nFFTPoints = self.nFFTPoints) + + def testSChain(self): + + ini = time.time() + + while(True): + self.readerObj.getData() + + self.voltProcObj.init() + + self.voltProcObj.integrator(25, overlapping=False) +# +# self.voltProcObj.writeData(self.wrpath,self.profilesPerBlock,self.blocksPerFile) + self.voltProcObj.selectChannels([0,1,2]) + +# self.voltProcObj.plotScope(idfigure=0, +# wintitle='test plot library', +# driver='plplot', +# save=False, +# gpath=None, +# type="power") + +# 
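Both test scripts wire the stages the same way: each processor's setup() receives the output data object of the previous stage and returns its own output object, and the per-block loop then calls getData() on the reader followed by init() and the chosen operations on each processor, until flagNoMoreFiles is set. A minimal sketch of that wiring, using the same classes and star imports as the scripts in this patch (the helper name build_chain is illustrative only):

from IO.VoltageIO import *                 # VoltageReader
from Processing.VoltageProcessor import *  # VoltageProcessor
from Processing.SpectraProcessor import *  # SpectraProcessor

def build_chain(path, startDate, endDate, startTime, endTime, nFFTPoints):
    readerObj = VoltageReader()
    voltProcObj = VoltageProcessor()
    specProcObj = SpectraProcessor()

    voltObj1 = readerObj.setup(path=path,
                               startDate=startDate, endDate=endDate,
                               startTime=startTime, endTime=endTime,
                               expLabel='', online=True)      # raw voltage blocks
    voltObj2 = voltProcObj.setup(dataInObj=voltObj1)          # voltage stage consumes voltObj1
    specObj1 = specProcObj.setup(dataInObj=voltObj2,          # spectra stage consumes voltObj2
                                 nFFTPoints=nFFTPoints)
    return readerObj, voltProcObj, specProcObj

# per-block loop, as in testSChain():
#   readerObj.getData(); voltProcObj.init(); specProcObj.init(); ...
#   stop when readerObj.flagNoMoreFiles is set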
self.voltProcObj.plotRti(idfigure=1, +# starttime=self.startTime, +# endtime=self.endTime, +# minvalue=0, +# maxvalue=50, +# wintitle='', +# driver='plplot', +# colormap='jet', +# colorbar=True, +# showprofile=False, +# xrangestep=2, +# save=False, +# gpath=None) +# +# if self.voltProcObj.dataOutObj.flagNoData ==False: +# print self.readerObj.dataOutObj.nProfiles + + self.specProcObj.init() + + self.specProcObj.plotSpc(idfigure=2, + minvalue=30, + maxvalue=70, + wintitle='Spectra', + driver='plplot', + colormap='jet', + colorbar=True, + showprofile=True, + save=False, + gpath=None) + + if self.readerObj.flagNoMoreFiles: + break + + if self.readerObj.flagIsNewBlock: +# print 'Block No %04d, Time: %s' %(self.readerObj.nTotalBlocks, datetime.datetime.fromtimestamp(self.readerObj.basicHeaderObj.utc),) + print 'Block No %04d, Time: %s' %(self.readerObj.nTotalBlocks, + datetime.datetime.fromtimestamp(self.readerObj.basicHeaderObj.utc + self.readerObj.basicHeaderObj.miliSecond/1000.0),) + + +if __name__ == '__main__': + TestSChain() \ No newline at end of file diff --git a/schainpy/testSchainSpecExp.py b/schainpy/testSchainSpecExp.py new file mode 100644 index 0000000..61b9eec --- /dev/null +++ b/schainpy/testSchainSpecExp.py @@ -0,0 +1,85 @@ +''' + +$Author: murco $ +$Id: testSchainSpecExp.py 147 2012-10-30 22:50:56Z murco $ +''' + +import os, sys +import time, datetime + +path = os.path.split(os.getcwd())[0] +sys.path.append(path) + + +from Data.JROData import Spectra +from IO.SpectraIO import * +from Processing.SpectraProcessor import * + + + +class TestSChain: + + def __init__(self): + self.setValues() + self.createObjects() + self.testSChain() + + def setValues(self): +# self.path = "/Users/jro/Documents/RadarData/MST_ISR/MST" +## self.path = "/home/roj-idl71/Data/RAWDATA/IMAGING" +# self.path = "/Users/danielangelsuarezmunoz/Data/EW_Drifts" +# self.path = "/Users/danielangelsuarezmunoz/Data/IMAGING" + self.path = "/home/daniel/RadarData/IMAGING" + + self.startDate = datetime.date(2012,3,1) + self.endDate = datetime.date(2012,3,30) + + self.startTime = datetime.time(0,0,0) + self.endTime = datetime.time(14,1,1) + + # paramatros para Escritura de Pdata + self.wrpath = "/home/daniel/RadarData/test_wr2" + self.blocksPerFile = 5 + + + + def createObjects(self): + + self.readerObj = SpectraReader() + + self.specObj1 = self.readerObj.setup( + path = self.path, + startDate = self.startDate, + endDate = self.endDate, + startTime = self.startTime, + endTime = self.endTime, + expLabel = '', + online = 0) + + self.specObjProc = SpectraProcessor() + + self.specObj2 = self.specObjProc.setup(dataInObj = self.specObj1) + + + + def testSChain(self): + + ini = time.time() + + while(True): + self.readerObj.getData() + + self.specObjProc.init() + + self.specObjProc.writeData(self.wrpath,self.blocksPerFile) +# + if self.readerObj.flagNoMoreFiles: + break + + if self.readerObj.flagIsNewBlock: + print 'Block No %04d, Time: %s' %(self.readerObj.nTotalBlocks, + datetime.datetime.fromtimestamp(self.readerObj.basicHeaderObj.utc)) + + +if __name__ == '__main__': + TestSChain() \ No newline at end of file
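The block timestamps printed by these test scripts are built from the basic header: utc holds whole seconds since the epoch and miliSecond carries the sub-second remainder (testSchainExp.py adds the two, testSchainSpecExp.py prints the whole seconds only). A minimal sketch of that conversion, assuming a basicHeaderObj with the utc and miliSecond attributes defined in JROHeaderIO.py:

import datetime

def block_datetime(basicHeaderObj):
    # whole seconds plus the millisecond remainder kept in a separate header field
    seconds = basicHeaderObj.utc + basicHeaderObj.miliSecond / 1000.0
    return datetime.datetime.fromtimestamp(seconds)

# e.g.
#   print 'Block No %04d, Time: %s' % (readerObj.nTotalBlocks,
#                                      block_datetime(readerObj.basicHeaderObj))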