'''
$Author: dsuarez $
$Id: Processor.py 1 2012-11-12 18:56:07Z dsuarez $
'''
import os
import numpy
import datetime
import time

from jrodata import *
from jrodataIO import *
from jroplot import *


class ProcessingUnit:
    """
    Base class for data processing units.
    It provides the "call" method used to invoke operations. Operations can be:
        - Methods of this class itself (callMethod)
        - Objects of type Operation (callObject). Before being called, these objects
          must have been added with the "addOperation" method.
    """
    # input data object (Voltage, Spectra or Correlation)
    dataIn = None

    # output data object (Voltage, Spectra or Correlation)
    dataOut = None

    objectDict = None

    def __init__(self):

        self.objectDict = {}

    def init(self):

        raise NotImplementedError

    def addOperation(self, object, objId):
        """
        Adds the object "object" to the dictionary "self.objectDict" and returns the
        identifier associated with it.
        Input:
            object : object of the "Operation" class
        Return:
            objId : object identifier, needed later to execute the operation
        """
        self.objectDict[objId] = object

        return objId

    def operation(self, **kwargs):
        """
        Direct operation on the data (dataOut.data). The implementation must update the
        attributes of the dataOut object.
        Input:
            **kwargs : dictionary of arguments of the function to execute
        """
        raise NotImplementedError

    def callMethod(self, name, **kwargs):
        """
        Executes the method of this class whose name is "name", with the arguments **kwargs.
        Input:
            name : name of the method to execute
            **kwargs : dictionary with the argument names and values of the method
        """
        if name != 'run':

            if name == 'init' and self.dataIn.isEmpty():
                self.dataOut.flagNoData = True
                return False

            if name != 'init' and self.dataOut.isEmpty():
                return False

        methodToCall = getattr(self, name)

        methodToCall(**kwargs)

        if name != 'run':
            return True

        if self.dataOut.isEmpty():
            return False

        return True

    def callObject(self, objId, **kwargs):
        """
        Executes the operation associated with the object identifier "objId".
        Input:
            objId : identifier of the object to execute
            **kwargs : dictionary with the argument names and values of the operation
        Return:
            False if dataOut is empty, True otherwise
        """
        if self.dataOut.isEmpty():
            return False

        object = self.objectDict[objId]

        object.run(self.dataOut, **kwargs)

        return True

    def call(self, operationConf, **kwargs):
        """
        Returns True if the operation "operationConf.name" was executed with the arguments
        "**kwargs", and False if the operation was not executed.
        The operation can be of two types:

        1. A method of this class:
            operation.type = "self"

        2. The "run" method of an object of type Operation (or of a subclass of it):
            operation.type = "other".
            This Operation object must have been added beforehand with the "addOperation"
            method, using operation.id as its identifier.

        Input:
            operationConf : operation object with the attributes: name, type and id.
        """
        if operationConf.type == 'self':
            sts = self.callMethod(operationConf.name, **kwargs)

        if operationConf.type == 'other':
            sts = self.callObject(operationConf.id, **kwargs)

        return sts

    def setInput(self, dataIn):

        self.dataIn = dataIn

    def getOutput(self):

        return self.dataOut
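
# The following is a minimal usage sketch, not part of the original module, showing how a
# ProcessingUnit is typically driven from a controller. The reader object "readerObj" is a
# hypothetical stand-in for whatever produces the input data objects:
#
#     procUnit = VoltageProc()
#     procUnit.addOperation(CohInt(), objId=1)
#
#     while readerObj.getData():
#         procUnit.setInput(readerObj.getOutput())
#         procUnit.callMethod('init')            # an operation with type "self"
#         procUnit.callObject(1, n=8)            # an operation with type "other"
#         voltageOut = procUnit.getOutput()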


class Operation():
    """
    Base class for the additional operations that can be added to a ProcessingUnit and that
    need to accumulate information from previously processed data. Preferably keep an
    accumulation buffer inside this class.

    Example: coherent integration needs the information of the previous n profiles (buffer).
    """
    __buffer = None
    __isConfig = False

    def __init__(self):

        pass

    def run(self, dataIn, **kwargs):
        """
        Performs the required operations on dataIn.data and updates the attributes of the
        dataIn object.
        Input:
            dataIn : object of type JROData
        Return:
            None
        Affected:
            __buffer : data reception buffer.
        """
        raise NotImplementedError


class VoltageProc(ProcessingUnit):

    def __init__(self):

        self.objectDict = {}
        self.dataOut = Voltage()

    def init(self):

        self.dataOut.copy(self.dataIn)
        # It is not necessary to copy the dataIn attributes on every init();
        # the copy should be made once per new data block

    def selectChannels(self, channelList):

        channelIndexList = []

        for channel in channelList:
            index = self.dataOut.channelList.index(channel)
            channelIndexList.append(index)

        self.selectChannelsByIndex(channelIndexList)

    def selectChannelsByIndex(self, channelIndexList):
        """
        Selects a block of data by channel, according to channelIndexList.
        Input:
            channelIndexList : plain list of channel indexes to select, e.g. [2,3,7]
        Affected:
            self.dataOut.data
            self.dataOut.channelIndexList
            self.dataOut.nChannels
            self.dataOut.m_ProcessingHeader.totalSpectra
            self.dataOut.systemHeaderObj.numChannels
            self.dataOut.m_ProcessingHeader.blockSize
        Return:
            None
        """
        for channelIndex in channelIndexList:
            if channelIndex not in self.dataOut.channelIndexList:
                print channelIndexList
                raise ValueError, "The value %d in channelIndexList is not valid" %channelIndex

        nChannels = len(channelIndexList)

        data = self.dataOut.data[channelIndexList,:]

        self.dataOut.data = data
        self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
        # self.dataOut.nChannels = nChannels

        return 1

    def selectHeights(self, minHei, maxHei):
        """
        Selects a block of data by height, according to the range
        minHei <= height <= maxHei
        Input:
            minHei : minimum height value to consider
            maxHei : maximum height value to consider
        Affected:
            Several values are changed indirectly through the selectHeightsByIndex method
        Return:
            1 if the method executed successfully, otherwise it raises an exception
        """
        if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei):
            raise ValueError, "some value in (%d,%d) is not valid" % (minHei, maxHei)

        if (maxHei > self.dataOut.heightList[-1]):
            maxHei = self.dataOut.heightList[-1]
            # raise ValueError, "some value in (%d,%d) is not valid" % (minHei, maxHei)

        minIndex = 0
        maxIndex = 0
        data = self.dataOut.heightList

        for i, val in enumerate(data):
            if val < minHei:
                continue
            else:
                minIndex = i
                break

        for i, val in enumerate(data):
            if val <= maxHei:
                maxIndex = i
            else:
                break

        self.selectHeightsByIndex(minIndex, maxIndex)

        return 1

    def selectHeightsByIndex(self, minIndex, maxIndex):
        """
        Selects a block of data by height index, according to the range
        minIndex <= index <= maxIndex
        Input:
            minIndex : minimum height index to consider
            maxIndex : maximum height index to consider
        Affected:
            self.dataOut.data
            self.dataOut.heightList
        Return:
            1 if the method executed successfully, otherwise it raises an exception
        """
        if (minIndex < 0) or (minIndex > maxIndex):
            raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)

        if (maxIndex >= self.dataOut.nHeights):
            maxIndex = self.dataOut.nHeights-1
            # raise ValueError, "some value in (%d,%d) is not valid" % (minIndex, maxIndex)

        nHeights = maxIndex - minIndex + 1

        # voltage
        data = self.dataOut.data[:, minIndex:maxIndex+1]
        firstHeight = self.dataOut.heightList[minIndex]

        self.dataOut.data = data
        self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex+1]

        return 1
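
    # A brief usage sketch (illustrative only; "voltProc" and the height limits below are
    # arbitrary example values, not defaults of this class):
    #
    #     voltProc.selectChannels([0, 1, 2])        # keep channels 0, 1 and 2
    #     voltProc.selectHeights(80.0, 600.0)       # keep heights between 80 and 600 km
    #
    # selectHeights() scans self.dataOut.heightList for the first height >= minHei and the
    # last height <= maxHei, then delegates the actual slicing to selectHeightsByIndex().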

    def filterByHeights(self, window):

        deltaHeight = self.dataOut.heightList[1] - self.dataOut.heightList[0]

        if window == None:
            window = self.dataOut.radarControllerHeaderObj.txA / deltaHeight

        newdelta = deltaHeight * window
        r = self.dataOut.data.shape[1] % window
        buffer = self.dataOut.data[:, 0:self.dataOut.data.shape[1]-r]
        buffer = buffer.reshape(self.dataOut.data.shape[0], self.dataOut.data.shape[1]/window, window)
        buffer = numpy.sum(buffer, 2)

        self.dataOut.data = buffer
        self.dataOut.heightList = numpy.arange(self.dataOut.heightList[0], newdelta*self.dataOut.nHeights/window, newdelta)
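
# A minimal, self-contained sketch of the decimation performed by filterByHeights(): profiles
# are reshaped into groups of "window" adjacent heights and summed along the last axis. This
# helper is only illustrative and is not used anywhere in the module.
def _filterByHeights_example(window=4):

    nChannels = 2
    nHeights = 10
    data = numpy.arange(nChannels*nHeights).reshape(nChannels, nHeights)

    # drop the trailing heights that do not fill a complete window, as filterByHeights() does
    r = data.shape[1] % window
    buffer = data[:, 0:data.shape[1]-r]

    # group every "window" adjacent heights and sum them
    buffer = buffer.reshape(data.shape[0], buffer.shape[1]/window, window)
    buffer = numpy.sum(buffer, 2)

    return buffer    # shape (nChannels, (nHeights-r)/window)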


class CohInt(Operation):

    __profIndex = 0
    __withOverapping = False

    __byTime = False
    __initime = None
    __lastdatatime = None
    __integrationtime = None

    __buffer = None

    __dataReady = False

    n = None

    def __init__(self):

        self.__isConfig = False

    def setup(self, n=None, timeInterval=None, overlapping=False):
        """
        Set the parameters of the integration class.
        Inputs:
            n : number of coherent integrations
            timeInterval : integration time in minutes. Ignored if the parameter "n" is given
            overlapping : whether to integrate over a sliding (overlapping) window of profiles
        """
        self.__initime = None
        self.__lastdatatime = 0
        self.__buffer = None
        self.__dataReady = False

        if n == None and timeInterval == None:
            raise ValueError, "n or timeInterval should be specified ..."

        if n != None:
            self.n = n
            self.__byTime = False
        else:
            self.__integrationtime = timeInterval * 60. #if (type(timeInterval)!=integer) -> change this line
            self.n = 9999
            self.__byTime = True

        if overlapping:
            self.__withOverapping = True
            self.__buffer = None
        else:
            self.__withOverapping = False
            self.__buffer = 0

        self.__profIndex = 0

    def putData(self, data):
        """
        Add a profile to __buffer and increase __profIndex by one.
        """
        if not self.__withOverapping:
            self.__buffer += data.copy()
            self.__profIndex += 1
            return

        # Overlapping data
        nChannels, nHeis = data.shape
        data = numpy.reshape(data, (1, nChannels, nHeis))

        # If the buffer is empty then it takes the data value
        if self.__buffer == None:
            self.__buffer = data
            self.__profIndex += 1
            return

        # If the buffer length is lower than n then stack the data value
        if self.__profIndex < self.n:
            self.__buffer = numpy.vstack((self.__buffer, data))
            self.__profIndex += 1
            return

        # If the buffer is full then shift it and replace the oldest profile with the new data
        self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
        self.__buffer[self.n-1] = data
        self.__profIndex = self.n

        return

    def pushData(self):
        """
        Return the sum of the last profiles and the number of profiles used in the sum.
        Affected:
            self.__profIndex
        """
        if not self.__withOverapping:
            data = self.__buffer
            n = self.__profIndex

            self.__buffer = 0
            self.__profIndex = 0

            return data, n

        # Integration with overlapping
        data = numpy.sum(self.__buffer, axis=0)
        n = self.__profIndex

        return data, n

    def byProfiles(self, data):

        self.__dataReady = False
        avgdata = None
        n = None

        self.putData(data)

        if self.__profIndex == self.n:
            avgdata, n = self.pushData()
            self.__dataReady = True

        return avgdata

    def byTime(self, data, datatime):

        self.__dataReady = False
        avgdata = None
        n = None

        self.putData(data)

        if (datatime - self.__initime) >= self.__integrationtime:
            avgdata, n = self.pushData()
            self.n = n
            self.__dataReady = True

        return avgdata

    def integrate(self, data, datatime=None):

        if self.__initime == None:
            self.__initime = datatime

        if self.__byTime:
            avgdata = self.byTime(data, datatime)
        else:
            avgdata = self.byProfiles(data)

        self.__lastdatatime = datatime

        if avgdata == None:
            return None, None

        avgdatatime = self.__initime
        deltatime = datatime - self.__lastdatatime

        if not self.__withOverapping:
            self.__initime = datatime
        else:
            self.__initime += deltatime

        return avgdata, avgdatatime

    def run(self, dataOut, n=None, timeInterval=None, overlapping=False):

        if not self.__isConfig:
            self.setup(n, timeInterval, overlapping)
            self.__isConfig = True

        avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)

        # dataOut.timeInterval *= n
        dataOut.flagNoData = True

        if self.__dataReady:
            dataOut.data = avgdata
            dataOut.nCohInt *= self.n
            dataOut.utctime = avgdatatime
            dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
            dataOut.flagNoData = False
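
# A minimal usage sketch for CohInt (illustrative, not part of the original module; "voltageOut"
# is a hypothetical Voltage object). CohInt is an Operation, so it is normally attached to a
# VoltageProc unit and invoked once per profile through its "run" method; the integrated data
# become available when dataOut.flagNoData turns False:
#
#     cohInt = CohInt()
#     cohInt.run(voltageOut, n=128, overlapping=False)     # or timeInterval=... (in minutes)
#     if not voltageOut.flagNoData:
#         integratedProfile = voltageOut.data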


class SpectraProc(ProcessingUnit):

    def __init__(self):

        self.objectDict = {}
        self.buffer = None
        self.firstdatatime = None
        self.profIndex = 0
        self.dataOut = Spectra()

    def __updateObjFromInput(self):

        self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
        self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
        self.dataOut.channelList = self.dataIn.channelList
        self.dataOut.heightList = self.dataIn.heightList
        self.dataOut.dtype = self.dataIn.dtype
        # self.dataOut.nHeights = self.dataIn.nHeights
        # self.dataOut.nChannels = self.dataIn.nChannels
        self.dataOut.nBaud = self.dataIn.nBaud
        self.dataOut.nCode = self.dataIn.nCode
        self.dataOut.code = self.dataIn.code
        self.dataOut.nProfiles = self.dataOut.nFFTPoints
        # self.dataOut.channelIndexList = self.dataIn.channelIndexList
        self.dataOut.flagTimeBlock = self.dataIn.flagTimeBlock
        self.dataOut.utctime = self.firstdatatime
        self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
        self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data is not flipped
        self.dataOut.flagShiftFFT = self.dataIn.flagShiftFFT
        self.dataOut.nCohInt = self.dataIn.nCohInt
        self.dataOut.nIncohInt = 1
        self.dataOut.ippSeconds = self.dataIn.ippSeconds

        self.dataOut.timeInterval = self.dataIn.timeInterval*self.dataOut.nFFTPoints*self.dataOut.nIncohInt

    def __getFft(self):
        """
        Converts Voltage values into Spectra.
        Affected:
            self.dataOut.data_spc
            self.dataOut.data_cspc
            self.dataOut.data_dc
            self.dataOut.heightList
            self.profIndex
            self.buffer
            self.dataOut.flagNoData
        """
        fft_volt = numpy.fft.fft(self.buffer, axis=1)
        dc = fft_volt[:,0,:]

        # self-spectra computation
        fft_volt = numpy.fft.fftshift(fft_volt, axes=(1,))
        spc = fft_volt * numpy.conjugate(fft_volt)
        spc = spc.real

        blocksize = 0
        blocksize += dc.size
        blocksize += spc.size

        cspc = None
        pairIndex = 0
        if self.dataOut.pairsList != None:
            # cross-spectra computation
            cspc = numpy.zeros((self.dataOut.nPairs, self.dataOut.nFFTPoints, self.dataOut.nHeights), dtype='complex')
            for pair in self.dataOut.pairsList:
                cspc[pairIndex,:,:] = numpy.abs(fft_volt[pair[0],:,:] * numpy.conjugate(fft_volt[pair[1],:,:]))
                pairIndex += 1
            blocksize += cspc.size

        self.dataOut.data_spc = spc
        self.dataOut.data_cspc = cspc
        self.dataOut.data_dc = dc
        self.dataOut.blockSize = blocksize

    def init(self, nFFTPoints=None, pairsList=None):

        if self.dataIn.type == "Spectra":
            self.dataOut.copy(self.dataIn)
            return

        if self.dataIn.type == "Voltage":

            if nFFTPoints == None:
                raise ValueError, "SpectraProc.init() needs the nFFTPoints input variable"

            if pairsList == None:
                nPairs = 0
            else:
                nPairs = len(pairsList)

            self.dataOut.nFFTPoints = nFFTPoints
            self.dataOut.pairsList = pairsList
            self.dataOut.nPairs = nPairs

            if self.buffer == None:
                self.buffer = numpy.zeros((self.dataIn.nChannels,
                                           self.dataOut.nFFTPoints,
                                           self.dataIn.nHeights),
                                          dtype='complex')

            self.buffer[:,self.profIndex,:] = self.dataIn.data
            self.profIndex += 1

            if self.firstdatatime == None:
                self.firstdatatime = self.dataIn.utctime

            if self.profIndex == self.dataOut.nFFTPoints:
                self.__updateObjFromInput()
                self.__getFft()

                self.dataOut.flagNoData = False

                self.buffer = None
                self.firstdatatime = None
                self.profIndex = 0

            return

        raise ValueError, "The type object %s is not valid"%(self.dataIn.type)

    def selectChannels(self, channelList):

        channelIndexList = []

        for channel in channelList:
            index = self.dataOut.channelList.index(channel)
            channelIndexList.append(index)

        self.selectChannelsByIndex(channelIndexList)

    def selectChannelsByIndex(self, channelIndexList):
        """
        Selects a block of data by channel, according to channelIndexList.
        Input:
            channelIndexList : plain list of channel indexes to select, e.g. [2,3,7]
        Affected:
            self.dataOut.data_spc
            self.dataOut.channelIndexList
            self.dataOut.nChannels
        Return:
            None
        """
        for channelIndex in channelIndexList:
            if channelIndex not in self.dataOut.channelIndexList:
                print channelIndexList
                raise ValueError, "The value %d in channelIndexList is not valid" %channelIndex

        nChannels = len(channelIndexList)

        data_spc = self.dataOut.data_spc[channelIndexList,:]

        self.dataOut.data_spc = data_spc
        self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
        # self.dataOut.nChannels = nChannels

        return 1
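
# A self-contained sketch of the self-spectra computation done by SpectraProc.__getFft():
# an FFT along the profile axis, an fftshift to center the zero frequency, and the power as
# the FFT times its complex conjugate. Illustrative only; it is not used anywhere in the module.
def _selfSpectra_example(nChannels=2, nFFTPoints=16, nHeights=8):

    # random complex voltages with shape (channels, profiles, heights)
    volt = numpy.random.randn(nChannels, nFFTPoints, nHeights) + \
           1j*numpy.random.randn(nChannels, nFFTPoints, nHeights)

    fft_volt = numpy.fft.fft(volt, axis=1)
    dc = fft_volt[:, 0, :]                                # zero-frequency (DC) component
    fft_volt = numpy.fft.fftshift(fft_volt, axes=(1,))
    spc = (fft_volt * numpy.conjugate(fft_volt)).real     # power spectra per channel

    return spc, dc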


class IncohInt(Operation):

    __profIndex = 0
    __withOverapping = False

    __byTime = False
    __initime = None
    __lastdatatime = None
    __integrationtime = None

    __buffer_spc = None
    __buffer_cspc = None
    __buffer_dc = None

    __dataReady = False

    n = None

    def __init__(self):

        self.__isConfig = False

    def setup(self, n=None, timeInterval=None, overlapping=False):
        """
        Set the parameters of the integration class.
        Inputs:
            n : number of incoherent integrations
            timeInterval : integration time in minutes. Ignored if the parameter "n" is given
            overlapping : whether to integrate over a sliding (overlapping) window of spectra
        """
        self.__initime = None
        self.__lastdatatime = 0

        self.__buffer_spc = None
        self.__buffer_cspc = None
        self.__buffer_dc = None

        self.__dataReady = False

        if n == None and timeInterval == None:
            raise ValueError, "n or timeInterval should be specified ..."

        if n != None:
            self.n = n
            self.__byTime = False
        else:
            self.__integrationtime = timeInterval * 60. #if (type(timeInterval)!=integer) -> change this line
            self.n = 9999
            self.__byTime = True

        if overlapping:
            self.__withOverapping = True
        else:
            self.__withOverapping = False
            self.__buffer_spc = 0
            self.__buffer_cspc = 0
            self.__buffer_dc = 0

        self.__profIndex = 0

    def putData(self, data_spc, data_cspc, data_dc):
        """
        Add a profile to the __buffer_spc/__buffer_cspc/__buffer_dc buffers and increase
        __profIndex by one.
        """
        if not self.__withOverapping:
            self.__buffer_spc += data_spc

            if data_cspc == None:
                self.__buffer_cspc = None
            else:
                self.__buffer_cspc += data_cspc

            if data_dc == None:
                self.__buffer_dc = None
            else:
                self.__buffer_dc += data_dc

            self.__profIndex += 1
            return

        # Overlapping data
        nChannels, nFFTPoints, nHeis = data_spc.shape
        data_spc = numpy.reshape(data_spc, (1, nChannels, nFFTPoints, nHeis))

        if data_cspc != None:
            data_cspc = numpy.reshape(data_cspc, (1, -1, nFFTPoints, nHeis))

        if data_dc != None:
            data_dc = numpy.reshape(data_dc, (1, -1, nHeis))

        # If the buffer is empty then it takes the data value
        if self.__buffer_spc == None:
            self.__buffer_spc = data_spc

            if data_cspc == None:
                self.__buffer_cspc = None
            else:
                self.__buffer_cspc = data_cspc

            if data_dc == None:
                self.__buffer_dc = None
            else:
                self.__buffer_dc = data_dc

            self.__profIndex += 1
            return

        # If the buffer length is lower than n then stack the data value
        if self.__profIndex < self.n:
            self.__buffer_spc = numpy.vstack((self.__buffer_spc, data_spc))

            if data_cspc != None:
                self.__buffer_cspc = numpy.vstack((self.__buffer_cspc, data_cspc))

            if data_dc != None:
                self.__buffer_dc = numpy.vstack((self.__buffer_dc, data_dc))

            self.__profIndex += 1
            return

        # If the buffer is full then shift it and replace the oldest value with the new data
        self.__buffer_spc = numpy.roll(self.__buffer_spc, -1, axis=0)
        self.__buffer_spc[self.n-1] = data_spc

        if data_cspc != None:
            self.__buffer_cspc = numpy.roll(self.__buffer_cspc, -1, axis=0)
            self.__buffer_cspc[self.n-1] = data_cspc

        if data_dc != None:
            self.__buffer_dc = numpy.roll(self.__buffer_dc, -1, axis=0)
            self.__buffer_dc[self.n-1] = data_dc

        self.__profIndex = self.n
        return

    def pushData(self):
        """
        Return the sum of the last profiles and the number of profiles used in the sum.
        Affected:
            self.__profIndex
        """
        data_spc = None
        data_cspc = None
        data_dc = None

        if not self.__withOverapping:
            data_spc = self.__buffer_spc
            data_cspc = self.__buffer_cspc
            data_dc = self.__buffer_dc

            n = self.__profIndex

            self.__buffer_spc = 0
            self.__buffer_cspc = 0
            self.__buffer_dc = 0
            self.__profIndex = 0

            return data_spc, data_cspc, data_dc, n

        # Integration with overlapping
        data_spc = numpy.sum(self.__buffer_spc, axis=0)

        if self.__buffer_cspc != None:
            data_cspc = numpy.sum(self.__buffer_cspc, axis=0)

        if self.__buffer_dc != None:
            data_dc = numpy.sum(self.__buffer_dc, axis=0)

        n = self.__profIndex

        return data_spc, data_cspc, data_dc, n

    def byProfiles(self, *args):

        self.__dataReady = False
        avgdata_spc = None
        avgdata_cspc = None
        avgdata_dc = None
        n = None

        self.putData(*args)

        if self.__profIndex == self.n:
            avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
            self.__dataReady = True

        return avgdata_spc, avgdata_cspc, avgdata_dc

    def byTime(self, datatime, *args):

        self.__dataReady = False
        avgdata_spc = None
        avgdata_cspc = None
        avgdata_dc = None
        n = None

        self.putData(*args)

        if (datatime - self.__initime) >= self.__integrationtime:
            avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
            self.n = n
            self.__dataReady = True

        return avgdata_spc, avgdata_cspc, avgdata_dc

    def integrate(self, datatime, *args):

        if self.__initime == None:
            self.__initime = datatime

        if self.__byTime:
            avgdata_spc, avgdata_cspc, avgdata_dc = self.byTime(datatime, *args)
        else:
            avgdata_spc, avgdata_cspc, avgdata_dc = self.byProfiles(*args)

        self.__lastdatatime = datatime

        if avgdata_spc == None:
            return None, None, None, None

        avgdatatime = self.__initime
        deltatime = datatime - self.__lastdatatime

        if not self.__withOverapping:
            self.__initime = datatime
        else:
            self.__initime += deltatime

        return avgdatatime, avgdata_spc, avgdata_cspc, avgdata_dc

    def run(self, dataOut, n=None, timeInterval=None, overlapping=False):

        if not self.__isConfig:
            self.setup(n, timeInterval, overlapping)
            self.__isConfig = True

        avgdatatime, avgdata_spc, avgdata_cspc, avgdata_dc = self.integrate(dataOut.utctime,
                                                                            dataOut.data_spc,
                                                                            dataOut.data_cspc,
                                                                            dataOut.data_dc)

        # dataOut.timeInterval *= n
        dataOut.flagNoData = True

        if self.__dataReady:
            dataOut.data_spc = avgdata_spc
            dataOut.data_cspc = avgdata_cspc
            dataOut.data_dc = avgdata_dc

            dataOut.nIncohInt *= self.n
            dataOut.utctime = avgdatatime
            dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt * dataOut.nIncohInt * dataOut.nFFTPoints
            dataOut.flagNoData = False
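
# A minimal usage sketch for IncohInt (illustrative, not part of the original module; "spectraOut"
# is a hypothetical Spectra object). Like CohInt, it is attached to a processing unit -- here a
# SpectraProc -- and averages data_spc, data_cspc and data_dc over "n" spectra, or over a time
# interval given in minutes:
#
#     incohInt = IncohInt()
#     incohInt.run(spectraOut, n=10)
#     if not spectraOut.flagNoData:
#         avgSpectra = spectraOut.data_spc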


class ProfileSelector(Operation):

    profileIndex = None
    # total number of profiles
    nProfiles = None

    def __init__(self):

        self.profileIndex = 0

    def incIndex(self):

        self.profileIndex += 1

        if self.profileIndex >= self.nProfiles:
            self.profileIndex = 0

    def isProfileInRange(self, minIndex, maxIndex):

        if self.profileIndex < minIndex:
            return False

        if self.profileIndex > maxIndex:
            return False

        return True

    def isProfileInList(self, profileList):

        if self.profileIndex not in profileList:
            return False

        return True

    def run(self, dataOut, profileList=None, profileRangeList=None):

        self.nProfiles = dataOut.nProfiles

        if profileList != None:
            if not(self.isProfileInList(profileList)):
                dataOut.flagNoData = True
            else:
                dataOut.flagNoData = False
            self.incIndex()
            return 1

        elif profileRangeList != None:
            minIndex = profileRangeList[0]
            maxIndex = profileRangeList[1]
            if not(self.isProfileInRange(minIndex, maxIndex)):
                dataOut.flagNoData = True
            else:
                dataOut.flagNoData = False
            self.incIndex()
            return 1

        else:
            raise ValueError, "ProfileSelector needs profileList or profileRangeList"

        return 0
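
# ProfileSelector usage sketch (illustrative; "voltageOut" is a hypothetical Voltage object):
# it flags profiles outside the requested set so that downstream operations skip them, e.g.
#
#     selector = ProfileSelector()
#     selector.run(voltageOut, profileRangeList=[0, 63])    # keep profiles 0..63 of each cycle
#     # or: selector.run(voltageOut, profileList=[0, 4, 8])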


class Decoder:

    data = None
    profCounter = None
    code = None
    ncode = None
    nbaud = None
    codeIndex = None
    flag = False

    def __init__(self):

        self.data = None
        self.ndata = None
        self.profCounter = 1
        self.codeIndex = 0
        self.flag = False
        self.code = None
        self.ncode = None
        self.nbaud = None
        self.__isConfig = False

    def convolutionInFreq(self, data, ndata):

        newcode = numpy.zeros(ndata)
        newcode[0:self.nbaud] = self.code[self.codeIndex]
        self.codeIndex += 1

        fft_data = numpy.fft.fft(data, axis=1)
        fft_code = numpy.conj(numpy.fft.fft(newcode))
        fft_code = fft_code.reshape(1, len(fft_code))

        conv = fft_data*fft_code
        data = numpy.fft.ifft(conv, axis=1)

        self.data = data[:,:-self.nbaud+1]
        self.flag = True

        if self.profCounter == self.ncode:
            self.profCounter = 0
            self.codeIndex = 0

        self.profCounter += 1

    def convolutionInTime(self, data, ndata):

        nchannel = data.shape[0]
        newcode = self.code[self.codeIndex]
        self.codeIndex += 1

        conv = numpy.zeros((nchannel, ndata - self.nbaud + 1), dtype=data.dtype)

        for i in range(nchannel):
            conv[i,:] = numpy.correlate(data[i,:], newcode, mode='valid')

        self.data = conv
        self.flag = True

        if self.profCounter == self.ncode:
            self.profCounter = 0
            self.codeIndex = 0

        self.profCounter += 1

    def run(self, dataOut, code=None, mode=0):

        if not(self.__isConfig):

            if code == None:
                code = dataOut.radarControllerHeaderObj.code
                # code = dataOut.code

            ncode, nbaud = code.shape
            self.code = code
            self.ncode = ncode
            self.nbaud = nbaud
            self.__isConfig = True

        ndata = dataOut.data.shape[1]

        if mode == 0:
            self.convolutionInFreq(dataOut.data, ndata)

        if mode == 1:
            self.convolutionInTime(dataOut.data, ndata)

        self.ndata = ndata - self.nbaud + 1

        dataOut.data = self.data
        dataOut.heightList = dataOut.heightList[:self.ndata]
        dataOut.flagNoData = False
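
# A self-contained sketch of the time-domain decoding used by Decoder.convolutionInTime():
# correlating the received profile with the transmitted code compresses the pulse into a
# single peak at the target range. The 13-baud Barker code below is only an example; the
# real code comes from dataOut.radarControllerHeaderObj.code.
def _decode_example():

    code = numpy.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1], dtype=float)
    nbaud = code.size

    # one channel, 64 range gates, with the coded pulse starting at gate 20
    profile = numpy.zeros(64)
    profile[20:20+nbaud] = code

    decoded = numpy.correlate(profile, code, mode='valid')   # length 64 - nbaud + 1

    return numpy.argmax(decoded)    # == 20, the gate where the pulse starts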