Block360_vRF4 and Weather_vRF_Plot Fixed and Tested
rflores
r1447:73923d8d784b
@@ -1,2588 +1,2589
1 1 import os
2 2 import datetime
3 3 import numpy
4 4 from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
5 5
6 6 from schainpy.model.graphics.jroplot_base import Plot, plt
7 7 from schainpy.model.graphics.jroplot_spectra import SpectraPlot, RTIPlot, CoherencePlot, SpectraCutPlot
8 8 from schainpy.utils import log
from matplotlib.patches import Polygon  # used by PolarMapPlot below; not imported anywhere else in this module
9 9 # wradlib library
10 10 import wradlib as wrl
11 11
12 12 EARTH_RADIUS = 6.3710e3
13 13
14 14
15 15 def ll2xy(lat1, lon1, lat2, lon2):
16 16
17 17 p = 0.017453292519943295
18 18 a = 0.5 - numpy.cos((lat2 - lat1) * p)/2 + numpy.cos(lat1 * p) * \
19 19 numpy.cos(lat2 * p) * (1 - numpy.cos((lon2 - lon1) * p)) / 2
20 20 r = 12742 * numpy.arcsin(numpy.sqrt(a))
21 21 theta = numpy.arctan2(numpy.sin((lon2-lon1)*p)*numpy.cos(lat2*p), numpy.cos(lat1*p)
22 22 * numpy.sin(lat2*p)-numpy.sin(lat1*p)*numpy.cos(lat2*p)*numpy.cos((lon2-lon1)*p))
23 23 theta = -theta + numpy.pi/2
24 24 return r*numpy.cos(theta), r*numpy.sin(theta)
25 25
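# A quick, approximate illustration of ll2xy: it returns the (east, north) offset in km
# of (lat2, lon2) relative to (lat1, lon1). The haversine term gives the great-circle
# distance (12742 km = Earth diameter) and the bearing is rotated into math convention
# before being decomposed; e.g. at Lima's latitude, ll2xy(-12.0, -77.0, -12.0, -76.0)
# comes out roughly as (109, 0), i.e. about 109 km due east.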
26 26
27 27 def km2deg(km):
28 28 '''
29 29 Convert distance in km to degrees
30 30 '''
31 31
32 32 return numpy.rad2deg(km/EARTH_RADIUS)
33 33
34 34
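# Rough check of km2deg: one degree of arc on the Earth's surface is ~111.19 km, so
# km2deg(111.19) ≈ 1.0 and km2deg(15) ≈ 0.135; PolarMapPlot below uses this to draw
# range rings in lat/lon coordinates.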
35 35
36 36 class SpectralMomentsPlot(SpectraPlot):
37 37 '''
38 38 Plot for Spectral Moments
39 39 '''
40 40 CODE = 'spc_moments'
41 41 # colormap = 'jet'
42 42 # plot_type = 'pcolor'
43 43
44 44 class DobleGaussianPlot(SpectraPlot):
45 45 '''
46 46 Plot for Double Gaussian Plot
47 47 '''
48 48 CODE = 'gaussian_fit'
49 49 # colormap = 'jet'
50 50 # plot_type = 'pcolor'
51 51
52 52 class DoubleGaussianSpectraCutPlot(SpectraCutPlot):
53 53 '''
54 54 Plot SpectraCut with Double Gaussian Fit
55 55 '''
56 56 CODE = 'cut_gaussian_fit'
57 57
58 58 class SnrPlot(RTIPlot):
59 59 '''
60 60 Plot for SNR Data
61 61 '''
62 62
63 63 CODE = 'snr'
64 64 colormap = 'jet'
65 65
66 66 def update(self, dataOut):
67 67
68 68 data = {
69 69 'snr': 10*numpy.log10(dataOut.data_snr)
70 70 }
71 71
72 72 return data, {}
73 73
74 74 class DopplerPlot(RTIPlot):
75 75 '''
76 76 Plot for DOPPLER Data (1st moment)
77 77 '''
78 78
79 79 CODE = 'dop'
80 80 colormap = 'jet'
81 81
82 82 def update(self, dataOut):
83 83
84 84 data = {
85 85 'dop': 10*numpy.log10(dataOut.data_dop)
86 86 }
87 87
88 88 return data, {}
89 89
90 90 class PowerPlot(RTIPlot):
91 91 '''
92 92 Plot for Power Data (0 moment)
93 93 '''
94 94
95 95 CODE = 'pow'
96 96 colormap = 'jet'
97 97
98 98 def update(self, dataOut):
99 99 data = {
100 100 'pow': 10*numpy.log10(dataOut.data_pow/dataOut.normFactor)
101 101 }
102 102 return data, {}
103 103
104 104 class SpectralWidthPlot(RTIPlot):
105 105 '''
106 106 Plot for Spectral Width Data (2nd moment)
107 107 '''
108 108
109 109 CODE = 'width'
110 110 colormap = 'jet'
111 111
112 112 def update(self, dataOut):
113 113
114 114 data = {
115 115 'width': dataOut.data_width
116 116 }
117 117
118 118 return data, {}
119 119
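# A note on the pattern above (inferred from this file, stated tentatively): each of
# these RTI-style classes just subclasses RTIPlot, sets a unique CODE and colormap, and
# implements update(dataOut) returning ({CODE: values}, meta); buffering and rendering
# appear to be handled by the base Plot/RTIPlot classes.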
120 120 class SkyMapPlot(Plot):
121 121 '''
122 122 Plot for meteors detection data
123 123 '''
124 124
125 125 CODE = 'param'
126 126
127 127 def setup(self):
128 128
129 129 self.ncols = 1
130 130 self.nrows = 1
131 131 self.width = 7.2
132 132 self.height = 7.2
133 133 self.nplots = 1
134 134 self.xlabel = 'Zonal Zenith Angle (deg)'
135 135 self.ylabel = 'Meridional Zenith Angle (deg)'
136 136 self.polar = True
137 137 self.ymin = -180
138 138 self.ymax = 180
139 139 self.colorbar = False
140 140
141 141 def plot(self):
142 142
143 143 arrayParameters = numpy.concatenate(self.data['param'])
144 144 error = arrayParameters[:, -1]
145 145 indValid = numpy.where(error == 0)[0]
146 146 finalMeteor = arrayParameters[indValid, :]
147 147 finalAzimuth = finalMeteor[:, 3]
148 148 finalZenith = finalMeteor[:, 4]
149 149
150 150 x = finalAzimuth * numpy.pi / 180
151 151 y = finalZenith
152 152
153 153 ax = self.axes[0]
154 154
155 155 if ax.firsttime:
156 156 ax.plot = ax.plot(x, y, 'bo', markersize=5)[0]
157 157 else:
158 158 ax.plot.set_data(x, y)
159 159
160 160 dt1 = self.getDateTime(self.data.min_time).strftime('%y/%m/%d %H:%M:%S')
161 161 dt2 = self.getDateTime(self.data.max_time).strftime('%y/%m/%d %H:%M:%S')
162 162 title = 'Meteor Detection Sky Map\n %s - %s \n Number of events: %5.0f\n' % (dt1,
163 163 dt2,
164 164 len(x))
165 165 self.titles[0] = title
166 166
167 167
168 168 class GenericRTIPlot(Plot):
169 169 '''
170 170 Plot for data_xxxx object
171 171 '''
172 172
173 173 CODE = 'param'
174 174 colormap = 'viridis'
175 175 plot_type = 'pcolorbuffer'
176 176
177 177 def setup(self):
178 178 self.xaxis = 'time'
179 179 self.ncols = 1
180 180 self.nrows = self.data.shape('param')[0]
181 181 self.nplots = self.nrows
182 182 self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.08, 'right':0.95, 'top': 0.95})
183 183
184 184 if not self.xlabel:
185 185 self.xlabel = 'Time'
186 186
187 187 self.ylabel = 'Range [km]'
188 188 if not self.titles:
189 189 self.titles = ['Param {}'.format(x) for x in range(self.nrows)]
190 190
191 191 def update(self, dataOut):
192 192
193 193 data = {
194 194 'param' : numpy.concatenate([getattr(dataOut, attr) for attr in self.attr_data], axis=0)
195 195 }
196 196
197 197 meta = {}
198 198
199 199 return data, meta
200 200
201 201 def plot(self):
202 202 # self.data.normalize_heights()
203 203 self.x = self.data.times
204 204 self.y = self.data.yrange
205 205 self.z = self.data['param']
206 206 self.z = 10*numpy.log10(self.z)
207 207 self.z = numpy.ma.masked_invalid(self.z)
208 208
209 209 if self.decimation is None:
210 210 x, y, z = self.fill_gaps(self.x, self.y, self.z)
211 211 else:
212 212 x, y, z = self.fill_gaps(*self.decimate())
213 213
214 214 for n, ax in enumerate(self.axes):
215 215
216 216 self.zmax = self.zmax if self.zmax is not None else numpy.max(
217 217 self.z[n])
218 218 self.zmin = self.zmin if self.zmin is not None else numpy.min(
219 219 self.z[n])
220 220
221 221 if ax.firsttime:
222 222 if self.zlimits is not None:
223 223 self.zmin, self.zmax = self.zlimits[n]
224 224
225 225 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
226 226 vmin=self.zmin,
227 227 vmax=self.zmax,
228 228 cmap=self.cmaps[n]
229 229 )
230 230 else:
231 231 if self.zlimits is not None:
232 232 self.zmin, self.zmax = self.zlimits[n]
233 233 ax.collections.remove(ax.collections[0])
234 234 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
235 235 vmin=self.zmin,
236 236 vmax=self.zmax,
237 237 cmap=self.cmaps[n]
238 238 )
239 239
240 240
241 241 class PolarMapPlot(Plot):
242 242 '''
243 243 Plot for weather radar
244 244 '''
245 245
246 246 CODE = 'param'
247 247 colormap = 'seismic'
248 248
249 249 def setup(self):
250 250 self.ncols = 1
251 251 self.nrows = 1
252 252 self.width = 9
253 253 self.height = 8
254 254 self.mode = self.data.meta['mode']
255 255 if self.channels is not None:
256 256 self.nplots = len(self.channels)
257 257 self.nrows = len(self.channels)
258 258 else:
259 259 self.nplots = self.data.shape(self.CODE)[0]
260 260 self.nrows = self.nplots
261 261 self.channels = list(range(self.nplots))
262 262 if self.mode == 'E':
263 263 self.xlabel = 'Longitude'
264 264 self.ylabel = 'Latitude'
265 265 else:
266 266 self.xlabel = 'Range (km)'
267 267 self.ylabel = 'Height (km)'
268 268 self.bgcolor = 'white'
269 269 self.cb_labels = self.data.meta['units']
270 270 self.lat = self.data.meta['latitude']
271 271 self.lon = self.data.meta['longitude']
272 272 self.xmin, self.xmax = float(
273 273 km2deg(self.xmin) + self.lon), float(km2deg(self.xmax) + self.lon)
274 274 self.ymin, self.ymax = float(
275 275 km2deg(self.ymin) + self.lat), float(km2deg(self.ymax) + self.lat)
276 276 # self.polar = True
277 277
278 278 def plot(self):
279 279
280 280 for n, ax in enumerate(self.axes):
281 281 data = self.data['param'][self.channels[n]]
282 282
283 283 zeniths = numpy.linspace(
284 284 0, self.data.meta['max_range'], data.shape[1])
285 285 if self.mode == 'E':
286 286 azimuths = -numpy.radians(self.data.yrange)+numpy.pi/2
287 287 r, theta = numpy.meshgrid(zeniths, azimuths)
288 288 x, y = r*numpy.cos(theta)*numpy.cos(numpy.radians(self.data.meta['elevation'])), r*numpy.sin(
289 289 theta)*numpy.cos(numpy.radians(self.data.meta['elevation']))
290 290 x = km2deg(x) + self.lon
291 291 y = km2deg(y) + self.lat
292 292 else:
293 293 azimuths = numpy.radians(self.data.yrange)
294 294 r, theta = numpy.meshgrid(zeniths, azimuths)
295 295 x, y = r*numpy.cos(theta), r*numpy.sin(theta)
296 296 self.y = zeniths
297 297
298 298 if ax.firsttime:
299 299 if self.zlimits is not None:
300 300 self.zmin, self.zmax = self.zlimits[n]
301 301 ax.plt = ax.pcolormesh( # r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
302 302 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
303 303 vmin=self.zmin,
304 304 vmax=self.zmax,
305 305 cmap=self.cmaps[n])
306 306 else:
307 307 if self.zlimits is not None:
308 308 self.zmin, self.zmax = self.zlimits[n]
309 309 ax.collections.remove(ax.collections[0])
310 310 ax.plt = ax.pcolormesh( # r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
311 311 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
312 312 vmin=self.zmin,
313 313 vmax=self.zmax,
314 314 cmap=self.cmaps[n])
315 315
316 316 if self.mode == 'A':
317 317 continue
318 318
319 319 # plot district names
320 320 f = open('/data/workspace/schain_scripts/distrito.csv')
321 321 for line in f:
322 322 label, lon, lat = [s.strip() for s in line.split(',') if s]
323 323 lat = float(lat)
324 324 lon = float(lon)
325 325 # ax.plot(lon, lat, '.b', ms=2)
326 326 ax.text(lon, lat, label, ha='center',  # label is already a str in Python 3, no decode needed
327 327 va='bottom', size='8', color='black')
328 328
329 329 # plot boundaries
330 330 limites = []
331 331 tmp = []
332 332 for line in open('/data/workspace/schain_scripts/lima.csv'):
333 333 if '#' in line:
334 334 if tmp:
335 335 limites.append(tmp)
336 336 tmp = []
337 337 continue
338 338 values = line.strip().split(',')
339 339 tmp.append((float(values[0]), float(values[1])))
340 340 for points in limites:
341 341 ax.add_patch(
342 342 Polygon(points, ec='k', fc='none', ls='--', lw=0.5))
343 343
344 344 # plot river basins (cuencas)
345 345 for cuenca in ('rimac', 'lurin', 'mala', 'chillon', 'chilca', 'chancay-huaral'):
346 346 f = open('/data/workspace/schain_scripts/{}.csv'.format(cuenca))
347 347 values = [line.strip().split(',') for line in f]
348 348 points = [(float(s[0]), float(s[1])) for s in values]
349 349 ax.add_patch(Polygon(points, ec='b', fc='none'))
350 350
351 351 # plot grid
352 352 for r in (15, 30, 45, 60):
353 353 ax.add_artist(plt.Circle((self.lon, self.lat),
354 354 km2deg(r), color='0.6', fill=False, lw=0.2))
355 355 ax.text(
356 356 self.lon + (km2deg(r))*numpy.cos(60*numpy.pi/180),
357 357 self.lat + (km2deg(r))*numpy.sin(60*numpy.pi/180),
358 358 '{}km'.format(r),
359 359 ha='center', va='bottom', size='8', color='0.6', weight='heavy')
360 360
361 361 if self.mode == 'E':
362 362 title = r'El={}$^\circ$'.format(self.data.meta['elevation'])
363 363 label = 'E{:02d}'.format(int(self.data.meta['elevation']))
364 364 else:
365 365 title = r'Az={}$^\circ$'.format(self.data.meta['azimuth'])
366 366 label = 'A{:02d}'.format(int(self.data.meta['azimuth']))
367 367
368 368 self.save_labels = ['{}-{}'.format(lbl, label) for lbl in self.labels]
369 369 self.titles = ['{} {}'.format(
370 370 self.data.parameters[x], title) for x in self.channels]
371 371
372 372 class WeatherPlot(Plot):
373 373 CODE = 'weather'
374 374 plot_name = 'weather'
375 375 plot_type = 'ppistyle'
376 376 buffering = False
377 377
378 378 def setup(self):
379 379 self.ncols = 1
380 380 self.nrows = 1
381 381 self.width =8
382 382 self.height =8
383 383 self.nplots= 1
384 384 self.ylabel= 'Range [Km]'
385 385 self.titles= ['Weather']
386 386 self.colorbar=False
387 387 self.ini =0
388 388 self.len_azi =0
389 389 self.buffer_ini = None
390 390 self.buffer_azi = None
391 391 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
392 392 self.flag =0
393 393 self.indicador= 0
394 394 self.last_data_azi = None
395 395 self.val_mean = None
396 396
397 397 def update(self, dataOut):
398 398
399 399 data = {}
400 400 meta = {}
401 401 if hasattr(dataOut, 'dataPP_POWER'):
402 402 factor = 1
403 403 if hasattr(dataOut, 'nFFTPoints'):
404 404 factor = dataOut.normFactor
405 405 #print("DIME EL SHAPE PORFAVOR",dataOut.data_360.shape)
406 406 data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
407 407 data['azi'] = dataOut.data_azi
408 408 data['ele'] = dataOut.data_ele
409 409 return data, meta
410 410
411 411 def get2List(self,angulos):
412 412 list1=[]
413 413 list2=[]
414 414 for i in reversed(range(len(angulos))):
415 415 diff_ = angulos[i]-angulos[i-1]
416 416 if diff_ >1.5:
417 417 list1.append(i-1)
418 418 list2.append(diff_)
419 419 return list(reversed(list1)),list(reversed(list2))
420 420
421 421 def fixData360(self,list_,ang_):
422 422 if list_[0]==-1:
423 423 vec = numpy.where(ang_<ang_[0])
424 424 ang_[vec] = ang_[vec]+360
425 425 return ang_
426 426 return ang_
427 427
428 428 def fixData360HL(self,angulos):
429 429 vec = numpy.where(angulos>=360)
430 430 angulos[vec]=angulos[vec]-360
431 431 return angulos
432 432
433 433 def search_pos(self,pos,list_):
434 434 for i in range(len(list_)):
435 435 if pos == list_[i]:
436 436 return True,i
437 437 i=None
438 438 return False,i
439 439
440 440 def fixDataComp(self,ang_,list1_,list2_):
441 441 size = len(ang_)
442 442 size2 = 0
443 443 for i in range(len(list2_)):
444 444 size2=size2+round(list2_[i])-1
445 445 new_size= size+size2
446 446 ang_new = numpy.zeros(new_size)
447 447 ang_new2 = numpy.zeros(new_size)
448 448
449 449 tmp = 0
450 450 c = 0
451 451 for i in range(len(ang_)):
452 452 ang_new[tmp +c] = ang_[i]
453 453 ang_new2[tmp+c] = ang_[i]
454 454 condition , value = self.search_pos(i,list1_)
455 455 if condition:
456 456 pos = tmp + c + 1
457 457 for k in range(round(list2_[value])-1):
458 458 ang_new[pos+k] = ang_new[pos+k-1]+1
459 459 ang_new2[pos+k] = numpy.nan
460 460 tmp = pos +k
461 461 c = 0
462 462 c=c+1
463 463 return ang_new,ang_new2
464 464
465 465 def globalCheckPED(self,angulos):
466 466 l1,l2 = self.get2List(angulos)
467 467 if len(l1)>0:
468 468 angulos2 = self.fixData360(list_=l1,ang_=angulos)
469 469 l1,l2 = self.get2List(angulos2)
470 470
471 471 ang1_,ang2_ = self.fixDataComp(ang_=angulos2,list1_=l1,list2_=l2)
472 472 ang1_ = self.fixData360HL(ang1_)
473 473 ang2_ = self.fixData360HL(ang2_)
474 474 else:
475 475 ang1_= angulos
476 476 ang2_= angulos
477 477 return ang1_,ang2_
478 478
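# A hedged illustration of the gap handling above (assuming ~1 degree azimuth steps):
# an azimuth record that wraps through north with missing rays, e.g. [357, 358, 2, 3],
# comes back as
#   ang1_ = [357, 358, 359, 0, 1, 2, 3]        gap filled and wrapped back into 0-360
#   ang2_ = [357, 358, nan, nan, nan, 2, 3]    nan marks the synthetic rays
# ang2_ is later passed to replaceNAN() so the filled rays are masked in the data.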
479 479 def analizeDATA(self,data_azi):
480 480 list1 = []
481 481 list2 = []
482 482 dat = data_azi
483 483 for i in reversed(range(1,len(dat))):
484 484 if dat[i]>dat[i-1]:
485 485 diff = int(dat[i])-int(dat[i-1])
486 486 else:
487 487 diff = 360+int(dat[i])-int(dat[i-1])
488 488 if diff > 1:
489 489 list1.append(i-1)
490 490 list2.append(diff-1)
491 491 return list1,list2
492 492
493 493 def fixDATANEW(self,data_azi,data_weather):
494 494 list1,list2 = self.analizeDATA(data_azi)
495 495 if len(list1)== 0:
496 496 return data_azi,data_weather
497 497 else:
498 498 resize = 0
499 499 for i in range(len(list2)):
500 500 resize= resize + list2[i]
501 501 new_data_azi = numpy.resize(data_azi,resize)
502 502 new_data_weather= numpy.resize(data_weather,resize)
503 503
504 504 for i in range(len(list2)):
505 505 j=0
506 506 position=list1[i]+1
507 507 for j in range(list2[i]):
508 508 new_data_azi[position+j]=new_data_azi[position+j-1]+1
509 509 return new_data_azi
510 510
511 511 def fixDATA(self,data_azi):
512 512 data=data_azi
513 513 for i in range(len(data)):
514 514 if numpy.isnan(data[i]):
515 515 data[i]=data[i-1]+1
516 516 return data
517 517
518 518 def replaceNAN(self,data_weather,data_azi,val):
519 519 data= data_azi
520 520 data_T= data_weather
521 521 if data.shape[0]> data_T.shape[0]:
522 522 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
523 523 c = 0
524 524 for i in range(len(data)):
525 525 if numpy.isnan(data[i]):
526 526 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
527 527 else:
528 528 data_N[i,:]=data_T[c,:]
529 529 c=c+1
530 530 return data_N
531 531 else:
532 532 for i in range(len(data)):
533 533 if numpy.isnan(data[i]):
534 534 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
535 535 return data_T
536 536
537 537 def const_ploteo(self,data_weather,data_azi,step,res):
538 538 if self.ini==0:
539 539 #-------
540 540 n = (360/res)-len(data_azi)
541 541 #--------------------- new -------------------------
542 542 data_azi_new ,data_azi_old= self.globalCheckPED(data_azi)
543 543 #------------------------
544 544 start = data_azi_new[-1] + res
545 545 end = data_azi_new[0] - res
546 546 #------ new
547 547 self.last_data_azi = end
548 548 if start>end:
549 549 end = end + 360
550 550 azi_vacia = numpy.linspace(start,end,int(n))
551 551 azi_vacia = numpy.where(azi_vacia>360,azi_vacia-360,azi_vacia)
552 552 data_azi = numpy.hstack((data_azi_new,azi_vacia))
553 553 # RADAR
554 554 val_mean = numpy.mean(data_weather[:,-1])
555 555 self.val_mean = val_mean
556 556 data_weather_cmp = numpy.ones([(360-data_weather.shape[0]),data_weather.shape[1]])*val_mean
557 557 data_weather = self.replaceNAN(data_weather=data_weather,data_azi=data_azi_old,val=self.val_mean)
558 558 data_weather = numpy.vstack((data_weather,data_weather_cmp))
559 559 else:
560 560 # azimuth
561 561 flag=0
562 562 start_azi = self.res_azi[0]
563 563 #-----------new------------
564 564 data_azi ,data_azi_old= self.globalCheckPED(data_azi)
565 565 data_weather = self.replaceNAN(data_weather=data_weather,data_azi=data_azi_old,val=self.val_mean)
566 566 #--------------------------
567 567 start = data_azi[0]
568 568 end = data_azi[-1]
569 569 self.last_data_azi= end
570 570 if start< start_azi:
571 571 start = start +360
572 572 if end <start_azi:
573 573 end = end +360
574 574
575 575 pos_ini = int((start-start_azi)/res)
576 576 len_azi = len(data_azi)
577 577 if (360-pos_ini)<len_azi:
578 578 if pos_ini+1==360:
579 579 pos_ini=0
580 580 else:
581 581 flag=1
582 582 dif= 360-pos_ini
583 583 comp= len_azi-dif
584 584 #-----------------
585 585 if flag==0:
586 586 # AZIMUTH
587 587 self.res_azi[pos_ini:pos_ini+len_azi] = data_azi
588 588 # RADAR
589 589 self.res_weather[pos_ini:pos_ini+len_azi,:] = data_weather
590 590 else:
591 591 # AZIMUTH
592 592 self.res_azi[pos_ini:pos_ini+dif] = data_azi[0:dif]
593 593 self.res_azi[0:comp] = data_azi[dif:]
594 594 # RADAR
595 595 self.res_weather[pos_ini:pos_ini+dif,:] = data_weather[0:dif,:]
596 596 self.res_weather[0:comp,:] = data_weather[dif:,:]
597 597 flag=0
598 598 data_azi = self.res_azi
599 599 data_weather = self.res_weather
600 600
601 601 return data_weather,data_azi
602 602
603 603 def plot(self):
604 604 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
605 605 data = self.data[-1]
606 606 r = self.data.yrange
607 607 delta_height = r[1]-r[0]
608 608 r_mask = numpy.where(r>=0)[0]
609 609 r = numpy.arange(len(r_mask))*delta_height
610 610 self.y = 2*r
611 611 # RADAR
612 612 #data_weather = data['weather']
613 613 # PEDESTAL
614 614 #data_azi = data['azi']
615 615 res = 1
616 616 # STEP
617 617 step = (360/(res*data['weather'].shape[0]))
618 618
619 619 self.res_weather, self.res_azi = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_azi=data['azi'],step=step,res=res)
620 620 self.res_ele = numpy.mean(data['ele'])
621 621 ################# PLOTTING ###################
622 622 for i,ax in enumerate(self.axes):
623 623 self.zmin = self.zmin if self.zmin else 20
624 624 self.zmax = self.zmax if self.zmax else 80
625 625 if ax.firsttime:
626 626 plt.clf()
627 627 cgax, pm = wrl.vis.plot_ppi(self.res_weather,r=r,az=self.res_azi,fig=self.figures[0], proj='cg', vmin=self.zmin, vmax=self.zmax)
628 628 else:
629 629 plt.clf()
630 630 cgax, pm = wrl.vis.plot_ppi(self.res_weather,r=r,az=self.res_azi,fig=self.figures[0], proj='cg', vmin=self.zmin, vmax=self.zmax)
631 631 caax = cgax.parasites[0]
632 632 paax = cgax.parasites[1]
633 633 cbar = plt.gcf().colorbar(pm, pad=0.075)
634 634 caax.set_xlabel('x_range [km]')
635 635 caax.set_ylabel('y_range [km]')
636 636 plt.text(1.0, 1.05, 'Azimuth '+str(thisDatetime)+" Step "+str(self.ini)+ " EL: "+str(round(self.res_ele, 1)), transform=caax.transAxes, va='bottom',ha='right')
637 637
638 638 self.ini= self.ini+1
639 639
640 640
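# A short note on the wradlib calls above, stated tentatively from how they are used in
# this file: wrl.vis.plot_ppi() with proj='cg' draws the PPI on a curvelinear-grid axis;
# the returned cgax exposes parasite axes (cgax.parasites[0] is the cartesian overlay
# used for the x/y range labels, cgax.parasites[1] the polar/azimuth axis), and pm is
# the mesh handed to colorbar(). WeatherRHIPlot below follows the same pattern with
# wrl.vis.plot_rhi().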
641 641 class WeatherRHIPlot(Plot):
642 642 CODE = 'weather'
643 643 plot_name = 'weather'
644 644 plot_type = 'rhistyle'
645 645 buffering = False
646 646 data_ele_tmp = None
647 647
648 648 def setup(self):
649 649 print("********************")
650 650 print("********************")
651 651 print("********************")
652 652 print("SETUP WEATHER PLOT")
653 653 self.ncols = 1
654 654 self.nrows = 1
655 655 self.nplots= 1
656 656 self.ylabel= 'Range [Km]'
657 657 self.titles= ['Weather']
658 658 if self.channels is not None:
659 659 self.nplots = len(self.channels)
660 660 self.nrows = len(self.channels)
661 661 else:
662 662 self.nplots = self.data.shape(self.CODE)[0]
663 663 self.nrows = self.nplots
664 664 self.channels = list(range(self.nplots))
665 665 print("channels",self.channels)
666 666 print("que saldra", self.data.shape(self.CODE)[0])
667 667 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
668 668 print("self.titles",self.titles)
669 669 self.colorbar=False
670 670 self.width =12
671 671 self.height =8
672 672 self.ini =0
673 673 self.len_azi =0
674 674 self.buffer_ini = None
675 675 self.buffer_ele = None
676 676 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
677 677 self.flag =0
678 678 self.indicador= 0
679 679 self.last_data_ele = None
680 680 self.val_mean = None
681 681
682 682 def update(self, dataOut):
683 683
684 684 data = {}
685 685 meta = {}
686 686 if hasattr(dataOut, 'dataPP_POWER'):
687 687 factor = 1
688 688 if hasattr(dataOut, 'nFFTPoints'):
689 689 factor = dataOut.normFactor
690 690 print("dataOut",dataOut.data_360.shape)
691 691 #
692 692 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
693 693 #
694 694 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
695 695 data['azi'] = dataOut.data_azi
696 696 data['ele'] = dataOut.data_ele
697 697 #print("UPDATE")
698 698 #print("data[weather]",data['weather'].shape)
699 699 #print("data[azi]",data['azi'])
700 700 return data, meta
701 701
702 702 def get2List(self,angulos):
703 703 list1=[]
704 704 list2=[]
705 705 for i in reversed(range(len(angulos))):
706 706 if not i==0:# the i==0 case would compare the first element of the list with the last one, which is not relevant
707 707 diff_ = angulos[i]-angulos[i-1]
708 708 if abs(diff_) >1.5:
709 709 list1.append(i-1)
710 710 list2.append(diff_)
711 711 return list(reversed(list1)),list(reversed(list2))
712 712
713 713 def fixData90(self,list_,ang_):
714 714 if list_[0]==-1:
715 715 vec = numpy.where(ang_<ang_[0])
716 716 ang_[vec] = ang_[vec]+90
717 717 return ang_
718 718 return ang_
719 719
720 720 def fixData90HL(self,angulos):
721 721 vec = numpy.where(angulos>=90)
722 722 angulos[vec]=angulos[vec]-90
723 723 return angulos
724 724
725 725
726 726 def search_pos(self,pos,list_):
727 727 for i in range(len(list_)):
728 728 if pos == list_[i]:
729 729 return True,i
730 730 i=None
731 731 return False,i
732 732
733 733 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
734 734 size = len(ang_)
735 735 size2 = 0
736 736 for i in range(len(list2_)):
737 737 size2=size2+round(abs(list2_[i]))-1
738 738 new_size= size+size2
739 739 ang_new = numpy.zeros(new_size)
740 740 ang_new2 = numpy.zeros(new_size)
741 741
742 742 tmp = 0
743 743 c = 0
744 744 for i in range(len(ang_)):
745 745 ang_new[tmp +c] = ang_[i]
746 746 ang_new2[tmp+c] = ang_[i]
747 747 condition , value = self.search_pos(i,list1_)
748 748 if condition:
749 749 pos = tmp + c + 1
750 750 for k in range(round(abs(list2_[value]))-1):
751 751 if tipo_case==0 or tipo_case==3:# ascending
752 752 ang_new[pos+k] = ang_new[pos+k-1]+1
753 753 ang_new2[pos+k] = numpy.nan
754 754 elif tipo_case==1 or tipo_case==2:# descending
755 755 ang_new[pos+k] = ang_new[pos+k-1]-1
756 756 ang_new2[pos+k] = numpy.nan
757 757
758 758 tmp = pos +k
759 759 c = 0
760 760 c=c+1
761 761 return ang_new,ang_new2
762 762
763 763 def globalCheckPED(self,angulos,tipo_case):
764 764 l1,l2 = self.get2List(angulos)
765 765 ##print("l1",l1)
766 766 ##print("l2",l2)
767 767 if len(l1)>0:
768 768 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
769 769 #l1,l2 = self.get2List(angulos2)
770 770 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
771 771 #ang1_ = self.fixData90HL(ang1_)
772 772 #ang2_ = self.fixData90HL(ang2_)
773 773 else:
774 774 ang1_= angulos
775 775 ang2_= angulos
776 776 return ang1_,ang2_
777 777
778 778
779 779 def replaceNAN(self,data_weather,data_ele,val):
780 780 data= data_ele
781 781 data_T= data_weather
782 782 if data.shape[0]> data_T.shape[0]:
783 783 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
784 784 c = 0
785 785 for i in range(len(data)):
786 786 if numpy.isnan(data[i]):
787 787 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
788 788 else:
789 789 data_N[i,:]=data_T[c,:]
790 790 c=c+1
791 791 return data_N
792 792 else:
793 793 for i in range(len(data)):
794 794 if numpy.isnan(data[i]):
795 795 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
796 796 return data_T
797 797
798 798 def check_case(self,data_ele,ang_max,ang_min):
799 799 start = data_ele[0]
800 800 end = data_ele[-1]
801 801 number = (end-start)
802 802 len_ang=len(data_ele)
803 803 print("start",start)
804 804 print("end",end)
805 805 print("number",number)
806 806
807 807 print("len_ang",len_ang)
808 808
809 809 #exit(1)
810 810
811 811 if start<end and (round(abs(number)+1)>=len_ang or (numpy.argmin(data_ele)==0)):# ascending case
812 812 return 0
813 813 #elif start>end and (round(abs(number)+1)>=len_ang or(numpy.argmax(data_ele)==0)):# descending case
814 814 # return 1
815 815 elif round(abs(number)+1)>=len_ang and (start>end or(numpy.argmax(data_ele)==0)):# descending case
816 816 return 1
817 817 elif round(abs(number)+1)<len_ang and data_ele[-2]>data_ele[-1]:# DESCENDING case, max angle changed
818 818 return 2
819 819 elif round(abs(number)+1)<len_ang and data_ele[-2]<data_ele[-1] :# ASCENDING case, min angle changed
820 820 return 3
821 821
822 822
823 823 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min):
824 824 ang_max= ang_max
825 825 ang_min= ang_min
826 826 data_weather=data_weather
827 827 val_ch=val_ch
828 828 ##print("*********************DATA WEATHER**************************************")
829 829 ##print(data_weather)
830 830 if self.ini==0:
831 831 '''
832 832 print("**********************************************")
833 833 print("**********************************************")
834 834 print("***************ini**************")
835 835 print("**********************************************")
836 836 print("**********************************************")
837 837 '''
838 838 #print("data_ele",data_ele)
839 839 #----------------------------------------------------------
840 840 tipo_case = self.check_case(data_ele,ang_max,ang_min)
841 841 print("check_case",tipo_case)
842 842 #exit(1)
843 843 #--------------------- new -------------------------
844 844 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
845 845
846 846 #-------------------------RHI CHANGES---------------------------------
847 847 start= ang_min
848 848 end = ang_max
849 849 n= (ang_max-ang_min)/res
850 850 #------ new
851 851 self.start_data_ele = data_ele_new[0]
852 852 self.end_data_ele = data_ele_new[-1]
853 853 if tipo_case==0 or tipo_case==3: # ASCENDING
854 854 n1= round(self.start_data_ele)- start
855 855 n2= end - round(self.end_data_ele)
856 856 print(self.start_data_ele)
857 857 print(self.end_data_ele)
858 858 if n1>0:
859 859 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
860 860 ele1_nan= numpy.ones(n1)*numpy.nan
861 861 data_ele = numpy.hstack((ele1,data_ele_new))
862 862 print("ele1_nan",ele1_nan.shape)
863 863 print("data_ele_old",data_ele_old.shape)
864 864 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
865 865 if n2>0:
866 866 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
867 867 ele2_nan= numpy.ones(n2)*numpy.nan
868 868 data_ele = numpy.hstack((data_ele,ele2))
869 869 print("ele2_nan",ele2_nan.shape)
870 870 print("data_ele_old",data_ele_old.shape)
871 871 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
872 872
873 873 if tipo_case==1 or tipo_case==2: # DESCENDING
874 874 data_ele_new = data_ele_new[::-1] # reversed
875 875 data_ele_old = data_ele_old[::-1]# reversed
876 876 data_weather = data_weather[::-1,:]# reversed
877 877 vec= numpy.where(data_ele_new<ang_max)
878 878 data_ele_new = data_ele_new[vec]
879 879 data_ele_old = data_ele_old[vec]
880 880 data_weather = data_weather[vec[0]]
881 881 vec2= numpy.where(0<data_ele_new)
882 882 data_ele_new = data_ele_new[vec2]
883 883 data_ele_old = data_ele_old[vec2]
884 884 data_weather = data_weather[vec2[0]]
885 885 self.start_data_ele = data_ele_new[0]
886 886 self.end_data_ele = data_ele_new[-1]
887 887
888 888 n1= round(self.start_data_ele)- start
889 889 n2= end - round(self.end_data_ele)-1
890 890 print(self.start_data_ele)
891 891 print(self.end_data_ele)
892 892 if n1>0:
893 893 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
894 894 ele1_nan= numpy.ones(n1)*numpy.nan
895 895 data_ele = numpy.hstack((ele1,data_ele_new))
896 896 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
897 897 if n2>0:
898 898 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
899 899 ele2_nan= numpy.ones(n2)*numpy.nan
900 900 data_ele = numpy.hstack((data_ele,ele2))
901 901 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
902 902 # RADAR
903 903 # NOTE: data_ele and data_weather are the variables that get returned
904 904 val_mean = numpy.mean(data_weather[:,-1])
905 905 self.val_mean = val_mean
906 906 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
907 907 self.data_ele_tmp[val_ch]= data_ele_old
908 908 else:
909 909 #print("**********************************************")
910 910 #print("****************VARIABLE**********************")
911 911 #-------------------------RHI CHANGES---------------------------------
912 912 #---------------------------------------------------------------------
913 913 ##print("INPUT data_ele",data_ele)
914 914 flag=0
915 915 start_ele = self.res_ele[0]
916 916 tipo_case = self.check_case(data_ele,ang_max,ang_min)
917 917 #print("TIPO DE DATA",tipo_case)
918 918 #-----------new------------
919 919 data_ele ,data_ele_old = self.globalCheckPED(data_ele,tipo_case)
920 920 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
921 921
922 922 #-------------------------------NEW ITERATIVE RHI-------------------------
923 923
924 924 if tipo_case==0 : # ASCENDING
925 925 vec = numpy.where(data_ele<ang_max)
926 926 data_ele = data_ele[vec]
927 927 data_ele_old = data_ele_old[vec]
928 928 data_weather = data_weather[vec[0]]
929 929
930 930 vec2 = numpy.where(0<data_ele)
931 931 data_ele= data_ele[vec2]
932 932 data_ele_old= data_ele_old[vec2]
933 933 ##print(data_ele_new)
934 934 data_weather= data_weather[vec2[0]]
935 935
936 936 new_i_ele = int(round(data_ele[0]))
937 937 new_f_ele = int(round(data_ele[-1]))
938 938 #print(new_i_ele)
939 939 #print(new_f_ele)
940 940 #print(data_ele,len(data_ele))
941 941 #print(data_ele_old,len(data_ele_old))
942 942 if new_i_ele< 2:
943 943 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
944 944 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
945 945 self.data_ele_tmp[val_ch][new_i_ele:new_i_ele+len(data_ele)]=data_ele_old
946 946 self.res_ele[new_i_ele:new_i_ele+len(data_ele)]= data_ele
947 947 self.res_weather[val_ch][new_i_ele:new_i_ele+len(data_ele),:]= data_weather
948 948 data_ele = self.res_ele
949 949 data_weather = self.res_weather[val_ch]
950 950
951 951 elif tipo_case==1 : # DESCENDING
952 952 data_ele = data_ele[::-1] # reversed
953 953 data_ele_old = data_ele_old[::-1]# reversed
954 954 data_weather = data_weather[::-1,:]# reversed
955 955 vec= numpy.where(data_ele<ang_max)
956 956 data_ele = data_ele[vec]
957 957 data_ele_old = data_ele_old[vec]
958 958 data_weather = data_weather[vec[0]]
959 959 vec2= numpy.where(0<data_ele)
960 960 data_ele = data_ele[vec2]
961 961 data_ele_old = data_ele_old[vec2]
962 962 data_weather = data_weather[vec2[0]]
963 963
964 964
965 965 new_i_ele = int(round(data_ele[0]))
966 966 new_f_ele = int(round(data_ele[-1]))
967 967 #print(data_ele)
968 968 #print(ang_max)
969 969 #print(data_ele_old)
970 970 if new_i_ele <= 1:
971 971 new_i_ele = 1
972 972 if round(data_ele[-1])>=ang_max-1:
973 973 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
974 974 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
975 975 self.data_ele_tmp[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1]=data_ele_old
976 976 self.res_ele[new_i_ele-1:new_i_ele+len(data_ele)-1]= data_ele
977 977 self.res_weather[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1,:]= data_weather
978 978 data_ele = self.res_ele
979 979 data_weather = self.res_weather[val_ch]
980 980
981 981 elif tipo_case==2: # descending
982 982 vec = numpy.where(data_ele<ang_max)
983 983 data_ele = data_ele[vec]
984 984 data_weather= data_weather[vec[0]]
985 985
986 986 len_vec = len(vec)
987 987 data_ele_new = data_ele[::-1] # reversa
988 988 data_weather = data_weather[::-1,:]
989 989 new_i_ele = int(data_ele_new[0])
990 990 new_f_ele = int(data_ele_new[-1])
991 991
992 992 n1= new_i_ele- ang_min
993 993 n2= ang_max - new_f_ele-1
994 994 if n1>0:
995 995 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
996 996 ele1_nan= numpy.ones(n1)*numpy.nan
997 997 data_ele = numpy.hstack((ele1,data_ele_new))
998 998 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
999 999 if n2>0:
1000 1000 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1001 1001 ele2_nan= numpy.ones(n2)*numpy.nan
1002 1002 data_ele = numpy.hstack((data_ele,ele2))
1003 1003 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1004 1004
1005 1005 self.data_ele_tmp[val_ch] = data_ele_old
1006 1006 self.res_ele = data_ele
1007 1007 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1008 1008 data_ele = self.res_ele
1009 1009 data_weather = self.res_weather[val_ch]
1010 1010
1011 1011 elif tipo_case==3:# ascending
1012 1012 vec = numpy.where(0<data_ele)
1013 1013 data_ele= data_ele[vec]
1014 1014 data_ele_new = data_ele
1015 1015 data_ele_old= data_ele_old[vec]
1016 1016 data_weather= data_weather[vec[0]]
1017 1017 pos_ini = numpy.argmin(data_ele)
1018 1018 if pos_ini>0:
1019 1019 len_vec= len(data_ele)
1020 1020 vec3 = numpy.linspace(pos_ini,len_vec-1,len_vec-pos_ini).astype(int)
1021 1021 #print(vec3)
1022 1022 data_ele= data_ele[vec3]
1023 1023 data_ele_new = data_ele
1024 1024 data_ele_old= data_ele_old[vec3]
1025 1025 data_weather= data_weather[vec3]
1026 1026
1027 1027 new_i_ele = int(data_ele_new[0])
1028 1028 new_f_ele = int(data_ele_new[-1])
1029 1029 n1= new_i_ele- ang_min
1030 1030 n2= ang_max - new_f_ele-1
1031 1031 if n1>0:
1032 1032 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1033 1033 ele1_nan= numpy.ones(n1)*numpy.nan
1034 1034 data_ele = numpy.hstack((ele1,data_ele_new))
1035 1035 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1036 1036 if n2>0:
1037 1037 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1038 1038 ele2_nan= numpy.ones(n2)*numpy.nan
1039 1039 data_ele = numpy.hstack((data_ele,ele2))
1040 1040 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1041 1041
1042 1042 self.data_ele_tmp[val_ch] = data_ele_old
1043 1043 self.res_ele = data_ele
1044 1044 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1045 1045 data_ele = self.res_ele
1046 1046 data_weather = self.res_weather[val_ch]
1047 1047 #print("self.data_ele_tmp",self.data_ele_tmp)
1048 1048 return data_weather,data_ele
1049 1049
1050 1050
1051 1051 def plot(self):
1052 1052 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
1053 1053 data = self.data[-1]
1054 1054 r = self.data.yrange
1055 1055 delta_height = r[1]-r[0]
1056 1056 r_mask = numpy.where(r>=0)[0]
1057 1057 ##print("delta_height",delta_height)
1058 1058 #print("r_mask",r_mask,len(r_mask))
1059 1059 r = numpy.arange(len(r_mask))*delta_height
1060 1060 self.y = 2*r
1061 1061 res = 1
1062 1062 ###print("data['weather'].shape[0]",data['weather'].shape[0])
1063 1063 ang_max = self.ang_max
1064 1064 ang_min = self.ang_min
1065 1065 var_ang =ang_max - ang_min
1066 1066 step = (int(var_ang)/(res*data['weather'].shape[0]))
1067 1067 ###print("step",step)
1068 1068 #--------------------------------------------------------
1069 1069 ##print('weather',data['weather'].shape)
1070 1070 ##print('ele',data['ele'].shape)
1071 1071
1072 1072 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1073 1073 ###self.res_azi = numpy.mean(data['azi'])
1074 1074 ###print("self.res_ele",self.res_ele)
1075 1075 plt.clf()
1076 1076 subplots = [121, 122]
1077 1077 cg={'angular_spacing': 20.}
1078 1078 if self.ini==0:
1079 1079 self.data_ele_tmp = numpy.ones([self.nplots,int(var_ang)])*numpy.nan
1080 1080 self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
1081 1081 print("SHAPE",self.data_ele_tmp.shape)
1082 1082
1083 1083 for i,ax in enumerate(self.axes):
1084 1084 self.res_weather[i], self.res_ele = self.const_ploteo(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1085 1085 self.res_azi = numpy.mean(data['azi'])
1086 1086 if i==0:
1087 1087 print("*****************************************************************************to plot**************************",self.res_weather[i].shape)
1088 1088 self.zmin = self.zmin if self.zmin else 20
1089 1089 self.zmax = self.zmax if self.zmax else 80
1090 1090 if ax.firsttime:
1091 1091 #plt.clf()
1092 1092 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj=cg,vmin=self.zmin, vmax=self.zmax)
1093 1093 #fig=self.figures[0]
1094 1094 else:
1095 1095 #plt.clf()
1096 1096 if i==0:
1097 1097 print(self.res_weather[i])
1098 1098 print(self.res_ele)
1099 1099 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj=cg,vmin=self.zmin, vmax=self.zmax)
1100 1100 caax = cgax.parasites[0]
1101 1101 paax = cgax.parasites[1]
1102 1102 cbar = plt.gcf().colorbar(pm, pad=0.075)
1103 1103 caax.set_xlabel('x_range [km]')
1104 1104 caax.set_ylabel('y_range [km]')
1105 1105 plt.text(1.0, 1.05, 'Elevation '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
1106 1106 print("***************************self.ini****************************",self.ini)
1107 1107 self.ini= self.ini+1
1108 1108
1109 1109 class Weather_vRF_Plot(Plot):
1110 1110 CODE = 'PPI'
1111 1111 plot_name = 'PPI'
1112 1112 #plot_type = 'ppistyle'
1113 1113 buffering = False
1114 1114
1115 1115 def setup(self):
1116 1116
1117 1117 self.ncols = 1
1118 1118 self.nrows = 1
1119 1119 self.width =8
1120 1120 self.height =8
1121 1121 self.nplots= 1
1122 1122 self.ylabel= 'Range [Km]'
1123 1123 self.titles= ['PPI']
1124 1124 self.polar = True
1125 1125 if self.channels is not None:
1126 1126 self.nplots = len(self.channels)
1127 1127 self.nrows = len(self.channels)
1128 1128 else:
1129 1129 self.nplots = self.data.shape(self.CODE)[0]
1130 1130 self.nrows = self.nplots
1131 1131 self.channels = list(range(self.nplots))
1132 1132
1133 1133 if self.CODE == 'POWER':
1134 1134 self.cb_label = r'Power (dB)'
1135 1135 elif self.CODE == 'DOPPLER':
1136 1136 self.cb_label = r'Velocity (m/s)'
1137 1137 self.colorbar=True
1138 1138 self.width =8
1139 1139 self.height =8
1140 1140 self.ini =0
1141 1141 self.len_azi =0
1142 1142 self.buffer_ini = None
1143 1143 self.buffer_ele = None
1144 1144 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
1145 1145 self.flag =0
1146 1146 self.indicador= 0
1147 1147 self.last_data_ele = None
1148 1148 self.val_mean = None
1149 1149
1150 1150 def update(self, dataOut):
1151 1151
1152 1152 data = {}
1153 1153 meta = {}
1154 1154 if hasattr(dataOut, 'dataPP_POWER'):
1155 1155 factor = 1
1156 1156 if hasattr(dataOut, 'nFFTPoints'):
1157 1157 factor = dataOut.normFactor
1158 1158
1159 1159 if 'pow' in self.attr_data[0].lower():
1160 1160 data['data'] = 10*numpy.log10(getattr(dataOut, self.attr_data[0])/(factor))
1161 1161 else:
1162 1162 data['data'] = getattr(dataOut, self.attr_data[0])/(factor)
1163 1163
1164 1164 data['azi'] = dataOut.data_azi
1165 1165 data['ele'] = dataOut.data_ele
1166 1166
1167 1167 return data, meta
1168 1168
1169 1169 def plot(self):
1170 1170 data = self.data[-1]
1171 1171 r = self.data.yrange
1172 1172 delta_height = r[1]-r[0]
1173 1173 r_mask = numpy.where(r>=0)[0]
1174 1174 self.r_mask = r_mask
1175 1175 r = numpy.arange(len(r_mask))*delta_height
1176 1176 self.y = 2*r
1177 1177 res = 1
1178 1178
1179 var_ang =ang_max - ang_min
1180 step = (int(var_ang)/(res*data['data'].shape[0]))
1179 #var_ang = ang_max - ang_min
1180 #step = (int(var_ang)/(res*data['data'].shape[0]))
1181 1181
1182 1182 z = data['data'][self.channels[0]][:,r_mask]
1183 1183
1184 1184 self.titles = []
1185 1185
1186 1186 self.ymax = self.ymax if self.ymax else numpy.nanmax(r)
1187 1187 self.ymin = self.ymin if self.ymin else numpy.nanmin(r)
1188 1188 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
1189 1189 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
1190 1190 self.ang_min = self.ang_min if self.ang_min else 0
1191 self.ang_max = self.ang_max if self.ang_max else 2*numpy.pi
1191 self.ang_max = self.ang_max if self.ang_max else 360
1192 1192
1193 1193 subplots = [121, 122]
1194 1194
1195 1195 r, theta = numpy.meshgrid(r, numpy.radians(data['azi']) )
1196 1196
1197 1197 for i,ax in enumerate(self.axes):
1198 1198
1199 1199 if ax.firsttime:
1200 1200 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1201 1201 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1202 1202
1203 1203 else:
1204 1204 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1205 1205 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1206 1206
1207 1207 if len(self.channels) !=1:
1208 1208 self.titles = ['{} Ele: {} Channel {}'.format(self.CODE.upper(), str(round(numpy.mean(data['ele']),2)), x) for x in range(self.nrows)]
1209 1209 else:
1210 1210 self.titles = ['{} Ele: {} Channel {}'.format(self.CODE.upper(), str(round(numpy.mean(data['ele']),2)), self.channels[0])]
1211 1211
1212 1212 class WeatherRHI_vRF2_Plot(Plot):
1213 1213 CODE = 'weather'
1214 1214 plot_name = 'weather'
1215 1215 plot_type = 'rhistyle'
1216 1216 buffering = False
1217 1217 data_ele_tmp = None
1218 1218
1219 1219 def setup(self):
1220 1220 print("********************")
1221 1221 print("********************")
1222 1222 print("********************")
1223 1223 print("SETUP WEATHER PLOT")
1224 1224 self.ncols = 1
1225 1225 self.nrows = 1
1226 1226 self.nplots= 1
1227 1227 self.ylabel= 'Range [Km]'
1228 1228 self.titles= ['Weather']
1229 1229 if self.channels is not None:
1230 1230 self.nplots = len(self.channels)
1231 1231 self.nrows = len(self.channels)
1232 1232 else:
1233 1233 self.nplots = self.data.shape(self.CODE)[0]
1234 1234 self.nrows = self.nplots
1235 1235 self.channels = list(range(self.nplots))
1236 1236 print("channels",self.channels)
1237 1237 print("que saldra", self.data.shape(self.CODE)[0])
1238 1238 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
1239 1239 print("self.titles",self.titles)
1240 1240 self.colorbar=False
1241 1241 self.width =8
1242 1242 self.height =8
1243 1243 self.ini =0
1244 1244 self.len_azi =0
1245 1245 self.buffer_ini = None
1246 1246 self.buffer_ele = None
1247 1247 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
1248 1248 self.flag =0
1249 1249 self.indicador= 0
1250 1250 self.last_data_ele = None
1251 1251 self.val_mean = None
1252 1252
1253 1253 def update(self, dataOut):
1254 1254
1255 1255 data = {}
1256 1256 meta = {}
1257 1257 if hasattr(dataOut, 'dataPP_POWER'):
1258 1258 factor = 1
1259 1259 if hasattr(dataOut, 'nFFTPoints'):
1260 1260 factor = dataOut.normFactor
1261 1261 print("dataOut",dataOut.data_360.shape)
1262 1262 #
1263 1263 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
1264 1264 #
1265 1265 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
1266 1266 data['azi'] = dataOut.data_azi
1267 1267 data['ele'] = dataOut.data_ele
1268 1268 data['case_flag'] = dataOut.case_flag
1269 1269 #print("UPDATE")
1270 1270 #print("data[weather]",data['weather'].shape)
1271 1271 #print("data[azi]",data['azi'])
1272 1272 return data, meta
1273 1273
1274 1274 def get2List(self,angulos):
1275 1275 list1=[]
1276 1276 list2=[]
1277 1277 for i in reversed(range(len(angulos))):
1278 1278 if not i==0:# the i==0 case would compare the first element of the list with the last one, which is not relevant
1279 1279 diff_ = angulos[i]-angulos[i-1]
1280 1280 if abs(diff_) >1.5:
1281 1281 list1.append(i-1)
1282 1282 list2.append(diff_)
1283 1283 return list(reversed(list1)),list(reversed(list2))
1284 1284
1285 1285 def fixData90(self,list_,ang_):
1286 1286 if list_[0]==-1:
1287 1287 vec = numpy.where(ang_<ang_[0])
1288 1288 ang_[vec] = ang_[vec]+90
1289 1289 return ang_
1290 1290 return ang_
1291 1291
1292 1292 def fixData90HL(self,angulos):
1293 1293 vec = numpy.where(angulos>=90)
1294 1294 angulos[vec]=angulos[vec]-90
1295 1295 return angulos
1296 1296
1297 1297
1298 1298 def search_pos(self,pos,list_):
1299 1299 for i in range(len(list_)):
1300 1300 if pos == list_[i]:
1301 1301 return True,i
1302 1302 i=None
1303 1303 return False,i
1304 1304
1305 1305 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
1306 1306 size = len(ang_)
1307 1307 size2 = 0
1308 1308 for i in range(len(list2_)):
1309 1309 size2=size2+round(abs(list2_[i]))-1
1310 1310 new_size= size+size2
1311 1311 ang_new = numpy.zeros(new_size)
1312 1312 ang_new2 = numpy.zeros(new_size)
1313 1313
1314 1314 tmp = 0
1315 1315 c = 0
1316 1316 for i in range(len(ang_)):
1317 1317 ang_new[tmp +c] = ang_[i]
1318 1318 ang_new2[tmp+c] = ang_[i]
1319 1319 condition , value = self.search_pos(i,list1_)
1320 1320 if condition:
1321 1321 pos = tmp + c + 1
1322 1322 for k in range(round(abs(list2_[value]))-1):
1323 1323 if tipo_case==0 or tipo_case==3:# ascending
1324 1324 ang_new[pos+k] = ang_new[pos+k-1]+1
1325 1325 ang_new2[pos+k] = numpy.nan
1326 1326 elif tipo_case==1 or tipo_case==2:# descending
1327 1327 ang_new[pos+k] = ang_new[pos+k-1]-1
1328 1328 ang_new2[pos+k] = numpy.nan
1329 1329
1330 1330 tmp = pos +k
1331 1331 c = 0
1332 1332 c=c+1
1333 1333 return ang_new,ang_new2
1334 1334
1335 1335 def globalCheckPED(self,angulos,tipo_case):
1336 1336 l1,l2 = self.get2List(angulos)
1337 1337 ##print("l1",l1)
1338 1338 ##print("l2",l2)
1339 1339 if len(l1)>0:
1340 1340 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
1341 1341 #l1,l2 = self.get2List(angulos2)
1342 1342 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
1343 1343 #ang1_ = self.fixData90HL(ang1_)
1344 1344 #ang2_ = self.fixData90HL(ang2_)
1345 1345 else:
1346 1346 ang1_= angulos
1347 1347 ang2_= angulos
1348 1348 return ang1_,ang2_
1349 1349
1350 1350
1351 1351 def replaceNAN(self,data_weather,data_ele,val):
1352 1352 data= data_ele
1353 1353 data_T= data_weather
1354 1354 if data.shape[0]> data_T.shape[0]:
1355 1355 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
1356 1356 c = 0
1357 1357 for i in range(len(data)):
1358 1358 if numpy.isnan(data[i]):
1359 1359 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1360 1360 else:
1361 1361 data_N[i,:]=data_T[c,:]
1362 1362 c=c+1
1363 1363 return data_N
1364 1364 else:
1365 1365 for i in range(len(data)):
1366 1366 if numpy.isnan(data[i]):
1367 1367 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1368 1368 return data_T
1369 1369
1370 1370 def check_case(self,data_ele,ang_max,ang_min):
1371 1371 start = data_ele[0]
1372 1372 end = data_ele[-1]
1373 1373 number = (end-start)
1374 1374 len_ang=len(data_ele)
1375 1375 print("start",start)
1376 1376 print("end",end)
1377 1377 print("number",number)
1378 1378
1379 1379 print("len_ang",len_ang)
1380 1380
1381 1381 #exit(1)
1382 1382
1383 1383 if start<end and (round(abs(number)+1)>=len_ang or (numpy.argmin(data_ele)==0)):# ascending case
1384 1384 return 0
1385 1385 #elif start>end and (round(abs(number)+1)>=len_ang or(numpy.argmax(data_ele)==0)):# descending case
1386 1386 # return 1
1387 1387 elif round(abs(number)+1)>=len_ang and (start>end or(numpy.argmax(data_ele)==0)):# descending case
1388 1388 return 1
1389 1389 elif round(abs(number)+1)<len_ang and data_ele[-2]>data_ele[-1]:# DESCENDING case, max angle changed
1390 1390 return 2
1391 1391 elif round(abs(number)+1)<len_ang and data_ele[-2]<data_ele[-1] :# ASCENDING case, min angle changed
1392 1392 return 3
1393 1393
1394 1394
1395 1395 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min,case_flag):
1396 1396 ang_max= ang_max
1397 1397 ang_min= ang_min
1398 1398 data_weather=data_weather
1399 1399 val_ch=val_ch
1400 1400 ##print("*********************DATA WEATHER**************************************")
1401 1401 ##print(data_weather)
1402 1402 if self.ini==0:
1403 1403 '''
1404 1404 print("**********************************************")
1405 1405 print("**********************************************")
1406 1406 print("***************ini**************")
1407 1407 print("**********************************************")
1408 1408 print("**********************************************")
1409 1409 '''
1410 1410 #print("data_ele",data_ele)
1411 1411 #----------------------------------------------------------
1412 1412 tipo_case = case_flag[-1]
1413 1413 #tipo_case = self.check_case(data_ele,ang_max,ang_min)
1414 1414 print("check_case",tipo_case)
1415 1415 #exit(1)
1416 1416 #--------------------- new -------------------------
1417 1417 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
1418 1418
1419 1419 #-------------------------RHI CHANGES---------------------------------
1420 1420 start= ang_min
1421 1421 end = ang_max
1422 1422 n= (ang_max-ang_min)/res
1423 1423 #------ new
1424 1424 self.start_data_ele = data_ele_new[0]
1425 1425 self.end_data_ele = data_ele_new[-1]
1426 1426 if tipo_case==0 or tipo_case==3: # ASCENDING
1427 1427 n1= round(self.start_data_ele)- start
1428 1428 n2= end - round(self.end_data_ele)
1429 1429 print(self.start_data_ele)
1430 1430 print(self.end_data_ele)
1431 1431 if n1>0:
1432 1432 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
1433 1433 ele1_nan= numpy.ones(n1)*numpy.nan
1434 1434 data_ele = numpy.hstack((ele1,data_ele_new))
1435 1435 print("ele1_nan",ele1_nan.shape)
1436 1436 print("data_ele_old",data_ele_old.shape)
1437 1437 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
1438 1438 if n2>0:
1439 1439 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
1440 1440 ele2_nan= numpy.ones(n2)*numpy.nan
1441 1441 data_ele = numpy.hstack((data_ele,ele2))
1442 1442 print("ele2_nan",ele2_nan.shape)
1443 1443 print("data_ele_old",data_ele_old.shape)
1444 1444 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1445 1445
1446 1446 if tipo_case==1 or tipo_case==2: # DESCENDING
1447 1447 data_ele_new = data_ele_new[::-1] # reversed
1448 1448 data_ele_old = data_ele_old[::-1]# reversed
1449 1449 data_weather = data_weather[::-1,:]# reversed
1450 1450 vec= numpy.where(data_ele_new<ang_max)
1451 1451 data_ele_new = data_ele_new[vec]
1452 1452 data_ele_old = data_ele_old[vec]
1453 1453 data_weather = data_weather[vec[0]]
1454 1454 vec2= numpy.where(0<data_ele_new)
1455 1455 data_ele_new = data_ele_new[vec2]
1456 1456 data_ele_old = data_ele_old[vec2]
1457 1457 data_weather = data_weather[vec2[0]]
1458 1458 self.start_data_ele = data_ele_new[0]
1459 1459 self.end_data_ele = data_ele_new[-1]
1460 1460
1461 1461 n1= round(self.start_data_ele)- start
1462 1462 n2= end - round(self.end_data_ele)-1
1463 1463 print(self.start_data_ele)
1464 1464 print(self.end_data_ele)
1465 1465 if n1>0:
1466 1466 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
1467 1467 ele1_nan= numpy.ones(n1)*numpy.nan
1468 1468 data_ele = numpy.hstack((ele1,data_ele_new))
1469 1469 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
1470 1470 if n2>0:
1471 1471 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
1472 1472 ele2_nan= numpy.ones(n2)*numpy.nan
1473 1473 data_ele = numpy.hstack((data_ele,ele2))
1474 1474 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1475 1475 # RADAR
1476 1476 # NOTE: data_ele and data_weather are the variables that get returned
1477 1477 val_mean = numpy.mean(data_weather[:,-1])
1478 1478 self.val_mean = val_mean
1479 1479 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1480 1480 print("eleold",data_ele_old)
1481 1481 print(self.data_ele_tmp[val_ch])
1482 1482 print(data_ele_old.shape[0])
1483 1483 print(self.data_ele_tmp[val_ch].shape[0])
1484 1484 if (data_ele_old.shape[0]==91 or self.data_ele_tmp[val_ch].shape[0]==91):
1485 1485 import sys
1486 1486 print("EXIT",self.ini)
1487 1487
1488 1488 sys.exit(1)
1489 1489 self.data_ele_tmp[val_ch]= data_ele_old
1490 1490 else:
1491 1491 #print("**********************************************")
1492 1492 #print("****************VARIABLE**********************")
1493 1493 #-------------------------RHI CHANGES---------------------------------
1494 1494 #---------------------------------------------------------------------
1495 1495 ##print("INPUT data_ele",data_ele)
1496 1496 flag=0
1497 1497 start_ele = self.res_ele[0]
1498 1498 #tipo_case = self.check_case(data_ele,ang_max,ang_min)
1499 1499 tipo_case = case_flag[-1]
1500 1500 #print("TIPO DE DATA",tipo_case)
1501 1501 #-----------new------------
1502 1502 data_ele ,data_ele_old = self.globalCheckPED(data_ele,tipo_case)
1503 1503 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1504 1504
1505 1505 #-------------------------------NEW ITERATIVE RHI-------------------------
1506 1506
1507 1507 if tipo_case==0 : # ASCENDING
1508 1508 vec = numpy.where(data_ele<ang_max)
1509 1509 data_ele = data_ele[vec]
1510 1510 data_ele_old = data_ele_old[vec]
1511 1511 data_weather = data_weather[vec[0]]
1512 1512
1513 1513 vec2 = numpy.where(0<data_ele)
1514 1514 data_ele= data_ele[vec2]
1515 1515 data_ele_old= data_ele_old[vec2]
1516 1516 ##print(data_ele_new)
1517 1517 data_weather= data_weather[vec2[0]]
1518 1518
1519 1519 new_i_ele = int(round(data_ele[0]))
1520 1520 new_f_ele = int(round(data_ele[-1]))
1521 1521 #print(new_i_ele)
1522 1522 #print(new_f_ele)
1523 1523 #print(data_ele,len(data_ele))
1524 1524 #print(data_ele_old,len(data_ele_old))
1525 1525 if new_i_ele< 2:
1526 1526 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
1527 1527 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
1528 1528 self.data_ele_tmp[val_ch][new_i_ele:new_i_ele+len(data_ele)]=data_ele_old
1529 1529 self.res_ele[new_i_ele:new_i_ele+len(data_ele)]= data_ele
1530 1530 self.res_weather[val_ch][new_i_ele:new_i_ele+len(data_ele),:]= data_weather
1531 1531 data_ele = self.res_ele
1532 1532 data_weather = self.res_weather[val_ch]
1533 1533
1534 1534 elif tipo_case==1 : # DESCENDING
1535 1535 data_ele = data_ele[::-1] # reversed
1536 1536 data_ele_old = data_ele_old[::-1]# reversed
1537 1537 data_weather = data_weather[::-1,:]# reversed
1538 1538 vec= numpy.where(data_ele<ang_max)
1539 1539 data_ele = data_ele[vec]
1540 1540 data_ele_old = data_ele_old[vec]
1541 1541 data_weather = data_weather[vec[0]]
1542 1542 vec2= numpy.where(0<data_ele)
1543 1543 data_ele = data_ele[vec2]
1544 1544 data_ele_old = data_ele_old[vec2]
1545 1545 data_weather = data_weather[vec2[0]]
1546 1546
1547 1547
1548 1548 new_i_ele = int(round(data_ele[0]))
1549 1549 new_f_ele = int(round(data_ele[-1]))
1550 1550 #print(data_ele)
1551 1551 #print(ang_max)
1552 1552 #print(data_ele_old)
1553 1553 if new_i_ele <= 1:
1554 1554 new_i_ele = 1
1555 1555 if round(data_ele[-1])>=ang_max-1:
1556 1556 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
1557 1557 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
1558 1558 self.data_ele_tmp[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1]=data_ele_old
1559 1559 self.res_ele[new_i_ele-1:new_i_ele+len(data_ele)-1]= data_ele
1560 1560 self.res_weather[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1,:]= data_weather
1561 1561 data_ele = self.res_ele
1562 1562 data_weather = self.res_weather[val_ch]
1563 1563
1564 1564 elif tipo_case==2: #descent
1565 1565 vec = numpy.where(data_ele<ang_max)
1566 1566 data_ele = data_ele[vec]
1567 1567 data_weather= data_weather[vec[0]]
1568 1568
1569 1569 len_vec = len(vec)
1570 1570 data_ele_new = data_ele[::-1] # reverse
1571 1571 data_weather = data_weather[::-1,:]
1572 1572 new_i_ele = int(data_ele_new[0])
1573 1573 new_f_ele = int(data_ele_new[-1])
1574 1574
1575 1575 n1= new_i_ele- ang_min
1576 1576 n2= ang_max - new_f_ele-1
1577 1577 if n1>0:
1578 1578 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1579 1579 ele1_nan= numpy.ones(n1)*numpy.nan
1580 1580 data_ele = numpy.hstack((ele1,data_ele_new))
1581 1581 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1582 1582 if n2>0:
1583 1583 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1584 1584 ele2_nan= numpy.ones(n2)*numpy.nan
1585 1585 data_ele = numpy.hstack((data_ele,ele2))
1586 1586 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1587 1587
1588 1588 self.data_ele_tmp[val_ch] = data_ele_old
1589 1589 self.res_ele = data_ele
1590 1590 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1591 1591 data_ele = self.res_ele
1592 1592 data_weather = self.res_weather[val_ch]
1593 1593
1594 1594 elif tipo_case==3:#ascent
1595 1595 vec = numpy.where(0<data_ele)
1596 1596 data_ele= data_ele[vec]
1597 1597 data_ele_new = data_ele
1598 1598 data_ele_old= data_ele_old[vec]
1599 1599 data_weather= data_weather[vec[0]]
1600 1600 pos_ini = numpy.argmin(data_ele)
1601 1601 if pos_ini>0:
1602 1602 len_vec= len(data_ele)
1603 1603 vec3 = numpy.linspace(pos_ini,len_vec-1,len_vec-pos_ini).astype(int)
1604 1604 #print(vec3)
1605 1605 data_ele= data_ele[vec3]
1606 1606 data_ele_new = data_ele
1607 1607 data_ele_old= data_ele_old[vec3]
1608 1608 data_weather= data_weather[vec3]
1609 1609
1610 1610 new_i_ele = int(data_ele_new[0])
1611 1611 new_f_ele = int(data_ele_new[-1])
1612 1612 n1= new_i_ele- ang_min
1613 1613 n2= ang_max - new_f_ele-1
1614 1614 if n1>0:
1615 1615 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1616 1616 ele1_nan= numpy.ones(n1)*numpy.nan
1617 1617 data_ele = numpy.hstack((ele1,data_ele_new))
1618 1618 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1619 1619 if n2>0:
1620 1620 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1621 1621 ele2_nan= numpy.ones(n2)*numpy.nan
1622 1622 data_ele = numpy.hstack((data_ele,ele2))
1623 1623 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1624 1624
1625 1625 self.data_ele_tmp[val_ch] = data_ele_old
1626 1626 self.res_ele = data_ele
1627 1627 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1628 1628 data_ele = self.res_ele
1629 1629 data_weather = self.res_weather[val_ch]
1630 1630 #print("self.data_ele_tmp",self.data_ele_tmp)
1631 1631 return data_weather,data_ele
1632 1632
1633 1633
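# plot() below resets the per-channel accumulation buffers (self.data_ele_tmp,
# self.res_weather) when self.ini == 0 or when the new sweep reaches a higher
# elevation than the previous one, then calls const_ploteo for each channel and
# draws the RHI with wradlib's plot_rhi.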
1634 1634 def plot(self):
1635 1635 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
1636 1636 data = self.data[-1]
1637 1637 r = self.data.yrange
1638 1638 delta_height = r[1]-r[0]
1639 1639 r_mask = numpy.where(r>=0)[0]
1640 1640 ##print("delta_height",delta_height)
1641 1641 #print("r_mask",r_mask,len(r_mask))
1642 1642 r = numpy.arange(len(r_mask))*delta_height
1643 1643 self.y = 2*r
1644 1644 res = 1
1645 1645 ###print("data['weather'].shape[0]",data['weather'].shape[0])
1646 1646 ang_max = self.ang_max
1647 1647 ang_min = self.ang_min
1648 1648 var_ang =ang_max - ang_min
1649 1649 step = (int(var_ang)/(res*data['weather'].shape[0]))
1650 1650 ###print("step",step)
1651 1651 #--------------------------------------------------------
1652 1652 ##print('weather',data['weather'].shape)
1653 1653 ##print('ele',data['ele'].shape)
1654 1654
1655 1655 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1656 1656 ###self.res_azi = numpy.mean(data['azi'])
1657 1657 ###print("self.res_ele",self.res_ele)
1658 1658 plt.clf()
1659 1659 subplots = [121, 122]
1660 1660 try:
1661 1661 if self.data[-2]['ele'].max()<data['ele'].max():
1662 1662 self.ini=0
1663 1663 except:
1664 1664 pass
1665 1665 if self.ini==0:
1666 1666 self.data_ele_tmp = numpy.ones([self.nplots,int(var_ang)])*numpy.nan
1667 1667 self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
1668 1668 print("SHAPE",self.data_ele_tmp.shape)
1669 1669
1670 1670 for i,ax in enumerate(self.axes):
1671 1671 self.res_weather[i], self.res_ele = self.const_ploteo(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min,case_flag=self.data['case_flag'])
1672 1672 self.res_azi = numpy.mean(data['azi'])
1673 1673
1674 1674 if ax.firsttime:
1675 1675 #plt.clf()
1676 1676 print("Frist Plot")
1677 1677 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1678 1678 #fig=self.figures[0]
1679 1679 else:
1680 1680 #plt.clf()
1681 1681 print("ELSE PLOT")
1682 1682 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1683 1683 caax = cgax.parasites[0]
1684 1684 paax = cgax.parasites[1]
1685 1685 cbar = plt.gcf().colorbar(pm, pad=0.075)
1686 1686 caax.set_xlabel('x_range [km]')
1687 1687 caax.set_ylabel('y_range [km]')
1688 1688 plt.text(1.0, 1.05, 'Elevation '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
1689 1689 print("***************************self.ini****************************",self.ini)
1690 1690 self.ini= self.ini+1
1691 1691
1692 1692 class WeatherRHI_vRF_Plot(Plot):
1693 1693 CODE = 'weather'
1694 1694 plot_name = 'weather'
1695 1695 plot_type = 'rhistyle'
1696 1696 buffering = False
1697 1697 data_ele_tmp = None
1698 1698
1699 1699 def setup(self):
1700 1700 print("********************")
1701 1701 print("********************")
1702 1702 print("********************")
1703 1703 print("SETUP WEATHER PLOT")
1704 1704 self.ncols = 1
1705 1705 self.nrows = 1
1706 1706 self.nplots= 1
1707 1707 self.ylabel= 'Range [Km]'
1708 1708 self.titles= ['Weather']
1709 1709 if self.channels is not None:
1710 1710 self.nplots = len(self.channels)
1711 1711 self.nrows = len(self.channels)
1712 1712 else:
1713 1713 self.nplots = self.data.shape(self.CODE)[0]
1714 1714 self.nrows = self.nplots
1715 1715 self.channels = list(range(self.nplots))
1716 1716 print("channels",self.channels)
1717 1717 print("que saldra", self.data.shape(self.CODE)[0])
1718 1718 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
1719 1719 print("self.titles",self.titles)
1720 1720 self.colorbar=False
1721 1721 self.width =8
1722 1722 self.height =8
1723 1723 self.ini =0
1724 1724 self.len_azi =0
1725 1725 self.buffer_ini = None
1726 1726 self.buffer_ele = None
1727 1727 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
1728 1728 self.flag =0
1729 1729 self.indicador= 0
1730 1730 self.last_data_ele = None
1731 1731 self.val_mean = None
1732 1732
1733 1733 def update(self, dataOut):
1734 1734
1735 1735 data = {}
1736 1736 meta = {}
1737 1737 if hasattr(dataOut, 'dataPP_POWER'):
1738 1738 factor = 1
1739 1739 if hasattr(dataOut, 'nFFTPoints'):
1740 1740 factor = dataOut.normFactor
1741 1741 print("dataOut",dataOut.data_360.shape)
1742 1742 #
1743 1743 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
1744 1744 #
1745 1745 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
1746 1746 data['azi'] = dataOut.data_azi
1747 1747 data['ele'] = dataOut.data_ele
1748 1748 data['case_flag'] = dataOut.case_flag
1749 1749 #print("UPDATE")
1750 1750 #print("data[weather]",data['weather'].shape)
1751 1751 #print("data[azi]",data['azi'])
1752 1752 return data, meta
1753 1753
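# get2List scans the elevation sequence for jumps larger than 1.5 degrees between
# consecutive samples; it returns the indices just before each jump and the signed
# jump sizes, which fixDataComp uses to insert the missing 1-degree steps.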
1754 1754 def get2List(self,angulos):
1755 1755 list1=[]
1756 1756 list2=[]
1757 1757 #print(angulos)
1758 1758 #exit(1)
1759 1759 for i in reversed(range(len(angulos))):
1760 1760 if not i==0:# the i=0 case would compare the first element of the list with the last one and is not relevant
1761 1761 diff_ = angulos[i]-angulos[i-1]
1762 1762 if abs(diff_) >1.5:
1763 1763 list1.append(i-1)
1764 1764 list2.append(diff_)
1765 1765 return list(reversed(list1)),list(reversed(list2))
1766 1766
1767 1767 def fixData90(self,list_,ang_):
1768 1768 if list_[0]==-1:
1769 1769 vec = numpy.where(ang_<ang_[0])
1770 1770 ang_[vec] = ang_[vec]+90
1771 1771 return ang_
1772 1772 return ang_
1773 1773
1774 1774 def fixData90HL(self,angulos):
1775 1775 vec = numpy.where(angulos>=90)
1776 1776 angulos[vec]=angulos[vec]-90
1777 1777 return angulos
1778 1778
1779 1779
1780 1780 def search_pos(self,pos,list_):
1781 1781 for i in range(len(list_)):
1782 1782 if pos == list_[i]:
1783 1783 return True,i
1784 1784 i=None
1785 1785 return False,i
1786 1786
1787 1787 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
1788 1788 size = len(ang_)
1789 1789 size2 = 0
1790 1790 for i in range(len(list2_)):
1791 1791 size2=size2+round(abs(list2_[i]))-1
1792 1792 new_size= size+size2
1793 1793 ang_new = numpy.zeros(new_size)
1794 1794 ang_new2 = numpy.zeros(new_size)
1795 1795
1796 1796 tmp = 0
1797 1797 c = 0
1798 1798 for i in range(len(ang_)):
1799 1799 ang_new[tmp +c] = ang_[i]
1800 1800 ang_new2[tmp+c] = ang_[i]
1801 1801 condition , value = self.search_pos(i,list1_)
1802 1802 if condition:
1803 1803 pos = tmp + c + 1
1804 1804 for k in range(round(abs(list2_[value]))-1):
1805 1805 if tipo_case==0 or tipo_case==3:#ascent
1806 1806 ang_new[pos+k] = ang_new[pos+k-1]+1
1807 1807 ang_new2[pos+k] = numpy.nan
1808 1808 elif tipo_case==1 or tipo_case==2:#descent
1809 1809 ang_new[pos+k] = ang_new[pos+k-1]-1
1810 1810 ang_new2[pos+k] = numpy.nan
1811 1811
1812 1812 tmp = pos +k
1813 1813 c = 0
1814 1814 c=c+1
1815 1815 return ang_new,ang_new2
1816 1816
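# globalCheckPED returns two aligned angle arrays: the gap-filled elevations and a
# copy in which the synthetic (inserted) angles are NaN, so callers can blank the
# corresponding rays in the weather data.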
1817 1817 def globalCheckPED(self,angulos,tipo_case):
1818 1818 l1,l2 = self.get2List(angulos)
1819 1819 print("l1",l1)
1820 1820 print("l2",l2)
1821 1821 if len(l1)>0:
1822 1822 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
1823 1823 #l1,l2 = self.get2List(angulos2)
1824 1824 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
1825 1825 #ang1_ = self.fixData90HL(ang1_)
1826 1826 #ang2_ = self.fixData90HL(ang2_)
1827 1827 else:
1828 1828 ang1_= angulos
1829 1829 ang2_= angulos
1830 1830 return ang1_,ang2_
1831 1831
1832 1832
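# replaceNAN aligns the weather matrix with the (possibly longer) elevation vector:
# rows whose elevation is NaN are replaced by NaN profiles so that gaps render as
# blanks; note that the 'val' argument is not used by this implementation.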
1833 1833 def replaceNAN(self,data_weather,data_ele,val):
1834 1834 data= data_ele
1835 1835 data_T= data_weather
1836 1836 #print(data.shape[0])
1837 1837 #print(data_T.shape[0])
1838 1838 #exit(1)
1839 1839 if data.shape[0]> data_T.shape[0]:
1840 1840 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
1841 1841 c = 0
1842 1842 for i in range(len(data)):
1843 1843 if numpy.isnan(data[i]):
1844 1844 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1845 1845 else:
1846 1846 data_N[i,:]=data_T[c,:]
1847 1847 c=c+1
1848 1848 return data_N
1849 1849 else:
1850 1850 for i in range(len(data)):
1851 1851 if numpy.isnan(data[i]):
1852 1852 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1853 1853 return data_T
1854 1854
1855 1855
1856 1856 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min,case_flag):
1857 1857 ang_max= ang_max
1858 1858 ang_min= ang_min
1859 1859 data_weather=data_weather
1860 1860 val_ch=val_ch
1861 1861 ##print("*********************DATA WEATHER**************************************")
1862 1862 ##print(data_weather)
1863 1863
1864 1864 '''
1865 1865 print("**********************************************")
1866 1866 print("**********************************************")
1867 1867 print("***************ini**************")
1868 1868 print("**********************************************")
1869 1869 print("**********************************************")
1870 1870 '''
1871 1871 #print("data_ele",data_ele)
1872 1872 #----------------------------------------------------------
1873 1873
1874 1874 #exit(1)
1875 1875 tipo_case = case_flag[-1]
1876 1876 print("tipo_case",tipo_case)
1877 1877 #--------------------- new -------------------------
1878 1878 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
1879 1879
1880 1880 #-------------------------RHI CHANGES---------------------------------
1881 1881
1882 1882 vec = numpy.where(data_ele<ang_max)
1883 1883 data_ele = data_ele[vec]
1884 1884 data_weather= data_weather[vec[0]]
1885 1885
1886 1886 len_vec = len(vec)
1887 1887 data_ele_new = data_ele[::-1] # reverse
1888 1888 data_weather = data_weather[::-1,:]
1889 1889 new_i_ele = int(data_ele_new[0])
1890 1890 new_f_ele = int(data_ele_new[-1])
1891 1891
1892 1892 n1= new_i_ele- ang_min
1893 1893 n2= ang_max - new_f_ele-1
1894 1894 if n1>0:
1895 1895 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1896 1896 ele1_nan= numpy.ones(n1)*numpy.nan
1897 1897 data_ele = numpy.hstack((ele1,data_ele_new))
1898 1898 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1899 1899 if n2>0:
1900 1900 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1901 1901 ele2_nan= numpy.ones(n2)*numpy.nan
1902 1902 data_ele = numpy.hstack((data_ele,ele2))
1903 1903 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1904 1904
1905 1905
1906 1906 print("ele shape",data_ele.shape)
1907 1907 print(data_ele)
1908 1908
1909 1909 #print("self.data_ele_tmp",self.data_ele_tmp)
1910 1910 val_mean = numpy.mean(data_weather[:,-1])
1911 1911 self.val_mean = val_mean
1912 1912 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1913 1913 self.data_ele_tmp[val_ch]= data_ele_old
1914 1914
1915 1915
1916 1916 print("data_weather shape",data_weather.shape)
1917 1917 print(data_weather)
1918 1918 #exit(1)
1919 1919 return data_weather,data_ele
1920 1920
1921 1921
1922 1922 def plot(self):
1923 1923 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
1924 1924 data = self.data[-1]
1925 1925 r = self.data.yrange
1926 1926 delta_height = r[1]-r[0]
1927 1927 r_mask = numpy.where(r>=0)[0]
1928 1928 ##print("delta_height",delta_height)
1929 1929 #print("r_mask",r_mask,len(r_mask))
1930 1930 r = numpy.arange(len(r_mask))*delta_height
1931 1931 self.y = 2*r
1932 1932 res = 1
1933 1933 ###print("data['weather'].shape[0]",data['weather'].shape[0])
1934 1934 ang_max = self.ang_max
1935 1935 ang_min = self.ang_min
1936 1936 var_ang =ang_max - ang_min
1937 1937 step = (int(var_ang)/(res*data['weather'].shape[0]))
1938 1938 ###print("step",step)
1939 1939 #--------------------------------------------------------
1940 1940 ##print('weather',data['weather'].shape)
1941 1941 ##print('ele',data['ele'].shape)
1942 1942
1943 1943 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1944 1944 ###self.res_azi = numpy.mean(data['azi'])
1945 1945 ###print("self.res_ele",self.res_ele)
1946 1946 plt.clf()
1947 1947 subplots = [121, 122]
1948 1948 if self.ini==0:
1949 1949 self.data_ele_tmp = numpy.ones([self.nplots,int(var_ang)])*numpy.nan
1950 1950 self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
1951 1951 print("SHAPE",self.data_ele_tmp.shape)
1952 1952
1953 1953 for i,ax in enumerate(self.axes):
1954 1954 self.res_weather[i], self.res_ele = self.const_ploteo(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min,case_flag=self.data['case_flag'])
1955 1955 self.res_azi = numpy.mean(data['azi'])
1956 1956
1957 1957 print(self.res_ele)
1958 1958 #exit(1)
1959 1959 if ax.firsttime:
1960 1960 #plt.clf()
1961 1961 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1962 1962 #fig=self.figures[0]
1963 1963 else:
1964 1964
1965 1965 #plt.clf()
1966 1966 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1967 1967 caax = cgax.parasites[0]
1968 1968 paax = cgax.parasites[1]
1969 1969 cbar = plt.gcf().colorbar(pm, pad=0.075)
1970 1970 caax.set_xlabel('x_range [km]')
1971 1971 caax.set_ylabel('y_range [km]')
1972 1972 plt.text(1.0, 1.05, 'Elevation '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
1973 1973 print("***************************self.ini****************************",self.ini)
1974 1974 self.ini= self.ini+1
1975 1975
1976 1976 class WeatherRHI_vRF3_Plot(Plot):
1977 1977 CODE = 'weather'
1978 1978 plot_name = 'weather'
1979 1979 plot_type = 'rhistyle'
1980 1980 buffering = False
1981 1981 data_ele_tmp = None
1982 1982
1983 1983 def setup(self):
1984 1984 print("********************")
1985 1985 print("********************")
1986 1986 print("********************")
1987 1987 print("SETUP WEATHER PLOT")
1988 1988 self.ncols = 1
1989 1989 self.nrows = 1
1990 1990 self.nplots= 1
1991 1991 self.ylabel= 'Range [Km]'
1992 1992 self.titles= ['Weather']
1993 1993 if self.channels is not None:
1994 1994 self.nplots = len(self.channels)
1995 1995 self.nrows = len(self.channels)
1996 1996 else:
1997 1997 self.nplots = self.data.shape(self.CODE)[0]
1998 1998 self.nrows = self.nplots
1999 1999 self.channels = list(range(self.nplots))
2000 2000 print("channels",self.channels)
2001 2001 print("que saldra", self.data.shape(self.CODE)[0])
2002 2002 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
2003 2003 print("self.titles",self.titles)
2004 2004 self.colorbar=False
2005 2005 self.width =8
2006 2006 self.height =8
2007 2007 self.ini =0
2008 2008 self.len_azi =0
2009 2009 self.buffer_ini = None
2010 2010 self.buffer_ele = None
2011 2011 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
2012 2012 self.flag =0
2013 2013 self.indicador= 0
2014 2014 self.last_data_ele = None
2015 2015 self.val_mean = None
2016 2016
2017 2017 def update(self, dataOut):
2018 2018
2019 2019 data = {}
2020 2020 meta = {}
2021 2021 if hasattr(dataOut, 'dataPP_POWER'):
2022 2022 factor = 1
2023 2023 if hasattr(dataOut, 'nFFTPoints'):
2024 2024 factor = dataOut.normFactor
2025 2025 print("dataOut",dataOut.data_360.shape)
2026 2026 #
2027 2027 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
2028 2028 #
2029 2029 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
2030 2030 data['azi'] = dataOut.data_azi
2031 2031 data['ele'] = dataOut.data_ele
2032 2032 #data['case_flag'] = dataOut.case_flag
2033 2033 #print("UPDATE")
2034 2034 #print("data[weather]",data['weather'].shape)
2035 2035 #print("data[azi]",data['azi'])
2036 2036 return data, meta
2037 2037
2038 2038 def get2List(self,angulos):
2039 2039 list1=[]
2040 2040 list2=[]
2041 2041 for i in reversed(range(len(angulos))):
2042 2042 if not i==0:# the i=0 case would compare the first element of the list with the last one and is not relevant
2043 2043 diff_ = angulos[i]-angulos[i-1]
2044 2044 if abs(diff_) >1.5:
2045 2045 list1.append(i-1)
2046 2046 list2.append(diff_)
2047 2047 return list(reversed(list1)),list(reversed(list2))
2048 2048
2049 2049 def fixData90(self,list_,ang_):
2050 2050 if list_[0]==-1:
2051 2051 vec = numpy.where(ang_<ang_[0])
2052 2052 ang_[vec] = ang_[vec]+90
2053 2053 return ang_
2054 2054 return ang_
2055 2055
2056 2056 def fixData90HL(self,angulos):
2057 2057 vec = numpy.where(angulos>=90)
2058 2058 angulos[vec]=angulos[vec]-90
2059 2059 return angulos
2060 2060
2061 2061
2062 2062 def search_pos(self,pos,list_):
2063 2063 for i in range(len(list_)):
2064 2064 if pos == list_[i]:
2065 2065 return True,i
2066 2066 i=None
2067 2067 return False,i
2068 2068
2069 2069 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
2070 2070 size = len(ang_)
2071 2071 size2 = 0
2072 2072 for i in range(len(list2_)):
2073 2073 size2=size2+round(abs(list2_[i]))-1
2074 2074 new_size= size+size2
2075 2075 ang_new = numpy.zeros(new_size)
2076 2076 ang_new2 = numpy.zeros(new_size)
2077 2077
2078 2078 tmp = 0
2079 2079 c = 0
2080 2080 for i in range(len(ang_)):
2081 2081 ang_new[tmp +c] = ang_[i]
2082 2082 ang_new2[tmp+c] = ang_[i]
2083 2083 condition , value = self.search_pos(i,list1_)
2084 2084 if condition:
2085 2085 pos = tmp + c + 1
2086 2086 for k in range(round(abs(list2_[value]))-1):
2087 2087 if tipo_case==0 or tipo_case==3:#ascent
2088 2088 ang_new[pos+k] = ang_new[pos+k-1]+1
2089 2089 ang_new2[pos+k] = numpy.nan
2090 2090 elif tipo_case==1 or tipo_case==2:#descent
2091 2091 ang_new[pos+k] = ang_new[pos+k-1]-1
2092 2092 ang_new2[pos+k] = numpy.nan
2093 2093
2094 2094 tmp = pos +k
2095 2095 c = 0
2096 2096 c=c+1
2097 2097 return ang_new,ang_new2
2098 2098
2099 2099 def globalCheckPED(self,angulos,tipo_case):
2100 2100 l1,l2 = self.get2List(angulos)
2101 2101 ##print("l1",l1)
2102 2102 ##print("l2",l2)
2103 2103 if len(l1)>0:
2104 2104 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
2105 2105 #l1,l2 = self.get2List(angulos2)
2106 2106 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
2107 2107 #ang1_ = self.fixData90HL(ang1_)
2108 2108 #ang2_ = self.fixData90HL(ang2_)
2109 2109 else:
2110 2110 ang1_= angulos
2111 2111 ang2_= angulos
2112 2112 return ang1_,ang2_
2113 2113
2114 2114
2115 2115 def replaceNAN(self,data_weather,data_ele,val):
2116 2116 data= data_ele
2117 2117 data_T= data_weather
2118 2118
2119 2119 if data.shape[0]> data_T.shape[0]:
2120 2120 print("IF")
2121 2121 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
2122 2122 c = 0
2123 2123 for i in range(len(data)):
2124 2124 if numpy.isnan(data[i]):
2125 2125 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
2126 2126 else:
2127 2127 data_N[i,:]=data_T[c,:]
2128 2128 c=c+1
2129 2129 return data_N
2130 2130 else:
2131 2131 print("else")
2132 2132 for i in range(len(data)):
2133 2133 if numpy.isnan(data[i]):
2134 2134 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
2135 2135 return data_T
2136 2136
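# check_case classifies the elevation sweep, matching the tipo_case values used in
# const_ploteo: 0 -> ascent, 1 -> descent, 2 -> descent with a change of ang_max,
# 3 -> ascent with a change of ang_min.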
2137 2137 def check_case(self,data_ele,ang_max,ang_min):
2138 2138 start = data_ele[0]
2139 2139 end = data_ele[-1]
2140 2140 number = (end-start)
2141 2141 len_ang=len(data_ele)
2142 2142 print("start",start)
2143 2143 print("end",end)
2144 2144 print("number",number)
2145 2145
2146 2146 print("len_ang",len_ang)
2147 2147
2148 2148 #exit(1)
2149 2149
2150 2150 if start<end and (round(abs(number)+1)>=len_ang or (numpy.argmin(data_ele)==0)):# ascent case
2151 2151 return 0
2152 2152 #elif start>end and (round(abs(number)+1)>=len_ang or(numpy.argmax(data_ele)==0)):# descent case
2153 2153 # return 1
2154 2154 elif round(abs(number)+1)>=len_ang and (start>end or(numpy.argmax(data_ele)==0)):# descent case
2155 2155 return 1
2156 2156 elif round(abs(number)+1)<len_ang and data_ele[-2]>data_ele[-1]:# descent case, ANG MAX changed
2157 2157 return 2
2158 2158 elif round(abs(number)+1)<len_ang and data_ele[-2]<data_ele[-1] :# ascent case, ANG MIN changed
2159 2159 return 3
2160 2160
2161 2161
2162 2162 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min,case_flag):
2163 2163 ang_max= ang_max
2164 2164 ang_min= ang_min
2165 2165 data_weather=data_weather
2166 2166 val_ch=val_ch
2167 2167 ##print("*********************DATA WEATHER**************************************")
2168 2168 ##print(data_weather)
2169 2169 if self.ini==0:
2170 2170
2171 2171 #--------------------- new -------------------------
tipo_case = case_flag[-1] # most recent case flag (ascent/descent type), as in WeatherRHI_vRF_Plot.const_ploteo
2172 2172 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
2173 2173
2174 2174 #-------------------------RHI CHANGES---------------------------------
2175 2175 start= ang_min
2176 2176 end = ang_max
2177 2177 n= (ang_max-ang_min)/res
2178 2178 #------ new
2179 2179 self.start_data_ele = data_ele_new[0]
2180 2180 self.end_data_ele = data_ele_new[-1]
2181 2181 if tipo_case==0 or tipo_case==3: # ASCENT
2182 2182 n1= round(self.start_data_ele)- start
2183 2183 n2= end - round(self.end_data_ele)
2184 2184 print(self.start_data_ele)
2185 2185 print(self.end_data_ele)
2186 2186 if n1>0:
2187 2187 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
2188 2188 ele1_nan= numpy.ones(n1)*numpy.nan
2189 2189 data_ele = numpy.hstack((ele1,data_ele_new))
2190 2190 print("ele1_nan",ele1_nan.shape)
2191 2191 print("data_ele_old",data_ele_old.shape)
2192 2192 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
2193 2193 if n2>0:
2194 2194 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
2195 2195 ele2_nan= numpy.ones(n2)*numpy.nan
2196 2196 data_ele = numpy.hstack((data_ele,ele2))
2197 2197 print("ele2_nan",ele2_nan.shape)
2198 2198 print("data_ele_old",data_ele_old.shape)
2199 2199 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
2200 2200
2201 2201 if tipo_case==1 or tipo_case==2: # DESCENT
2202 2202 data_ele_new = data_ele_new[::-1] # reverse
2203 2203 data_ele_old = data_ele_old[::-1]# reverse
2204 2204 data_weather = data_weather[::-1,:]# reverse
2205 2205 vec= numpy.where(data_ele_new<ang_max)
2206 2206 data_ele_new = data_ele_new[vec]
2207 2207 data_ele_old = data_ele_old[vec]
2208 2208 data_weather = data_weather[vec[0]]
2209 2209 vec2= numpy.where(0<data_ele_new)
2210 2210 data_ele_new = data_ele_new[vec2]
2211 2211 data_ele_old = data_ele_old[vec2]
2212 2212 data_weather = data_weather[vec2[0]]
2213 2213 self.start_data_ele = data_ele_new[0]
2214 2214 self.end_data_ele = data_ele_new[-1]
2215 2215
2216 2216 n1= round(self.start_data_ele)- start
2217 2217 n2= end - round(self.end_data_ele)-1
2218 2218 print(self.start_data_ele)
2219 2219 print(self.end_data_ele)
2220 2220 if n1>0:
2221 2221 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
2222 2222 ele1_nan= numpy.ones(n1)*numpy.nan
2223 2223 data_ele = numpy.hstack((ele1,data_ele_new))
2224 2224 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
2225 2225 if n2>0:
2226 2226 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
2227 2227 ele2_nan= numpy.ones(n2)*numpy.nan
2228 2228 data_ele = numpy.hstack((data_ele,ele2))
2229 2229 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
2230 2230 # RADAR
2231 2231 # NOTE: data_ele and data_weather are the variables that get returned
2232 2232 val_mean = numpy.mean(data_weather[:,-1])
2233 2233 self.val_mean = val_mean
2234 2234 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
2235 2235 print("eleold",data_ele_old)
2236 2236 print(self.data_ele_tmp[val_ch])
2237 2237 print(data_ele_old.shape[0])
2238 2238 print(self.data_ele_tmp[val_ch].shape[0])
2239 2239 if (data_ele_old.shape[0]==91 or self.data_ele_tmp[val_ch].shape[0]==91):
2240 2240 import sys
2241 2241 print("EXIT",self.ini)
2242 2242
2243 2243 sys.exit(1)
2244 2244 self.data_ele_tmp[val_ch]= data_ele_old
2245 2245 else:
2246 2246 #print("**********************************************")
2247 2247 #print("****************VARIABLE**********************")
2248 2248 #-------------------------RHI CHANGES---------------------------------
2249 2249 #---------------------------------------------------------------------
2250 2250 ##print("INPUT data_ele",data_ele)
2251 2251 flag=0
2252 2252 start_ele = self.res_ele[0]
2253 2253 #tipo_case = self.check_case(data_ele,ang_max,ang_min)
2254 2254 tipo_case = case_flag[-1]
2255 2255 #print("TIPO DE DATA",tipo_case)
2256 2256 #-----------new------------
2257 2257 data_ele ,data_ele_old = self.globalCheckPED(data_ele,tipo_case)
2258 2258 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
2259 2259
2260 2260 #-------------------------------NEW RHI ITERATIVO-------------------------
2261 2261
2262 2262 if tipo_case==0 : # ASCENT
2263 2263 vec = numpy.where(data_ele<ang_max)
2264 2264 data_ele = data_ele[vec]
2265 2265 data_ele_old = data_ele_old[vec]
2266 2266 data_weather = data_weather[vec[0]]
2267 2267
2268 2268 vec2 = numpy.where(0<data_ele)
2269 2269 data_ele= data_ele[vec2]
2270 2270 data_ele_old= data_ele_old[vec2]
2271 2271 ##print(data_ele_new)
2272 2272 data_weather= data_weather[vec2[0]]
2273 2273
2274 2274 new_i_ele = int(round(data_ele[0]))
2275 2275 new_f_ele = int(round(data_ele[-1]))
2276 2276 #print(new_i_ele)
2277 2277 #print(new_f_ele)
2278 2278 #print(data_ele,len(data_ele))
2279 2279 #print(data_ele_old,len(data_ele_old))
2280 2280 if new_i_ele< 2:
2281 2281 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
2282 2282 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
2283 2283 self.data_ele_tmp[val_ch][new_i_ele:new_i_ele+len(data_ele)]=data_ele_old
2284 2284 self.res_ele[new_i_ele:new_i_ele+len(data_ele)]= data_ele
2285 2285 self.res_weather[val_ch][new_i_ele:new_i_ele+len(data_ele),:]= data_weather
2286 2286 data_ele = self.res_ele
2287 2287 data_weather = self.res_weather[val_ch]
2288 2288
2289 2289 elif tipo_case==1 : #DESCENT
2290 2290 data_ele = data_ele[::-1] # reverse
2291 2291 data_ele_old = data_ele_old[::-1]# reverse
2292 2292 data_weather = data_weather[::-1,:]# reverse
2293 2293 vec= numpy.where(data_ele<ang_max)
2294 2294 data_ele = data_ele[vec]
2295 2295 data_ele_old = data_ele_old[vec]
2296 2296 data_weather = data_weather[vec[0]]
2297 2297 vec2= numpy.where(0<data_ele)
2298 2298 data_ele = data_ele[vec2]
2299 2299 data_ele_old = data_ele_old[vec2]
2300 2300 data_weather = data_weather[vec2[0]]
2301 2301
2302 2302
2303 2303 new_i_ele = int(round(data_ele[0]))
2304 2304 new_f_ele = int(round(data_ele[-1]))
2305 2305 #print(data_ele)
2306 2306 #print(ang_max)
2307 2307 #print(data_ele_old)
2308 2308 if new_i_ele <= 1:
2309 2309 new_i_ele = 1
2310 2310 if round(data_ele[-1])>=ang_max-1:
2311 2311 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
2312 2312 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
2313 2313 self.data_ele_tmp[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1]=data_ele_old
2314 2314 self.res_ele[new_i_ele-1:new_i_ele+len(data_ele)-1]= data_ele
2315 2315 self.res_weather[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1,:]= data_weather
2316 2316 data_ele = self.res_ele
2317 2317 data_weather = self.res_weather[val_ch]
2318 2318
2319 2319 elif tipo_case==2: #descent
2320 2320 vec = numpy.where(data_ele<ang_max)
2321 2321 data_ele = data_ele[vec]
2322 2322 data_weather= data_weather[vec[0]]
2323 2323
2324 2324 len_vec = len(vec)
2325 2325 data_ele_new = data_ele[::-1] # reverse
2326 2326 data_weather = data_weather[::-1,:]
2327 2327 new_i_ele = int(data_ele_new[0])
2328 2328 new_f_ele = int(data_ele_new[-1])
2329 2329
2330 2330 n1= new_i_ele- ang_min
2331 2331 n2= ang_max - new_f_ele-1
2332 2332 if n1>0:
2333 2333 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
2334 2334 ele1_nan= numpy.ones(n1)*numpy.nan
2335 2335 data_ele = numpy.hstack((ele1,data_ele_new))
2336 2336 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
2337 2337 if n2>0:
2338 2338 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
2339 2339 ele2_nan= numpy.ones(n2)*numpy.nan
2340 2340 data_ele = numpy.hstack((data_ele,ele2))
2341 2341 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
2342 2342
2343 2343 self.data_ele_tmp[val_ch] = data_ele_old
2344 2344 self.res_ele = data_ele
2345 2345 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
2346 2346 data_ele = self.res_ele
2347 2347 data_weather = self.res_weather[val_ch]
2348 2348
2349 2349 elif tipo_case==3:#ascent
2350 2350 vec = numpy.where(0<data_ele)
2351 2351 data_ele= data_ele[vec]
2352 2352 data_ele_new = data_ele
2353 2353 data_ele_old= data_ele_old[vec]
2354 2354 data_weather= data_weather[vec[0]]
2355 2355 pos_ini = numpy.argmin(data_ele)
2356 2356 if pos_ini>0:
2357 2357 len_vec= len(data_ele)
2358 2358 vec3 = numpy.linspace(pos_ini,len_vec-1,len_vec-pos_ini).astype(int)
2359 2359 #print(vec3)
2360 2360 data_ele= data_ele[vec3]
2361 2361 data_ele_new = data_ele
2362 2362 data_ele_old= data_ele_old[vec3]
2363 2363 data_weather= data_weather[vec3]
2364 2364
2365 2365 new_i_ele = int(data_ele_new[0])
2366 2366 new_f_ele = int(data_ele_new[-1])
2367 2367 n1= new_i_ele- ang_min
2368 2368 n2= ang_max - new_f_ele-1
2369 2369 if n1>0:
2370 2370 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
2371 2371 ele1_nan= numpy.ones(n1)*numpy.nan
2372 2372 data_ele = numpy.hstack((ele1,data_ele_new))
2373 2373 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
2374 2374 if n2>0:
2375 2375 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
2376 2376 ele2_nan= numpy.ones(n2)*numpy.nan
2377 2377 data_ele = numpy.hstack((data_ele,ele2))
2378 2378 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
2379 2379
2380 2380 self.data_ele_tmp[val_ch] = data_ele_old
2381 2381 self.res_ele = data_ele
2382 2382 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
2383 2383 data_ele = self.res_ele
2384 2384 data_weather = self.res_weather[val_ch]
2385 2385 #print("self.data_ele_tmp",self.data_ele_tmp)
2386 2386 return data_weather,data_ele
2387 2387
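# const_ploteo_vRF is the simplified, per-call variant: it pads the measured
# elevations with synthetic angles so the sweep spans [ang_min, ang_max], pads the
# weather data with matching NaN rows, and returns the filled (data, angles) pair
# that plot() hands to wradlib's plot_rhi.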
2388 2388 def const_ploteo_vRF(self,val_ch,data_weather,data_ele,res,ang_max,ang_min):
2389 2389
2390 2390 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,1)
2391 2391
2392 2392 data_ele = data_ele_old.copy()
2393 2393
2394 2394 diff_1 = ang_max - data_ele[0]
2395 2395 angles_1_nan = numpy.linspace(ang_max,data_ele[0]+1,int(diff_1)-1)#*numpy.nan
2396 2396
2397 2397 diff_2 = data_ele[-1]-ang_min
2398 2398 angles_2_nan = numpy.linspace(data_ele[-1]-1,ang_min,int(diff_2)-1)#*numpy.nan
2399 2399
2400 2400 angles_filled = numpy.concatenate((angles_1_nan,data_ele,angles_2_nan))
2401 2401
2402 2402 print(angles_filled)
2403 2403
2404 2404 data_1_nan = numpy.ones([angles_1_nan.shape[0],len(self.r_mask)])*numpy.nan
2405 2405 data_2_nan = numpy.ones([angles_2_nan.shape[0],len(self.r_mask)])*numpy.nan
2406 2406
2407 2407 data_filled = numpy.concatenate((data_1_nan,data_weather,data_2_nan),axis=0)
2408 2408 #val_mean = numpy.mean(data_weather[:,-1])
2409 2409 #self.val_mean = val_mean
2410 2410 print(data_filled)
2411 2411 data_filled = self.replaceNAN(data_weather=data_filled,data_ele=angles_filled,val=numpy.nan)
2412 2412
2413 2413 print(data_filled)
2414 2414 print(data_filled.shape)
2415 2415 print(angles_filled.shape)
2416 2416
2417 2417 return data_filled,angles_filled
2418 2418
2419 2419 def plot(self):
2420 2420 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
2421 2421 data = self.data[-1]
2422 2422 r = self.data.yrange
2423 2423 delta_height = r[1]-r[0]
2424 2424 r_mask = numpy.where(r>=0)[0]
2425 2425 self.r_mask =r_mask
2426 2426 ##print("delta_height",delta_height)
2427 2427 #print("r_mask",r_mask,len(r_mask))
2428 2428 r = numpy.arange(len(r_mask))*delta_height
2429 2429 self.y = 2*r
2430 2430 res = 1
2431 2431 ###print("data['weather'].shape[0]",data['weather'].shape[0])
2432 2432 ang_max = self.ang_max
2433 2433 ang_min = self.ang_min
2434 2434 var_ang =ang_max - ang_min
2435 2435 step = (int(var_ang)/(res*data['weather'].shape[0]))
2436 2436 ###print("step",step)
2437 2437 #--------------------------------------------------------
2438 2438 ##print('weather',data['weather'].shape)
2439 2439 ##print('ele',data['ele'].shape)
2440 2440
2441 2441 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
2442 2442 ###self.res_azi = numpy.mean(data['azi'])
2443 2443 ###print("self.res_ele",self.res_ele)
2444 2444
2445 2445 plt.clf()
2446 2446 subplots = [121, 122]
2447 2447 #if self.ini==0:
2448 2448 #self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
2449 2449 #print("SHAPE",self.data_ele_tmp.shape)
2450 2450
2451 2451 for i,ax in enumerate(self.axes):
2452 2452 res_weather, self.res_ele = self.const_ploteo_vRF(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],res=res,ang_max=ang_max,ang_min=ang_min)
2453 2453 self.res_azi = numpy.mean(data['azi'])
2454 2454
2455 2455 if ax.firsttime:
2456 2456 #plt.clf()
2457 2457 print("Frist Plot")
2458 2458 print(data['weather'][i][:,r_mask].shape)
2459 2459 print(data['ele'].shape)
2460 2460 cgax, pm = wrl.vis.plot_rhi(res_weather,r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
2461 2461 #cgax, pm = wrl.vis.plot_rhi(data['weather'][i][:,r_mask],r=r,th=data['ele'],ax=subplots[i], proj='cg',vmin=20, vmax=80)
2462 2462 gh = cgax.get_grid_helper()
2463 2463 locs = numpy.linspace(ang_min,ang_max,var_ang+1)
2464 2464 gh.grid_finder.grid_locator1 = FixedLocator(locs)
2465 2465 gh.grid_finder.tick_formatter1 = DictFormatter(dict([(i, r"${0:.0f}^\circ$".format(i)) for i in locs]))
2466 2466
2467 2467
2468 2468 #fig=self.figures[0]
2469 2469 else:
2470 2470 #plt.clf()
2471 2471 print("ELSE PLOT")
2472 2472 cgax, pm = wrl.vis.plot_rhi(res_weather,r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
2473 2473 #cgax, pm = wrl.vis.plot_rhi(data['weather'][i][:,r_mask],r=r,th=data['ele'],ax=subplots[i], proj='cg',vmin=20, vmax=80)
2474 2474 gh = cgax.get_grid_helper()
2475 2475 locs = numpy.linspace(ang_min,ang_max,var_ang+1)
2476 2476 gh.grid_finder.grid_locator1 = FixedLocator(locs)
2477 2477 gh.grid_finder.tick_formatter1 = DictFormatter(dict([(i, r"${0:.0f}^\circ$".format(i)) for i in locs]))
2478 2478
2479 2479 caax = cgax.parasites[0]
2480 2480 paax = cgax.parasites[1]
2481 2481 cbar = plt.gcf().colorbar(pm, pad=0.075)
2482 2482 caax.set_xlabel('x_range [km]')
2483 2483 caax.set_ylabel('y_range [km]')
2484 2484 plt.text(1.0, 1.05, 'Elevation '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
2485 2485 print("***************************self.ini****************************",self.ini)
2486 2486 self.ini= self.ini+1
2487 2487
2488 2488 class WeatherRHI_vRF4_Plot(Plot):
2489 2489 CODE = 'RHI'
2490 2490 plot_name = 'RHI'
2491 2491 #plot_type = 'rhistyle'
2492 2492 buffering = False
2493 2493
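# Unlike the classes above, WeatherRHI_vRF4_Plot renders the RHI directly with a
# matplotlib polar pcolormesh (self.polar = True) instead of wradlib's plot_rhi.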
2494 2494 def setup(self):
2495 2495
2496 2496 self.ncols = 1
2497 2497 self.nrows = 1
2498 2498 self.nplots= 1
2499 2499 self.ylabel= 'Range [Km]'
2500 2500 self.titles= ['RHI']
2501 2501 self.polar = True
2502 self.grid = True
2502 2503 if self.channels is not None:
2503 2504 self.nplots = len(self.channels)
2504 2505 self.nrows = len(self.channels)
2505 2506 else:
2506 2507 self.nplots = self.data.shape(self.CODE)[0]
2507 2508 self.nrows = self.nplots
2508 2509 self.channels = list(range(self.nplots))
2509 2510
2510 2511 if self.CODE == 'Power':
2511 2512 self.cb_label = r'Power (dB)'
2512 2513 elif self.CODE == 'Doppler':
2513 2514 self.cb_label = r'Velocity (m/s)'
2514 2515 self.colorbar=True
2515 2516 self.width =8
2516 2517 self.height =8
2517 2518 self.ini =0
2518 2519 self.len_azi =0
2519 2520 self.buffer_ini = None
2520 2521 self.buffer_ele = None
2521 2522 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
2522 2523 self.flag =0
2523 2524 self.indicador= 0
2524 2525 self.last_data_ele = None
2525 2526 self.val_mean = None
2526 2527
2527 2528 def update(self, dataOut):
2528 2529
2529 2530 data = {}
2530 2531 meta = {}
2531 2532 if hasattr(dataOut, 'dataPP_POWER'):
2532 2533 factor = 1
2533 2534 if hasattr(dataOut, 'nFFTPoints'):
2534 2535 factor = dataOut.normFactor
2535 2536
2536 2537 if 'pow' in self.attr_data[0].lower():
2537 2538 data['data'] = 10*numpy.log10(getattr(dataOut, self.attr_data[0])/(factor))
2538 2539 else:
2539 2540 data['data'] = getattr(dataOut, self.attr_data[0])/(factor)
2540 2541
2541 2542 data['azi'] = dataOut.data_azi
2542 2543 data['ele'] = dataOut.data_ele
2543 2544
2544 2545 return data, meta
2545 2546
2546 2547 def plot(self):
2547 2548 data = self.data[-1]
2548 2549 r = self.data.yrange
2549 2550 delta_height = r[1]-r[0]
2550 2551 r_mask = numpy.where(r>=0)[0]
2551 2552 self.r_mask =r_mask
2552 2553 r = numpy.arange(len(r_mask))*delta_height
2553 2554 self.y = 2*r
2554 2555 res = 1
2555 ang_max = self.ang_max
2556 ang_min = self.ang_min
2557 var_ang =ang_max - ang_min
2558 step = (int(var_ang)/(res*data['data'].shape[0]))
2556 #ang_max = self.ang_max
2557 #ang_min = self.ang_min
2558 #var_ang =ang_max - ang_min
2559 #step = (int(var_ang)/(res*data['data'].shape[0]))
2559 2560
2560 2561 z = data['data'][self.channels[0]][:,r_mask]
2561 2562
2562 2563 self.titles = []
2563 2564
2564 2565 self.ymax = self.ymax if self.ymax else numpy.nanmax(r)
2565 2566 self.ymin = self.ymin if self.ymin else numpy.nanmin(r)
2566 2567 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
2567 2568 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
2568 2569 self.ang_min = self.ang_min if self.ang_min else 0
2569 self.ang_max = self.ang_max if self.ang_max else 2*numpy.pi
2570 self.ang_max = self.ang_max if self.ang_max else 90
2570 2571
2571 2572 subplots = [121, 122]
2572 2573
2573 2574 r, theta = numpy.meshgrid(r, numpy.radians(data['ele']) )
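# meshgrid yields theta/r arrays of shape (n_elevations, n_ranges), matching the
# (elevation, range) layout of z, so pcolormesh draws one ray per elevation angle.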
2574 2575
2575 2576 for i,ax in enumerate(self.axes):
2576 2577
2577 2578 if ax.firsttime:
2578 2579 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
2579 2580 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
2580 2581
2581 2582 else:
2582 2583 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
2583 2584 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
2584 2585
2585 2586 if len(self.channels) !=1:
2586 2587 self.titles = ['RHI {} AZ: {} Channel {}'.format(self.labels[x], str(round(numpy.mean(data['azi']),1)), x) for x in range(self.nrows)]
2587 2588 else:
2588 2589 self.titles = ['RHI {} AZ: {} Channel {}'.format(self.labels[0], str(round(numpy.mean(data['azi']),1)), self.channels[0])]
@@ -1,838 +1,834
1 1 '''
2 2 Created on Jul 3, 2014
3 3
4 4 @author: roj-idl71
5 5 '''
6 6 # SUBCHANNELS INSTEAD OF CHANNELS
7 7 # BENCHMARKS -> PROBLEMS WITH LARGE FILES -> INCONSISTENT OVER TIME
8 8 # VERSION UPDATE
9 9 # HEADERS
10 10 # WRITE MODULE
11 11 # METADATA
12 12
13 13 import os
14 14 import time
15 15 import datetime
16 16 import numpy
17 17 import timeit
18 18 from fractions import Fraction
19 19 from time import time
20 20 from time import sleep
21 21
22 22 import schainpy.admin
23 23 from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader
24 24 from schainpy.model.data.jrodata import Voltage
25 25 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
26 26
27 27 import pickle
28 28 try:
29 29 import digital_rf
30 30 except:
31 31 pass
32 32
33 33
34 34 class DigitalRFReader(ProcessingUnit):
35 35 '''
36 36 Reader for voltage data stored in Digital RF format (delivered as Voltage dataOut objects)
37 37 '''
38 38
39 39 def __init__(self):
40 40 '''
41 41 Constructor
42 42 '''
43 43
44 44 ProcessingUnit.__init__(self)
45 45
46 46 self.dataOut = Voltage()
47 47 self.__printInfo = True
48 48 self.__flagDiscontinuousBlock = False
49 49 self.__bufferIndex = 9999999
50 50 self.__codeType = 0
51 51 self.__ippKm = None
52 52 self.__nCode = None
53 53 self.__nBaud = None
54 54 self.__code = None
55 55 self.dtype = None
56 56 self.oldAverage = None
57 57 self.path = None
58 58
59 59 def close(self):
60 60 print('Average reading time from digital rf format (ms): ', self.oldAverage * 1000)
61 61 return
62 62
63 63 def __getCurrentSecond(self):
64 64
65 65 return self.__thisUnixSample / self.__sample_rate
66 66
67 67 thisSecond = property(__getCurrentSecond, "I'm the 'thisSecond' property.")
68 68
69 69 def __setFileHeader(self):
70 70 '''
71 71 In this method every parameter of the dataOut object (header, no data) is initialized
72 72 '''
73 73 ippSeconds = 1.0 * self.__nSamples / self.__sample_rate
74 74 if not self.getByBlock:
75 75 nProfiles = 1.0 / ippSeconds # Number of profiles in one second
76 76 else:
77 77 nProfiles = self.nProfileBlocks # Number of profiles in one block
78 78
79 79 try:
80 80 self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
81 81 self.__radarControllerHeader)
82 82 except:
83 83 self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
84 84 txA=0,
85 85 txB=0,
86 86 nWindows=1,
87 87 nHeights=self.__nSamples,
88 88 firstHeight=self.__firstHeigth,
89 89 deltaHeight=self.__deltaHeigth,
90 90 codeType=self.__codeType,
91 91 nCode=self.__nCode, nBaud=self.__nBaud,
92 92 code=self.__code)
93 93
94 94 try:
95 95 self.dataOut.systemHeaderObj = SystemHeader(self.__systemHeader)
96 96 except:
97 97 self.dataOut.systemHeaderObj = SystemHeader(nSamples=self.__nSamples,
98 98 nProfiles=nProfiles,
99 99 nChannels=len(
100 100 self.__channelList),
101 101 adcResolution=14)
102 102 self.dataOut.type = "Voltage"
103 103
104 104 self.dataOut.data = None
105 105
106 106 self.dataOut.dtype = self.dtype
107 107
108 108 # self.dataOut.nChannels = 0
109 109
110 110 # self.dataOut.nHeights = 0
111 111
112 112 self.dataOut.nProfiles = int(nProfiles)
113 113
114 114 self.dataOut.heightList = self.__firstHeigth + \
115 115 numpy.arange(self.__nSamples, dtype=numpy.float) * \
116 116 self.__deltaHeigth
117 117
118 118 #self.dataOut.channelList = list(range(self.__num_subchannels))
119 119 self.dataOut.channelList = list(range(len(self.__channelList)))
120 120 if not self.getByBlock:
121 121
122 122 self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights
123 123 else:
124 124 self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights*self.nProfileBlocks
125 125
126 126 # self.dataOut.channelIndexList = None
127 127
128 128 self.dataOut.flagNoData = True
129 129 if not self.getByBlock:
130 130 self.dataOut.flagDataAsBlock = False
131 131 else:
132 132 self.dataOut.flagDataAsBlock = True
133 133 # Set to TRUE if the data is discontinuous
134 134 self.dataOut.flagDiscontinuousBlock = False
135 135
136 136 self.dataOut.utctime = None
137 137
138 138 # timezone like jroheader, difference in minutes between UTC and localtime
139 139 self.dataOut.timeZone = self.__timezone / 60
140 140
141 141 self.dataOut.dstFlag = 0
142 142
143 143 self.dataOut.errorCount = 0
144 144
145 145 try:
146 146 self.dataOut.nCohInt = self.fixed_metadata_dict.get(
147 147 'nCohInt', self.nCohInt)
148 148
149 149 # assume the data is already decoded
150 150 self.dataOut.flagDecodeData = self.fixed_metadata_dict.get(
151 151 'flagDecodeData', self.flagDecodeData)
152 152
153 153 # assume the data has not been flipped
154 154 self.dataOut.flagDeflipData = self.fixed_metadata_dict['flagDeflipData']
155 155
156 156 self.dataOut.flagShiftFFT = self.fixed_metadata_dict['flagShiftFFT']
157 157
158 158 self.dataOut.useLocalTime = self.fixed_metadata_dict['useLocalTime']
159 159 except:
160 160 pass
161 161
162 162 self.dataOut.ippSeconds = ippSeconds
163 163
164 164 # Time interval between profiles
165 165 # self.dataOut.timeInterval = self.dataOut.ippSeconds * self.dataOut.nCohInt
166 166
167 167 self.dataOut.frequency = self.__frequency
168 168
169 169 self.dataOut.realtime = self.__online
170 170
171 171 def findDatafiles(self, path, startDate=None, endDate=None):
172 172
173 173 if not os.path.isdir(path):
174 174 return []
175 175
176 176 try:
177 177 digitalReadObj = digital_rf.DigitalRFReader(
178 178 path, load_all_metadata=True)
179 179 except:
180 180 digitalReadObj = digital_rf.DigitalRFReader(path)
181 181
182 182 channelNameList = digitalReadObj.get_channels()
183 183
184 184 if not channelNameList:
185 185 return []
186 186
187 187 metadata_dict = digitalReadObj.get_rf_file_metadata(channelNameList[0])
188 188
189 189 sample_rate = metadata_dict['sample_rate'][0]
190 190
191 191 this_metadata_file = digitalReadObj.get_metadata(channelNameList[0])
192 192
193 193 try:
194 194 timezone = this_metadata_file['timezone'].value
195 195 except:
196 196 timezone = 0
197 197
198 198 startUTCSecond, endUTCSecond = digitalReadObj.get_bounds(
199 199 channelNameList[0]) / sample_rate - timezone
200 200
201 201 startDatetime = datetime.datetime.utcfromtimestamp(startUTCSecond)
202 202 endDatatime = datetime.datetime.utcfromtimestamp(endUTCSecond)
203 203
204 204 if not startDate:
205 205 startDate = startDatetime.date()
206 206
207 207 if not endDate:
208 208 endDate = endDatatime.date()
209 209
210 210 dateList = []
211 211
212 212 thisDatetime = startDatetime
213 213
214 214 while(thisDatetime <= endDatatime):
215 215
216 216 thisDate = thisDatetime.date()
217 217
218 218 if thisDate < startDate:
219 219 continue
220 220
221 221 if thisDate > endDate:
222 222 break
223 223
224 224 dateList.append(thisDate)
225 225 thisDatetime += datetime.timedelta(1)
226 226
227 227 return dateList
228 228
229 229 def setup(self, path=None,
230 230 startDate=None,
231 231 endDate=None,
232 232 startTime=datetime.time(0, 0, 0),
233 233 endTime=datetime.time(23, 59, 59),
234 234 channelList=None,
235 235 nSamples=None,
236 236 online=False,
237 237 delay=60,
238 238 buffer_size=1024,
239 239 ippKm=None,
240 240 nCohInt=1,
241 241 nCode=1,
242 242 nBaud=1,
243 243 flagDecodeData=False,
244 244 code=numpy.ones((1, 1), dtype=numpy.int),
245 245 getByBlock=0,
246 246 nProfileBlocks=1,
247 247 **kwargs):
248 248 '''
249 249 In this method we should set all initial parameters.
250 250
251 251 Inputs:
252 252 path
253 253 startDate
254 254 endDate
255 255 startTime
256 256 endTime
257 257 set
258 258 expLabel
259 259 ext
260 260 online
261 261 delay
262 262 '''
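# Hypothetical usage sketch (path, dates and channel numbers are placeholders):
# reader = DigitalRFReader()
# reader.setup(path='/data/drf_experiment', channelList=[0],
# startDate=datetime.date(2021, 1, 1), endDate=datetime.date(2021, 1, 2),
# online=False, nSamples=1000)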
263 263 self.path = path
264 264 self.nCohInt = nCohInt
265 265 self.flagDecodeData = flagDecodeData
266 266 self.i = 0
267 267
268 268 self.getByBlock = getByBlock
269 269 self.nProfileBlocks = nProfileBlocks
270 270 if not os.path.isdir(path):
271 271 raise ValueError("[Reading] Directory %s does not exist" % path)
272 272
273 273 try:
274 274 self.digitalReadObj = digital_rf.DigitalRFReader(
275 275 path, load_all_metadata=True)
276 276 except:
277 277 self.digitalReadObj = digital_rf.DigitalRFReader(path)
278 278
279 279 channelNameList = self.digitalReadObj.get_channels()
280 280
281 281 if not channelNameList:
282 282 raise ValueError("[Reading] Directory %s does not have any files" % path)
283 283
284 284 if not channelList:
285 285 channelList = list(range(len(channelNameList)))
286 286
287 287 ########## Reading metadata ######################
288 288
289 289 top_properties = self.digitalReadObj.get_properties(
290 290 channelNameList[channelList[0]])
291 291
292 292 self.__num_subchannels = top_properties['num_subchannels']
293 293 self.__sample_rate = 1.0 * \
294 294 top_properties['sample_rate_numerator'] / \
295 295 top_properties['sample_rate_denominator']
296 296 # self.__samples_per_file = top_properties['samples_per_file'][0]
297 297 self.__deltaHeigth = 1e6 * 0.15 / self.__sample_rate # why 0.15?
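# (0.15 km/us is roughly c/2, so deltaHeigth is the range-gate spacing in km implied by the sample rate)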
298 298
299 299 this_metadata_file = self.digitalReadObj.get_digital_metadata(
300 300 channelNameList[channelList[0]])
301 301 metadata_bounds = this_metadata_file.get_bounds()
302 302 self.fixed_metadata_dict = this_metadata_file.read(
303 303 metadata_bounds[0])[metadata_bounds[0]] # GET FIRST HEADER
304 304
305 305 try:
306 306 self.__processingHeader = self.fixed_metadata_dict['processingHeader']
307 307 self.__radarControllerHeader = self.fixed_metadata_dict['radarControllerHeader']
308 308 self.__systemHeader = self.fixed_metadata_dict['systemHeader']
309 309 self.dtype = pickle.loads(self.fixed_metadata_dict['dtype'])
310 310 except:
311 311 pass
312 312
313 313 self.__frequency = None
314 314
315 315 self.__frequency = self.fixed_metadata_dict.get('frequency', 1)
316 316
317 317 self.__timezone = self.fixed_metadata_dict.get('timezone', 18000)
318 318
319 319 try:
320 320 nSamples = self.fixed_metadata_dict['nSamples']
321 321 except:
322 322 nSamples = None
323 323
324 324 self.__firstHeigth = 0
325 325
326 326 try:
327 327 codeType = self.__radarControllerHeader['codeType']
328 328 except:
329 329 codeType = 0
330 330
331 331 try:
332 332 if codeType:
333 333 nCode = self.__radarControllerHeader['nCode']
334 334 nBaud = self.__radarControllerHeader['nBaud']
335 335 code = self.__radarControllerHeader['code']
336 336 except:
337 337 pass
338 338
339 339 if not ippKm:
340 340 try:
341 341 # seconds to km
342 342 ippKm = self.__radarControllerHeader['ipp']
343 343 except:
344 344 ippKm = None
345 345 ####################################################
346 346 self.__ippKm = ippKm
347 347 startUTCSecond = None
348 348 endUTCSecond = None
349 349
350 350 if startDate:
351 351 startDatetime = datetime.datetime.combine(startDate, startTime)
352 352 startUTCSecond = (
353 353 startDatetime - datetime.datetime(1970, 1, 1)).total_seconds() + self.__timezone
354 354
355 355 if endDate:
356 356 endDatetime = datetime.datetime.combine(endDate, endTime)
357 357 endUTCSecond = (endDatetime - datetime.datetime(1970,
358 358 1, 1)).total_seconds() + self.__timezone
359 359
360 360
361 print(startUTCSecond,endUTCSecond)
361 #print(startUTCSecond,endUTCSecond)
362 362 start_index, end_index = self.digitalReadObj.get_bounds(
363 363 channelNameList[channelList[0]])
364 364
365 print("*****",start_index,end_index)
366 print(metadata_bounds)
365 #print("*****",start_index,end_index)
367 366 if not startUTCSecond:
368 367 startUTCSecond = start_index / self.__sample_rate
369 368
370 369 if start_index > startUTCSecond * self.__sample_rate:
371 370 startUTCSecond = start_index / self.__sample_rate
372 371
373 372 if not endUTCSecond:
374 373 endUTCSecond = end_index / self.__sample_rate
375 print("1",endUTCSecond)
376 print(self.__sample_rate)
377 374 if end_index < endUTCSecond * self.__sample_rate:
378 375 endUTCSecond = end_index / self.__sample_rate #Check UTC and LT time
379 print("2",endUTCSecond)
380 376 if not nSamples:
381 377 if not ippKm:
382 378 raise ValueError("[Reading] nSamples or ippKm should be defined")
383 379 nSamples = int(ippKm / (1e6 * 0.15 / self.__sample_rate))
384 380
385 381 channelBoundList = []
386 382 channelNameListFiltered = []
387 383
388 384 for thisIndexChannel in channelList:
389 385 thisChannelName = channelNameList[thisIndexChannel]
390 386 start_index, end_index = self.digitalReadObj.get_bounds(
391 387 thisChannelName)
392 388 channelBoundList.append((start_index, end_index))
393 389 channelNameListFiltered.append(thisChannelName)
394 390
395 391 self.profileIndex = 0
396 392 self.i = 0
397 393 self.__delay = delay
398 394
399 395 self.__codeType = codeType
400 396 self.__nCode = nCode
401 397 self.__nBaud = nBaud
402 398 self.__code = code
403 399
404 400 self.__datapath = path
405 401 self.__online = online
406 402 self.__channelList = channelList
407 403 self.__channelNameList = channelNameListFiltered
408 404 self.__channelBoundList = channelBoundList
409 405 self.__nSamples = nSamples
410 406 if self.getByBlock:
411 407 nSamples = nSamples*nProfileBlocks
412 408
413 409
414 410 self.__samples_to_read = int(nSamples) # FIXED: NOW 40
415 411 self.__nChannels = len(self.__channelList)
416 412 #print("------------------------------------------")
417 413 #print("self.__samples_to_read",self.__samples_to_read)
418 414 #print("self.__nSamples",self.__nSamples)
419 415 # they are equal, so buffer_index gives 0
420 416 self.__startUTCSecond = startUTCSecond
421 417 self.__endUTCSecond = endUTCSecond
422 418
423 419 self.__timeInterval = 1.0 * self.__samples_to_read / \
424 420 self.__sample_rate # Time interval
425 421
426 422 if online:
427 423 # self.__thisUnixSample = int(endUTCSecond*self.__sample_rate - 4*self.__samples_to_read)
428 424 startUTCSecond = numpy.floor(endUTCSecond)
429 425
430 426 # because in the other method (__readNextBlock) the first thing done is to add samples_to_read
431 427 self.__thisUnixSample = int(startUTCSecond * self.__sample_rate) - self.__samples_to_read
432 428
433 429 #self.__data_buffer = numpy.zeros(
434 430 # (self.__num_subchannels, self.__samples_to_read), dtype=numpy.complex)
435 431 self.__data_buffer = numpy.zeros((int(len(channelList)), self.__samples_to_read), dtype=numpy.complex)
436 432
437 433
438 434 self.__setFileHeader()
439 435 self.isConfig = True
440 436
441 437 print("[Reading] Digital RF Data was found from %s to %s " % (
442 438 datetime.datetime.utcfromtimestamp(
443 439 self.__startUTCSecond - self.__timezone),
444 440 datetime.datetime.utcfromtimestamp(
445 441 self.__endUTCSecond - self.__timezone)
446 442 ))
447 443
448 444 print("[Reading] Starting process from %s to %s" % (datetime.datetime.utcfromtimestamp(startUTCSecond - self.__timezone),
449 445 datetime.datetime.utcfromtimestamp(
450 446 endUTCSecond - self.__timezone)
451 447 ))
452 448 self.oldAverage = None
453 449 self.count = 0
454 450 self.executionTime = 0
455 451
456 452 def __reload(self):
457 453 # print
458 454 # print "%s not in range [%s, %s]" %(
459 455 # datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
460 456 # datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
461 457 # datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
462 458 # )
463 459 print("[Reading] reloading metadata ...")
464 460
465 461 try:
466 462 self.digitalReadObj.reload(complete_update=True)
467 463 except:
468 464 self.digitalReadObj = digital_rf.DigitalRFReader(self.path)
469 465
470 466 start_index, end_index = self.digitalReadObj.get_bounds(
471 467 self.__channelNameList[self.__channelList[0]])
472 468
473 469 if start_index > self.__startUTCSecond * self.__sample_rate:
474 470 self.__startUTCSecond = 1.0 * start_index / self.__sample_rate
475 471
476 472 if end_index > self.__endUTCSecond * self.__sample_rate:
477 473 self.__endUTCSecond = 1.0 * end_index / self.__sample_rate
478 474 print()
479 475 print("[Reading] New timerange found [%s, %s] " % (
480 476 datetime.datetime.utcfromtimestamp(
481 477 self.__startUTCSecond - self.__timezone),
482 478 datetime.datetime.utcfromtimestamp(
483 479 self.__endUTCSecond - self.__timezone)
484 480 ))
485 481
486 482 return True
487 483
488 484 return False
489 485
490 486 def timeit(self, toExecute):
491 487 t0 = time.time()
492 488 toExecute()
493 489 self.executionTime = time.time() - t0
494 490 if self.oldAverage is None:
495 491 self.oldAverage = self.executionTime
496 492 self.oldAverage = (self.executionTime + self.count *
497 493 self.oldAverage) / (self.count + 1.0)
498 494 self.count = self.count + 1.0
499 495 return
500 496
501 497 def __readNextBlock(self, seconds=30, volt_scale=1):
502 498 '''
503 499 '''
504 500
505 501 # Set the next data
506 502 self.__flagDiscontinuousBlock = False
507 503 self.__thisUnixSample += self.__samples_to_read
508 504
509 505 if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
510 506 print("[Reading] There is no more data in the selected time range")
511 507 if self.__online:
512 508 sleep(3)
513 509 self.__reload()
514 510 else:
515 511 return False
516 512
517 513 if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
518 514 return False
519 515 self.__thisUnixSample -= self.__samples_to_read
520 516
521 517 indexChannel = 0
522 518
523 519 dataOk = False
524 520
525 521 for thisChannelName in self.__channelNameList: # TODO: handle multiple channels?
526 522 for indexSubchannel in range(self.__num_subchannels):
527 523 try:
528 524 t0 = time()
529 525 result = self.digitalReadObj.read_vector_c81d(self.__thisUnixSample,
530 526 self.__samples_to_read,
531 527 thisChannelName, sub_channel=indexSubchannel)
532 528 self.executionTime = time() - t0
533 529 if self.oldAverage is None:
534 530 self.oldAverage = self.executionTime
535 531 self.oldAverage = (
536 532 self.executionTime + self.count * self.oldAverage) / (self.count + 1.0)
537 533 self.count = self.count + 1.0
538 534
539 535 except IOError as e:
540 536 # read next profile
541 537 self.__flagDiscontinuousBlock = True
542 538 print("[Reading] %s" % datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone), e)
543 539 break
544 540
545 541 if result.shape[0] != self.__samples_to_read:
546 542 self.__flagDiscontinuousBlock = True
547 543 print("[Reading] %s: Too few samples were found, just %d/%d samples" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
548 544 result.shape[0],
549 545 self.__samples_to_read))
550 546 break
551 547
552 548 self.__data_buffer[indexChannel, :] = result * volt_scale
553 549 indexChannel+=1
554 550
555 551 dataOk = True
556 552
557 553 self.__utctime = self.__thisUnixSample / self.__sample_rate
558 554
559 555 if not dataOk:
560 556 return False
561 557
562 558 print("[Reading] %s: %d samples <> %f sec" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
563 559 self.__samples_to_read,
564 560 self.__timeInterval))
565 561
566 562 self.__bufferIndex = 0
567 563
568 564 return True
569 565
570 566 def __isBufferEmpty(self):
571 567
572 568 return self.__bufferIndex > self.__samples_to_read - self.__nSamples # 40960 - 40
573 569
574 570 def getData(self, seconds=30, nTries=5):
575 571 '''
576 572 This method gets the data from files and puts it into the dataOut object
577 573
578 574 In addition, it increases the buffer counter by one.
579 575
580 576 Return:
581 577 data : returns one profile of voltages (heights * channels) copied from the
582 578 buffer. Returns None if there are no more files to read.
583 579
584 580 Affected:
585 581 self.dataOut
586 582 self.profileIndex
587 583 self.flagDiscontinuousBlock
588 584 self.flagIsNewBlock
589 585 '''
590 586 #print("getdata")
591 587 err_counter = 0
592 588 self.dataOut.flagNoData = True
593 589
594 590
595 591 if self.__isBufferEmpty():
596 592 #print("hi")
597 593 self.__flagDiscontinuousBlock = False
598 594
599 595 while True:
600 596 if self.__readNextBlock():
601 597 break
602 598 if self.__thisUnixSample > self.__endUTCSecond * self.__sample_rate:
603 599 raise schainpy.admin.SchainError('Reached the end of the selected time range')
604 600 return
605 601
606 602 if self.__flagDiscontinuousBlock:
607 603 raise schainpy.admin.SchainError('discontinuous block found')
608 604 return
609 605
610 606 if not self.__online:
611 607 raise schainpy.admin.SchainError('Online?')
612 608 return
613 609
614 610 err_counter += 1
615 611 if err_counter > nTries:
616 612 raise schainpy.admin.SchainError('Max retries reached')
617 613 return
618 614
619 615 print('[Reading] waiting %d seconds to read a new block' % seconds)
620 616 sleep(seconds)
621 617
622 618
623 619 if not self.getByBlock:
624 620
625 621 #print("self.__bufferIndex",self.__bufferIndex) # this value is apparently always zero
626 622 self.dataOut.data = self.__data_buffer[:, self.__bufferIndex:self.__bufferIndex + self.__nSamples]
627 623 self.dataOut.utctime = ( self.__thisUnixSample + self.__bufferIndex) / self.__sample_rate
628 624 self.dataOut.flagNoData = False
629 625 self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
630 626 self.dataOut.profileIndex = self.profileIndex
631 627
632 628 self.__bufferIndex += self.__nSamples
633 629 self.profileIndex += 1
634 630
635 631 if self.profileIndex == self.dataOut.nProfiles:
636 632 self.profileIndex = 0
637 633 else:
638 634 # NOTE: readNextBlock and __isBufferEmpty() should also be added here
639 635 self.dataOut.flagNoData = False
640 636 buffer = self.__data_buffer[:,self.__bufferIndex:self.__bufferIndex + self.__samples_to_read]
641 637 buffer = buffer.reshape((self.__nChannels, self.nProfileBlocks, int(self.__samples_to_read/self.nProfileBlocks)))
642 638 self.dataOut.nProfileBlocks = self.nProfileBlocks
643 639 self.dataOut.data = buffer
644 640 self.dataOut.utctime = ( self.__thisUnixSample + self.__bufferIndex) / self.__sample_rate
645 641 self.profileIndex += self.__samples_to_read
646 642 self.__bufferIndex += self.__samples_to_read
647 643 self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
648 644 return True
649 645
650 646
651 647 def printInfo(self):
652 648 '''
653 649 '''
654 650 if self.__printInfo == False:
655 651 return
656 652
657 653 # self.systemHeaderObj.printInfo()
658 654 # self.radarControllerHeaderObj.printInfo()
659 655
660 656 self.__printInfo = False
661 657
662 658 def printNumberOfBlock(self):
663 659 '''
664 660 '''
665 661 return
666 662 # print self.profileIndex
667 663
668 664 def run(self, **kwargs):
669 665 '''
670 666 This method will be called many times so here you should put all your code
671 667 '''
672 668
673 669 if not self.isConfig:
674 670 self.setup(**kwargs)
675 671
676 672 self.getData(seconds=self.__delay)
677 673
678 674 return
679 675
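# --- Illustrative sketch (not part of the reader API) -------------------------
# A minimal, commented-out numpy sketch of the buffer slicing that getData() above
# performs: one long block of samples is read once into __data_buffer and then served
# one profile at a time. The names samples_to_read / n_samples are illustrative
# stand-ins for self.__samples_to_read / self.__nSamples.
#
# import numpy
# samples_to_read, n_samples = 40960, 40
# data_buffer = numpy.zeros((1, samples_to_read), dtype=numpy.complex128)
# buffer_index, profiles = 0, []
# while buffer_index <= samples_to_read - n_samples:      # same test as __isBufferEmpty()
#     profiles.append(data_buffer[:, buffer_index:buffer_index + n_samples])
#     buffer_index += n_samples
# # -> 40960 // 40 = 1024 profiles served per block read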
680 676 @MPDecorator
681 677 class DigitalRFWriter(Operation):
682 678 '''
683 679 classdocs
684 680 '''
685 681
686 682 def __init__(self, **kwargs):
687 683 '''
688 684 Constructor
689 685 '''
690 686 Operation.__init__(self, **kwargs)
691 687 self.metadata_dict = {}
692 688 self.dataOut = None
693 689 self.dtype = None
694 690 self.oldAverage = 0
695 691
696 692 def setHeader(self):
697 693
698 694 self.metadata_dict['frequency'] = self.dataOut.frequency
699 695 self.metadata_dict['timezone'] = self.dataOut.timeZone
700 696 self.metadata_dict['dtype'] = pickle.dumps(self.dataOut.dtype)
701 697 self.metadata_dict['nProfiles'] = self.dataOut.nProfiles
702 698 self.metadata_dict['heightList'] = self.dataOut.heightList
703 699 self.metadata_dict['channelList'] = self.dataOut.channelList
704 700 self.metadata_dict['flagDecodeData'] = self.dataOut.flagDecodeData
705 701 self.metadata_dict['flagDeflipData'] = self.dataOut.flagDeflipData
706 702 self.metadata_dict['flagShiftFFT'] = self.dataOut.flagShiftFFT
707 703 self.metadata_dict['useLocalTime'] = self.dataOut.useLocalTime
708 704 self.metadata_dict['nCohInt'] = self.dataOut.nCohInt
709 705 self.metadata_dict['type'] = self.dataOut.type
710 706 self.metadata_dict['flagDataAsBlock']= getattr(
711 707 self.dataOut, 'flagDataAsBlock', None) # check this
712 708
713 709 def setup(self, dataOut, path, frequency, fileCadence, dirCadence, metadataCadence, set=0, metadataFile='metadata', ext='.h5'):
714 710 '''
715 711 In this method we should set all initial parameters.
716 712 Input:
717 713 dataOut: Input data will also be the output data
718 714 '''
719 715 self.setHeader()
720 716 self.__ippSeconds = dataOut.ippSeconds
721 717 self.__deltaH = dataOut.getDeltaH()
722 718 self.__sample_rate = 1e6 * 0.15 / self.__deltaH
723 719 self.__dtype = dataOut.dtype
724 720 if len(dataOut.dtype) == 2:
725 721 self.__dtype = dataOut.dtype[0]
726 722 self.__nSamples = dataOut.systemHeaderObj.nSamples
727 723 self.__nProfiles = dataOut.nProfiles
728 724
729 725 if self.dataOut.type != 'Voltage':
730 726 raise ValueError('Digital RF cannot be used with this data type')
731 727 self.arr_data = numpy.ones((1, dataOut.nFFTPoints * len(
732 728 self.dataOut.channelList)), dtype=[('r', self.__dtype), ('i', self.__dtype)])
733 729 else:
734 730 self.arr_data = numpy.ones((self.__nSamples, len(
735 731 self.dataOut.channelList)), dtype=[('r', self.__dtype), ('i', self.__dtype)])
736 732
737 733 file_cadence_millisecs = 1000
738 734
739 735 sample_rate_fraction = Fraction(self.__sample_rate).limit_denominator()
740 736 sample_rate_numerator = int(sample_rate_fraction.numerator)
741 737 sample_rate_denominator = int(sample_rate_fraction.denominator)
742 738 start_global_index = dataOut.utctime * self.__sample_rate
743 739
744 740 uuid = 'prueba'
745 741 compression_level = 0
746 742 checksum = False
747 743 is_complex = True
748 744 num_subchannels = len(dataOut.channelList)
749 745 is_continuous = True
750 746 marching_periods = False
751 747
752 748 self.digitalWriteObj = digital_rf.DigitalRFWriter(path, self.__dtype, dirCadence,
753 749 fileCadence, start_global_index,
754 750 sample_rate_numerator, sample_rate_denominator, uuid, compression_level, checksum,
755 751 is_complex, num_subchannels, is_continuous, marching_periods)
756 752 metadata_dir = os.path.join(path, 'metadata')
757 753 os.system('mkdir %s' % (metadata_dir))
758 754 self.digitalMetadataWriteObj = digital_rf.DigitalMetadataWriter(metadata_dir, dirCadence, 1, # 236, file_cadence_millisecs / 1000
759 755 sample_rate_numerator, sample_rate_denominator,
760 756 metadataFile)
761 757 self.isConfig = True
762 758 self.currentSample = 0
763 759 self.oldAverage = 0
764 760 self.count = 0
765 761 return
766 762
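# Worked example (values are illustrative) of the sample-rate relation used in setup()
# above: 0.15 km per microsecond is c/2, so sample_rate [Hz] = 1e6 * 0.15 / deltaH [km].
#
# deltaH = 0.15  # km of range resolution -> sample_rate = 1e6 * 0.15 / 0.15 = 1.0e6 Hz (1 MHz)
# deltaH = 0.075 # km -> sample_rate = 1e6 * 0.15 / 0.075 = 2.0e6 Hz (2 MHz)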
767 763 def writeMetadata(self):
768 764 start_idx = self.__sample_rate * self.dataOut.utctime
769 765
770 766 self.metadata_dict['processingHeader'] = self.dataOut.processingHeaderObj.getAsDict(
771 767 )
772 768 self.metadata_dict['radarControllerHeader'] = self.dataOut.radarControllerHeaderObj.getAsDict(
773 769 )
774 770 self.metadata_dict['systemHeader'] = self.dataOut.systemHeaderObj.getAsDict(
775 771 )
776 772 self.digitalMetadataWriteObj.write(start_idx, self.metadata_dict)
777 773 return
778 774
779 775 def timeit(self, toExecute):
780 776 t0 = time()
781 777 toExecute()
782 778 self.executionTime = time() - t0
783 779 if self.oldAverage is None:
784 780 self.oldAverage = self.executionTime
785 781 self.oldAverage = (self.executionTime + self.count *
786 782 self.oldAverage) / (self.count + 1.0)
787 783 self.count = self.count + 1.0
788 784 return
789 785
790 786 def writeData(self):
791 787 if self.dataOut.type != 'Voltage':
792 788 raise ValueError('Digital RF cannot be used with this data type')
793 789 for channel in self.dataOut.channelList:
794 790 for i in range(self.dataOut.nFFTPoints):
795 791 self.arr_data[1][channel * self.dataOut.nFFTPoints +
796 792 i]['r'] = self.dataOut.data[channel][i].real
797 793 self.arr_data[1][channel * self.dataOut.nFFTPoints +
798 794 i]['i'] = self.dataOut.data[channel][i].imag
799 795 else:
800 796 for i in range(self.dataOut.systemHeaderObj.nSamples):
801 797 for channel in self.dataOut.channelList:
802 798 self.arr_data[i][channel]['r'] = self.dataOut.data[channel][i].real
803 799 self.arr_data[i][channel]['i'] = self.dataOut.data[channel][i].imag
804 800
805 801 def f(): return self.digitalWriteObj.rf_write(self.arr_data)
806 802 self.timeit(f)
807 803
808 804 return
809 805
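# A small, commented-out sketch of how writeData() above packs complex voltages into
# the structured ('r','i') array consumed by digital_rf's rf_write(); the dtype and
# sizes here are assumptions for illustration only, and the per-sample loops are
# replaced by a vectorized assignment.
#
# import numpy
# n_samples, n_channels = 4, 2
# data = (numpy.arange(n_samples * n_channels) + 1j).reshape(n_channels, n_samples)
# arr = numpy.ones((n_samples, n_channels), dtype=[('r', numpy.int16), ('i', numpy.int16)])
# arr['r'] = data.real.T
# arr['i'] = data.imag.T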
810 806 def run(self, dataOut, frequency=49.92e6, path=None, fileCadence=1000, dirCadence=36000, metadataCadence=1, **kwargs):
811 807 '''
812 808 This method will be called many times so here you should put all your code
813 809 Inputs:
814 810 dataOut: object with the data
815 811 '''
816 812 # print dataOut.__dict__
817 813 self.dataOut = dataOut
818 814 if not self.isConfig:
819 815 self.setup(dataOut, path, frequency, fileCadence,
820 816 dirCadence, metadataCadence, **kwargs)
821 817 self.writeMetadata()
822 818
823 819 self.writeData()
824 820
825 821 ## self.currentSample += 1
826 822 # if self.dataOut.flagDataAsBlock or self.currentSample == 1:
827 823 # self.writeMetadata()
828 824 ## if self.currentSample == self.__nProfiles: self.currentSample = 0
829 825
830 826 return dataOut # this return does not appear in version 2.7
831 827
832 828 def close(self):
833 829 print('[Writing] - Closing files ')
834 830 print('Average of writing to digital rf format is ', self.oldAverage * 1000)
835 831 try:
836 832 self.digitalWriteObj.close()
837 833 except:
838 834 pass
@@ -1,4986 +1,4986
1 1
2 2 import os
3 3 import time
4 4 import math
5 5
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12
13 13 from multiprocessing import Pool, TimeoutError
14 14 from multiprocessing.pool import ThreadPool
15 15 import numpy
16 16 import glob
17 17 import scipy
18 18 import h5py
19 19 from scipy.optimize import fmin_l_bfgs_b # optimize with bounds on state parameters
20 20 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
21 21 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
22 22 from numpy import asarray as ar, exp # modern scipy no longer re-exports these numpy functions
23 23 from scipy.optimize import curve_fit
24 24 from schainpy.utils import log
25 25 import schainpy.admin
26 26 import warnings
27 27 from scipy import optimize, interpolate, signal, stats, ndimage
28 28 from scipy.optimize import OptimizeWarning
29 29 warnings.filterwarnings('ignore')
30 30
31 31
32 32 SPEED_OF_LIGHT = 299792458
33 33
34 34 '''solving pickling issue'''
35 35
36 36 def _pickle_method(method):
37 37 func_name = method.__func__.__name__
38 38 obj = method.__self__
39 39 cls = method.__self__.__class__
40 40 return _unpickle_method, (func_name, obj, cls)
41 41
42 42 def _unpickle_method(func_name, obj, cls):
43 43 for cls in cls.mro():
44 44 try:
45 45 func = cls.__dict__[func_name]
46 46 except KeyError:
47 47 pass
48 48 else:
49 49 break
50 50 return func.__get__(obj, cls)
51 51
52 52 def isNumber(str):
53 53 try:
54 54 float(str)
55 55 return True
56 56 except:
57 57 return False
58 58
59 59 class ParametersProc(ProcessingUnit):
60 60
61 61 METHODS = {}
62 62 nSeconds = None
63 63
64 64 def __init__(self):
65 65 ProcessingUnit.__init__(self)
66 66
67 67 # self.objectDict = {}
68 68 self.buffer = None
69 69 self.firstdatatime = None
70 70 self.profIndex = 0
71 71 self.dataOut = Parameters()
72 72 self.setupReq = False # add this to all processing units
73 73
74 74 def __updateObjFromInput(self):
75 75
76 76 self.dataOut.inputUnit = self.dataIn.type
77 77
78 78 self.dataOut.timeZone = self.dataIn.timeZone
79 79 self.dataOut.dstFlag = self.dataIn.dstFlag
80 80 self.dataOut.errorCount = self.dataIn.errorCount
81 81 self.dataOut.useLocalTime = self.dataIn.useLocalTime
82 82
83 83 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
84 84 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
85 85 self.dataOut.channelList = self.dataIn.channelList
86 86 self.dataOut.heightList = self.dataIn.heightList
87 87 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
88 88 # self.dataOut.nHeights = self.dataIn.nHeights
89 89 # self.dataOut.nChannels = self.dataIn.nChannels
90 90 # self.dataOut.nBaud = self.dataIn.nBaud
91 91 # self.dataOut.nCode = self.dataIn.nCode
92 92 # self.dataOut.code = self.dataIn.code
93 93 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
94 94 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
95 95 # self.dataOut.utctime = self.firstdatatime
96 96 self.dataOut.utctime = self.dataIn.utctime
97 97 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
98 98 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data is not flipped
99 99 self.dataOut.nCohInt = self.dataIn.nCohInt
100 100 # self.dataOut.nIncohInt = 1
101 101 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
102 102 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
103 103 self.dataOut.timeInterval1 = self.dataIn.timeInterval
104 104 self.dataOut.heightList = self.dataIn.heightList
105 105 self.dataOut.frequency = self.dataIn.frequency
106 106 # self.dataOut.noise = self.dataIn.noise
107 107
108 108 def run(self):
109 109
110 110
111 111 #print("HOLA MUNDO SOY YO")
112 112 #---------------------- Voltage Data ---------------------------
113 113
114 114 if self.dataIn.type == "Voltage":
115 115
116 116 self.__updateObjFromInput()
117 117 self.dataOut.data_pre = self.dataIn.data.copy()
118 118 self.dataOut.flagNoData = False
119 119 self.dataOut.utctimeInit = self.dataIn.utctime
120 120 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
121 121
122 122 if hasattr(self.dataIn, 'flagDataAsBlock'):
123 123 self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock
124 124
125 125 if hasattr(self.dataIn, 'profileIndex'):
126 126 self.dataOut.profileIndex = self.dataIn.profileIndex
127 127
128 128 if hasattr(self.dataIn, 'dataPP_POW'):
129 129 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
130 130
131 131 if hasattr(self.dataIn, 'dataPP_POWER'):
132 132 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
133 133
134 134 if hasattr(self.dataIn, 'dataPP_DOP'):
135 135 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
136 136
137 137 if hasattr(self.dataIn, 'dataPP_SNR'):
138 138 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
139 139
140 140 if hasattr(self.dataIn, 'dataPP_WIDTH'):
141 141 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
142 142 return
143 143
144 144 #---------------------- Spectra Data ---------------------------
145 145
146 146 if self.dataIn.type == "Spectra":
147 147 #print("que paso en spectra")
148 148 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
149 149 self.dataOut.data_spc = self.dataIn.data_spc
150 150 self.dataOut.data_cspc = self.dataIn.data_cspc
151 151 self.dataOut.nProfiles = self.dataIn.nProfiles
152 152 self.dataOut.nIncohInt = self.dataIn.nIncohInt
153 153 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
154 154 self.dataOut.ippFactor = self.dataIn.ippFactor
155 155 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
156 156 self.dataOut.spc_noise = self.dataIn.getNoise()
157 157 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
158 158 # self.dataOut.normFactor = self.dataIn.normFactor
159 159 self.dataOut.pairsList = self.dataIn.pairsList
160 160 self.dataOut.groupList = self.dataIn.pairsList
161 161 self.dataOut.flagNoData = False
162 162
163 163 if hasattr(self.dataIn, 'flagDataAsBlock'):
164 164 self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock
165 165
166 166 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
167 167 self.dataOut.ChanDist = self.dataIn.ChanDist
168 168 else: self.dataOut.ChanDist = None
169 169
170 170 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
171 171 # self.dataOut.VelRange = self.dataIn.VelRange
172 172 #else: self.dataOut.VelRange = None
173 173
174 174 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
175 175 self.dataOut.RadarConst = self.dataIn.RadarConst
176 176
177 177 if hasattr(self.dataIn, 'NPW'): #NPW
178 178 self.dataOut.NPW = self.dataIn.NPW
179 179
180 180 if hasattr(self.dataIn, 'COFA'): #COFA
181 181 self.dataOut.COFA = self.dataIn.COFA
182 182
183 183
184 184
185 185 #---------------------- Correlation Data ---------------------------
186 186
187 187 if self.dataIn.type == "Correlation":
188 188 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
189 189
190 190 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
191 191 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
192 192 self.dataOut.groupList = (acf_pairs, ccf_pairs)
193 193
194 194 self.dataOut.abscissaList = self.dataIn.lagRange
195 195 self.dataOut.noise = self.dataIn.noise
196 196 self.dataOut.data_snr = self.dataIn.SNR
197 197 self.dataOut.flagNoData = False
198 198 self.dataOut.nAvg = self.dataIn.nAvg
199 199
200 200 #---------------------- Parameters Data ---------------------------
201 201
202 202 if self.dataIn.type == "Parameters":
203 203 self.dataOut.copy(self.dataIn)
204 204 self.dataOut.flagNoData = False
205 205 #print("yo si entre")
206 206
207 207 return True
208 208
209 209 self.__updateObjFromInput()
210 210 #print("yo si entre2")
211 211
212 212 self.dataOut.utctimeInit = self.dataIn.utctime
213 213 self.dataOut.paramInterval = self.dataIn.timeInterval
214 214 #print("soy spectra ",self.dataOut.utctimeInit)
215 215 return
216 216
217 217
218 218 def target(tups):
219 219
220 220 obj, args = tups
221 221
222 222 return obj.FitGau(args)
223 223
224 224 class RemoveWideGC(Operation):
225 225 ''' This class removes wide ground clutter and replaces it with simple interpolated points.
226 226 This mainly applies to CLAIRE radar
227 227
228 228 ClutterWidth : Width to look for the clutter peak
229 229
230 230 Input:
231 231
232 232 self.dataOut.data_pre : SPC and CSPC
233 233 self.dataOut.spc_range : To select wind and rainfall velocities
234 234
235 235 Affected:
236 236
237 237 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
238 238
239 239 Written by D. Scipión 25.02.2021
240 240 '''
241 241 def __init__(self):
242 242 Operation.__init__(self)
243 243 self.i = 0
244 244 self.ich = 0
245 245 self.ir = 0
246 246
247 247 def run(self, dataOut, ClutterWidth=2.5):
248 248 # print ('Entering RemoveWideGC ... ')
249 249
250 250 self.spc = dataOut.data_pre[0].copy()
251 251 self.spc_out = dataOut.data_pre[0].copy()
252 252 self.Num_Chn = self.spc.shape[0]
253 253 self.Num_Hei = self.spc.shape[2]
254 254 VelRange = dataOut.spc_range[2][:-1]
255 255 dv = VelRange[1]-VelRange[0]
256 256
257 257 # Find the velocities that corresponds to zero
258 258 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
259 259
260 260 # Removing novalid data from the spectra
261 261 for ich in range(self.Num_Chn) :
262 262 for ir in range(self.Num_Hei) :
263 263 # Estimate the noise at each range
264 264 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
265 265
266 266 # Removing the noise floor at each range
267 267 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
268 268 self.spc[ich,novalid,ir] = HSn
269 269
270 270 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
271 271 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
272 272 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
273 273 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
274 274 continue
275 275 junk3 = numpy.squeeze(numpy.diff(j1index))
276 276 junk4 = numpy.squeeze(numpy.diff(j2index))
277 277
278 278 valleyindex = j2index[numpy.where(junk4>1)]
279 279 peakindex = j1index[numpy.where(junk3>1)]
280 280
281 281 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
282 282 if numpy.size(isvalid) == 0 :
283 283 continue
284 284 if numpy.size(isvalid) >1 :
285 285 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
286 286 isvalid = isvalid[vindex]
287 287
288 288 # clutter peak
289 289 gcpeak = peakindex[isvalid]
290 290 vl = numpy.where(valleyindex < gcpeak)
291 291 if numpy.size(vl) == 0:
292 292 continue
293 293 gcvl = valleyindex[vl[0][-1]]
294 294 vr = numpy.where(valleyindex > gcpeak)
295 295 if numpy.size(vr) == 0:
296 296 continue
297 297 gcvr = valleyindex[vr[0][0]]
298 298
299 299 # Removing the clutter
300 300 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
301 301 gcindex = gc_values[gcvl+1:gcvr-1]
302 302 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
303 303
304 304 dataOut.data_pre[0] = self.spc_out
305 305 #print ('Leaving RemoveWideGC ... ')
306 306 return dataOut
307 307
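# A self-contained, commented-out sketch of the clutter-notch interpolation idea used
# by RemoveWideGC above (all numbers are illustrative, not radar parameters):
#
# import numpy
# vel = numpy.linspace(-10, 10, 64)                          # velocity axis [m/s]
# spc = 1.0 + 50.0 * numpy.exp(-0.5 * (vel / 0.8) ** 2)      # spectrum with a strong 0 m/s clutter peak
# gc = numpy.where(numpy.abs(vel) <= 2.5)[0]                 # bins inside ClutterWidth
# edges = numpy.array([gc[0] - 1, gc[-1] + 1])               # valley bins on both sides of the notch
# spc[gc] = numpy.interp(vel[gc], vel[edges], spc[edges])    # replace clutter by interpolation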
308 308 class SpectralFilters(Operation):
309 309 ''' This class replaces the novalid (invalid) values with noise for each channel
310 310 This applies to CLAIRE RADAR
311 311
312 312 PositiveLimit : RightLimit of novalid data
313 313 NegativeLimit : LeftLimit of novalid data
314 314
315 315 Input:
316 316
317 317 self.dataOut.data_pre : SPC and CSPC
318 318 self.dataOut.spc_range : To select wind and rainfall velocities
319 319
320 320 Affected:
321 321
322 322 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
323 323
324 324 Written by D. Scipión 29.01.2021
325 325 '''
326 326 def __init__(self):
327 327 Operation.__init__(self)
328 328 self.i = 0
329 329
330 330 def run(self, dataOut, NegativeLimit=-1.5, PositiveLimit=1.5):
331 331
332 332 self.spc = dataOut.data_pre[0].copy()
333 333 self.Num_Chn = self.spc.shape[0]
334 334 VelRange = dataOut.spc_range[2]
335 335
336 336 # novalid corresponds to the velocity bins between NegativeLimit and PositiveLimit
337 337 # (novalid was left undefined in the original; the limits and their defaults here are an assumption)
338 338 novalid = numpy.where((VelRange[:-1] >= NegativeLimit) & (VelRange[:-1] <= PositiveLimit))
339 339 # Replacing the novalid bins of the spectra with the channel noise
340 340 for i in range(self.Num_Chn):
341 341 self.spc[i,novalid,:] = dataOut.noise[i]
342 342 dataOut.data_pre[0] = self.spc
343 343 return dataOut
344 344
345 345 class GaussianFit(Operation):
346 346
347 347 '''
348 348 Function that fits one and two generalized Gaussians (gg) based
349 349 on the PSD shape across a "power band" identified from a cumulative sum of
350 350 the measured spectrum minus the noise.
351 351
352 352 Input:
353 353 self.dataOut.data_pre : SelfSpectra
354 354
355 355 Output:
356 356 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
357 357
358 358 '''
359 359 def __init__(self):
360 360 Operation.__init__(self)
361 361 self.i=0
362 362
363 363
364 364 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
365 365 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
366 366 """This routine will find a couple of generalized Gaussians to a power spectrum
367 367 methods: generalized, squared
368 368 input: spc
369 369 output:
370 370 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
371 371 """
372 372 print ('Entering ',method,' double Gaussian fit')
373 373 self.spc = dataOut.data_pre[0].copy()
374 374 self.Num_Hei = self.spc.shape[2]
375 375 self.Num_Bin = self.spc.shape[1]
376 376 self.Num_Chn = self.spc.shape[0]
377 377
378 378 start_time = time.time()
379 379
380 380 pool = Pool(processes=self.Num_Chn)
381 381 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
382 382 objs = [self for __ in range(self.Num_Chn)]
383 383 attrs = list(zip(objs, args))
384 384 DGauFitParam = pool.map(target, attrs)
385 385 # Parameters:
386 386 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
387 387 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
388 388
389 389 # Double Gaussian Curves
390 390 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
391 391 gau0[:] = numpy.NaN
392 392 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
393 393 gau1[:] = numpy.NaN
394 394 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
395 395 for iCh in range(self.Num_Chn):
396 396 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
397 397 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
398 398 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
399 399 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
400 400 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
401 401 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
402 402 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
403 403 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
404 404 if method == 'generalized':
405 405 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
406 406 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
407 407 elif method == 'squared':
408 408 p0 = 2.
409 409 p1 = 2.
410 410 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
411 411 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
412 412 dataOut.GaussFit0 = gau0
413 413 dataOut.GaussFit1 = gau1
414 414
415 415 print('Leaving ',method ,' double Gaussian fit')
416 416 return dataOut
417 417
418 418 def FitGau(self, X):
419 419 # print('Entering FitGau')
420 420 # Assigning the variables
421 421 Vrange, ch, wnoise, num_intg, SNRlimit = X
422 422 # Noise Limits
423 423 noisebl = wnoise * 0.9
424 424 noisebh = wnoise * 1.1
425 425 # Radar Velocity
426 426 Va = max(Vrange)
427 427 deltav = Vrange[1] - Vrange[0]
428 428 x = numpy.arange(self.Num_Bin)
429 429
430 430 # print ('stop 0')
431 431
432 432 # 5 parameters, 2 Gaussians
433 433 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
434 434 DGauFitParam[:] = numpy.NaN
435 435
436 436 # SPCparam = []
437 437 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
438 438 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
439 439 # SPC_ch1[:] = 0 #numpy.NaN
440 440 # SPC_ch2[:] = 0 #numpy.NaN
441 441 # print ('stop 1')
442 442 for ht in range(self.Num_Hei):
443 443 # print (ht)
444 444 # print ('stop 2')
445 445 # Spectra at each range
446 446 spc = numpy.asarray(self.spc)[ch,:,ht]
447 447 snr = ( spc.mean() - wnoise ) / wnoise
448 448 snrdB = 10.*numpy.log10(snr)
449 449
450 450 #print ('stop 3')
451 451 if snrdB < SNRlimit :
452 452 # snr = numpy.NaN
453 453 # SPC_ch1[:,ht] = 0#numpy.NaN
454 454 # SPC_ch1[:,ht] = 0#numpy.NaN
455 455 # SPCparam = (SPC_ch1,SPC_ch2)
456 456 # print ('SNR less than SNRth')
457 457 continue
458 458 # wnoise = hildebrand_sekhon(spc,num_intg)
459 459 # print ('stop 2.01')
460 460 #############################################
461 461 # normalizing spc and noise
462 462 # This part differs from gg1
463 463 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
464 464 #spc = spc / spc_norm_max
465 465 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
466 466 #############################################
467 467
468 468 # print ('stop 2.1')
469 469 fatspectra=1.0
470 470 # noise per channel.... we might want to use the noise at each range
471 471
472 472 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
473 473 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
474 474 #if wnoise>1.1*pnoise: # to be tested later
475 475 # wnoise=pnoise
476 476 # noisebl = wnoise*0.9
477 477 # noisebh = wnoise*1.1
478 478 spc = spc - wnoise # signal
479 479
480 480 # print ('stop 2.2')
481 481 minx = numpy.argmin(spc)
482 482 #spcs=spc.copy()
483 483 spcs = numpy.roll(spc,-minx)
484 484 cum = numpy.cumsum(spcs)
485 485 # tot_noise = wnoise * self.Num_Bin #64;
486 486
487 487 # print ('stop 2.3')
488 488 # snr = sum(spcs) / tot_noise
489 489 # snrdB = 10.*numpy.log10(snr)
490 490 #print ('stop 3')
491 491 # if snrdB < SNRlimit :
492 492 # snr = numpy.NaN
493 493 # SPC_ch1[:,ht] = 0#numpy.NaN
494 494 # SPC_ch1[:,ht] = 0#numpy.NaN
495 495 # SPCparam = (SPC_ch1,SPC_ch2)
496 496 # print ('SNR less than SNRth')
497 497 # continue
498 498
499 499
500 500 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
501 501 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
502 502 # print ('stop 4')
503 503 cummax = max(cum)
504 504 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
505 505 cumlo = cummax * epsi
506 506 cumhi = cummax * (1-epsi)
507 507 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
508 508
509 509 # print ('stop 5')
510 510 if len(powerindex) < 1:# case for powerindex 0
511 511 # print ('powerindex < 1')
512 512 continue
513 513 powerlo = powerindex[0]
514 514 powerhi = powerindex[-1]
515 515 powerwidth = powerhi-powerlo
516 516 if powerwidth <= 1:
517 517 # print('powerwidth <= 1')
518 518 continue
519 519
520 520 # print ('stop 6')
521 521 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
522 522 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
523 523 midpeak = (firstpeak + secondpeak)/2.
524 524 firstamp = spcs[int(firstpeak)]
525 525 secondamp = spcs[int(secondpeak)]
526 526 midamp = spcs[int(midpeak)]
527 527
528 528 y_data = spc + wnoise
529 529
530 530 ''' single Gaussian '''
531 531 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
532 532 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
533 533 power0 = 2.
534 534 amplitude0 = midamp
535 535 state0 = [shift0,width0,amplitude0,power0,wnoise]
536 536 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
537 537 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
538 538 # print ('stop 7.1')
539 539 # print (bnds)
540 540
541 541 chiSq1=lsq1[1]
542 542
543 543 # print ('stop 8')
544 544 if fatspectra<1.0 and powerwidth<4:
545 545 choice=0
546 546 Amplitude0=lsq1[0][2]
547 547 shift0=lsq1[0][0]
548 548 width0=lsq1[0][1]
549 549 p0=lsq1[0][3]
550 550 Amplitude1=0.
551 551 shift1=0.
552 552 width1=0.
553 553 p1=0.
554 554 noise=lsq1[0][4]
555 555 #return (numpy.array([shift0,width0,Amplitude0,p0]),
556 556 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
557 557
558 558 # print ('stop 9')
559 559 ''' two Gaussians '''
560 560 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
561 561 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
562 562 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
563 563 width0 = powerwidth/6.
564 564 width1 = width0
565 565 power0 = 2.
566 566 power1 = power0
567 567 amplitude0 = firstamp
568 568 amplitude1 = secondamp
569 569 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
570 570 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
571 571 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
572 572 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
573 573
574 574 # print ('stop 10')
575 575 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
576 576
577 577 # print ('stop 11')
578 578 chiSq2 = lsq2[1]
579 579
580 580 # print ('stop 12')
581 581
582 582 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
583 583
584 584 # print ('stop 13')
585 585 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
586 586 if oneG:
587 587 choice = 0
588 588 else:
589 589 w1 = lsq2[0][1]; w2 = lsq2[0][5]
590 590 a1 = lsq2[0][2]; a2 = lsq2[0][6]
591 591 p1 = lsq2[0][3]; p2 = lsq2[0][7]
592 592 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
593 593 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
594 594 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized Gaussian with proper p scaling
595 595
596 596 if gp1>gp2:
597 597 if a1>0.7*a2:
598 598 choice = 1
599 599 else:
600 600 choice = 2
601 601 elif gp2>gp1:
602 602 if a2>0.7*a1:
603 603 choice = 2
604 604 else:
605 605 choice = 1
606 606 else:
607 607 choice = numpy.argmax([a1,a2])+1
608 608 #else:
609 609 #choice=argmin([std2a,std2b])+1
610 610
611 611 else: # with low SNR go to the most energetic peak
612 612 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
613 613
614 614 # print ('stop 14')
615 615 shift0 = lsq2[0][0]
616 616 vel0 = Vrange[0] + shift0 * deltav
617 617 shift1 = lsq2[0][4]
618 618 # vel1=Vrange[0] + shift1 * deltav
619 619
620 620 # max_vel = 1.0
621 621 # Va = max(Vrange)
622 622 # deltav = Vrange[1]-Vrange[0]
623 623 # print ('stop 15')
624 624 #first peak will be 0, second peak will be 1
625 625 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
626 626 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
627 627 shift0 = lsq2[0][0]
628 628 width0 = lsq2[0][1]
629 629 Amplitude0 = lsq2[0][2]
630 630 p0 = lsq2[0][3]
631 631
632 632 shift1 = lsq2[0][4]
633 633 width1 = lsq2[0][5]
634 634 Amplitude1 = lsq2[0][6]
635 635 p1 = lsq2[0][7]
636 636 noise = lsq2[0][8]
637 637 else:
638 638 shift1 = lsq2[0][0]
639 639 width1 = lsq2[0][1]
640 640 Amplitude1 = lsq2[0][2]
641 641 p1 = lsq2[0][3]
642 642
643 643 shift0 = lsq2[0][4]
644 644 width0 = lsq2[0][5]
645 645 Amplitude0 = lsq2[0][6]
646 646 p0 = lsq2[0][7]
647 647 noise = lsq2[0][8]
648 648
649 649 if Amplitude0<0.05: # in case the peak is noise
650 650 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
651 651 if Amplitude1<0.05:
652 652 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
653 653
654 654 # print ('stop 16 ')
655 655 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
656 656 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
657 657 # SPCparam = (SPC_ch1,SPC_ch2)
658 658
659 659 DGauFitParam[0,ht,0] = noise
660 660 DGauFitParam[0,ht,1] = noise
661 661 DGauFitParam[1,ht,0] = Amplitude0
662 662 DGauFitParam[1,ht,1] = Amplitude1
663 663 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
664 664 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
665 665 DGauFitParam[3,ht,0] = width0 * deltav
666 666 DGauFitParam[3,ht,1] = width1 * deltav
667 667 DGauFitParam[4,ht,0] = p0
668 668 DGauFitParam[4,ht,1] = p1
669 669
670 670 # print (DGauFitParam.shape)
671 671 # print ('Leaving FitGau')
672 672 return DGauFitParam
673 673 # return SPCparam
674 674 # return GauSPC
675 675
676 676 def y_model1(self,x,state):
677 677 shift0, width0, amplitude0, power0, noise = state
678 678 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
679 679 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
680 680 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
681 681 return model0 + model0u + model0d + noise
682 682
683 683 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
684 684 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
685 685 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
686 686 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
687 687 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
688 688
689 689 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
690 690 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
691 691 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
692 692 return model0 + model0u + model0d + model1 + model1u + model1d + noise
693 693
694 694 def misfit1(self,state,y_data,x,num_intg): # This function measures how close the real data is to the model data; the closer it is, the better the fit.
695 695
696 696 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
697 697
698 698 def misfit2(self,state,y_data,x,num_intg):
699 699 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
700 700
701 701
702 702
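# A compact, standalone (commented-out) sketch of the single generalized-Gaussian fit
# that FitGau()/misfit1() above perform, run on synthetic data; every value below is
# illustrative and none of these names belong to the class.
#
# import numpy
# from scipy.optimize import fmin_l_bfgs_b
# x = numpy.arange(64)
# model = lambda x, s: s[2] * numpy.exp(-0.5 * numpy.abs((x - s[0]) / s[1]) ** s[3]) + s[4]
# truth = [20., 4., 5., 2., 1.]                     # shift, width, amplitude, power, noise
# y = model(x, truth) + 0.05 * numpy.random.rand(64)
# misfit = lambda s, y, x: numpy.sum((numpy.log(y) - numpy.log(model(x, s))) ** 2)
# state0 = [30., 6., 3., 2., 1.]
# bnds = ((0, 63), (1, 32), (0, None), (0.5, 3.), (0.5, 1.5))
# best, chi2, info = fmin_l_bfgs_b(misfit, state0, args=(y, x), bounds=bnds, approx_grad=True)
# # best approaches truth; chi2 is the residual used to compare one- vs two-Gaussian fits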
703 703 class PrecipitationProc(Operation):
704 704
705 705 '''
706 706 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
707 707
708 708 Input:
709 709 self.dataOut.data_pre : SelfSpectra
710 710
711 711 Output:
712 712
713 713 self.dataOut.data_output : Reflectivity factor, rainfall Rate
714 714
715 715
716 716 Parameters affected:
717 717 '''
718 718
719 719 def __init__(self):
720 720 Operation.__init__(self)
721 721 self.i=0
722 722
723 723 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
724 724 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30):
725 725
726 726 # print ('Entering PrecepitationProc ... ')
727 727
728 728 if radar == "MIRA35C" :
729 729
730 730 self.spc = dataOut.data_pre[0].copy()
731 731 self.Num_Hei = self.spc.shape[2]
732 732 self.Num_Bin = self.spc.shape[1]
733 733 self.Num_Chn = self.spc.shape[0]
734 734 Ze = self.dBZeMODE2(dataOut)
735 735
736 736 else:
737 737
738 738 self.spc = dataOut.data_pre[0].copy()
739 739
740 740 # NOTE: the Tx pulse range must be removed
741 741 self.spc[:,:,0:7]= numpy.NaN
742 742
743 743 self.Num_Hei = self.spc.shape[2]
744 744 self.Num_Bin = self.spc.shape[1]
745 745 self.Num_Chn = self.spc.shape[0]
746 746
747 747 VelRange = dataOut.spc_range[2]
748 748
749 749 ''' Obtain the radar constant '''
750 750
751 751 self.Pt = Pt
752 752 self.Gt = Gt
753 753 self.Gr = Gr
754 754 self.Lambda = Lambda
755 755 self.aL = aL
756 756 self.tauW = tauW
757 757 self.ThetaT = ThetaT
758 758 self.ThetaR = ThetaR
759 759 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
760 760 self.lt = 10**(1.67/10) # Tx cable losses 1.67 dB
761 761 self.lr = 10**(5.73/10) # Rx cable losses 5.73 dB
762 762
763 763 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
764 764 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
765 765 RadarConstant = 10e-26 * Numerator / Denominator #
766 766 ExpConstant = 10**(40/10) # Experimental constant
767 767
768 768 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
769 769 for i in range(self.Num_Chn):
770 770 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
771 771 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
772 772
773 773 SPCmean = numpy.mean(SignalPower, 0)
774 774 Pr = SPCmean[:,:]/dataOut.normFactor
775 775
776 776 # Declaring auxiliary variables
777 777 Range = dataOut.heightList*1000. #Range in m
778 778 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
779 779 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
780 780 zMtrx = rMtrx+Altitude
781 781 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
782 782 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
783 783
784 784 # height dependence to air density Foote and Du Toit (1969)
785 785 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
786 786 VMtrx = VelMtrx / delv_z #Normalized velocity
787 787 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
788 788 # Diameter is related to the fall speed of falling drops
789 789 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
790 790 # Only valid for D>= 0.16 mm
791 791 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
792 792
793 793 #Calculate Radar Reflectivity ETAn
794 794 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
795 795 ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
796 796 # Radar Cross Section
797 797 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
798 798 # Drop Size Distribution
799 799 DSD = ETAn / sigmaD
800 800 # Equivalent reflectivity
801 801 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
802 802 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
803 803 # RainFall Rate
804 804 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
805 805
806 806 # Censoring the data
807 807 # Removing data with SNR below SNRth; the per-channel SNR should be considered
808 808 SNRth = 10**(SNRdBlimit/10) #-30dB
809 809 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
810 810 W = numpy.nanmean(dataOut.data_dop,0)
811 811 W[novalid] = numpy.NaN
812 812 Ze_org[novalid] = numpy.NaN
813 813 RR[novalid] = numpy.NaN
814 814
815 815 dataOut.data_output = RR[8]
816 816 dataOut.data_param = numpy.ones([3,self.Num_Hei])
817 817 dataOut.channelList = [0,1,2]
818 818
819 819 dataOut.data_param[0]=10*numpy.log10(Ze_org)
820 820 dataOut.data_param[1]=-W
821 821 dataOut.data_param[2]=RR
822 822
823 823 # print ('Leaving PrecepitationProc ... ')
824 824 return dataOut
825 825
826 826 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
827 827
828 828 NPW = dataOut.NPW
829 829 COFA = dataOut.COFA
830 830
831 831 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
832 832 RadarConst = dataOut.RadarConst
833 833 #frequency = 34.85*10**9
834 834
835 835 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
836 836 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
837 837
838 838 ETA = numpy.sum(SNR,1)
839 839
840 840 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
841 841
842 842 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
843 843
844 844 for r in range(self.Num_Hei):
845 845
846 846 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
847 847 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
848 848
849 849 return Ze
850 850
851 851 # def GetRadarConstant(self):
852 852 #
853 853 # """
854 854 # Constants:
855 855 #
856 856 # Pt: Transmission Power dB 5kW 5000
857 857 # Gt: Transmission Gain dB 24.7 dB 295.1209
858 858 # Gr: Reception Gain dB 18.5 dB 70.7945
859 859 # Lambda: Wavelength m 0.6741 m 0.6741
860 860 # aL: Attenuation loses dB 4dB 2.5118
861 861 # tauW: Width of transmission pulse s 4us 4e-6
862 862 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
863 863 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
864 864 #
865 865 # """
866 866 #
867 867 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
868 868 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * ThetaR)
869 869 # RadarConstant = Numerator / Denominator
870 870 #
871 871 # return RadarConstant
872 872
873 873
874 874
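# A short, commented-out illustration of the fall-speed -> drop-diameter -> rain-rate
# chain coded in PrecipitationProc.run() above (the DSD values are made up):
#
# import numpy
# V = numpy.array([2.0, 4.0, 6.0, 8.0])                    # fall speeds [m/s]
# D = -1.667 * numpy.log(0.9369 - 0.097087 * V)            # drop diameter [mm], same relation as run()
# DSD = numpy.array([200., 100., 20., 2.])                 # drops per m^3 per mm (illustrative)
# Ze = numpy.nansum(DSD * D ** 6)                          # reflectivity factor [mm^6 / m^3]
# RR = 0.0006 * numpy.pi * numpy.nansum(D ** 3 * DSD * V)  # rain rate [mm/hr]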
875 875 class FullSpectralAnalysis(Operation):
876 876
877 877 """
878 878 Function that implements Full Spectral Analysis technique.
879 879
880 880 Input:
881 881 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
882 882 self.dataOut.groupList : Pairlist of channels
883 883 self.dataOut.ChanDist : Physical distance between receivers
884 884
885 885
886 886 Output:
887 887
888 888 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
889 889
890 890
891 891 Parameters affected: Winds, height range, SNR
892 892
893 893 """
894 894 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
895 895 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
896 896
897 897 spc = dataOut.data_pre[0].copy()
898 898 cspc = dataOut.data_pre[1]
899 899 nHeights = spc.shape[2]
900 900
901 901 # first_height = 0.75 #km (ref: data header 20170822)
902 902 # resolution_height = 0.075 #km
903 903 '''
904 904 finding height range. check this when radar parameters are changed!
905 905 '''
906 906 if maxheight is not None:
907 907 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
908 908 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
909 909 else:
910 910 range_max = nHeights
911 911 if minheight is not None:
912 912 # range_min = int((minheight - first_height) / resolution_height) # theoretical
913 913 range_min = int(13.26 * minheight - 5) # empirical, works better
914 914 if range_min < 0:
915 915 range_min = 0
916 916 else:
917 917 range_min = 0
918 918
919 919 pairsList = dataOut.groupList
920 920 if dataOut.ChanDist is not None :
921 921 ChanDist = dataOut.ChanDist
922 922 else:
923 923 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
924 924
925 925 # 4 variables: zonal, meridional, vertical, and average SNR
926 926 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
927 927 velocityX = numpy.zeros([nHeights]) * numpy.NaN
928 928 velocityY = numpy.zeros([nHeights]) * numpy.NaN
929 929 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
930 930
931 931 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
932 932
933 933 '''***********************************************WIND ESTIMATION**************************************'''
934 934 for Height in range(nHeights):
935 935
936 936 if Height >= range_min and Height < range_max:
937 937 # error_code will be useful in future analysis
938 938 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
939 939 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
940 940
941 941 if abs(Vzon) < 100. and abs(Vmer) < 100.:
942 942 velocityX[Height] = Vzon
943 943 velocityY[Height] = -Vmer
944 944 velocityZ[Height] = Vver
945 945
946 946 # Censoring data with SNR threshold
947 947 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
948 948
949 949 data_param[0] = velocityX
950 950 data_param[1] = velocityY
951 951 data_param[2] = velocityZ
952 952 data_param[3] = dbSNR
953 953 dataOut.data_param = data_param
954 954 return dataOut
955 955
956 956 def moving_average(self,x, N=2):
957 957 """ convolution for smoothing data. Note that the last N-1 values are a convolution with zeroes """
958 958 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
959 959
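# Quick, commented-out check of moving_average() (the same numpy.convolve call as above):
#
# import numpy
# x = numpy.array([1., 2., 3., 4.])
# numpy.convolve(x, numpy.ones(2) / 2)[1:]  # -> [1.5, 2.5, 3.5, 2.0]; the last value is an edge effect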
960 960 def gaus(self,xSamples,Amp,Mu,Sigma):
961 961 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
962 962
963 963 def Moments(self, ySamples, xSamples):
964 964 Power = numpy.nanmean(ySamples) # Power, 0th Moment
965 965 yNorm = ySamples / numpy.nansum(ySamples)
966 966 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
967 967 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
968 968 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Desv. Estandar, Ancho espectral
969 969 return numpy.array([Power,RadVel,StdDev])
970 970
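# Tiny, commented-out worked example of Moments(): for a spectrum centred near 0 m/s
# the 1st moment (radial velocity) is ~0 and the 2nd moment recovers the spectral
# width (values illustrative):
#
# import numpy
# v = numpy.linspace(-5, 5, 11)
# s = numpy.exp(-0.5 * (v / 1.5) ** 2)
# yNorm = s / numpy.nansum(s)
# vr = numpy.nansum(v * yNorm)                              # ~0.0 m/s
# width = numpy.sqrt(numpy.nansum(yNorm * (v - vr) ** 2))   # ~1.5 m/s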
971 971 def StopWindEstimation(self, error_code):
972 972 Vzon = numpy.NaN
973 973 Vmer = numpy.NaN
974 974 Vver = numpy.NaN
975 975 return Vzon, Vmer, Vver, error_code
976 976
977 977 def AntiAliasing(self, interval, maxstep):
978 978 """
979 979 function to prevent errors from aliased values when computing the phase slope
980 980 """
981 981 antialiased = numpy.zeros(len(interval))
982 982 copyinterval = interval.copy()
983 983
984 984 antialiased[0] = copyinterval[0]
985 985
986 986 for i in range(1,len(antialiased)):
987 987 step = interval[i] - interval[i-1]
988 988 if step > maxstep:
989 989 copyinterval -= 2*numpy.pi
990 990 antialiased[i] = copyinterval[i]
991 991 elif step < maxstep*(-1):
992 992 copyinterval += 2*numpy.pi
993 993 antialiased[i] = copyinterval[i]
994 994 else:
995 995 antialiased[i] = copyinterval[i].copy()
996 996
997 997 return antialiased
998 998
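# For reference only: AntiAliasing() above plays a role similar to numpy.unwrap on a
# wrapped phase series (a commented-out sketch, not a drop-in replacement, since the
# in-house version uses a configurable maxstep instead of the fixed pi threshold):
#
# import numpy
# phase = numpy.array([3.0, 3.1, -3.1, -3.0])  # wrapped jump near +pi -> -pi
# numpy.unwrap(phase)                          # -> [3.0, 3.1, 3.183..., 3.283...]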
999 999 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
1000 1000 """
1001 1001 Function that Calculates Zonal, Meridional and Vertical wind velocities.
1002 1002 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
1003 1003
1004 1004 Input:
1005 1005 spc, cspc : self-spectra and cross-spectra data. In Briggs notation, roughly S_i*conj(S_i) and S_i*conj(S_j), respectively.
1006 1006 pairsList : Pairlist of channels
1007 1007 ChanDist : array of xi_ij and eta_ij
1008 1008 Height : height at which data is processed
1009 1009 noise : noise in [channels] format for specific height
1010 1010 AbbsisaRange : range of the frequencies or velocities
1011 1011 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
1012 1012
1013 1013 Output:
1014 1014 Vzon, Vmer, Vver : wind velocities
1015 1015 error_code : int that states where code is terminated
1016 1016
1017 1017 0 : no error detected
1018 1018 1 : Gaussian of mean spc exceeds widthlimit
1019 1019 2 : no Gaussian of mean spc found
1020 1020 3 : SNR too low or velocity too high -> e.g. precipitation
1021 1021 4 : at least one Gaussian of cspc exceeds widthlimit
1022 1022 5 : zero out of three cspc Gaussian fits converged
1023 1023 6 : phase slope fit could not be found
1024 1024 7 : arrays used to fit phase have different length
1025 1025 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
1026 1026
1027 1027 """
1028 1028
1029 1029 error_code = 0
1030 1030
1031 1031 nChan = spc.shape[0]
1032 1032 nProf = spc.shape[1]
1033 1033 nPair = cspc.shape[0]
1034 1034
1035 1035 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
1036 1036 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
1037 1037 phase = numpy.zeros([nPair, nProf]) # phase between channels
1038 1038 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
1039 1039 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
1040 1040 xFrec = AbbsisaRange[0][:-1] # frequency range
1041 1041 xVel = AbbsisaRange[2][:-1] # velocity range
1042 1042 xSamples = xFrec # the frequency range is taken
1043 1043 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
1044 1044
1045 1045 # only consider velocities with in NegativeLimit and PositiveLimit
1046 1046 if (NegativeLimit is None):
1047 1047 NegativeLimit = numpy.min(xVel)
1048 1048 if (PositiveLimit is None):
1049 1049 PositiveLimit = numpy.max(xVel)
1050 1050 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
1051 1051 xSamples_zoom = xSamples[xvalid]
1052 1052
1053 1053 '''Getting Eij and Nij'''
1054 1054 Xi01, Xi02, Xi12 = ChanDist[:,0]
1055 1055 Eta01, Eta02, Eta12 = ChanDist[:,1]
1056 1056
1057 1057 # spwd limit - updated by D. Scipión 30.03.2021
1058 1058 widthlimit = 10
1059 1059 '''************************* SPC is normalized ********************************'''
1060 1060 spc_norm = spc.copy()
1061 1061 # For each channel
1062 1062 for i in range(nChan):
1063 1063 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
1064 1064 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
1065 1065
1066 1066 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
1067 1067
1068 1068 """ The Gaussian of the mean: first subtract the noise, then normalize. This is valid because
1069 1069 only the shape of the curve is fitted; its absolute height is not needed for the calculation,
1070 1070 only for the width estimate. For the normalization of the cross spectra, the initial,
1071 1071 unnormalized self-spectra (with noise) are required.
1072 1072 
1073 1073 Technically, the self-spectra do not even need to be normalized, since only the
1074 1074 width of the peak is used; it was left this way nonetheless. Note that the normalization has a flaw:
1075 1075 due to the noise subtraction, some values fall below zero. Raw "spc" values should be
1076 1076 >= 0, as each is the modulus squared of the signal (complex times its conjugate).
1077 1077 """
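# A minimal, self-contained sketch of the fit performed below, assuming a standard
# three-parameter Gaussian A*exp(-(x - x0)**2 / (2*w**2)) like the one evaluated by
# self.gaus (defined elsewhere in this module). All names here are illustrative.
#
#   from scipy.optimize import curve_fit
#   gauss = lambda x, A, x0, w: A * numpy.exp(-(x - x0)**2 / (2 * w**2))
#   x = numpy.linspace(-5, 5, 128)
#   y = gauss(x, 1.0, 0.7, 0.8) + 0.01 * numpy.random.randn(x.size)   # synthetic spectrum
#   p0 = [y.max(), x[y.argmax()], 1.0]                                # moments-like first guess
#   popt, pcov = curve_fit(gauss, x, y, p0=p0)                        # popt = [A, x0, w]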
1078 1078 # initial conditions
1079 1079 popt = [1e-10,0,1e-10]
1080 1080 # Spectra average
1081 1081 SPCMean = numpy.average(SPC_Samples,0)
1082 1082 # Moments in frequency
1083 1083 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
1084 1084
1085 1085 # Gauss Fit SPC in frequency domain
1086 1086 if dbSNR > SNRlimit: # only if SNR > SNRth
1087 1087 try:
1088 1088 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
1089 1089 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
1090 1090 return self.StopWindEstimation(error_code = 1)
1091 1091 FitGauss = self.gaus(xSamples_zoom,*popt)
1092 1092 except :#RuntimeError:
1093 1093 return self.StopWindEstimation(error_code = 2)
1094 1094 else:
1095 1095 return self.StopWindEstimation(error_code = 3)
1096 1096
1097 1097 '''***************************** CSPC Normalization *************************
1098 1098 The self-spectra (SPC) are used to normalize the cross spectra. Peaks from precipitation
1099 1099 influence the norm, which is not desired. First, a range is identified where the
1100 1100 wind peak is expected -> sum_wind is the sum over those frequencies. Next, the area
1101 1101 around it is cut off and the values are replaced by a mean determined from the boundary
1102 1102 data -> sum_noise (spc is not normalized here, which is why the noise matters).
1103 1103 
1104 1104 The sums are then added and multiplied by range/datapoints, because an integral
1105 1105 rather than a sum is needed for the normalization.
1106 1106 
1107 1107 The norm is found according to Briggs (1992).
1108 1108 '''
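# In compact form (a sketch of what the loop below computes), for a channel pair (i, j):
#
#   CSPC_norm_ij(f) = CSPC_ij(f) / ( sqrt( sum_f SPC_i(f) * sum_f SPC_j(f) ) * delta_x )
#
# i.e. the cross spectrum is scaled by the geometric mean of the (unnormalized,
# noise-containing) self-spectra integrals, following the Briggs (1992) normalization.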
1109 1109 # for each pair
1110 1110 for i in range(nPair):
1111 1111 cspc_norm = cspc[i,:].copy()
1112 1112 chan_index0 = pairsList[i][0]
1113 1113 chan_index1 = pairsList[i][1]
1114 1114 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
1115 1115 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
1116 1116
1117 1117 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
1118 1118 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
1119 1119 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
1120 1120
1121 1121 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
1122 1122 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
1123 1123
1124 1124 '''*******************************FIT GAUSS CSPC************************************'''
1125 1125 try:
1126 1126 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
1127 1127 if popt01[2] > widthlimit: # CONDITION
1128 1128 return self.StopWindEstimation(error_code = 4)
1129 1129 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
1130 1130 if popt02[2] > widthlimit: # CONDITION
1131 1131 return self.StopWindEstimation(error_code = 4)
1132 1132 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
1133 1133 if popt12[2] > widthlimit: # CONDITION
1134 1134 return self.StopWindEstimation(error_code = 4)
1135 1135
1136 1136 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
1137 1137 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
1138 1138 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
1139 1139 except:
1140 1140 return self.StopWindEstimation(error_code = 5)
1141 1141
1142 1142
1143 1143 '''************* Getting Fij ***************'''
1144 1144 # x-axis point of the gaussian where the center is located from GaussFit of spectra
1145 1145 GaussCenter = popt[1]
1146 1146 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
1147 1147 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
1148 1148
1149 1149 # Point where e^-1 is located in the gaussian
1150 1150 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
1151 1151 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to "PeMinus1" in "FitGauss"
1152 1152 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
1153 1153 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
1154 1154
1155 1155 '''********** Taking frequency ranges from mean SPCs **********'''
1156 1156 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
1157 1157 Range = numpy.empty(2)
1158 1158 Range[0] = GaussCenter - GauWidth
1159 1159 Range[1] = GaussCenter + GauWidth
1160 1160 # Point in x-axis where the bandwidth is located (min:max)
1161 1161 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
1162 1162 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
1163 1163 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
1164 1164 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
1165 1165 Range = numpy.array([ PointRangeMin, PointRangeMax ])
1166 1166 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
1167 1167
1168 1168 '''************************** Getting Phase Slope ***************************'''
1169 1169 for i in range(nPair):
1170 1170 if len(FrecRange) > 5:
1171 1171 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
1172 1172 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
1173 1173 if len(FrecRange) == len(PhaseRange):
1174 1174 try:
1175 1175 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
1176 1176 PhaseSlope[i] = slope
1177 1177 PhaseInter[i] = intercept
1178 1178 except:
1179 1179 return self.StopWindEstimation(error_code = 6)
1180 1180 else:
1181 1181 return self.StopWindEstimation(error_code = 7)
1182 1182 else:
1183 1183 return self.StopWindEstimation(error_code = 8)
1184 1184
1185 1185 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
1186 1186
1187 1187 '''Getting constant C'''
1188 1188 cC=(Fij*numpy.pi)**2
1189 1189
1190 1190 '''****** Getting constants F and G ******'''
1191 1191 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
1192 1192 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
1193 1193 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
1194 1194 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
1195 1195 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
1196 1196 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
1197 1197 MijResults = numpy.array([MijResult1, MijResult2])
1198 1198 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
1199 1199
1200 1200 '''****** Getting constants A, B and H ******'''
1201 1201 W01 = numpy.nanmax( FitGauss01 )
1202 1202 W02 = numpy.nanmax( FitGauss02 )
1203 1203 W12 = numpy.nanmax( FitGauss12 )
1204 1204
1205 1205 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
1206 1206 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
1207 1207 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
1208 1208 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
1209 1209
1210 1210 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
1211 1211 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
1212 1212
1213 1213 VxVy = numpy.array([[cA,cH],[cH,cB]])
1214 1214 VxVyResults = numpy.array([-cF,-cG])
1215 1215 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
1216 1216 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
1217 1217 error_code = 0
1218 1218
1219 1219 return Vzon, Vmer, Vver, error_code
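# Summary sketch of the linear systems solved above (Briggs & Vincent 1992 convention),
# written out only as a reading aid:
#
#   [Xi02  Eta02] [cF]   [-PhaseSlope[1]*cC/(2*pi)]
#   [Xi12  Eta12] [cG] = [-PhaseSlope[2]*cC/(2*pi)]
#
#   [Xi01**2  Eta01**2  2*Xi01*Eta01] [cA]   [WijResult01]
#   [Xi02**2  Eta02**2  2*Xi02*Eta02] [cB] = [WijResult02]
#   [Xi12**2  Eta12**2  2*Xi12*Eta12] [cH]   [WijResult12]
#
#   [cA  cH] [Vmer]   [-cF]
#   [cH  cB] [Vzon] = [-cG]
#
# with cC = (Fij*pi)**2, and the vertical velocity taken from the mean Doppler shift:
# Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq).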
1220 1220
1221 1221 class SpectralMoments(Operation):
1222 1222
1223 1223 '''
1224 1224 Function SpectralMoments()
1225 1225
1226 1226 Calculates the spectral moments (total power, Doppler shift and spectral width) and the SNR of the signal
1227 1227
1228 1228 Type of dataIn: Spectra
1229 1229
1230 1230 Configuration Parameters:
1231 1231
1232 1232 dirCosx : Direction cosine in the X axis
1233 1233 dirCosy : Direction cosine in the Y axis
1234 1234
1235 1235 elevation :
1236 1236 azimuth :
1237 1237
1238 1238 Input:
1239 1239 channelList : simple channel list to select e.g. [2,3,7]
1240 1240 self.dataOut.data_pre : Spectral data
1241 1241 self.dataOut.abscissaList : List of frequencies
1242 1242 self.dataOut.noise : Noise level per channel
1243 1243
1244 1244 Affected:
1245 1245 self.dataOut.moments : Parameters per channel
1246 1246 self.dataOut.data_snr : SNR per channel
1247 1247
1248 1248 '''
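# A minimal sketch of the per-profile estimates computed in __calculateMoments below
# (illustrative names only): given a spectrum spec(f), a noise level n0 and weights fwindow,
#
#   s     = (spec - n0) * fwindow                              # noise-subtracted, weighted spectrum
#   power = (spec * fwindow).mean()                            # total power (0th moment)
#   fd    = (s * freq).sum() / s.sum()                         # Doppler shift (1st moment)
#   width = numpy.sqrt((s * (freq - fd)**2).sum() / s.sum())   # spectral width (2nd moment)
#   snr   = (spec.mean() - n0) / n0                            # signal-to-noise ratio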
1249 1249
1250 1250 def run(self, dataOut):
1251 1251
1252 1252 data = dataOut.data_pre[0]
1253 1253 absc = dataOut.abscissaList[:-1]
1254 1254 noise = dataOut.noise
1255 1255 nChannel = data.shape[0]
1256 1256 data_param = numpy.zeros((nChannel, 4, data.shape[2]))
1257 1257
1258 1258 for ind in range(nChannel):
1259 1259 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )
1260 1260
1261 1261 dataOut.moments = data_param[:,1:,:]
1262 1262 dataOut.data_snr = data_param[:,0]
1263 1263 dataOut.data_pow = data_param[:,1]
1264 1264 dataOut.data_dop = data_param[:,2]
1265 1265 dataOut.data_width = data_param[:,3]
1266 1266 return dataOut
1267 1267
1268 1268 def __calculateMoments(self, oldspec, oldfreq, n0,
1269 1269 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
1270 1270
1271 1271 if (nicoh is None): nicoh = 1
1272 1272 if (graph is None): graph = 0
1273 1273 if (smooth is None): smooth = 0
1274 1274 elif (smooth < 3): smooth = 0
1275 1275
1276 1276 if (type1 is None): type1 = 0
1277 1277 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
1278 1278 if (snrth is None): snrth = -3
1279 1279 if (dc is None): dc = 0
1280 1280 if (aliasing is None): aliasing = 0
1281 1281 if (oldfd is None): oldfd = 0
1282 1282 if (wwauto is None): wwauto = 0
1283 1283
1284 1284 if (n0 < 1.e-20): n0 = 1.e-20
1285 1285
1286 1286 freq = oldfreq
1287 1287 vec_power = numpy.zeros(oldspec.shape[1])
1288 1288 vec_fd = numpy.zeros(oldspec.shape[1])
1289 1289 vec_w = numpy.zeros(oldspec.shape[1])
1290 1290 vec_snr = numpy.zeros(oldspec.shape[1])
1291 1291
1292 1292 # oldspec = numpy.ma.masked_invalid(oldspec)
1293 1293 for ind in range(oldspec.shape[1]):
1294 1294
1295 1295 spec = oldspec[:,ind]
1296 1296 aux = spec*fwindow
1297 1297 max_spec = aux.max()
1298 1298 m = aux.tolist().index(max_spec)
1299 1299
1300 1300 # Smooth
1301 1301 if (smooth == 0):
1302 1302 spec2 = spec
1303 1303 else:
1304 1304 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
1305 1305
1306 1306 # Moments Estimation
1307 1307 bb = spec2[numpy.arange(m,spec2.size)]
1308 1308 bb = (bb<n0).nonzero()
1309 1309 bb = bb[0]
1310 1310
1311 1311 ss = spec2[numpy.arange(0,m + 1)]
1312 1312 ss = (ss<n0).nonzero()
1313 1313 ss = ss[0]
1314 1314
1315 1315 if (bb.size == 0):
1316 1316 bb0 = spec.size - 1 - m
1317 1317 else:
1318 1318 bb0 = bb[0] - 1
1319 1319 if (bb0 < 0):
1320 1320 bb0 = 0
1321 1321
1322 1322 if (ss.size == 0):
1323 1323 ss1 = 1
1324 1324 else:
1325 1325 ss1 = max(ss) + 1
1326 1326
1327 1327 if (ss1 > m):
1328 1328 ss1 = m
1329 1329
1330 1330 #valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1331 1331 valid = numpy.arange(1,oldspec.shape[0]) # valid: full profile, same as pulse-pair
1332 1332 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
1333 1333 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
1334 1334 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
1335 1335 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
1336 1336 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
1337 1337 snr = (spec2.mean()-n0)/n0
1338 1338 if (snr < 1.e-20) :
1339 1339 snr = 1.e-20
1340 1340
1341 1341 # vec_power[ind] = power #D. Scipión replaced with the line below
1342 1342 vec_power[ind] = total_power
1343 1343 vec_fd[ind] = fd
1344 1344 vec_w[ind] = w
1345 1345 vec_snr[ind] = snr
1346 1346
1347 1347 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
1348 1348
1349 1349 #------------------ Get SA Parameters --------------------------
1350 1350
1351 1351 def GetSAParameters(self):
1352 1352 #SA in frequency
1353 1353 pairslist = self.dataOut.groupList
1354 1354 num_pairs = len(pairslist)
1355 1355
1356 1356 vel = self.dataOut.abscissaList
1357 1357 spectra = self.dataOut.data_pre
1358 1358 cspectra = self.dataIn.data_cspc
1359 1359 delta_v = vel[1] - vel[0]
1360 1360
1361 1361 #Calculating the power spectrum
1362 1362 spc_pow = numpy.sum(spectra, 3)*delta_v
1363 1363 #Normalizing Spectra
1364 1364 norm_spectra = spectra/spc_pow
1365 1365 #Calculating the norm_spectra at peak
1366 1366 max_spectra = numpy.max(norm_spectra, 3)
1367 1367
1368 1368 #Normalizing Cross Spectra
1369 1369 norm_cspectra = numpy.zeros(cspectra.shape)
1370 1370
1371 1371 for i in range(num_pairs):
1372 1372 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
1373 1373
1374 1374 max_cspectra = numpy.max(norm_cspectra,2)
1375 1375 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
1376 1376
1377 1377 for i in range(num_pairs):
1378 1378 cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
1379 1379 #------------------- Get Lags ----------------------------------
1380 1380
1381 1381 class SALags(Operation):
1382 1382 '''
1383 1383 Function SALags()
1384 1384
1385 1385 Input:
1386 1386 self.dataOut.data_pre
1387 1387 self.dataOut.abscissaList
1388 1388 self.dataOut.noise
1389 1389 self.dataOut.normFactor
1390 1390 self.dataOut.data_snr
1391 1391 self.dataOut.groupList
1392 1392 self.dataOut.nChannels
1393 1393
1394 1394 Affected:
1395 1395 self.dataOut.data_param
1396 1396
1397 1397 '''
1398 1398 def run(self, dataOut):
1399 1399 data_acf = dataOut.data_pre[0]
1400 1400 data_ccf = dataOut.data_pre[1]
1401 1401 normFactor_acf = dataOut.normFactor[0]
1402 1402 normFactor_ccf = dataOut.normFactor[1]
1403 1403 pairs_acf = dataOut.groupList[0]
1404 1404 pairs_ccf = dataOut.groupList[1]
1405 1405
1406 1406 nHeights = dataOut.nHeights
1407 1407 absc = dataOut.abscissaList
1408 1408 noise = dataOut.noise
1409 1409 SNR = dataOut.data_snr
1410 1410 nChannels = dataOut.nChannels
1411 1411 # pairsList = dataOut.groupList
1412 1412 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
1413 1413
1414 1414 for l in range(len(pairs_acf)):
1415 1415 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
1416 1416
1417 1417 for l in range(len(pairs_ccf)):
1418 1418 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
1419 1419
1420 1420 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
1421 1421 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
1422 1422 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
1423 1423 return
1424 1424
1425 1425 # def __getPairsAutoCorr(self, pairsList, nChannels):
1426 1426 #
1427 1427 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1428 1428 #
1429 1429 # for l in range(len(pairsList)):
1430 1430 # firstChannel = pairsList[l][0]
1431 1431 # secondChannel = pairsList[l][1]
1432 1432 #
1433 1433 # #Getting autocorrelation pairs
1434 1434 # if firstChannel == secondChannel:
1435 1435 # pairsAutoCorr[firstChannel] = int(l)
1436 1436 #
1437 1437 # pairsAutoCorr = pairsAutoCorr.astype(int)
1438 1438 #
1439 1439 # pairsCrossCorr = range(len(pairsList))
1440 1440 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1441 1441 #
1442 1442 # return pairsAutoCorr, pairsCrossCorr
1443 1443
1444 1444 def __calculateTaus(self, data_acf, data_ccf, lagRange):
1445 1445
1446 1446 lag0 = data_acf.shape[1]//2
1447 1447 #Autocorrelation function
1448 1448 mean_acf = stats.nanmean(data_acf, axis = 0)
1449 1449
1450 1450 #Get TauCross index
1451 1451 ind_ccf = data_ccf.argmax(axis = 1)
1452 1452 #Get TauAuto index
1453 1453 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
1454 1454 ccf_lag0 = data_ccf[:,lag0,:]
1455 1455
1456 1456 for i in range(ccf_lag0.shape[0]):
1457 1457 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
1458 1458
1459 1459 #Get TauCross and TauAuto
1460 1460 tau_ccf = lagRange[ind_ccf]
1461 1461 tau_acf = lagRange[ind_acf]
1462 1462
1463 1463 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
1464 1464
1465 1465 tau_ccf[Nan1,Nan2] = numpy.nan
1466 1466 tau_acf[Nan1,Nan2] = numpy.nan
1467 1467 tau = numpy.vstack((tau_ccf,tau_acf))
1468 1468
1469 1469 return tau
1470 1470
1471 1471 def __calculateLag1Phase(self, data, lagTRange):
1472 1472 data1 = stats.nanmean(data, axis = 0)
1473 1473 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
1474 1474
1475 1475 phase = numpy.angle(data1[lag1,:])
1476 1476
1477 1477 return phase
1478 1478
1479 1479 class SpectralFitting(Operation):
1480 1480 '''
1481 1481 Function SpectralFitting()
1482 1482
1483 1483 Input:
1484 1484 Output:
1485 1485 Variables modified:
1486 1486 '''
1487 1487
1488 1488 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
1489 1489
1490 1490
1491 1491 if path != None:
1492 1492 sys.path.append(path)
1493 1493 self.dataOut.library = importlib.import_module(file)
1494 1494
1495 1495 #To be inserted as a parameter
1496 1496 groupArray = numpy.array(groupList)
1497 1497 # groupArray = numpy.array([[0,1],[2,3]])
1498 1498 self.dataOut.groupList = groupArray
1499 1499
1500 1500 nGroups = groupArray.shape[0]
1501 1501 nChannels = self.dataIn.nChannels
1502 1502 nHeights=self.dataIn.heightList.size
1503 1503
1504 1504 #Parameters Array
1505 1505 self.dataOut.data_param = None
1506 1506
1507 1507 #Set constants
1508 1508 constants = self.dataOut.library.setConstants(self.dataIn)
1509 1509 self.dataOut.constants = constants
1510 1510 M = self.dataIn.normFactor
1511 1511 N = self.dataIn.nFFTPoints
1512 1512 ippSeconds = self.dataIn.ippSeconds
1513 1513 K = self.dataIn.nIncohInt
1514 1514 pairsArray = numpy.array(self.dataIn.pairsList)
1515 1515
1516 1516 #List of possible combinations
1517 1517 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2))
1518 1518 indCross = numpy.zeros(len(listComb), dtype = 'int')
1519 1519
1520 1520 if getSNR:
1521 1521 listChannels = groupArray.reshape((groupArray.size))
1522 1522 listChannels.sort()
1523 1523 noise = self.dataIn.getNoise()
1524 1524 self.dataOut.data_snr = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
1525 1525
1526 1526 for i in range(nGroups):
1527 1527 coord = groupArray[i,:]
1528 1528
1529 1529 #Input data array
1530 1530 data = self.dataIn.data_spc[coord,:,:]/(M*N)
1531 1531 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
1532 1532
1533 1533 #Cross Spectra data array for Covariance Matrixes
1534 1534 ind = 0
1535 1535 for pairs in listComb:
1536 1536 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
1537 1537 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
1538 1538 ind += 1
1539 1539 dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
1540 1540 dataCross = dataCross**2/K
1541 1541
1542 1542 for h in range(nHeights):
1543 1543
1544 1544 #Input
1545 1545 d = data[:,h]
1546 1546
1547 1547 #Covariance Matrix
1548 1548 D = numpy.diag(d**2/K)
1549 1549 ind = 0
1550 1550 for pairs in listComb:
1551 1551 #Coordinates in Covariance Matrix
1552 1552 x = pairs[0]
1553 1553 y = pairs[1]
1554 1554 #Channel Index
1555 1555 S12 = dataCross[ind,:,h]
1556 1556 D12 = numpy.diag(S12)
1557 1557 #Completing Covariance Matrix with Cross Spectras
1558 1558 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
1559 1559 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
1560 1560 ind += 1
1561 1561 Dinv=numpy.linalg.inv(D)
1562 1562 L=numpy.linalg.cholesky(Dinv)
1563 1563 LT=L.T
1564 1564
1565 1565 dp = numpy.dot(LT,d)
1566 1566
1567 1567 #Initial values
1568 1568 data_spc = self.dataIn.data_spc[coord,:,h]
1569 1569
1570 1570 if (h>0)and(error1[3]<5):
1571 1571 p0 = self.dataOut.data_param[i,:,h-1]
1572 1572 else:
1573 1573 p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
1574 1574
1575 1575 try:
1576 1576 #Least Squares
1577 1577 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
1578 1578 # minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
1579 1579 #Chi square error
1580 1580 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
1581 1581 #Error with Jacobian
1582 1582 error1 = self.dataOut.library.errorFunction(minp,constants,LT)
1583 1583 except:
1584 1584 minp = p0*numpy.nan
1585 1585 error0 = numpy.nan
1586 1586 error1 = p0*numpy.nan
1587 1587
1588 1588 #Save
1589 1589 if self.dataOut.data_param is None:
1590 1590 self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
1591 1591 self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
1592 1592
1593 1593 self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
1594 1594 self.dataOut.data_param[i,:,h] = minp
1595 1595 return
1596 1596
1597 1597 def __residFunction(self, p, dp, LT, constants):
1598 1598
1599 1599 fm = self.dataOut.library.modelFunction(p, constants)
1600 1600 fmp=numpy.dot(LT,fm)
1601 1601
1602 1602 return dp-fmp
1603 1603
1604 1604 def __getSNR(self, z, noise):
1605 1605
1606 1606 avg = numpy.average(z, axis=1)
1607 1607 SNR = (avg.T-noise)/noise
1608 1608 SNR = SNR.T
1609 1609 return SNR
1610 1610
1611 1611 def __chisq(p,chindex,hindex):
1612 1612 #similar to Resid but calculates CHI**2
1613 1613 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
1614 1614 dp=numpy.dot(LT,d)
1615 1615 fmp=numpy.dot(LT,fm)
1616 1616 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
1617 1617 return chisq
1618 1618
1619 1619 class WindProfiler(Operation):
1620 1620
1621 1621 __isConfig = False
1622 1622
1623 1623 __initime = None
1624 1624 __lastdatatime = None
1625 1625 __integrationtime = None
1626 1626
1627 1627 __buffer = None
1628 1628
1629 1629 __dataReady = False
1630 1630
1631 1631 __firstdata = None
1632 1632
1633 1633 n = None
1634 1634
1635 1635 def __init__(self):
1636 1636 Operation.__init__(self)
1637 1637
1638 1638 def __calculateCosDir(self, elev, azim):
1639 1639 zen = (90 - elev)*numpy.pi/180
1640 1640 azim = azim*numpy.pi/180
1641 1641 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
1642 1642 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
1643 1643
1644 1644 signX = numpy.sign(numpy.cos(azim))
1645 1645 signY = numpy.sign(numpy.sin(azim))
1646 1646
1647 1647 cosDirX = numpy.copysign(cosDirX, signX)
1648 1648 cosDirY = numpy.copysign(cosDirY, signY)
1649 1649 return cosDirX, cosDirY
1650 1650
1651 1651 def __calculateAngles(self, theta_x, theta_y, azimuth):
1652 1652
1653 1653 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
1654 1654 zenith_arr = numpy.arccos(dir_cosw)
1655 1655 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
1656 1656
1657 1657 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
1658 1658 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
1659 1659
1660 1660 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
1661 1661
1662 1662 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
1663 1663
1664 1664 #
1665 1665 if horOnly:
1666 1666 A = numpy.c_[dir_cosu,dir_cosv]
1667 1667 else:
1668 1668 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
1669 1669 A = numpy.asmatrix(A)
1670 1670 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
1671 1671
1672 1672 return A1
1673 1673
1674 1674 def __correctValues(self, heiRang, phi, velRadial, SNR):
1675 1675 listPhi = phi.tolist()
1676 1676 maxid = listPhi.index(max(listPhi))
1677 1677 minid = listPhi.index(min(listPhi))
1678 1678
1679 1679 rango = list(range(len(phi)))
1680 1680 # rango = numpy.delete(rango,maxid)
1681 1681
1682 1682 heiRang1 = heiRang*math.cos(phi[maxid])
1683 1683 heiRangAux = heiRang*math.cos(phi[minid])
1684 1684 indOut = (heiRang1 < heiRangAux[0]).nonzero()
1685 1685 heiRang1 = numpy.delete(heiRang1,indOut)
1686 1686
1687 1687 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
1688 1688 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
1689 1689
1690 1690 for i in rango:
1691 1691 x = heiRang*math.cos(phi[i])
1692 1692 y1 = velRadial[i,:]
1693 1693 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
1694 1694
1695 1695 x1 = heiRang1
1696 1696 y11 = f1(x1)
1697 1697
1698 1698 y2 = SNR[i,:]
1699 1699 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
1700 1700 y21 = f2(x1)
1701 1701
1702 1702 velRadial1[i,:] = y11
1703 1703 SNR1[i,:] = y21
1704 1704
1705 1705 return heiRang1, velRadial1, SNR1
1706 1706
1707 1707 def __calculateVelUVW(self, A, velRadial):
1708 1708
1709 1709 #Matrix operation
1710 1710 # velUVW = numpy.zeros((velRadial.shape[1],3))
1711 1711 # for ind in range(velRadial.shape[1]):
1712 1712 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
1713 1713 # velUVW = velUVW.transpose()
1714 1714 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
1715 1715 velUVW[:,:] = numpy.dot(A,velRadial)
1716 1716
1717 1717
1718 1718 return velUVW
1719 1719
1720 1720 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
1721 1721
1722 1722 def techniqueDBS(self, kwargs):
1723 1723 """
1724 1724 Function that implements Doppler Beam Swinging (DBS) technique.
1725 1725
1726 1726 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
1727 1727 Direction correction (if necessary), Ranges and SNR
1728 1728
1729 1729 Output: Winds estimation (Zonal, Meridional and Vertical)
1730 1730
1731 1731 Parameters affected: Winds, height range, SNR
1732 1732 """
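# Model sketch: each beam's radial velocity is the projection of the wind vector onto
# that beam's direction cosines, so for beam k
#
#   velRadial[k] = u*dir_cosu[k] + v*dir_cosv[k] + w*dir_cosw[k]
#
# and the wind is recovered per height with the pseudo-inverse built in __calculateMatA,
# roughly (illustrative only):
#
#   A = numpy.c_[dir_cosu, dir_cosv, dir_cosw]        # (nBeams, 3)
#   winds = numpy.linalg.pinv(A).dot(velRadial)       # (3, nHeights)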
1733 1733 velRadial0 = kwargs['velRadial']
1734 1734 heiRang = kwargs['heightList']
1735 1735 SNR0 = kwargs['SNR']
1736 1736
1737 1737 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
1738 1738 theta_x = numpy.array(kwargs['dirCosx'])
1739 1739 theta_y = numpy.array(kwargs['dirCosy'])
1740 1740 else:
1741 1741 elev = numpy.array(kwargs['elevation'])
1742 1742 azim = numpy.array(kwargs['azimuth'])
1743 1743 theta_x, theta_y = self.__calculateCosDir(elev, azim)
1744 1744 azimuth = kwargs['correctAzimuth']
1745 1745 if 'horizontalOnly' in kwargs:
1746 1746 horizontalOnly = kwargs['horizontalOnly']
1747 1747 else: horizontalOnly = False
1748 1748 if 'correctFactor' in kwargs:
1749 1749 correctFactor = kwargs['correctFactor']
1750 1750 else: correctFactor = 1
1751 1751 if 'channelList' in kwargs:
1752 1752 channelList = kwargs['channelList']
1753 1753 if len(channelList) == 2:
1754 1754 horizontalOnly = True
1755 1755 arrayChannel = numpy.array(channelList)
1756 1756 param = param[arrayChannel,:,:]
1757 1757 theta_x = theta_x[arrayChannel]
1758 1758 theta_y = theta_y[arrayChannel]
1759 1759
1760 1760 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
1761 1761 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
1762 1762 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
1763 1763
1764 1764 #Compute the velocity components with DBS
1765 1765 winds = self.__calculateVelUVW(A,velRadial1)
1766 1766
1767 1767 return winds, heiRang1, SNR1
1768 1768
1769 1769 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
1770 1770
1771 1771 nPairs = len(pairs_ccf)
1772 1772 posx = numpy.asarray(posx)
1773 1773 posy = numpy.asarray(posy)
1774 1774
1775 1775 #Inverse rotation to align with the azimuth
1776 1776 if azimuth != None:
1777 1777 azimuth = azimuth*math.pi/180
1778 1778 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
1779 1779 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
1780 1780 else:
1781 1781 posx1 = posx
1782 1782 posy1 = posy
1783 1783
1784 1784 #Compute distances
1785 1785 distx = numpy.zeros(nPairs)
1786 1786 disty = numpy.zeros(nPairs)
1787 1787 dist = numpy.zeros(nPairs)
1788 1788 ang = numpy.zeros(nPairs)
1789 1789
1790 1790 for i in range(nPairs):
1791 1791 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
1792 1792 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
1793 1793 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
1794 1794 ang[i] = numpy.arctan2(disty[i],distx[i])
1795 1795
1796 1796 return distx, disty, dist, ang
1797 1797 #Matrix computation
1798 1798 # nPairs = len(pairs)
1799 1799 # ang1 = numpy.zeros((nPairs, 2, 1))
1800 1800 # dist1 = numpy.zeros((nPairs, 2, 1))
1801 1801 #
1802 1802 # for j in range(nPairs):
1803 1803 # dist1[j,0,0] = dist[pairs[j][0]]
1804 1804 # dist1[j,1,0] = dist[pairs[j][1]]
1805 1805 # ang1[j,0,0] = ang[pairs[j][0]]
1806 1806 # ang1[j,1,0] = ang[pairs[j][1]]
1807 1807 #
1808 1808 # return distx,disty, dist1,ang1
1809 1809
1810 1810
1811 1811 def __calculateVelVer(self, phase, lagTRange, _lambda):
1812 1812
1813 1813 Ts = lagTRange[1] - lagTRange[0]
1814 1814 velW = -_lambda*phase/(4*math.pi*Ts)
1815 1815
1816 1816 return velW
1817 1817
1818 1818 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
1819 1819 nPairs = tau1.shape[0]
1820 1820 nHeights = tau1.shape[1]
1821 1821 vel = numpy.zeros((nPairs,3,nHeights))
1822 1822 dist1 = numpy.reshape(dist, (dist.size,1))
1823 1823
1824 1824 angCos = numpy.cos(ang)
1825 1825 angSin = numpy.sin(ang)
1826 1826
1827 1827 vel0 = dist1*tau1/(2*tau2**2)
1828 1828 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
1829 1829 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
1830 1830
1831 1831 ind = numpy.where(numpy.isinf(vel))
1832 1832 vel[ind] = numpy.nan
1833 1833
1834 1834 return vel
1835 1835
1836 1836 # def __getPairsAutoCorr(self, pairsList, nChannels):
1837 1837 #
1838 1838 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1839 1839 #
1840 1840 # for l in range(len(pairsList)):
1841 1841 # firstChannel = pairsList[l][0]
1842 1842 # secondChannel = pairsList[l][1]
1843 1843 #
1844 1844 # #Getting autocorrelation pairs
1845 1845 # if firstChannel == secondChannel:
1846 1846 # pairsAutoCorr[firstChannel] = int(l)
1847 1847 #
1848 1848 # pairsAutoCorr = pairsAutoCorr.astype(int)
1849 1849 #
1850 1850 # pairsCrossCorr = range(len(pairsList))
1851 1851 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1852 1852 #
1853 1853 # return pairsAutoCorr, pairsCrossCorr
1854 1854
1855 1855 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
1856 1856 def techniqueSA(self, kwargs):
1857 1857
1858 1858 """
1859 1859 Function that implements Spaced Antenna (SA) technique.
1860 1860
1861 1861 Input: Cross-correlation time lags, antenna positions (x and y), antenna azimuth,
1862 1862 channel pair list, radar wavelength and correction factor (if necessary)
1863 1863
1864 1864 Output: Winds estimation (Zonal, Meridional and Vertical)
1865 1865
1866 1866 Parameters affected: Winds
1867 1867 """
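# Sketch of the estimates below: the vertical component comes from the lag-1
# autocorrelation phase (see __calculateVelVer),
#
#   w = -lambda * phase / (4 * pi * Ts)
#
# while the horizontal components are built in __calculateVelHorDir from the antenna-pair
# baselines (dist, ang) and the correlation time lags (tau1, tau2), roughly
# v0 = dist*tau1/(2*tau2**2) projected onto cos(ang) and sin(ang).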
1868 1868 position_x = kwargs['positionX']
1869 1869 position_y = kwargs['positionY']
1870 1870 azimuth = kwargs['azimuth']
1871 1871
1872 1872 if 'correctFactor' in kwargs:
1873 1873 correctFactor = kwargs['correctFactor']
1874 1874 else:
1875 1875 correctFactor = 1
1876 1876
1877 1877 groupList = kwargs['groupList']
1878 1878 pairs_ccf = groupList[1]
1879 1879 tau = kwargs['tau']
1880 1880 _lambda = kwargs['_lambda']
1881 1881
1882 1882 #Cross Correlation pairs obtained
1883 1883 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
1884 1884 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
1885 1885 # pairsSelArray = numpy.array(pairsSelected)
1886 1886 # pairs = []
1887 1887 #
1888 1888 # #Wind estimation pairs obtained
1889 1889 # for i in range(pairsSelArray.shape[0]/2):
1890 1890 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
1891 1891 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
1892 1892 # pairs.append((ind1,ind2))
1893 1893
1894 1894 indtau = tau.shape[0]//2
1895 1895 tau1 = tau[:indtau,:]
1896 1896 tau2 = tau[indtau:-1,:]
1897 1897 # tau1 = tau1[pairs,:]
1898 1898 # tau2 = tau2[pairs,:]
1899 1899 phase1 = tau[-1,:]
1900 1900
1901 1901 #---------------------------------------------------------------------
1902 1902 #Direct method
1903 1903 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
1904 1904 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
1905 1905 winds = stats.nanmean(winds, axis=0)
1906 1906 #---------------------------------------------------------------------
1907 1907 #General method
1908 1908 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
1909 1909 # #Compute correlation-function coefficients
1910 1910 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
1911 1911 # #Compute velocities
1912 1912 # winds = self.calculateVelUV(F,G,A,B,H)
1913 1913
1914 1914 #---------------------------------------------------------------------
1915 1915 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
1916 1916 winds = correctFactor*winds
1917 1917 return winds
1918 1918
1919 1919 def __checkTime(self, currentTime, paramInterval, outputInterval):
1920 1920
1921 1921 dataTime = currentTime + paramInterval
1922 1922 deltaTime = dataTime - self.__initime
1923 1923
1924 1924 if deltaTime >= outputInterval or deltaTime < 0:
1925 1925 self.__dataReady = True
1926 1926 return
1927 1927
1928 1928 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
1929 1929 '''
1930 1930 Function that implements winds estimation technique with detected meteors.
1931 1931
1932 1932 Input: Detected meteors, minimum number of meteors required per height bin, height range (min, max)
1933 1933
1934 1934 Output: Winds estimation (Zonal and Meridional)
1935 1935
1936 1936 Parameters affected: Winds
1937 1937 '''
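# Sketch of the per-bin inversion below: each meteor contributes one radial velocity and
# a direction (zenith zen, azimuth azim) with direction cosines l = sin(zen)*sin(azim)
# and m = sin(zen)*cos(azim). Assuming vel ~ l*u + m*v, the zonal/meridional wind is the
# least-squares solution, e.g. (illustrative only):
#
#   A = numpy.vstack((l, m)).transpose()                  # (nMeteors, 2)
#   u, v = numpy.linalg.lstsq(A, vel, rcond=None)[0]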
1938 1938 #Settings
1939 1939 nInt = (heightMax - heightMin)/2
1940 1940 nInt = int(nInt)
1941 1941 winds = numpy.zeros((2,nInt))*numpy.nan
1942 1942
1943 1943 #Filter errors
1944 1944 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
1945 1945 finalMeteor = arrayMeteor[error,:]
1946 1946
1947 1947 #Meteor Histogram
1948 1948 finalHeights = finalMeteor[:,2]
1949 1949 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
1950 1950 nMeteorsPerI = hist[0]
1951 1951 heightPerI = hist[1]
1952 1952
1953 1953 #Sort of meteors
1954 1954 indSort = finalHeights.argsort()
1955 1955 finalMeteor2 = finalMeteor[indSort,:]
1956 1956
1957 1957 # Calculating winds
1958 1958 ind1 = 0
1959 1959 ind2 = 0
1960 1960
1961 1961 for i in range(nInt):
1962 1962 nMet = nMeteorsPerI[i]
1963 1963 ind1 = ind2
1964 1964 ind2 = ind1 + nMet
1965 1965
1966 1966 meteorAux = finalMeteor2[ind1:ind2,:]
1967 1967
1968 1968 if meteorAux.shape[0] >= meteorThresh:
1969 1969 vel = meteorAux[:, 6]
1970 1970 zen = meteorAux[:, 4]*numpy.pi/180
1971 1971 azim = meteorAux[:, 3]*numpy.pi/180
1972 1972
1973 1973 n = numpy.cos(zen)
1974 1974 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
1975 1975 # l = m*numpy.tan(azim)
1976 1976 l = numpy.sin(zen)*numpy.sin(azim)
1977 1977 m = numpy.sin(zen)*numpy.cos(azim)
1978 1978
1979 1979 A = numpy.vstack((l, m)).transpose()
1980 1980 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
1981 1981 windsAux = numpy.dot(A1, vel)
1982 1982
1983 1983 winds[0,i] = windsAux[0]
1984 1984 winds[1,i] = windsAux[1]
1985 1985
1986 1986 return winds, heightPerI[:-1]
1987 1987
1988 1988 def techniqueNSM_SA(self, **kwargs):
1989 1989 metArray = kwargs['metArray']
1990 1990 heightList = kwargs['heightList']
1991 1991 timeList = kwargs['timeList']
1992 1992
1993 1993 rx_location = kwargs['rx_location']
1994 1994 groupList = kwargs['groupList']
1995 1995 azimuth = kwargs['azimuth']
1996 1996 dfactor = kwargs['dfactor']
1997 1997 k = kwargs['k']
1998 1998
1999 1999 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
2000 2000 d = dist*dfactor
2001 2001 #Phase calculation
2002 2002 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
2003 2003
2004 2004 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
2005 2005
2006 2006 velEst = numpy.zeros((heightList.size,2))*numpy.nan
2007 2007 azimuth1 = azimuth1*numpy.pi/180
2008 2008
2009 2009 for i in range(heightList.size):
2010 2010 h = heightList[i]
2011 2011 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
2012 2012 metHeight = metArray1[indH,:]
2013 2013 if metHeight.shape[0] >= 2:
2014 2014 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
2015 2015 iazim = metHeight[:,1].astype(int)
2016 2016 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
2017 2017 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
2018 2018 A = numpy.asmatrix(A)
2019 2019 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
2020 2020 velHor = numpy.dot(A1,velAux)
2021 2021
2022 2022 velEst[i,:] = numpy.squeeze(velHor)
2023 2023 return velEst
2024 2024
2025 2025 def __getPhaseSlope(self, metArray, heightList, timeList):
2026 2026 meteorList = []
2027 2027 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
2028 2028 #Putting back together the meteor matrix
2029 2029 utctime = metArray[:,0]
2030 2030 uniqueTime = numpy.unique(utctime)
2031 2031
2032 2032 phaseDerThresh = 0.5
2033 2033 ippSeconds = timeList[1] - timeList[0]
2034 2034 sec = numpy.where(timeList>1)[0][0]
2035 2035 nPairs = metArray.shape[1] - 6
2036 2036 nHeights = len(heightList)
2037 2037
2038 2038 for t in uniqueTime:
2039 2039 metArray1 = metArray[utctime==t,:]
2040 2040 # phaseDerThresh = numpy.pi/4 #reduce the phase threshold
2041 2041 tmet = metArray1[:,1].astype(int)
2042 2042 hmet = metArray1[:,2].astype(int)
2043 2043
2044 2044 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
2045 2045 metPhase[:,:] = numpy.nan
2046 2046 metPhase[:,hmet,tmet] = metArray1[:,6:].T
2047 2047
2048 2048 #Delete short trails
2049 2049 metBool = ~numpy.isnan(metPhase[0,:,:])
2050 2050 heightVect = numpy.sum(metBool, axis = 1)
2051 2051 metBool[heightVect<sec,:] = False
2052 2052 metPhase[:,heightVect<sec,:] = numpy.nan
2053 2053
2054 2054 #Derivative
2055 2055 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
2056 2056 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
2057 2057 metPhase[phDerAux] = numpy.nan
2058 2058
2059 2059 #--------------------------METEOR DETECTION -----------------------------------------
2060 2060 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
2061 2061
2062 2062 for p in numpy.arange(nPairs):
2063 2063 phase = metPhase[p,:,:]
2064 2064 phDer = metDer[p,:,:]
2065 2065
2066 2066 for h in indMet:
2067 2067 height = heightList[h]
2068 2068 phase1 = phase[h,:] #82
2069 2069 phDer1 = phDer[h,:]
2070 2070
2071 2071 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
2072 2072
2073 2073 indValid = numpy.where(~numpy.isnan(phase1))[0]
2074 2074 initMet = indValid[0]
2075 2075 endMet = 0
2076 2076
2077 2077 for i in range(len(indValid)-1):
2078 2078
2079 2079 #Time difference
2080 2080 inow = indValid[i]
2081 2081 inext = indValid[i+1]
2082 2082 idiff = inext - inow
2083 2083 #Phase difference
2084 2084 phDiff = numpy.abs(phase1[inext] - phase1[inow])
2085 2085
2086 2086 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
2087 2087 sizeTrail = inow - initMet + 1
2088 2088 if sizeTrail>3*sec: #Too short meteors
2089 2089 x = numpy.arange(initMet,inow+1)*ippSeconds
2090 2090 y = phase1[initMet:inow+1]
2091 2091 ynnan = ~numpy.isnan(y)
2092 2092 x = x[ynnan]
2093 2093 y = y[ynnan]
2094 2094 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
2095 2095 ylin = x*slope + intercept
2096 2096 rsq = r_value**2
2097 2097 if rsq > 0.5:
2098 2098 vel = slope#*height*1000/(k*d)
2099 2099 estAux = numpy.array([t, p, height, vel, rsq])
2100 2100 meteorList.append(estAux)
2101 2101 initMet = inext
2102 2102 metArray2 = numpy.array(meteorList)
2103 2103
2104 2104 return metArray2
2105 2105
2106 2106 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
2107 2107
2108 2108 azimuth1 = numpy.zeros(len(pairslist))
2109 2109 dist = numpy.zeros(len(pairslist))
2110 2110
2111 2111 for i in range(len(rx_location)):
2112 2112 ch0 = pairslist[i][0]
2113 2113 ch1 = pairslist[i][1]
2114 2114
2115 2115 diffX = rx_location[ch0][0] - rx_location[ch1][0]
2116 2116 diffY = rx_location[ch0][1] - rx_location[ch1][1]
2117 2117 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
2118 2118 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
2119 2119
2120 2120 azimuth1 -= azimuth0
2121 2121 return azimuth1, dist
2122 2122
2123 2123 def techniqueNSM_DBS(self, **kwargs):
2124 2124 metArray = kwargs['metArray']
2125 2125 heightList = kwargs['heightList']
2126 2126 timeList = kwargs['timeList']
2127 2127 azimuth = kwargs['azimuth']
2128 2128 theta_x = numpy.array(kwargs['theta_x'])
2129 2129 theta_y = numpy.array(kwargs['theta_y'])
2130 2130
2131 2131 utctime = metArray[:,0]
2132 2132 cmet = metArray[:,1].astype(int)
2133 2133 hmet = metArray[:,3].astype(int)
2134 2134 SNRmet = metArray[:,4]
2135 2135 vmet = metArray[:,5]
2136 2136 spcmet = metArray[:,6]
2137 2137
2138 2138 nChan = numpy.max(cmet) + 1
2139 2139 nHeights = len(heightList)
2140 2140
2141 2141 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
2142 2142 hmet = heightList[hmet]
2143 2143 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
2144 2144
2145 2145 velEst = numpy.zeros((heightList.size,2))*numpy.nan
2146 2146
2147 2147 for i in range(nHeights - 1):
2148 2148 hmin = heightList[i]
2149 2149 hmax = heightList[i + 1]
2150 2150
2151 2151 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
2152 2152 indthisH = numpy.where(thisH)
2153 2153
2154 2154 if numpy.size(indthisH) > 3:
2155 2155
2156 2156 vel_aux = vmet[thisH]
2157 2157 chan_aux = cmet[thisH]
2158 2158 cosu_aux = dir_cosu[chan_aux]
2159 2159 cosv_aux = dir_cosv[chan_aux]
2160 2160 cosw_aux = dir_cosw[chan_aux]
2161 2161
2162 2162 nch = numpy.size(numpy.unique(chan_aux))
2163 2163 if nch > 1:
2164 2164 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
2165 2165 velEst[i,:] = numpy.dot(A,vel_aux)
2166 2166
2167 2167 return velEst
2168 2168
2169 2169 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
2170 2170
2171 2171 param = dataOut.data_param
2172 2172 if dataOut.abscissaList is not None:
2173 2173 absc = dataOut.abscissaList[:-1]
2174 2174 # noise = dataOut.noise
2175 2175 heightList = dataOut.heightList
2176 2176 SNR = dataOut.data_snr
2177 2177
2178 2178 if technique == 'DBS':
2179 2179
2180 2180 kwargs['velRadial'] = param[:,1,:] #Radial velocity
2181 2181 kwargs['heightList'] = heightList
2182 2182 kwargs['SNR'] = SNR
2183 2183
2184 2184 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
2185 2185 dataOut.utctimeInit = dataOut.utctime
2186 2186 dataOut.outputInterval = dataOut.paramInterval
2187 2187
2188 2188 elif technique == 'SA':
2189 2189
2190 2190 #Parameters
2191 2191 # position_x = kwargs['positionX']
2192 2192 # position_y = kwargs['positionY']
2193 2193 # azimuth = kwargs['azimuth']
2194 2194 #
2195 2195 # if kwargs.has_key('crosspairsList'):
2196 2196 # pairs = kwargs['crosspairsList']
2197 2197 # else:
2198 2198 # pairs = None
2199 2199 #
2200 2200 # if kwargs.has_key('correctFactor'):
2201 2201 # correctFactor = kwargs['correctFactor']
2202 2202 # else:
2203 2203 # correctFactor = 1
2204 2204
2205 2205 # tau = dataOut.data_param
2206 2206 # _lambda = dataOut.C/dataOut.frequency
2207 2207 # pairsList = dataOut.groupList
2208 2208 # nChannels = dataOut.nChannels
2209 2209
2210 2210 kwargs['groupList'] = dataOut.groupList
2211 2211 kwargs['tau'] = dataOut.data_param
2212 2212 kwargs['_lambda'] = dataOut.C/dataOut.frequency
2213 2213 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
2214 2214 dataOut.data_output = self.techniqueSA(kwargs)
2215 2215 dataOut.utctimeInit = dataOut.utctime
2216 2216 dataOut.outputInterval = dataOut.timeInterval
2217 2217
2218 2218 elif technique == 'Meteors':
2219 2219 dataOut.flagNoData = True
2220 2220 self.__dataReady = False
2221 2221
2222 2222 if 'nHours' in kwargs:
2223 2223 nHours = kwargs['nHours']
2224 2224 else:
2225 2225 nHours = 1
2226 2226
2227 2227 if 'meteorsPerBin' in kwargs:
2228 2228 meteorThresh = kwargs['meteorsPerBin']
2229 2229 else:
2230 2230 meteorThresh = 6
2231 2231
2232 2232 if 'hmin' in kwargs:
2233 2233 hmin = kwargs['hmin']
2234 2234 else: hmin = 70
2235 2235 if 'hmax' in kwargs:
2236 2236 hmax = kwargs['hmax']
2237 2237 else: hmax = 110
2238 2238
2239 2239 dataOut.outputInterval = nHours*3600
2240 2240
2241 2241 if self.__isConfig == False:
2242 2242 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2243 2243 #Get Initial LTC time
2244 2244 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2245 2245 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2246 2246
2247 2247 self.__isConfig = True
2248 2248
2249 2249 if self.__buffer is None:
2250 2250 self.__buffer = dataOut.data_param
2251 2251 self.__firstdata = copy.copy(dataOut)
2252 2252
2253 2253 else:
2254 2254 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2255 2255
2256 2256 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2257 2257
2258 2258 if self.__dataReady:
2259 2259 dataOut.utctimeInit = self.__initime
2260 2260
2261 2261 self.__initime += dataOut.outputInterval #to erase time offset
2262 2262
2263 2263 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
2264 2264 dataOut.flagNoData = False
2265 2265 self.__buffer = None
2266 2266
2267 2267 elif technique == 'Meteors1':
2268 2268 dataOut.flagNoData = True
2269 2269 self.__dataReady = False
2270 2270
2271 2271 if 'nMins' in kwargs:
2272 2272 nMins = kwargs['nMins']
2273 2273 else: nMins = 20
2274 2274 if 'rx_location' in kwargs:
2275 2275 rx_location = kwargs['rx_location']
2276 2276 else: rx_location = [(0,1),(1,1),(1,0)]
2277 2277 if 'azimuth' in kwargs:
2278 2278 azimuth = kwargs['azimuth']
2279 2279 else: azimuth = 51.06
2280 2280 if 'dfactor' in kwargs:
2281 2281 dfactor = kwargs['dfactor']
2282 2282 if 'mode' in kwargs:
2283 2283 mode = kwargs['mode']
2284 2284 if 'theta_x' in kwargs:
2285 2285 theta_x = kwargs['theta_x']
2286 2286 if 'theta_y' in kwargs:
2287 2287 theta_y = kwargs['theta_y']
2288 2288 else: mode = 'SA'
2289 2289
2290 2290 #Remove this later
2291 2291 if dataOut.groupList is None:
2292 2292 dataOut.groupList = [(0,1),(0,2),(1,2)]
2293 2293 groupList = dataOut.groupList
2294 2294 C = 3e8
2295 2295 freq = 50e6
2296 2296 lamb = C/freq
2297 2297 k = 2*numpy.pi/lamb
2298 2298
2299 2299 timeList = dataOut.abscissaList
2300 2300 heightList = dataOut.heightList
2301 2301
2302 2302 if self.__isConfig == False:
2303 2303 dataOut.outputInterval = nMins*60
2304 2304 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2305 2305 #Get Initial LTC time
2306 2306 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2307 2307 minuteAux = initime.minute
2308 2308 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
2309 2309 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2310 2310
2311 2311 self.__isConfig = True
2312 2312
2313 2313 if self.__buffer is None:
2314 2314 self.__buffer = dataOut.data_param
2315 2315 self.__firstdata = copy.copy(dataOut)
2316 2316
2317 2317 else:
2318 2318 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2319 2319
2320 2320 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2321 2321
2322 2322 if self.__dataReady:
2323 2323 dataOut.utctimeInit = self.__initime
2324 2324 self.__initime += dataOut.outputInterval #to erase time offset
2325 2325
2326 2326 metArray = self.__buffer
2327 2327 if mode == 'SA':
2328 2328 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
2329 2329 elif mode == 'DBS':
2330 2330 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
2331 2331 dataOut.data_output = dataOut.data_output.T
2332 2332 dataOut.flagNoData = False
2333 2333 self.__buffer = None
2334 2334
2335 2335 return
2336 2336
2337 2337 class EWDriftsEstimation(Operation):
2338 2338
2339 2339 def __init__(self):
2340 2340 Operation.__init__(self)
2341 2341
2342 2342 def __correctValues(self, heiRang, phi, velRadial, SNR):
2343 2343 listPhi = phi.tolist()
2344 2344 maxid = listPhi.index(max(listPhi))
2345 2345 minid = listPhi.index(min(listPhi))
2346 2346
2347 2347 rango = list(range(len(phi)))
2348 2348 # rango = numpy.delete(rango,maxid)
2349 2349
2350 2350 heiRang1 = heiRang*math.cos(phi[maxid])
2351 2351 heiRangAux = heiRang*math.cos(phi[minid])
2352 2352 indOut = (heiRang1 < heiRangAux[0]).nonzero()
2353 2353 heiRang1 = numpy.delete(heiRang1,indOut)
2354 2354
2355 2355 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
2356 2356 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
2357 2357
2358 2358 for i in rango:
2359 2359 x = heiRang*math.cos(phi[i])
2360 2360 y1 = velRadial[i,:]
2361 2361 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
2362 2362
2363 2363 x1 = heiRang1
2364 2364 y11 = f1(x1)
2365 2365
2366 2366 y2 = SNR[i,:]
2367 2367 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
2368 2368 y21 = f2(x1)
2369 2369
2370 2370 velRadial1[i,:] = y11
2371 2371 SNR1[i,:] = y21
2372 2372
2373 2373 return heiRang1, velRadial1, SNR1
2374 2374
2375 2375 def run(self, dataOut, zenith, zenithCorrection):
2376 2376 heiRang = dataOut.heightList
2377 2377 velRadial = dataOut.data_param[:,3,:]
2378 2378 SNR = dataOut.data_snr
2379 2379
2380 2380 zenith = numpy.array(zenith)
2381 2381 zenith -= zenithCorrection
2382 2382 zenith *= numpy.pi/180
2383 2383
2384 2384 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
2385 2385
2386 2386 alp = zenith[0]
2387 2387 bet = zenith[1]
2388 2388
2389 2389 w_w = velRadial1[0,:]
2390 2390 w_e = velRadial1[1,:]
2391 2391
2392 2392 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
2393 2393 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
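# The two expressions above are the closed-form (Cramer's rule) solution of the
# two-beam radial-velocity model implied here, with alp and bet the beam zenith angles:
#
#   w_w = u*numpy.sin(alp) + w*numpy.cos(alp)
#   w_e = u*numpy.sin(bet) + w*numpy.cos(bet)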
2394 2394
2395 2395 winds = numpy.vstack((u,w))
2396 2396
2397 2397 dataOut.heightList = heiRang1
2398 2398 dataOut.data_output = winds
2399 2399 dataOut.data_snr = SNR1
2400 2400
2401 2401 dataOut.utctimeInit = dataOut.utctime
2402 2402 dataOut.outputInterval = dataOut.timeInterval
2403 2403 return
2404 2404
2405 2405 #--------------- Non Specular Meteor ----------------
2406 2406
2407 2407 class NonSpecularMeteorDetection(Operation):
2408 2408
2409 2409 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
2410 2410 data_acf = dataOut.data_pre[0]
2411 2411 data_ccf = dataOut.data_pre[1]
2412 2412 pairsList = dataOut.groupList[1]
2413 2413
2414 2414 lamb = dataOut.C/dataOut.frequency
2415 2415 tSamp = dataOut.ippSeconds*dataOut.nCohInt
2416 2416 paramInterval = dataOut.paramInterval
2417 2417
2418 2418 nChannels = data_acf.shape[0]
2419 2419 nLags = data_acf.shape[1]
2420 2420 nProfiles = data_acf.shape[2]
2421 2421 nHeights = dataOut.nHeights
2422 2422 nCohInt = dataOut.nCohInt
2423 2423 sec = numpy.round(nProfiles/dataOut.paramInterval)
2424 2424 heightList = dataOut.heightList
2425 2425 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
2426 2426 utctime = dataOut.utctime
2427 2427
2428 2428 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
2429 2429
2430 2430 #------------------------ SNR --------------------------------------
2431 2431 power = data_acf[:,0,:,:].real
2432 2432 noise = numpy.zeros(nChannels)
2433 2433 SNR = numpy.zeros(power.shape)
2434 2434 for i in range(nChannels):
2435 2435 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
2436 2436 SNR[i] = (power[i]-noise[i])/noise[i]
2437 2437 SNRm = numpy.nanmean(SNR, axis = 0)
2438 2438 SNRdB = 10*numpy.log10(SNR)
2439 2439
2440 2440 if mode == 'SA':
2441 2441 dataOut.groupList = dataOut.groupList[1]
2442 2442 nPairs = data_ccf.shape[0]
2443 2443 #---------------------- Coherence and Phase --------------------------
2444 2444 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
2445 2445 # phase1 = numpy.copy(phase)
2446 2446 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
2447 2447
2448 2448 for p in range(nPairs):
2449 2449 ch0 = pairsList[p][0]
2450 2450 ch1 = pairsList[p][1]
2451 2451 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
2452 2452 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
2453 2453 # phase1[p,:,:] = numpy.angle(ccf) #median filter
2454 2454 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
2455 2455 # coh1[p,:,:] = numpy.abs(ccf) #median filter
2456 2456 coh = numpy.nanmax(coh1, axis = 0)
2457 2457 # struc = numpy.ones((5,1))
2458 2458 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
2459 2459 #---------------------- Radial Velocity ----------------------------
2460 2460 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
2461 2461 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
2462 2462
2463 2463 if allData:
2464 2464 boolMetFin = ~numpy.isnan(SNRm)
2465 2465 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2466 2466 else:
2467 2467 #------------------------ Meteor mask ---------------------------------
2468 2468 # #SNR mask
2469 2469 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
2470 2470 #
2471 2471 # #Erase small objects
2472 2472 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
2473 2473 #
2474 2474 # auxEEJ = numpy.sum(boolMet1,axis=0)
2475 2475 # indOver = auxEEJ>nProfiles*0.8 #Use this later
2476 2476 # indEEJ = numpy.where(indOver)[0]
2477 2477 # indNEEJ = numpy.where(~indOver)[0]
2478 2478 #
2479 2479 # boolMetFin = boolMet1
2480 2480 #
2481 2481 # if indEEJ.size > 0:
2482 2482 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
2483 2483 #
2484 2484 # boolMet2 = coh > cohThresh
2485 2485 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
2486 2486 #
2487 2487 # #Final Meteor mask
2488 2488 # boolMetFin = boolMet1|boolMet2
2489 2489
2490 2490 #Coherence mask
2491 2491 boolMet1 = coh > 0.75
2492 2492 struc = numpy.ones((30,1))
2493 2493 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
2494 2494
2495 2495 #Derivative mask
2496 2496 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2497 2497 boolMet2 = derPhase < 0.2
2498 2498 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
2499 2499 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
2500 2500 boolMet2 = ndimage.median_filter(boolMet2,size=5)
2501 2501 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
2502 2502 # #Final mask
2503 2503 # boolMetFin = boolMet2
2504 2504 boolMetFin = boolMet1&boolMet2
2505 2505 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
2506 2506 #Creating data_param
2507 2507 coordMet = numpy.where(boolMetFin)
2508 2508
2509 2509 tmet = coordMet[0]
2510 2510 hmet = coordMet[1]
2511 2511
2512 2512 data_param = numpy.zeros((tmet.size, 6 + nPairs))
2513 2513 data_param[:,0] = utctime
2514 2514 data_param[:,1] = tmet
2515 2515 data_param[:,2] = hmet
2516 2516 data_param[:,3] = SNRm[tmet,hmet]
2517 2517 data_param[:,4] = velRad[tmet,hmet]
2518 2518 data_param[:,5] = coh[tmet,hmet]
2519 2519 data_param[:,6:] = phase[:,tmet,hmet].T
2520 2520
2521 2521 elif mode == 'DBS':
2522 2522 dataOut.groupList = numpy.arange(nChannels)
2523 2523
2524 2524 #Radial Velocities
2525 2525 phase = numpy.angle(data_acf[:,1,:,:])
2526 2526 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
2527 2527 velRad = phase*lamb/(4*numpy.pi*tSamp)
2528 2528
2529 2529 #Spectral width
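# Dual-lag width estimate from the ratio of the lag-1 and lag-2 ACFs:
# sigma_v = lamb/(2*sqrt(6)*pi*tSamp) * sqrt(ln(acf1/acf2)).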
2530 2530 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
2531 2531 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
2532 2532 acf1 = data_acf[:,1,:,:]
2533 2533 acf2 = data_acf[:,2,:,:]
2534 2534
2535 2535 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
2536 2536 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
2537 2537 if allData:
2538 2538 boolMetFin = ~numpy.isnan(SNRdB)
2539 2539 else:
2540 2540 #SNR
2541 2541 boolMet1 = (SNRdB>SNRthresh) #SNR mask
2542 2542 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
2543 2543
2544 2544 #Radial velocity
2545 2545 boolMet2 = numpy.abs(velRad) < 20
2546 2546 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
2547 2547
2548 2548 #Spectral Width
2549 2549 boolMet3 = spcWidth < 30
2550 2550 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
2551 2551 # boolMetFin = self.__erase_small(boolMet1, 10,5)
2552 2552 boolMetFin = boolMet1&boolMet2&boolMet3
2553 2553
2554 2554 #Creating data_param
2555 2555 coordMet = numpy.where(boolMetFin)
2556 2556
2557 2557 cmet = coordMet[0]
2558 2558 tmet = coordMet[1]
2559 2559 hmet = coordMet[2]
2560 2560
2561 2561 data_param = numpy.zeros((tmet.size, 7))
2562 2562 data_param[:,0] = utctime
2563 2563 data_param[:,1] = cmet
2564 2564 data_param[:,2] = tmet
2565 2565 data_param[:,3] = hmet
2566 2566 data_param[:,4] = SNR[cmet,tmet,hmet].T
2567 2567 data_param[:,5] = velRad[cmet,tmet,hmet].T
2568 2568 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
2569 2569
2570 2570 # self.dataOut.data_param = data_int
2571 2571 if len(data_param) == 0:
2572 2572 dataOut.flagNoData = True
2573 2573 else:
2574 2574 dataOut.data_param = data_param
2575 2575
2576 2576 def __erase_small(self, binArray, threshX, threshY):
2577 2577 labarray, numfeat = ndimage.measurements.label(binArray)
2578 2578 binArray1 = numpy.copy(binArray)
2579 2579
2580 2580 for i in range(1,numfeat + 1):
2581 2581 auxBin = (labarray==i)
2582 2582 auxSize = auxBin.sum()
2583 2583
2584 2584 x,y = numpy.where(auxBin)
2585 2585 widthX = x.max() - x.min()
2586 2586 widthY = y.max() - y.min()
2587 2587
2588 2588 #width X: 3 sec -> 12.5*3
2589 2589 #width Y:
2590 2590
2591 2591 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
2592 2592 binArray1[auxBin] = False
2593 2593
2594 2594 return binArray1
2595 2595
2596 2596 #--------------- Specular Meteor ----------------
2597 2597
2598 2598 class SMDetection(Operation):
2599 2599 '''
2600 2600 Function DetectMeteors()
2601 2601 Method based on the paper:
2602 2602 HOLDSWORTH ET AL. 2004
2603 2603
2604 2604 Input:
2605 2605 self.dataOut.data_pre
2606 2606
2607 2607 centerReceiverIndex: Index of the channel corresponding to the center receiver
2608 2608
2609 2609 hei_ref: Height reference for the Beacon signal extraction
2610 2610 tauindex:
2611 2611 predefinedPhaseShifts: Predefined phase offsets for the voltage signals
2612 2612
2613 2613 cohDetection: Whether to use coherent detection or not
2614 2614 cohDet_timeStep: Coherent Detection calculation time step
2615 2615 cohDet_thresh: Coherent Detection phase threshold to correct phases
2616 2616
2617 2617 noise_timeStep: Noise calculation time step
2618 2618 noise_multiple: Noise multiple to define signal threshold
2619 2619
2620 2620 multDet_timeLimit: Multiple Detection Removal time limit in seconds
2621 2621 multDet_rangeLimit: Multiple Detection Removal range limit in km
2622 2622
2623 2623 phaseThresh: Maximum phase difference between receivers for an echo to be considered a meteor
2624 2624 SNRThresh: Minimum SNR of the meteor signal for an echo to be considered a meteor
2625 2625
2626 2626 hmin: Minimum Height of the meteor to use it in the further wind estimations
2627 2627 hmax: Maximum Height of the meteor to use it in the further wind estimations
2628 2628 azimuth: Azimuth angle correction
2629 2629
2630 2630 Affected:
2631 2631 self.dataOut.data_param
2632 2632
2633 2633 Rejection Criteria (Errors):
2634 2634 0: No error; analysis OK
2635 2635 1: SNR < SNR threshold
2636 2636 2: angle of arrival (AOA) ambiguously determined
2637 2637 3: AOA estimate not feasible
2638 2638 4: Large difference in AOAs obtained from different antenna baselines
2639 2639 5: echo at start or end of time series
2640 2640 6: echo less than 5 samples long; too short for analysis
2641 2641 7: echo rise exceeds 0.3s
2642 2642 8: echo decay time less than twice rise time
2643 2643 9: large power level before echo
2644 2644 10: large power level after echo
2645 2645 11: poor fit to amplitude for estimation of decay time
2646 2646 12: poor fit to CCF phase variation for estimation of radial drift velocity
2647 2647 13: height unresolvable echo: not valid height within 70 to 110 km
2648 2648 14: height ambiguous echo: more than one possible height within 70 to 110 km
2649 2649 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
2650 2650 16: oscillatory echo, indicating event most likely not an underdense echo
2651 2651
2652 2652 17: phase difference threshold exceeded in meteor re-estimation
2653 2653
2654 2654 Data Storage:
2655 2655 Meteors for Wind Estimation (8):
2656 2656 Utc Time | Range Height
2657 2657 Azimuth Zenith errorCosDir
2658 2658 VelRad errorVelRad
2659 2659 Phase0 Phase1 Phase2 Phase3
2660 2660 TypeError
2661 2661
2662 2662 '''
2663 2663
2664 2664 def run(self, dataOut, hei_ref = None, tauindex = 0,
2665 2665 phaseOffsets = None,
2666 2666 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
2667 2667 noise_timeStep = 4, noise_multiple = 4,
2668 2668 multDet_timeLimit = 1, multDet_rangeLimit = 3,
2669 2669 phaseThresh = 20, SNRThresh = 5,
2670 2670 hmin = 50, hmax=150, azimuth = 0,
2671 2671 channelPositions = None) :
2672 2672
2673 2673
2674 2674 #Getting Pairslist
2675 2675 if channelPositions is None:
2676 2676 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
2677 2677 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
2678 2678 meteorOps = SMOperations()
2679 2679 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
2680 2680 heiRang = dataOut.heightList
2681 2681 #Get Beacon signal - No Beacon signal anymore
2682 2682 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
2683 2683 #
2684 2684 # if hei_ref != None:
2685 2685 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
2686 2686 #
2687 2687
2688 2688
2689 2689 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
2690 2690 # see if the user put in pre defined phase shifts
2691 2691 voltsPShift = dataOut.data_pre.copy()
2692 2692
2693 2693 # if predefinedPhaseShifts != None:
2694 2694 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
2695 2695 #
2696 2696 # # elif beaconPhaseShifts:
2697 2697 # # #get hardware phase shifts using beacon signal
2698 2698 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
2699 2699 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
2700 2700 #
2701 2701 # else:
2702 2702 # hardwarePhaseShifts = numpy.zeros(5)
2703 2703 #
2704 2704 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
2705 2705 # for i in range(self.dataOut.data_pre.shape[0]):
2706 2706 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
2707 2707
2708 2708 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
2709 2709
2710 2710 #Remove DC
2711 2711 voltsDC = numpy.mean(voltsPShift,1)
2712 2712 voltsDC = numpy.mean(voltsDC,1)
2713 2713 for i in range(voltsDC.shape[0]):
2714 2714 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
2715 2715
2716 2716 #Don't consider the last heights; they're used to calculate the hardware phase shift
2717 2717 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
2718 2718
2719 2719 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
2720 2720 #Coherent Detection
2721 2721 if cohDetection:
2722 2722 #use coherent detection to get the net power
2723 2723 cohDet_thresh = cohDet_thresh*numpy.pi/180
2724 2724 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
2725 2725
2726 2726 #Non-coherent detection!
2727 2727 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
2728 2728 #********** END OF COH/NON-COH POWER CALCULATION**********************
2729 2729
2730 2730 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
2731 2731 #Get noise
2732 2732 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
2733 2733 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
2734 2734 #Get signal threshold
2735 2735 signalThresh = noise_multiple*noise
2736 2736 #Meteor echoes detection
2737 2737 listMeteors = self.__findMeteors(powerNet, signalThresh)
2738 2738 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CACULATION **********
2739 2739
2740 2740 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
2741 2741 #Parameters
2742 2742 heiRange = dataOut.heightList
2743 2743 rangeInterval = heiRange[1] - heiRange[0]
2744 2744 rangeLimit = multDet_rangeLimit/rangeInterval
2745 2745 timeLimit = multDet_timeLimit/dataOut.timeInterval
2746 2746 #Multiple detection removals
2747 2747 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
2748 2748 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
2749 2749
2750 2750 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
2751 2751 #Parameters
2752 2752 phaseThresh = phaseThresh*numpy.pi/180
2753 2753 thresh = [phaseThresh, noise_multiple, SNRThresh]
2754 2754 #Meteor reestimation (Errors N 1, 6, 12, 17)
2755 2755 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
2756 2756 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
2757 2757 #Estimation of decay times (Errors N 7, 8, 11)
2758 2758 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
2759 2759 #******************* END OF METEOR REESTIMATION *******************
2760 2760
2761 2761 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
2762 2762 #Calculating Radial Velocity (Error N 15)
2763 2763 radialStdThresh = 10
2764 2764 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
2765 2765
2766 2766 if len(listMeteors4) > 0:
2767 2767 #Setting New Array
2768 2768 date = dataOut.utctime
2769 2769 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
2770 2770
2771 2771 #Correcting phase offset
2772 2772 if phaseOffsets is not None:
2773 2773 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
2774 2774 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
2775 2775
2776 2776 #Second Pairslist
2777 2777 pairsList = []
2778 2778 pairx = (0,1)
2779 2779 pairy = (2,3)
2780 2780 pairsList.append(pairx)
2781 2781 pairsList.append(pairy)
2782 2782
2783 2783 jph = numpy.array([0,0,0,0])
2784 2784 h = (hmin,hmax)
2785 2785 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
2786 2786
2787 2787 # #Calculate AOA (Error N 3, 4)
2788 2788 # #JONES ET AL. 1998
2789 2789 # error = arrayParameters[:,-1]
2790 2790 # AOAthresh = numpy.pi/8
2791 2791 # phases = -arrayParameters[:,9:13]
2792 2792 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
2793 2793 #
2794 2794 # #Calculate Heights (Error N 13 and 14)
2795 2795 # error = arrayParameters[:,-1]
2796 2796 # Ranges = arrayParameters[:,2]
2797 2797 # zenith = arrayParameters[:,5]
2798 2798 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
2799 2799 # error = arrayParameters[:,-1]
2800 2800 #********************* END OF PARAMETERS CALCULATION **************************
2801 2801
2802 2802 #***************************+ PASS DATA TO NEXT STEP **********************
2803 2803 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
2804 2804 dataOut.data_param = arrayParameters
2805 2805
2806 2806 if arrayParameters is None:
2807 2807 dataOut.flagNoData = True
2808 2808 else:
2809 2809 dataOut.flagNoData = False
2810 2810
2811 2811 return
2812 2812
2813 2813 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
2814 2814
2815 2815 minIndex = min(newheis[0])
2816 2816 maxIndex = max(newheis[0])
2817 2817
2818 2818 voltage = voltage0[:,:,minIndex:maxIndex+1]
2819 2819 nLength = voltage.shape[1]//n
2820 2820 nMin = 0
2821 2821 nMax = 0
2822 2822 phaseOffset = numpy.zeros((len(pairslist),n))
2823 2823
2824 2824 for i in range(n):
2825 2825 nMax += nLength
2826 2826 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
2827 2827 phaseCCF = numpy.mean(phaseCCF, axis = 2)
2828 2828 phaseOffset[:,i] = phaseCCF.transpose()
2829 2829 nMin = nMax
2830 2830 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
2831 2831
2832 2832 #Remove Outliers
2833 2833 factor = 2
2834 2834 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
2835 2835 dw = numpy.std(wt,axis = 1)
2836 2836 dw = dw.reshape((dw.size,1))
2837 2837 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
2838 2838 phaseOffset[ind] = numpy.nan
2839 2839 phaseOffset = stats.nanmean(phaseOffset, axis=1)
2840 2840
2841 2841 return phaseOffset
2842 2842
2843 2843 def __shiftPhase(self, data, phaseShift):
2844 2844 #this will shift the phase of a complex number
2845 2845 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
2846 2846 return dataShifted
2847 2847
2848 2848 def __estimatePhaseDifference(self, array, pairslist):
2849 2849 nChannel = array.shape[0]
2850 2850 nHeights = array.shape[2]
2851 2851 numPairs = len(pairslist)
2852 2852 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
2853 2853 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
2854 2854
2855 2855 #Correct phases
2856 2856 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
2857 2857 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
2858 2858
2859 2859 if indDer[0].shape[0] > 0:
2860 2860 for i in range(indDer[0].shape[0]):
2861 2861 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
2862 2862 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
2863 2863
2864 2864 # for j in range(numSides):
2865 2865 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
2866 2866 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
2867 2867 #
2868 2868 #Linear
2869 2869 phaseInt = numpy.zeros((numPairs,1))
2870 2870 angAllCCF = phaseCCF[:,[0,1,3,4],0]
2871 2871 for j in range(numPairs):
2872 2872 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
2873 2873 phaseInt[j] = fit[1]
2874 2874 #Phase Differences
2875 2875 phaseDiff = phaseInt - phaseCCF[:,2,:]
2876 2876 phaseArrival = phaseInt.reshape(phaseInt.size)
2877 2877
2878 2878 #Dealias
2879 2879 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
2880 2880 # indAlias = numpy.where(phaseArrival > numpy.pi)
2881 2881 # phaseArrival[indAlias] -= 2*numpy.pi
2882 2882 # indAlias = numpy.where(phaseArrival < -numpy.pi)
2883 2883 # phaseArrival[indAlias] += 2*numpy.pi
2884 2884
2885 2885 return phaseDiff, phaseArrival
2886 2886
2887 2887 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
2888 2888 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
2889 2889 #find the phase shifts of each channel over 1 second intervals
2890 2890 #only look at ranges below the beacon signal
2891 2891 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2892 2892 numBlocks = int(volts.shape[1]/numProfPerBlock)
2893 2893 numHeights = volts.shape[2]
2894 2894 nChannel = volts.shape[0]
2895 2895 voltsCohDet = volts.copy()
2896 2896
2897 2897 pairsarray = numpy.array(pairslist)
2898 2898 indSides = pairsarray[:,1]
2899 2899 # indSides = numpy.array(range(nChannel))
2900 2900 # indSides = numpy.delete(indSides, indCenter)
2901 2901 #
2902 2902 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
2903 2903 listBlocks = numpy.array_split(volts, numBlocks, 1)
2904 2904
2905 2905 startInd = 0
2906 2906 endInd = 0
2907 2907
2908 2908 for i in range(numBlocks):
2909 2909 startInd = endInd
2910 2910 endInd = endInd + listBlocks[i].shape[1]
2911 2911
2912 2912 arrayBlock = listBlocks[i]
2913 2913 # arrayBlockCenter = listCenter[i]
2914 2914
2915 2915 #Estimate the Phase Difference
2916 2916 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
2917 2917 #Phase Difference RMS
2918 2918 arrayPhaseRMS = numpy.abs(phaseDiff)
2919 2919 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
2920 2920 indPhase = numpy.where(phaseRMSaux==4)
2921 2921 #Shifting
2922 2922 if indPhase[0].shape[0] > 0:
2923 2923 for j in range(indSides.size):
2924 2924 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
2925 2925 voltsCohDet[:,startInd:endInd,:] = arrayBlock
2926 2926
2927 2927 return voltsCohDet
2928 2928
2929 2929 def __calculateCCF(self, volts, pairslist ,laglist):
2930 2930
2931 2931 nHeights = volts.shape[2]
2932 2932 nPoints = volts.shape[1]
2933 2933 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
2934 2934
2935 2935 for i in range(len(pairslist)):
2936 2936 volts1 = volts[pairslist[i][0]]
2937 2937 volts2 = volts[pairslist[i][1]]
2938 2938
2939 2939 for t in range(len(laglist)):
2940 2940 idxT = laglist[t]
2941 2941 if idxT >= 0:
2942 2942 vStacked = numpy.vstack((volts2[idxT:,:],
2943 2943 numpy.zeros((idxT, nHeights),dtype='complex')))
2944 2944 else:
2945 2945 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
2946 2946 volts2[:(nPoints + idxT),:]))
2947 2947 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
2948 2948
2949 2949 vStacked = None
2950 2950 return voltsCCF
2951 2951
2952 2952 def __getNoise(self, power, timeSegment, timeInterval):
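# Splits the power series into blocks of ~timeSegment seconds and uses the block-mean power
# as the noise estimate: 'noise' keeps the per-height mean, 'noise1' a single value per block.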
2953 2953 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2954 2954 numBlocks = int(power.shape[0]/numProfPerBlock)
2955 2955 numHeights = power.shape[1]
2956 2956
2957 2957 listPower = numpy.array_split(power, numBlocks, 0)
2958 2958 noise = numpy.zeros((power.shape[0], power.shape[1]))
2959 2959 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
2960 2960
2961 2961 startInd = 0
2962 2962 endInd = 0
2963 2963
2964 2964 for i in range(numBlocks): #loop over the time blocks
2965 2965 startInd = endInd
2966 2966 endInd = endInd + listPower[i].shape[0]
2967 2967
2968 2968 arrayBlock = listPower[i]
2969 2969 noiseAux = numpy.mean(arrayBlock, 0)
2970 2970 # noiseAux = numpy.median(noiseAux)
2971 2971 # noiseAux = numpy.mean(arrayBlock)
2972 2972 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
2973 2973
2974 2974 noiseAux1 = numpy.mean(arrayBlock)
2975 2975 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
2976 2976
2977 2977 return noise, noise1
2978 2978
2979 2979 def __findMeteors(self, power, thresh):
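# For every height, scans the power series for runs of at least 3 consecutive profiles above
# the threshold; each candidate echo is stored as [height, start, peak, end, FLA], where FLA
# is a lag-1 correlation figure of the power run used later to rank overlapping detections.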
2980 2980 nProf = power.shape[0]
2981 2981 nHeights = power.shape[1]
2982 2982 listMeteors = []
2983 2983
2984 2984 for i in range(nHeights):
2985 2985 powerAux = power[:,i]
2986 2986 threshAux = thresh[:,i]
2987 2987
2988 2988 indUPthresh = numpy.where(powerAux > threshAux)[0]
2989 2989 indDNthresh = numpy.where(powerAux <= threshAux)[0]
2990 2990
2991 2991 j = 0
2992 2992
2993 2993 while (j < indUPthresh.size - 2):
2994 2994 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
2995 2995 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
2996 2996 indDNthresh = indDNthresh[indDNAux]
2997 2997
2998 2998 if (indDNthresh.size > 0):
2999 2999 indEnd = indDNthresh[0] - 1
3000 3000 indInit = indUPthresh[j]
3001 3001
3002 3002 meteor = powerAux[indInit:indEnd + 1]
3003 3003 indPeak = meteor.argmax() + indInit
3004 3004 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
3005 3005
3006 3006 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
3007 3007 j = numpy.where(indUPthresh == indEnd)[0] + 1
3008 3008 else: j+=1
3009 3009 else: j+=1
3010 3010
3011 3011 return listMeteors
3012 3012
3013 3013 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
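# Greedy removal of duplicates: repeatedly keep the candidate with the largest FLA, then drop
# all detections (including the kept one) within +/- rangeLimit gates and the extended time
# window, and repeat until no candidates remain.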
3014 3014
3015 3015 arrayMeteors = numpy.asarray(listMeteors)
3016 3016 listMeteors1 = []
3017 3017
3018 3018 while arrayMeteors.shape[0] > 0:
3019 3019 FLAs = arrayMeteors[:,4]
3020 3020 maxFLA = FLAs.argmax()
3021 3021 listMeteors1.append(arrayMeteors[maxFLA,:])
3022 3022
3023 3023 MeteorInitTime = arrayMeteors[maxFLA,1]
3024 3024 MeteorEndTime = arrayMeteors[maxFLA,3]
3025 3025 MeteorHeight = arrayMeteors[maxFLA,0]
3026 3026
3027 3027 #Check neighborhood
3028 3028 maxHeightIndex = MeteorHeight + rangeLimit
3029 3029 minHeightIndex = MeteorHeight - rangeLimit
3030 3030 minTimeIndex = MeteorInitTime - timeLimit
3031 3031 maxTimeIndex = MeteorEndTime + timeLimit
3032 3032
3033 3033 #Check Heights
3034 3034 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
3035 3035 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
3036 3036 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
3037 3037
3038 3038 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
3039 3039
3040 3040 return listMeteors1
3041 3041
3042 3042 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
3043 3043 numHeights = volts.shape[2]
3044 3044 nChannel = volts.shape[0]
3045 3045
3046 3046 thresholdPhase = thresh[0]
3047 3047 thresholdNoise = thresh[1]
3048 3048 thresholdDB = float(thresh[2])
3049 3049
3050 3050 thresholdDB1 = 10**(thresholdDB/10)
3051 3051 pairsarray = numpy.array(pairslist)
3052 3052 indSides = pairsarray[:,1]
3053 3053
3054 3054 pairslist1 = list(pairslist)
3055 3055 pairslist1.append((0,1))
3056 3056 pairslist1.append((3,4))
3057 3057
3058 3058 listMeteors1 = []
3059 3059 listPowerSeries = []
3060 3060 listVoltageSeries = []
3061 3061 #volts has the raw data
3062 3062
3063 3063 if frequency == 30e6:
3064 3064 timeLag = 45*10**-3
3065 3065 else:
3066 3066 timeLag = 15*10**-3
3067 3067 lag = int(numpy.ceil(timeLag/timeInterval))
3068 3068
3069 3069 for i in range(len(listMeteors)):
3070 3070
3071 3071 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
3072 3072 meteorAux = numpy.zeros(16)
3073 3073
3074 3074 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
3075 3075 mHeight = listMeteors[i][0]
3076 3076 mStart = listMeteors[i][1]
3077 3077 mPeak = listMeteors[i][2]
3078 3078 mEnd = listMeteors[i][3]
3079 3079
3080 3080 #get the volt data between the start and end times of the meteor
3081 3081 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
3082 3082 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3083 3083
3084 3084 #3.6. Phase Difference estimation
3085 3085 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
3086 3086
3087 3087 #3.7. Phase difference removal & meteor start, peak and end times reestimated
3088 3088 #meteorVolts0.- all Channels, all Profiles
3089 3089 meteorVolts0 = volts[:,:,mHeight]
3090 3090 meteorThresh = noise[:,mHeight]*thresholdNoise
3091 3091 meteorNoise = noise[:,mHeight]
3092 3092 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
3093 3093 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
3094 3094
3095 3095 #Times reestimation
3096 3096 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
3097 3097 if mStart1.size > 0:
3098 3098 mStart1 = mStart1[-1] + 1
3099 3099
3100 3100 else:
3101 3101 mStart1 = mPeak
3102 3102
3103 3103 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
3104 3104 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
3105 3105 if mEndDecayTime1.size == 0:
3106 3106 mEndDecayTime1 = powerNet0.size
3107 3107 else:
3108 3108 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
3109 3109 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
3110 3110
3111 3111 #meteorVolts1.- all Channels, from start to end
3112 3112 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
3113 3113 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
3114 3114 if meteorVolts2.shape[1] == 0:
3115 3115 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
3116 3116 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
3117 3117 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
3118 3118 ##################### END PARAMETERS REESTIMATION #########################
3119 3119
3120 3120 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
3121 3121 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
3122 3122 if meteorVolts2.shape[1] > 0:
3123 3123 #Phase Difference re-estimation
3124 3124 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
3125 3125 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
3126 3126 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
3127 3127 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
3128 3128 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
3129 3129
3130 3130 #Phase Difference RMS
3131 3131 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
3132 3132 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
3133 3133 #Data from Meteor
3134 3134 mPeak1 = powerNet1.argmax() + mStart1
3135 3135 mPeakPower1 = powerNet1.max()
3136 3136 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
3137 3137 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
3138 3138 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
3139 3139 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
3140 3140 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
3141 3141 #Vectorize
3142 3142 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
3143 3143 meteorAux[7:11] = phaseDiffint[0:4]
3144 3144
3145 3145 #Rejection Criterions
3146 3146 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
3147 3147 meteorAux[-1] = 17
3148 3148 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
3149 3149 meteorAux[-1] = 1
3150 3150
3151 3151
3152 3152 else:
3153 3153 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
3154 3154 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
3155 3155 PowerSeries = 0
3156 3156
3157 3157 listMeteors1.append(meteorAux)
3158 3158 listPowerSeries.append(PowerSeries)
3159 3159 listVoltageSeries.append(meteorVolts1)
3160 3160
3161 3161 return listMeteors1, listPowerSeries, listVoltageSeries
3162 3162
3163 3163 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
3164 3164
3165 3165 threshError = 10
3166 3166 #Depending if it is 30 or 50 MHz
3167 3167 if frequency == 30e6:
3168 3168 timeLag = 45*10**-3
3169 3169 else:
3170 3170 timeLag = 15*10**-3
3171 3171 lag = int(numpy.ceil(timeLag/timeInterval))
3172 3172
3173 3173 listMeteors1 = []
3174 3174
3175 3175 for i in range(len(listMeteors)):
3176 3176 meteorPower = listPower[i]
3177 3177 meteorAux = listMeteors[i]
3178 3178
3179 3179 if meteorAux[-1] == 0:
3180 3180
3181 3181 try:
3182 3182 indmax = meteorPower.argmax()
3183 3183 indlag = indmax + lag
3184 3184
3185 3185 y = meteorPower[indlag:]
3186 3186 x = numpy.arange(0, y.size)*timeLag
3187 3187
3188 3188 #first guess
3189 3189 a = y[0]
3190 3190 tau = timeLag
3191 3191 #exponential fit
3192 3192 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
3193 3193 y1 = self.__exponential_function(x, *popt)
3194 3194 #error estimation
3195 3195 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
3196 3196
3197 3197 decayTime = popt[1]
3198 3198 riseTime = indmax*timeInterval
3199 3199 meteorAux[11:13] = [decayTime, error]
3200 3200
3201 3201 #Table items 7, 8 and 11
3202 3202 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
3203 3203 meteorAux[-1] = 7
3204 3204 elif (decayTime < 2*riseTime): #Number 8: Echo decay time less than twice rise time
3205 3205 meteorAux[-1] = 8
3206 3206 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
3207 3207 meteorAux[-1] = 11
3208 3208
3209 3209
3210 3210 except:
3211 3211 meteorAux[-1] = 11
3212 3212
3213 3213
3214 3214 listMeteors1.append(meteorAux)
3215 3215
3216 3216 return listMeteors1
3217 3217
3218 3218 #Exponential Function
3219 3219
3220 3220 def __exponential_function(self, x, a, tau):
3221 3221 y = a*numpy.exp(-x/tau)
3222 3222 return y
3223 3223
3224 3224 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
3225 3225
3226 3226 pairslist1 = list(pairslist)
3227 3227 pairslist1.append((0,1))
3228 3228 pairslist1.append((3,4))
3229 3229 numPairs = len(pairslist1)
3230 3230 #Time Lag
3231 3231 timeLag = 45*10**-3
3232 3232 c = 3e8
3233 3233 lag = numpy.ceil(timeLag/timeInterval)
3234 3234 freq = 30e6
3235 3235
3236 3236 listMeteors1 = []
3237 3237
3238 3238 for i in range(len(listMeteors)):
3239 3239 meteorAux = listMeteors[i]
3240 3240 if meteorAux[-1] == 0:
3241 3241 mStart = listMeteors[i][1]
3242 3242 mPeak = listMeteors[i][2]
3243 3243 mLag = mPeak - mStart + lag
3244 3244
3245 3245 #get the volt data between the start and end times of the meteor
3246 3246 meteorVolts = listVolts[i]
3247 3247 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3248 3248
3249 3249 #Get CCF
3250 3250 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
3251 3251
3252 3252 #Method 2
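# Radial velocity from the slope of the CCF phase vs lag: after unwrapping, a linear fit over
# lags [-2,-1,1,2] gives dphi/dt per pair, and v_r = -mean(slopes)*lambda/(4*pi), written
# below as -mean(slopes)*(0.25/pi)*(c/freq).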
3253 3253 slopes = numpy.zeros(numPairs)
3254 3254 time = numpy.array([-2,-1,1,2])*timeInterval
3255 3255 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
3256 3256
3257 3257 #Correct phases
3258 3258 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
3259 3259 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
3260 3260
3261 3261 if indDer[0].shape[0] > 0:
3262 3262 for i in range(indDer[0].shape[0]):
3263 3263 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
3264 3264 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
3265 3265
3266 3266 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
3267 3267 for j in range(numPairs):
3268 3268 fit = stats.linregress(time, angAllCCF[j,:])
3269 3269 slopes[j] = fit[0]
3270 3270
3271 3271 #Remove Outlier
3272 3272 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3273 3273 # slopes = numpy.delete(slopes,indOut)
3274 3274 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3275 3275 # slopes = numpy.delete(slopes,indOut)
3276 3276
3277 3277 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
3278 3278 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
3279 3279 meteorAux[-2] = radialError
3280 3280 meteorAux[-3] = radialVelocity
3281 3281
3282 3282 #Setting Error
3283 3283 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
3284 3284 if numpy.abs(radialVelocity) > 200:
3285 3285 meteorAux[-1] = 15
3286 3286 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
3287 3287 elif radialError > radialStdThresh:
3288 3288 meteorAux[-1] = 12
3289 3289
3290 3290 listMeteors1.append(meteorAux)
3291 3291 return listMeteors1
3292 3292
3293 3293 def __setNewArrays(self, listMeteors, date, heiRang):
3294 3294
3295 3295 #New arrays
3296 3296 arrayMeteors = numpy.array(listMeteors)
3297 3297 arrayParameters = numpy.zeros((len(listMeteors), 13))
3298 3298
3299 3299 #Date inclusion
3300 3300 # date = re.findall(r'\((.*?)\)', date)
3301 3301 # date = date[0].split(',')
3302 3302 # date = map(int, date)
3303 3303 #
3304 3304 # if len(date)<6:
3305 3305 # date.append(0)
3306 3306 #
3307 3307 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
3308 3308 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
3309 3309 arrayDate = numpy.tile(date, (len(listMeteors)))
3310 3310
3311 3311 #Meteor array
3312 3312 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
3313 3313 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
3314 3314
3315 3315 #Parameters Array
3316 3316 arrayParameters[:,0] = arrayDate #Date
3317 3317 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
3318 3318 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
3319 3319 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
3320 3320 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
3321 3321
3322 3322
3323 3323 return arrayParameters
3324 3324
3325 3325 class CorrectSMPhases(Operation):
3326 3326
3327 3327 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
3328 3328
3329 3329 arrayParameters = dataOut.data_param
3330 3330 pairsList = []
3331 3331 pairx = (0,1)
3332 3332 pairy = (2,3)
3333 3333 pairsList.append(pairx)
3334 3334 pairsList.append(pairy)
3335 3335 jph = numpy.zeros(4)
3336 3336
3337 3337 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
3338 3338 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
3339 3339 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
3340 3340
3341 3341 meteorOps = SMOperations()
3342 3342 if channelPositions is None:
3343 3343 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3344 3344 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
3345 3345
3346 3346 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3347 3347 h = (hmin,hmax)
3348 3348
3349 3349 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
3350 3350
3351 3351 dataOut.data_param = arrayParameters
3352 3352 return
3353 3353
3354 3354 class SMPhaseCalibration(Operation):
3355 3355
3356 3356 __buffer = None
3357 3357
3358 3358 __initime = None
3359 3359
3360 3360 __dataReady = False
3361 3361
3362 3362 __isConfig = False
3363 3363
3364 3364 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
3365 3365
3366 3366 dataTime = currentTime + paramInterval
3367 3367 deltaTime = dataTime - initTime
3368 3368
3369 3369 if deltaTime >= outputInterval or deltaTime < 0:
3370 3370 return True
3371 3371
3372 3372 return False
3373 3373
3374 3374 def __getGammas(self, pairs, d, phases):
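# For each axis the combination -phip2*d3/d2 - phip3 should cluster around the unknown phase
# offset (gamma); a histogram of that quantity (plus copies shifted by +/- pi/2) is built and
# a Gaussian is fitted around its peak, whose center is taken as gamma.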
3375 3375 gammas = numpy.zeros(2)
3376 3376
3377 3377 for i in range(len(pairs)):
3378 3378
3379 3379 pairi = pairs[i]
3380 3380
3381 3381 phip3 = phases[:,pairi[0]]
3382 3382 d3 = d[pairi[0]]
3383 3383 phip2 = phases[:,pairi[1]]
3384 3384 d2 = d[pairi[1]]
3385 3385 #Calculating gamma
3386 3386 # jdcos = alp1/(k*d1)
3387 3387 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
3388 3388 jgamma = -phip2*d3/d2 - phip3
3389 3389 jgamma = numpy.angle(numpy.exp(1j*jgamma))
3390 3390 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
3391 3391 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
3392 3392
3393 3393 #Revised distribution
3394 3394 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
3395 3395
3396 3396 #Histogram
3397 3397 nBins = 64
3398 3398 rmin = -0.5*numpy.pi
3399 3399 rmax = 0.5*numpy.pi
3400 3400 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
3401 3401
3402 3402 meteorsY = phaseHisto[0]
3403 3403 phasesX = phaseHisto[1][:-1]
3404 3404 width = phasesX[1] - phasesX[0]
3405 3405 phasesX += width/2
3406 3406
3407 3407 #Gaussian approximation
3408 3408 bpeak = meteorsY.argmax()
3409 3409 peak = meteorsY.max()
3410 3410 jmin = bpeak - 5
3411 3411 jmax = bpeak + 5 + 1
3412 3412
3413 3413 if jmin<0:
3414 3414 jmin = 0
3415 3415 jmax = 6
3416 3416 elif jmax > meteorsY.size:
3417 3417 jmin = meteorsY.size - 6
3418 3418 jmax = meteorsY.size
3419 3419
3420 3420 x0 = numpy.array([peak,bpeak,50])
3421 3421 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
3422 3422
3423 3423 #Gammas
3424 3424 gammas[i] = coeff[0][1]
3425 3425
3426 3426 return gammas
3427 3427
3428 3428 def __residualFunction(self, coeffs, y, t):
3429 3429
3430 3430 return y - self.__gauss_function(t, coeffs)
3431 3431
3432 3432 def __gauss_function(self, t, coeffs):
3433 3433
3434 3434 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
3435 3435
3436 3436 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
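# Coarse-to-fine grid search for the channel phase offsets: four passes with a shrinking
# angular range, each evaluating a 20x20 grid of candidate offsets (the paired-channel
# offsets follow from the gammas) and keeping the combination that maximizes the number of
# meteors whose parameters come back error-free.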
3437 3437 meteorOps = SMOperations()
3438 3438 nchan = 4
3439 3439 pairx = pairsList[0] #x es 0
3440 3440 pairy = pairsList[1] #y es 1
3441 3441 center_xangle = 0
3442 3442 center_yangle = 0
3443 3443 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
3444 3444 ntimes = len(range_angle)
3445 3445
3446 3446 nstepsx = 20
3447 3447 nstepsy = 20
3448 3448
3449 3449 for iz in range(ntimes):
3450 3450 min_xangle = -range_angle[iz]/2 + center_xangle
3451 3451 max_xangle = range_angle[iz]/2 + center_xangle
3452 3452 min_yangle = -range_angle[iz]/2 + center_yangle
3453 3453 max_yangle = range_angle[iz]/2 + center_yangle
3454 3454
3455 3455 inc_x = (max_xangle-min_xangle)/nstepsx
3456 3456 inc_y = (max_yangle-min_yangle)/nstepsy
3457 3457
3458 3458 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
3459 3459 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
3460 3460 penalty = numpy.zeros((nstepsx,nstepsy))
3461 3461 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
3462 3462 jph = numpy.zeros(nchan)
3463 3463
3464 3464 # Iterations looking for the offset
3465 3465 for iy in range(int(nstepsy)):
3466 3466 for ix in range(int(nstepsx)):
3467 3467 d3 = d[pairsList[1][0]]
3468 3468 d2 = d[pairsList[1][1]]
3469 3469 d5 = d[pairsList[0][0]]
3470 3470 d4 = d[pairsList[0][1]]
3471 3471
3472 3472 alp2 = alpha_y[iy] #gamma 1
3473 3473 alp4 = alpha_x[ix] #gamma 0
3474 3474
3475 3475 alp3 = -alp2*d3/d2 - gammas[1]
3476 3476 alp5 = -alp4*d5/d4 - gammas[0]
3477 3477 # jph[pairy[1]] = alpha_y[iy]
3478 3478 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
3479 3479
3480 3480 # jph[pairx[1]] = alpha_x[ix]
3481 3481 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
3482 3482 jph[pairsList[0][1]] = alp4
3483 3483 jph[pairsList[0][0]] = alp5
3484 3484 jph[pairsList[1][0]] = alp3
3485 3485 jph[pairsList[1][1]] = alp2
3486 3486 jph_array[:,ix,iy] = jph
3487 3487 # d = [2.0,2.5,2.5,2.0]
3488 3488 #TODO: check that the meteors are read in correctly
3489 3489 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
3490 3490 error = meteorsArray1[:,-1]
3491 3491 ind1 = numpy.where(error==0)[0]
3492 3492 penalty[ix,iy] = ind1.size
3493 3493
3494 3494 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
3495 3495 phOffset = jph_array[:,i,j]
3496 3496
3497 3497 center_xangle = phOffset[pairx[1]]
3498 3498 center_yangle = phOffset[pairy[1]]
3499 3499
3500 3500 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
3501 3501 phOffset = phOffset*180/numpy.pi
3502 3502 return phOffset
3503 3503
3504 3504
3505 3505 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
3506 3506
3507 3507 dataOut.flagNoData = True
3508 3508 self.__dataReady = False
3509 3509 dataOut.outputInterval = nHours*3600
3510 3510
3511 3511 if self.__isConfig == False:
3512 3512 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
3513 3513 #Get Initial LTC time
3514 3514 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3515 3515 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3516 3516
3517 3517 self.__isConfig = True
3518 3518
3519 3519 if self.__buffer is None:
3520 3520 self.__buffer = dataOut.data_param.copy()
3521 3521
3522 3522 else:
3523 3523 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3524 3524
3525 3525 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3526 3526
3527 3527 if self.__dataReady:
3528 3528 dataOut.utctimeInit = self.__initime
3529 3529 self.__initime += dataOut.outputInterval #to erase time offset
3530 3530
3531 3531 freq = dataOut.frequency
3532 3532 c = dataOut.C #m/s
3533 3533 lamb = c/freq
3534 3534 k = 2*numpy.pi/lamb
3535 3535 azimuth = 0
3536 3536 h = (hmin, hmax)
3537 3537 # pairs = ((0,1),(2,3)) #Estrella
3538 3538 # pairs = ((1,0),(2,3)) #T
3539 3539
3540 3540 if channelPositions is None:
3541 3541 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3542 3542 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
3543 3543 meteorOps = SMOperations()
3544 3544 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3545 3545
3546 3546 #Checking correct order of pairs
3547 3547 pairs = []
3548 3548 if distances[1] > distances[0]:
3549 3549 pairs.append((1,0))
3550 3550 else:
3551 3551 pairs.append((0,1))
3552 3552
3553 3553 if distances[3] > distances[2]:
3554 3554 pairs.append((3,2))
3555 3555 else:
3556 3556 pairs.append((2,3))
3557 3557 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
3558 3558
3559 3559 meteorsArray = self.__buffer
3560 3560 error = meteorsArray[:,-1]
3561 3561 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
3562 3562 ind1 = numpy.where(boolError)[0]
3563 3563 meteorsArray = meteorsArray[ind1,:]
3564 3564 meteorsArray[:,-1] = 0
3565 3565 phases = meteorsArray[:,8:12]
3566 3566
3567 3567 #Calculate Gammas
3568 3568 gammas = self.__getGammas(pairs, distances, phases)
3569 3569 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
3570 3570 #Calculate Phases
3571 3571 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
3572 3572 phasesOff = phasesOff.reshape((1,phasesOff.size))
3573 3573 dataOut.data_output = -phasesOff
3574 3574 dataOut.flagNoData = False
3575 3575 self.__buffer = None
3576 3576
3577 3577
3578 3578 return
3579 3579
3580 3580 class SMOperations():
3581 3581
3582 3582 def __init__(self):
3583 3583
3584 3584 return
3585 3585
3586 3586 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
3587 3587
3588 3588 arrayParameters = arrayParameters0.copy()
3589 3589 hmin = h[0]
3590 3590 hmax = h[1]
3591 3591
3592 3592 #Calculate AOA (Error N 3, 4)
3593 3593 #JONES ET AL. 1998
3594 3594 AOAthresh = numpy.pi/8
3595 3595 error = arrayParameters[:,-1]
3596 3596 phases = -arrayParameters[:,8:12] + jph
3597 3597 # phases = numpy.unwrap(phases)
3598 3598 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
3599 3599
3600 3600 #Calculate Heights (Error N 13 and 14)
3601 3601 error = arrayParameters[:,-1]
3602 3602 Ranges = arrayParameters[:,1]
3603 3603 zenith = arrayParameters[:,4]
3604 3604 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
3605 3605
3606 3606 #----------------------- Get Final data ------------------------------------
3607 3607 # error = arrayParameters[:,-1]
3608 3608 # ind1 = numpy.where(error==0)[0]
3609 3609 # arrayParameters = arrayParameters[ind1,:]
3610 3610
3611 3611 return arrayParameters
3612 3612
3613 3613 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
3614 3614
3615 3615 arrayAOA = numpy.zeros((phases.shape[0],3))
3616 3616 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
3617 3617
3618 3618 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3619 3619 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3620 3620 arrayAOA[:,2] = cosDirError
3621 3621
3622 3622 azimuthAngle = arrayAOA[:,0]
3623 3623 zenithAngle = arrayAOA[:,1]
3624 3624
3625 3625 #Setting Error
3626 3626 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
3627 3627 error[indError] = 0
3628 3628 #Number 3: AOA estimate not feasible
3629 3629 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3630 3630 error[indInvalid] = 3
3631 3631 #Number 4: Large difference in AOAs obtained from different antenna baselines
3632 3632 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3633 3633 error[indInvalid] = 4
3634 3634 return arrayAOA, error
3635 3635
3636 3636 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
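# Two-step direction-cosine estimate per axis: a coarse value from the phase sum over the
# baseline difference (d0 - d1), then a refined value from the phase difference over the
# baseline sum (d0 + d1), resolving the 2*pi ambiguity by picking the candidate closest to
# the coarse estimate.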
3637 3637
3638 3638 #Initializing some variables
3639 3639 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3640 3640 ang_aux = ang_aux.reshape(1,ang_aux.size)
3641 3641
3642 3642 cosdir = numpy.zeros((arrayPhase.shape[0],2))
3643 3643 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3644 3644
3645 3645
3646 3646 for i in range(2):
3647 3647 ph0 = arrayPhase[:,pairsList[i][0]]
3648 3648 ph1 = arrayPhase[:,pairsList[i][1]]
3649 3649 d0 = distances[pairsList[i][0]]
3650 3650 d1 = distances[pairsList[i][1]]
3651 3651
3652 3652 ph0_aux = ph0 + ph1
3653 3653 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
3654 3654 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
3655 3655 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
3656 3656 #First Estimation
3657 3657 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
3658 3658
3659 3659 #Most-Accurate Second Estimation
3660 3660 phi1_aux = ph0 - ph1
3661 3661 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3662 3662 #Direction Cosine 1
3663 3663 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
3664 3664
3665 3665 #Searching the correct Direction Cosine
3666 3666 cosdir0_aux = cosdir0[:,i]
3667 3667 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3668 3668 #Minimum Distance
3669 3669 cosDiff = (cosdir1 - cosdir0_aux)**2
3670 3670 indcos = cosDiff.argmin(axis = 1)
3671 3671 #Saving Value obtained
3672 3672 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3673 3673
3674 3674 return cosdir0, cosdir
3675 3675
3676 3676 def __calculateAOA(self, cosdir, azimuth):
3677 3677 cosdirX = cosdir[:,0]
3678 3678 cosdirY = cosdir[:,1]
3679 3679
3680 3680 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3681 3681 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
3682 3682 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3683 3683
3684 3684 return angles
3685 3685
3686 3686 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
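# Range aliasing is resolved by testing R + n*Ramb for n = 0, 1, 2 (Ramb = c/(2*PRF) = 375 km);
# each candidate range is converted to height with the law of cosines on a spherical Earth,
# and only echoes with exactly one candidate height inside [minHeight, maxHeight] are accepted.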
3687 3687
3688 3688 Ramb = 375 #Ramb = c/(2*PRF)
3689 3689 Re = 6371 #Earth Radius
3690 3690 heights = numpy.zeros(Ranges.shape)
3691 3691
3692 3692 R_aux = numpy.array([0,1,2])*Ramb
3693 3693 R_aux = R_aux.reshape(1,R_aux.size)
3694 3694
3695 3695 Ranges = Ranges.reshape(Ranges.size,1)
3696 3696
3697 3697 Ri = Ranges + R_aux
3698 3698 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3699 3699
3700 3700 #Check if there is a height between 70 and 110 km
3701 3701 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3702 3702 ind_h = numpy.where(h_bool == 1)[0]
3703 3703
3704 3704 hCorr = hi[ind_h, :]
3705 3705 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3706 3706
3707 3707 hCorr = hi[ind_hCorr][:len(ind_h)]
3708 3708 heights[ind_h] = hCorr
3709 3709
3710 3710 #Setting Error
3711 3711 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3712 3712 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3713 3713 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
3714 3714 error[indError] = 0
3715 3715 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3716 3716 error[indInvalid2] = 14
3717 3717 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3718 3718 error[indInvalid1] = 13
3719 3719
3720 3720 return heights, error
3721 3721
3722 3722 def getPhasePairs(self, channelPositions):
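# Groups the antenna pairs that are aligned along the x or y axis (zero offset in the other
# coordinate), finds the common center channel of each axis, and returns the (center, side)
# pair list together with the corresponding baseline distances.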
3723 3723 chanPos = numpy.array(channelPositions)
3724 3724 listOper = list(itertools.combinations(list(range(5)),2))
3725 3725
3726 3726 distances = numpy.zeros(4)
3727 3727 axisX = []
3728 3728 axisY = []
3729 3729 distX = numpy.zeros(3)
3730 3730 distY = numpy.zeros(3)
3731 3731 ix = 0
3732 3732 iy = 0
3733 3733
3734 3734 pairX = numpy.zeros((2,2))
3735 3735 pairY = numpy.zeros((2,2))
3736 3736
3737 3737 for i in range(len(listOper)):
3738 3738 pairi = listOper[i]
3739 3739
3740 3740 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
3741 3741
3742 3742 if posDif[0] == 0:
3743 3743 axisY.append(pairi)
3744 3744 distY[iy] = posDif[1]
3745 3745 iy += 1
3746 3746 elif posDif[1] == 0:
3747 3747 axisX.append(pairi)
3748 3748 distX[ix] = posDif[0]
3749 3749 ix += 1
3750 3750
3751 3751 for i in range(2):
3752 3752 if i==0:
3753 3753 dist0 = distX
3754 3754 axis0 = axisX
3755 3755 else:
3756 3756 dist0 = distY
3757 3757 axis0 = axisY
3758 3758
3759 3759 side = numpy.argsort(dist0)[:-1]
3760 3760 axis0 = numpy.array(axis0)[side,:]
3761 3761 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
3762 3762 axis1 = numpy.unique(numpy.reshape(axis0,4))
3763 3763 side = axis1[axis1 != chanC]
3764 3764 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
3765 3765 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
3766 3766 if diff1<0:
3767 3767 chan2 = side[0]
3768 3768 d2 = numpy.abs(diff1)
3769 3769 chan1 = side[1]
3770 3770 d1 = numpy.abs(diff2)
3771 3771 else:
3772 3772 chan2 = side[1]
3773 3773 d2 = numpy.abs(diff2)
3774 3774 chan1 = side[0]
3775 3775 d1 = numpy.abs(diff1)
3776 3776
3777 3777 if i==0:
3778 3778 chanCX = chanC
3779 3779 chan1X = chan1
3780 3780 chan2X = chan2
3781 3781 distances[0:2] = numpy.array([d1,d2])
3782 3782 else:
3783 3783 chanCY = chanC
3784 3784 chan1Y = chan1
3785 3785 chan2Y = chan2
3786 3786 distances[2:4] = numpy.array([d1,d2])
3787 3787 # axisXsides = numpy.reshape(axisX[ix,:],4)
3788 3788 #
3789 3789 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
3790 3790 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
3791 3791 #
3792 3792 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
3793 3793 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
3794 3794 # channel25X = int(pairX[0,ind25X])
3795 3795 # channel20X = int(pairX[1,ind20X])
3796 3796 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
3797 3797 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
3798 3798 # channel25Y = int(pairY[0,ind25Y])
3799 3799 # channel20Y = int(pairY[1,ind20Y])
3800 3800
3801 3801 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
3802 3802 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
3803 3803
3804 3804 return pairslist, distances
3805 3805 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
3806 3806 #
3807 3807 # arrayAOA = numpy.zeros((phases.shape[0],3))
3808 3808 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
3809 3809 #
3810 3810 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3811 3811 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3812 3812 # arrayAOA[:,2] = cosDirError
3813 3813 #
3814 3814 # azimuthAngle = arrayAOA[:,0]
3815 3815 # zenithAngle = arrayAOA[:,1]
3816 3816 #
3817 3817 # #Setting Error
3818 3818 # #Number 3: AOA not fesible
3819 3819 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3820 3820 # error[indInvalid] = 3
3821 3821 # #Number 4: Large difference in AOAs obtained from different antenna baselines
3822 3822 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3823 3823 # error[indInvalid] = 4
3824 3824 # return arrayAOA, error
3825 3825 #
3826 3826 # def __getDirectionCosines(self, arrayPhase, pairsList):
3827 3827 #
3828 3828 # #Initializing some variables
3829 3829 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3830 3830 # ang_aux = ang_aux.reshape(1,ang_aux.size)
3831 3831 #
3832 3832 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
3833 3833 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3834 3834 #
3835 3835 #
3836 3836 # for i in range(2):
3837 3837 # #First Estimation
3838 3838 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
3839 3839 # #Dealias
3840 3840 # indcsi = numpy.where(phi0_aux > numpy.pi)
3841 3841 # phi0_aux[indcsi] -= 2*numpy.pi
3842 3842 # indcsi = numpy.where(phi0_aux < -numpy.pi)
3843 3843 # phi0_aux[indcsi] += 2*numpy.pi
3844 3844 # #Direction Cosine 0
3845 3845 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
3846 3846 #
3847 3847 # #Most-Accurate Second Estimation
3848 3848 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
3849 3849 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3850 3850 # #Direction Cosine 1
3851 3851 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
3852 3852 #
3853 3853 # #Searching the correct Direction Cosine
3854 3854 # cosdir0_aux = cosdir0[:,i]
3855 3855 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3856 3856 # #Minimum Distance
3857 3857 # cosDiff = (cosdir1 - cosdir0_aux)**2
3858 3858 # indcos = cosDiff.argmin(axis = 1)
3859 3859 # #Saving Value obtained
3860 3860 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3861 3861 #
3862 3862 # return cosdir0, cosdir
3863 3863 #
3864 3864 # def __calculateAOA(self, cosdir, azimuth):
3865 3865 # cosdirX = cosdir[:,0]
3866 3866 # cosdirY = cosdir[:,1]
3867 3867 #
3868 3868 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3869 3869 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
3870 3870 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3871 3871 #
3872 3872 # return angles
3873 3873 #
3874 3874 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3875 3875 #
3876 3876 # Ramb = 375 #Ramb = c/(2*PRF)
3877 3877 # Re = 6371 #Earth Radius
3878 3878 # heights = numpy.zeros(Ranges.shape)
3879 3879 #
3880 3880 # R_aux = numpy.array([0,1,2])*Ramb
3881 3881 # R_aux = R_aux.reshape(1,R_aux.size)
3882 3882 #
3883 3883 # Ranges = Ranges.reshape(Ranges.size,1)
3884 3884 #
3885 3885 # Ri = Ranges + R_aux
3886 3886 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3887 3887 #
3888 3888 # #Check if there is a height between 70 and 110 km
3889 3889 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3890 3890 # ind_h = numpy.where(h_bool == 1)[0]
3891 3891 #
3892 3892 # hCorr = hi[ind_h, :]
3893 3893 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3894 3894 #
3895 3895 # hCorr = hi[ind_hCorr]
3896 3896 # heights[ind_h] = hCorr
3897 3897 #
3898 3898 # #Setting Error
3899 3899 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3900 3900 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3901 3901 #
3902 3902 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3903 3903 # error[indInvalid2] = 14
3904 3904 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3905 3905 # error[indInvalid1] = 13
3906 3906 #
3907 3907 # return heights, error
3908 3908
3909 3909
3910 3910 class WeatherRadar(Operation):
3911 3911 '''
3912 3912 Function that implements weather-radar (polarimetric) operations.
3913 3913 Input: pulse-pair or spectral moments (power, Doppler, width, SNR) in dataOut
3914 3914 Output: the variables selected in variableList, attached to dataOut (Zdb_D, PhiD_P, RhoHV_R, velRadial_V, Sigmav_W)
3915 3915 Parameters affected: see run() arguments (radar constants and variableList)
3916 3916 '''
3917 3917 isConfig = False
3918 3918 variableList = None
3919 3919
3920 3920 def __init__(self):
3921 3921 Operation.__init__(self)
3922 3922
3923 3923 def setup(self,dataOut,variableList= None,Pt=0,Gt=0,Gr=0,lambda_=0, aL=0,
3924 3924 tauW= 0,thetaT=0,thetaR=0,Km =0):
3925 3925 self.nCh = dataOut.nChannels
3926 3926 self.nHeis = dataOut.nHeights
3927 3927 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
3928 3928 self.Range = numpy.arange(dataOut.nHeights)*deltaHeight + dataOut.heightList[0]
3929 3929 self.Range = self.Range.reshape(1,self.nHeis)
3930 3930 self.Range = numpy.tile(self.Range,[self.nCh,1])
3931 3931 '''----------- 1. Radar constant ----------'''
3932 3932 self.Pt = Pt
3933 3933 self.Gt = Gt
3934 3934 self.Gr = Gr
3935 3935 self.lambda_ = lambda_
3936 3936 self.aL = aL
3937 3937 self.tauW = tauW
3938 3938 self.thetaT = thetaT
3939 3939 self.thetaR = thetaR
3940 3940 self.Km = Km
3941 3941 Numerator = ((4*numpy.pi)**3 * aL**2 * 16 *numpy.log(2))
3942 3942 Denominator = (Pt * Gt * Gr * lambda_**2 * SPEED_OF_LIGHT * tauW * numpy.pi*thetaT*thetaR)
3943 3943 self.RadarConstant = Numerator/Denominator
3944 3944 self.variableList= variableList
3945 3945
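# Editor's note: a minimal standalone sketch of the radar-constant expression in
# setup() above, using the default parameters of run() below. SPEED_OF_LIGHT is
# assumed to be defined elsewhere in this module; 3e8 m/s is used here only for
# the sketch.
#
#   import numpy
#   SPEED_OF_LIGHT = 3.0e8                      # assumption for this sketch
#   Pt, Gt, Gr, lambda_, aL = 25, 200.0, 50.0, 0.32, 2.5118
#   tauW, thetaT, thetaR = 4.0e-6, 0.165, 0.367
#   num = (4*numpy.pi)**3 * aL**2 * 16*numpy.log(2)
#   den = Pt*Gt*Gr*lambda_**2*SPEED_OF_LIGHT*tauW*numpy.pi*thetaT*thetaR
#   RadarConstant = num/den                     # same expression as in setup()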
3946 3946 def setMoments(self,dataOut,i):
3947 3947
3948 3948 type = dataOut.inputUnit
3949 3949 nCh = dataOut.nChannels
3950 3950 nHeis = dataOut.nHeights
3951 3951 data_param = numpy.zeros((nCh,4,nHeis))
3952 3952 if type == "Voltage":
3953 3953 factor = dataOut.normFactor
3954 3954 data_param[:,0,:] = dataOut.dataPP_POW/(factor)
3955 3955 data_param[:,1,:] = dataOut.dataPP_DOP
3956 3956 data_param[:,2,:] = dataOut.dataPP_WIDTH
3957 3957 data_param[:,3,:] = dataOut.dataPP_SNR
3958 3958 if type == "Spectra":
3959 3959 data_param[:,0,:] = dataOut.data_POW
3960 3960 data_param[:,1,:] = dataOut.data_DOP
3961 3961 data_param[:,2,:] = dataOut.data_WIDTH
3962 3962 data_param[:,3,:] = dataOut.data_SNR
3963 3963
3964 3964 return data_param[:,i,:]
3965 3965
3966 3966 def getCoeficienteCorrelacionROhv_R(self,dataOut):
3967 3967 type = dataOut.inputUnit
3968 3968 nHeis = dataOut.nHeights
3969 3969 data_RhoHV_R = numpy.zeros((nHeis))
3970 3970 if type == "Voltage":
3971 3971 powa = dataOut.dataPP_POWER[0]
3972 3972 powb = dataOut.dataPP_POWER[1]
3973 3973 ccf = dataOut.dataPP_CCF
3974 3974 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3975 3975 data_RhoHV_R = numpy.abs(avgcoherenceComplex)
3976 3976 if type == "Spectra":
3977 3977 data_RhoHV_R = dataOut.getCoherence()
3978 3978
3979 3979 return data_RhoHV_R
3980 3980
3981 3981 def getFasediferencialPhiD_P(self,dataOut,phase= True):
3982 3982 type = dataOut.inputUnit
3983 3983 nHeis = dataOut.nHeights
3984 3984 data_PhiD_P = numpy.zeros((nHeis))
3985 3985 if type == "Voltage":
3986 3986 powa = dataOut.dataPP_POWER[0]
3987 3987 powb = dataOut.dataPP_POWER[1]
3988 3988 ccf = dataOut.dataPP_CCF
3989 3989 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3990 3990 if phase:
3991 3991 data_PhiD_P = numpy.arctan2(avgcoherenceComplex.imag,
3992 3992 avgcoherenceComplex.real) * 180 / numpy.pi
3993 3993 if type == "Spectra":
3994 3994 data_PhiD_P = dataOut.getCoherence(phase = phase)
3995 3995
3996 3996 return data_PhiD_P
3997 3997
3998 3998 def getReflectividad_D(self,dataOut):
3999 3999 '''----------------------------- Radar power (signal S) -----------------------------'''
4000 4000
4001 4001 Pr = self.setMoments(dataOut,0)
4002 4002
4003 4003 '''----------- 2. Radar reflectivity and reflectivity factor ------'''
4004 4004 self.n_radar = numpy.zeros((self.nCh,self.nHeis))
4005 4005 self.Z_radar = numpy.zeros((self.nCh,self.nHeis))
4006 4006 for R in range(self.nHeis):
4007 4007 self.n_radar[:,R] = self.RadarConstant*Pr[:,R]* (self.Range[:,R])**2
4008 4008
4009 4009 self.Z_radar[:,R] = self.n_radar[:,R]* self.lambda_**4/( numpy.pi**5 * self.Km**2)
4010 4010
4011 4011 '''----------- Equivalent reflectivity factor, lambda_ < 10 cm (here lambda_ = 3.2 cm) -------'''
4012 4012 Zeh = self.Z_radar
4013 4013 dBZeh = 10*numpy.log10(Zeh)
4014 4014 Zdb_D = dBZeh[0] - dBZeh[1]
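# Note: Zdb_D is the differential reflectivity ZDR in dB, i.e. the equivalent
# reflectivity factor of channel 0 minus that of channel 1 (assumed here to be
# the H and V polarization channels, respectively).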
4015 4015 return Zdb_D
4016 4016
4017 4017 def getRadialVelocity_V(self,dataOut):
4018 4018 velRadial_V = self.setMoments(dataOut,1)
4019 4019 return velRadial_V
4020 4020
4021 4021 def getAnchoEspectral_W(self,dataOut):
4022 4022 Sigmav_W = self.setMoments(dataOut,2)
4023 4023 return Sigmav_W
4024 4024
4025 4025
4026 4026 def run(self,dataOut,variableList=None,Pt=25,Gt=200.0,Gr=50.0,lambda_=0.32, aL=2.5118,
4027 4027 tauW= 4.0e-6,thetaT=0.165,thetaR=0.367,Km =0.93):
4028 4028
4029 4029 if not self.isConfig:
4030 4030 self.setup(dataOut=dataOut, variableList=variableList, Pt=Pt, Gt=Gt, Gr=Gr, lambda_=lambda_, aL=aL,
4031 4031 tauW=tauW, thetaT=thetaT, thetaR=thetaR, Km=Km)
4032 4032 self.isConfig = True
4033 4033
4034 4034 for i in range(len(self.variableList)):
4035 4035 if self.variableList[i]=='ReflectividadDiferencial':
4036 4036 dataOut.Zdb_D =self.getReflectividad_D(dataOut=dataOut)
4037 4037 if self.variableList[i]=='FaseDiferencial':
4038 4038 dataOut.PhiD_P =self.getFasediferencialPhiD_P(dataOut=dataOut, phase=True)
4039 4039 if self.variableList[i] == "CoeficienteCorrelacion":
4040 4040 dataOut.RhoHV_R = self.getCoeficienteCorrelacionROhv_R(dataOut)
4041 4041 if self.variableList[i] =="VelocidadRadial":
4042 4042 dataOut.velRadial_V = self.getRadialVelocity_V(dataOut)
4043 4043 if self.variableList[i] =="AnchoEspectral":
4044 4044 dataOut.Sigmav_W = self.getAnchoEspectral_W(dataOut)
4045 4045 return dataOut
4046 4046
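# WeatherRadar.run() expects variableList to arrive as a list containing any of
# (from the checks above): 'ReflectividadDiferencial', 'FaseDiferencial',
# 'CoeficienteCorrelacion', 'VelocidadRadial', 'AnchoEspectral'.
#
# A hypothetical configuration sketch (assumed schainpy Project/addOperation/
# addParameter pattern and an assumed 'list' format; adapt to the actual
# experiment script):
#
#   op = proc_unit.addOperation(name='WeatherRadar', optype='other')
#   op.addParameter(name='variableList',
#                   value='ReflectividadDiferencial,VelocidadRadial',
#                   format='list')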
4047 4047 class PedestalInformation(Operation):
4048 4048
4049 4049 def __init__(self):
4050 4050 Operation.__init__(self)
4051 4051 self.filename = False
4052 4052
4053 4053 def find_file(self, timestamp):
4054 4054
4055 4055 dt = datetime.datetime.utcfromtimestamp(timestamp)
4056 4056 path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))
4057 4057
4058 4058 if not os.path.exists(path):
4059 4059 return []
4060 4060 fileList = glob.glob(os.path.join(path, '*.h5'))
4061 4061 fileList.sort()
4062 4062 print(fileList)
4063 4063 return fileList
4064 4064
4065 4065 def find_next_file(self):
4066 4066
4067 4067 while True:
4068 4068 if self.utctime < self.utcfile:
4069 4069 self.flagNoData = True
4070 4070 break
4071 4071 self.flagNoData = False
4072 4072 file_size = len(self.fp['Data']['utc'])
4073 4073 if self.utctime < self.utcfile+file_size*self.interval:
4074 4074 break
4075 4075 dt = datetime.datetime.utcfromtimestamp(self.utcfile)
4076 4076 if dt.second > 0:
4077 4077 self.utcfile -= dt.second
4078 4078 self.utcfile += self.samples*self.interval
4079 4079 dt = datetime.datetime.utcfromtimestamp(self.utctime)
4080 4080 path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))
4081 4081 self.filename = os.path.join(path, 'pos@{}.000.h5'.format(int(self.utcfile)))
4082 4082 print('ACQ time: ', self.utctime, 'POS time: ', self.utcfile)
4083 4083 print('Next file: ', self.filename)
4084 4084 if not os.path.exists(self.filename):
4085 4085 log.warning('Waiting for position files...', self.name)
4086 4086 raise IOError('No new position files found in {}'.format(path))
4090 4090 self.fp.close()
4091 4091 self.fp = h5py.File(self.filename, 'r')
4092 4092 log.log('Opening file: {}'.format(self.filename), self.name)
4093 4093
4094 4094 def get_values(self):
4095 4095
4096 4096 if self.flagNoData:
4097 4097 return numpy.nan, numpy.nan
4098 4098 else:
4099 4099 index = int((self.utctime-self.utcfile)/self.interval)
4100 4100 return self.fp['Data']['azi_pos'][index], self.fp['Data']['ele_pos'][index]
4101 4101
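# Note on the indexing above: with the run() defaults below (samples=1500,
# interval=0.04 s) each position file spans 1500 * 0.04 = 60 s, and
# get_values() maps the acquisition time into the open file as
#   index = int((utctime - utcfile) / interval)
# e.g. an acquisition 12.3 s after the file start reads sample 307.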
4102 4102 def setup(self, dataOut, path, conf, samples, interval, wr_exp):
4103 4103
4104 4104 self.path = path
4105 4105 self.conf = conf
4106 4106 self.samples = samples
4107 4107 self.interval = interval
4108 4108 filelist = self.find_file(dataOut.utctime)
4109 4109
4110 4110 if not filelist:
4111 4111 log.error('No position files found in {}'.format(path), self.name)
4112 4112 raise IOError('No position files found in {}'.format(path))
4113 4113 else:
4114 4114 self.filename = filelist[0]
4115 4115 self.utcfile = int(self.filename.split('/')[-1][4:14])
4116 4116 log.log('Opening file: {}'.format(self.filename), self.name)
4117 4117 self.fp = h5py.File(self.filename, 'r')
4118 4118
4119 4119 def run(self, dataOut, path, conf=None, samples=1500, interval=0.04, wr_exp=None, offset=0):
4120 4120
4121 4121 if not self.isConfig:
4122 4122 self.setup(dataOut, path, conf, samples, interval, wr_exp)
4123 4123 self.isConfig = True
4124 4124
4125 4125 self.utctime = dataOut.utctime + offset
4126 4126
4127 4127 self.find_next_file()
4128 4128
4129 4129 az, el = self.get_values()
4130 4130 dataOut.flagNoData = False
4131 4131
4132 4132 if numpy.isnan(az) or numpy.isnan(el) :
4133 4133 dataOut.flagNoData = True
4134 4134 return dataOut
4135 4135
4136 4136 dataOut.azimuth = az
4137 4137 dataOut.elevation = el
4138 4138 # print('AZ: ', az, ' EL: ', el)
4139 4139 return dataOut
4140 4140
4141 4141 class Block360(Operation):
4142 4142 '''Accumulate n consecutive profiles (power plus azimuth/elevation pointing) into one 360-degree scan block.
4143 4143 '''
4144 4144 isConfig = False
4145 4145 __profIndex = 0
4146 4146 __initime = None
4147 4147 __lastdatatime = None
4148 4148 __buffer = None
4149 4149 __dataReady = False
4150 4150 n = None
4151 4151 __nch = 0
4152 4152 __nHeis = 0
4153 4153 index = 0
4154 4154 mode = 0
4155 4155
4156 4156 def __init__(self,**kwargs):
4157 4157 Operation.__init__(self,**kwargs)
4158 4158
4159 4159 def setup(self, dataOut, n = None, mode = None):
4160 4160 '''
4161 4161 n = number of input profiles (PRFs) accumulated per block
4162 4162 '''
4163 4163 self.__initime = None
4164 4164 self.__lastdatatime = 0
4165 4165 self.__dataReady = False
4166 4166 self.__buffer = 0
4167 4167 self.__buffer_1D = 0
4168 4168 self.__profIndex = 0
4169 4169 self.index = 0
4170 4170 self.__nch = dataOut.nChannels
4171 4171 self.__nHeis = dataOut.nHeights
4172 4172 ##print("ELVALOR DE n es:", n)
4173 4173 if n is None:
4174 4174 raise ValueError("n should be specified.")
4175 4175
4176 4176 if mode is None:
4177 4177 raise ValueError("mode should be specified.")
4178 4178
4179 4179 if n < 1:
4180 4180 raise ValueError("n should be at least 1")
4183 4183
4184 4184 self.n = n
4185 4185 self.mode = mode
4186 4186 #print("self.mode",self.mode)
4187 4187 #print("nHeights")
4188 4188 self.__buffer = numpy.zeros(( dataOut.nChannels,n, dataOut.nHeights))
4189 4189 self.__buffer2 = numpy.zeros(n)
4190 4190 self.__buffer3 = numpy.zeros(n)
4191 4191
4192 4192
4193 4193
4194 4194
4195 4195 def putData(self,data,mode):
4196 4196 '''
4197 4197 Add a profile to the __buffer and increase __profIndex by one
4198 4198 '''
4199 4199 #print("line 4049",data.dataPP_POW.shape,data.dataPP_POW[:10])
4200 4200 #print("line 4049",data.azimuth.shape,data.azimuth)
4201 4201 if self.mode==0:
4202 4202 self.__buffer[:,self.__profIndex,:]= data.dataPP_POWER # pulse-pair power (0th moment)
4203 4203 if self.mode==1:
4204 4204 self.__buffer[:,self.__profIndex,:]= data.data_pow
4205 4205 #print("me casi",self.index,data.azimuth[self.index])
4206 4206 #print(self.__profIndex, self.index , data.azimuth[self.index] )
4207 4207 #print("magic",data.profileIndex)
4208 4208 #print(data.azimuth[self.index])
4209 4209 #print("index",self.index)
4210 4210
4211 4211 #####self.__buffer2[self.__profIndex] = data.azimuth[self.index]
4212 4212 self.__buffer2[self.__profIndex] = data.azimuth
4213 4213 self.__buffer3[self.__profIndex] = data.elevation
4214 4214 #print("q pasa")
4215 4215 #####self.index+=1
4216 4216 #print("index",self.index,data.azimuth[:10])
4217 4217 self.__profIndex += 1
4218 4218 return
4219 4219
4220 4220 def pushData(self,data):
4221 4221 '''
4222 4222 Return the accumulated power block, the number of profiles used, and the azimuth/elevation arrays
4223 4223 Affected : self.__profIndex (reset to 0)
4224 4224 '''
4225 4225 #print("pushData")
4226 4226
4227 4227 data_360 = self.__buffer
4228 4228 data_p = self.__buffer2
4229 4229 data_e = self.__buffer3
4230 4230 n = self.__profIndex
4231 4231
4232 4232 self.__buffer = numpy.zeros((self.__nch, self.n,self.__nHeis))
4233 4233 self.__buffer2 = numpy.zeros(self.n)
4234 4234 self.__buffer3 = numpy.zeros(self.n)
4235 4235 self.__profIndex = 0
4236 4236 #print("pushData")
4237 4237 return data_360,n,data_p,data_e
4238 4238
4239 4239
4240 4240 def byProfiles(self,dataOut):
4241 4241
4242 4242 self.__dataReady = False
4243 4243 data_360 = None
4244 4244 data_p = None
4245 4245 data_e = None
4246 4246 #print("dataOu",dataOut.dataPP_POW)
4247 4247 self.putData(data=dataOut,mode = self.mode)
4248 4248 ##### print("profIndex",self.__profIndex)
4249 4249 if self.__profIndex == self.n:
4250 4250 data_360,n,data_p,data_e = self.pushData(data=dataOut)
4251 4251 self.__dataReady = True
4252 4252
4253 4253 return data_360,data_p,data_e
4254 4254
4255 4255
4256 4256 def blockOp(self, dataOut, datatime= None):
4257 4257 if self.__initime == None:
4258 4258 self.__initime = datatime
4259 4259 data_360,data_p,data_e = self.byProfiles(dataOut)
4260 4260 self.__lastdatatime = datatime
4261 4261
4262 4262 if data_360 is None:
4263 4263 return None, None,None,None
4264 4264
4265 4265
4266 4266 avgdatatime = self.__initime
4267 4267 if self.n==1:
4268 4268 avgdatatime = datatime
4269 4269 deltatime = datatime - self.__lastdatatime
4270 4270 self.__initime = datatime
4271 4271 #print(data_360.shape,avgdatatime,data_p.shape)
4272 4272 return data_360,avgdatatime,data_p,data_e
4273 4273
4274 4274 def run(self, dataOut,n = None,mode=None,**kwargs):
4275 4275 #print("BLOCK 360 HERE WE GO MOMENTOS")
4276 4276 print("Block 360")
4277 4277 #exit(1)
4278 4278 if not self.isConfig:
4279 4279 self.setup(dataOut = dataOut, n = n ,mode= mode ,**kwargs)
4280 4280 ####self.index = 0
4281 4281 #print("comova",self.isConfig)
4282 4282 self.isConfig = True
4283 4283 ####if self.index==dataOut.azimuth.shape[0]:
4284 4284 #### self.index=0
4285 4285 data_360, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4286 4286 dataOut.flagNoData = True
4287 4287
4288 4288 if self.__dataReady:
4289 4289 dataOut.data_360 = data_360 # S
4290 4290 #print("DATA 360")
4291 4291 #print(dataOut.data_360)
4292 4292 #print("---------------------------------------------------------------------------------")
4293 4293 print("---------------------------DATAREADY---------------------------------------------")
4294 4294 #print("---------------------------------------------------------------------------------")
4295 4295 #print("data_360",dataOut.data_360.shape)
4296 4296 dataOut.data_azi = data_p
4297 4297 dataOut.data_ele = data_e
4298 4298 ###print("azi: ",dataOut.data_azi)
4299 4299 #print("ele: ",dataOut.data_ele)
4300 4300 #print("jroproc_parameters",data_p[0],data_p[-1])#,data_360.shape,avgdatatime)
4301 4301 dataOut.utctime = avgdatatime
4302 4302 dataOut.flagNoData = False
4303 4303 return dataOut
4304 4304
4305 4305 class Block360_vRF(Operation):
4306 4306 '''Same as Block360, but only delivers blocks acquired on the descending part of the elevation scan.
4307 4307 '''
4308 4308 isConfig = False
4309 4309 __profIndex = 0
4310 4310 __initime = None
4311 4311 __lastdatatime = None
4312 4312 __buffer = None
4313 4313 __dataReady = False
4314 4314 n = None
4315 4315 __nch = 0
4316 4316 __nHeis = 0
4317 4317 index = 0
4318 4318 mode = 0
4319 4319
4320 4320 def __init__(self,**kwargs):
4321 4321 Operation.__init__(self,**kwargs)
4322 4322
4323 4323 def setup(self, dataOut, n = None, mode = None):
4324 4324 '''
4325 4325 n = number of input profiles (PRFs) accumulated per block
4326 4326 '''
4327 4327 self.__initime = None
4328 4328 self.__lastdatatime = 0
4329 4329 self.__dataReady = False
4330 4330 self.__buffer = 0
4331 4331 self.__buffer_1D = 0
4332 4332 self.__profIndex = 0
4333 4333 self.index = 0
4334 4334 self.__nch = dataOut.nChannels
4335 4335 self.__nHeis = dataOut.nHeights
4336 4336 ##print("ELVALOR DE n es:", n)
4337 4337 if n is None:
4338 4338 raise ValueError("n should be specified.")
4339 4339
4340 4340 if mode is None:
4341 4341 raise ValueError("mode should be specified.")
4342 4342
4343 4343 if n < 1:
4344 4344 raise ValueError("n should be at least 1")
4347 4347
4348 4348 self.n = n
4349 4349 self.mode = mode
4350 4350 #print("self.mode",self.mode)
4351 4351 #print("nHeights")
4352 4352 self.__buffer = numpy.zeros(( dataOut.nChannels,n, dataOut.nHeights))
4353 4353 self.__buffer2 = numpy.zeros(n)
4354 4354 self.__buffer3 = numpy.zeros(n)
4355 4355
4356 4356
4357 4357
4358 4358
4359 4359 def putData(self,data,mode):
4360 4360 '''
4361 4361 Add a profile to the __buffer and increase __profIndex by one
4362 4362 '''
4363 4363 #print("line 4049",data.dataPP_POW.shape,data.dataPP_POW[:10])
4364 4364 #print("line 4049",data.azimuth.shape,data.azimuth)
4365 4365 if self.mode==0:
4366 4366 self.__buffer[:,self.__profIndex,:]= data.dataPP_POWER # pulse-pair power (0th moment)
4367 4367 if self.mode==1:
4368 4368 self.__buffer[:,self.__profIndex,:]= data.data_pow
4369 4369 #print("me casi",self.index,data.azimuth[self.index])
4370 4370 #print(self.__profIndex, self.index , data.azimuth[self.index] )
4371 4371 #print("magic",data.profileIndex)
4372 4372 #print(data.azimuth[self.index])
4373 4373 #print("index",self.index)
4374 4374
4375 4375 #####self.__buffer2[self.__profIndex] = data.azimuth[self.index]
4376 4376 self.__buffer2[self.__profIndex] = data.azimuth
4377 4377 self.__buffer3[self.__profIndex] = data.elevation
4378 4378 #print("q pasa")
4379 4379 #####self.index+=1
4380 4380 #print("index",self.index,data.azimuth[:10])
4381 4381 self.__profIndex += 1
4382 4382 return
4383 4383
4384 4384 def pushData(self,data):
4385 4385 '''
4386 4386 Return the accumulated power block, the number of profiles used, and the azimuth/elevation arrays
4387 4387 Affected : self.__profIndex (reset to 0)
4388 4388 '''
4389 4389 #print("pushData")
4390 4390
4391 4391 data_360 = self.__buffer
4392 4392 data_p = self.__buffer2
4393 4393 data_e = self.__buffer3
4394 4394 n = self.__profIndex
4395 4395
4396 4396 self.__buffer = numpy.zeros((self.__nch, self.n,self.__nHeis))
4397 4397 self.__buffer2 = numpy.zeros(self.n)
4398 4398 self.__buffer3 = numpy.zeros(self.n)
4399 4399 self.__profIndex = 0
4400 4400 #print("pushData")
4401 4401 return data_360,n,data_p,data_e
4402 4402
4403 4403
4404 4404 def byProfiles(self,dataOut):
4405 4405
4406 4406 self.__dataReady = False
4407 4407 data_360 = None
4408 4408 data_p = None
4409 4409 data_e = None
4410 4410 #print("dataOu",dataOut.dataPP_POW)
4411 4411 self.putData(data=dataOut,mode = self.mode)
4412 4412 ##### print("profIndex",self.__profIndex)
4413 4413 if self.__profIndex == self.n:
4414 4414 data_360,n,data_p,data_e = self.pushData(data=dataOut)
4415 4415 self.__dataReady = True
4416 4416
4417 4417 return data_360,data_p,data_e
4418 4418
4419 4419
4420 4420 def blockOp(self, dataOut, datatime= None):
4421 4421 if self.__initime == None:
4422 4422 self.__initime = datatime
4423 4423 data_360,data_p,data_e = self.byProfiles(dataOut)
4424 4424 self.__lastdatatime = datatime
4425 4425
4426 4426 if data_360 is None:
4427 4427 return None, None,None,None
4428 4428
4429 4429
4430 4430 avgdatatime = self.__initime
4431 4431 if self.n==1:
4432 4432 avgdatatime = datatime
4433 4433 deltatime = datatime - self.__lastdatatime
4434 4434 self.__initime = datatime
4435 4435 #print(data_360.shape,avgdatatime,data_p.shape)
4436 4436 return data_360,avgdatatime,data_p,data_e
4437 4437
4438 4438 def checkcase(self,data_ele):
4439 4439 start = data_ele[0]
4440 4440 end = data_ele[-1]
4441 4441 diff_angle = (end-start)
4442 4442 len_ang=len(data_ele)
4443 4443 print("start",start)
4444 4444 print("end",end)
4445 4445 print("diff_angle",diff_angle)
4446 4446
4447 4447 print("len_ang",len_ang)
4448 4448
4449 4449 aux = (data_ele<0).any(axis=0)
4450 4450
4451 4451 #exit(1)
4452 4452 if diff_angle<0 and aux!=1: # descending
4453 4453 return 1
4454 4454 elif diff_angle<0 and aux==1: # descending with negative angles
4455 4455 return 0
4456 4456 elif diff_angle == 0: # This case happens when the angle reaches the max_angle if n = 2
4457 4457 self.flagEraseFirstData = 1
4458 4458 print("TODO: handle this case")
4459 4459 exit(1)
4460 4460 elif diff_angle>0: # ascending
4461 4461 return 0
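# Illustration of checkcase() above (elevation values are hypothetical):
#   data_ele = [30., 25., 20.]  -> diff_angle < 0, no negative angles -> returns 1 (descending: deliver block)
#   data_ele = [10., 15., 20.]  -> diff_angle > 0                     -> returns 0 (ascending: discard block)
#   data_ele = [ 5.,  0., -5.]  -> diff_angle < 0, negative angles    -> returns 0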
4462 4462
4463 4463 def run(self, dataOut,n = None,mode=None,**kwargs):
4464 4464 #print("BLOCK 360 HERE WE GO MOMENTOS")
4465 4465 print("Block 360")
4466 4466
4467 4467 #exit(1)
4468 4468 if not self.isConfig:
4469 4469 if n == 1:
4470 4470 print("*******************Min Value is 2. Setting n = 2*******************")
4471 4471 n = 2
4472 4472 #exit(1)
4473 4473 print(n)
4474 4474 self.setup(dataOut = dataOut, n = n ,mode= mode ,**kwargs)
4475 4475 ####self.index = 0
4476 4476 #print("comova",self.isConfig)
4477 4477 self.isConfig = True
4478 4478 ####if self.index==dataOut.azimuth.shape[0]:
4479 4479 #### self.index=0
4480 4480 data_360, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4481 4481 dataOut.flagNoData = True
4482 4482
4483 4483 if self.__dataReady:
4484 4484 dataOut.data_360 = data_360 # S
4485 4485 #print("DATA 360")
4486 4486 #print(dataOut.data_360)
4487 4487 #print("---------------------------------------------------------------------------------")
4488 4488 print("---------------------------DATAREADY---------------------------------------------")
4489 4489 #print("---------------------------------------------------------------------------------")
4490 4490 #print("data_360",dataOut.data_360.shape)
4491 4491 dataOut.data_azi = data_p
4492 4492 dataOut.data_ele = data_e
4493 4493 ###print("azi: ",dataOut.data_azi)
4494 4494 #print("ele: ",dataOut.data_ele)
4495 4495 #print("jroproc_parameters",data_p[0],data_p[-1])#,data_360.shape,avgdatatime)
4496 4496 dataOut.utctime = avgdatatime
4497 4497
4498 4498 dataOut.case_flag = self.checkcase(dataOut.data_ele)
4499 4499 if dataOut.case_flag: # deliver data only on the descending part of the scan
4500 4500 print("CASE: DESCENDING")
4501 4501 dataOut.flagNoData = False
4502 4502 else:
4503 4503 print("CASE: ASCENDING")
4504 4504 dataOut.flagNoData = True
4505 4505
4506 4506 #dataOut.flagNoData = False
4507 4507 return dataOut
4508 4508
4509 4509 class Block360_vRF2(Operation):
4510 4510 '''Accumulates power and pulse-pair Doppler with variable block length, closing each block when the elevation starts to increase again.
4511 4511 '''
4512 4512 isConfig = False
4513 4513 __profIndex = 0
4514 4514 __initime = None
4515 4515 __lastdatatime = None
4516 4516 __buffer = None
4517 4517 __dataReady = False
4518 4518 n = None
4519 4519 __nch = 0
4520 4520 __nHeis = 0
4521 4521 index = 0
4522 4522 mode = None
4523 4523
4524 4524 def __init__(self,**kwargs):
4525 4525 Operation.__init__(self,**kwargs)
4526 4526
4527 4527 def setup(self, dataOut, n = None, mode = None):
4528 4528 '''
4529 4529 n = number of input profiles (PRFs); mode selects the power source (0: pulse-pair, 1: spectra)
4530 4530 '''
4531 4531 self.__initime = None
4532 4532 self.__lastdatatime = 0
4533 4533 self.__dataReady = False
4534 4534 self.__buffer = 0
4535 4535 self.__buffer_1D = 0
4536 4536 #self.__profIndex = 0
4537 4537 self.index = 0
4538 4538 self.__nch = dataOut.nChannels
4539 4539 self.__nHeis = dataOut.nHeights
4540 4540
4541 4541 self.mode = mode
4542 4542 #print("self.mode",self.mode)
4543 4543 #print("nHeights")
4544 4544 self.__buffer = []
4545 4545 self.__buffer2 = []
4546 4546 self.__buffer3 = []
4547 4547 self.__buffer4 = []
4548 4548
4549 4549 def putData(self,data,mode):
4550 4550 '''
4551 4551 Add a profile to the __buffer and increase __profIndex by one
4552 4552 '''
4553 4553
4554 4554 if self.mode==0:
4555 4555 self.__buffer.append(data.dataPP_POWER) # pulse-pair power (0th moment)
4556 4556 if self.mode==1:
4557 4557 self.__buffer.append(data.data_pow)
4558 4558
4559 4559 self.__buffer4.append(data.dataPP_DOP)
4560 4560
4561 4561 self.__buffer2.append(data.azimuth)
4562 4562 self.__buffer3.append(data.elevation)
4563 4563 self.__profIndex += 1
4564 4564
4565 4565 return numpy.array(self.__buffer3)  # elevation samples collected so far
4566 4566
4567 4567 def pushData(self,data):
4568 4568 '''
4569 4569 Return the accumulated power and velocity blocks, the number of profiles used, and the azimuth/elevation arrays
4570 4570 Affected : self.__profIndex (reset to 0)
4571 4571 '''
4572 4572
4573 4573 data_360_Power = numpy.array(self.__buffer).transpose(1,0,2)
4574 4574 data_360_Velocity = numpy.array(self.__buffer4).transpose(1,0,2)
4575 4575 data_p = numpy.array(self.__buffer2)
4576 4576 data_e = numpy.array(self.__buffer3)
4577 4577 n = self.__profIndex
4578 4578
4579 4579 self.__buffer = []
4580 4580 self.__buffer4 = []
4581 4581 self.__buffer2 = []
4582 4582 self.__buffer3 = []
4583 4583 self.__profIndex = 0
4584 4584 return data_360_Power,data_360_Velocity,n,data_p,data_e
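# Shape sketch for pushData(): each item appended in putData() is assumed to be
# a (nChannels, nHeights) array, so
#   numpy.array(self.__buffer)                  -> (nProfiles, nChannels, nHeights)
#   numpy.array(self.__buffer).transpose(1,0,2) -> (nChannels, nProfiles, nHeights)
# which matches the (channel, profile, height) layout used by the fixed-size
# buffers in the Block360 classes above.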
4585 4585
4586 4586
4587 4587 def byProfiles(self,dataOut):
4588 4588
4589 4589 self.__dataReady = False
4590 4590 data_360_Power = []
4591 4591 data_360_Velocity = []
4592 4592 data_p = None
4593 4593 data_e = None
4594 4594
4595 4595 elevations = self.putData(data=dataOut,mode = self.mode)
4596 4596
4597 4597 if self.__profIndex > 1:
4598 4598 case_flag = self.checkcase(elevations)
4599 4599
4600 4600 if case_flag == 0: # ascending
4601 4601
4602 4602 if len(self.__buffer) == 2: # still ascending
4603 4603 # Drop the previous sample to free the buffer and compare the current sample with the next one
4604 4604 self.__buffer.pop(0) # Erase first data
4605 4605 self.__buffer2.pop(0)
4606 4606 self.__buffer3.pop(0)
4607 4607 self.__buffer4.pop(0)
4608 4608 self.__profIndex -= 1
4609 4609 else: # it was descending and has started ascending again
4610 4610 # Drop the last sample
4611 4611 self.__buffer.pop() # Erase last data
4612 4612 self.__buffer2.pop()
4613 4613 self.__buffer3.pop()
4614 4614 self.__buffer4.pop()
4615 4615 data_360_Power,data_360_Velocity,n,data_p,data_e = self.pushData(data=dataOut)
4616 4616
4617 4617 self.__dataReady = True
4618 4618
4619 4619 return data_360_Power,data_360_Velocity,data_p,data_e
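# Sketch of the turnaround logic above (elevations are hypothetical):
#   while the elevation is increasing (e.g. 10, 12, 14, ...) only the two most
#     recent samples are kept (pop(0)), so no block is emitted;
#   once the elevation starts decreasing (e.g. 45, 44, ..., 20) the samples
#     accumulate; when an increase is detected again (e.g. 22) that last
#     ascending sample is removed and the accumulated descending sweep is
#     pushed as one variable-length block.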
4620 4620
4621 4621
4622 4622 def blockOp(self, dataOut, datatime= None):
4623 4623 if self.__initime == None:
4624 4624 self.__initime = datatime
4625 4625 data_360_Power,data_360_Velocity,data_p,data_e = self.byProfiles(dataOut)
4626 4626 self.__lastdatatime = datatime
4627 4627
4628 4628 avgdatatime = self.__initime
4629 4629 if self.n==1:
4630 4630 avgdatatime = datatime
4631 4631 deltatime = datatime - self.__lastdatatime
4632 4632 self.__initime = datatime
4633 4633 return data_360_Power,data_360_Velocity,avgdatatime,data_p,data_e
4634 4634
4635 4635 def checkcase(self,data_ele):
4636 4636 #print(data_ele)
4637 4637 start = data_ele[-2]
4638 4638 end = data_ele[-1]
4639 4639 diff_angle = (end-start)
4640 4640 len_ang=len(data_ele)
4641 4641
4642 4642 if diff_angle > 0: # ascending
4643 4643 return 0
4644 4644
4645 4645 def run(self, dataOut,mode='Power',**kwargs):
4646 4646 #print("BLOCK 360 HERE WE GO MOMENTOS")
4647 4647 #print("Block 360")
4648 4648 dataOut.mode = mode
4649 4649
4650 4650 if not self.isConfig:
4651 4651 self.setup(dataOut = dataOut ,mode= mode ,**kwargs)
4652 4652 self.isConfig = True
4653 4653
4654 4654
4655 4655 data_360_Power, data_360_Velocity, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4656 4656
4657 4657
4658 4658 dataOut.flagNoData = True
4659 4659
4660 4660
4661 4661 if self.__dataReady:
4662 4662 dataOut.data_360_Power = data_360_Power # S
4663 4663 dataOut.data_360_Velocity = data_360_Velocity
4664 4664 dataOut.data_azi = data_p
4665 4665 dataOut.data_ele = data_e
4666 4666 dataOut.utctime = avgdatatime
4667 4667 dataOut.flagNoData = False
4668 4668
4669 4669 return dataOut
4670 4670
4671 4671 class Block360_vRF3(Operation):
4672 4672 '''Same as Block360_vRF2, but accumulates a single dataOut attribute selected by name (attr_data).
4673 4673 '''
4674 4674 isConfig = False
4675 4675 __profIndex = 0
4676 4676 __initime = None
4677 4677 __lastdatatime = None
4678 4678 __buffer = None
4679 4679 __dataReady = False
4680 4680 n = None
4681 4681 __nch = 0
4682 4682 __nHeis = 0
4683 4683 index = 0
4684 4684 mode = None
4685 4685
4686 4686 def __init__(self,**kwargs):
4687 4687 Operation.__init__(self,**kwargs)
4688 4688
4689 4689 def setup(self, dataOut, attr):
4690 4690 '''
4691 4691 attr: name of the dataOut attribute to accumulate into the scan block
4692 4692 '''
4693 4693 self.__initime = None
4694 4694 self.__lastdatatime = 0
4695 4695 self.__dataReady = False
4696 4696 self.__buffer = 0
4697 4697 self.__buffer_1D = 0
4698 4698 self.index = 0
4699 4699 self.__nch = dataOut.nChannels
4700 4700 self.__nHeis = dataOut.nHeights
4701 4701
4702 4702 self.attr = attr
4703 4703 #print("self.mode",self.mode)
4704 4704 #print("nHeights")
4705 4705 self.__buffer = []
4706 4706 self.__buffer2 = []
4707 4707 self.__buffer3 = []
4708 4708
4709 4709 def putData(self, data, attr):
4710 4710 '''
4711 4711 Add a profile to the __buffer and increase __profIndex by one
4712 4712 '''
4713 4713
4714 4714 self.__buffer.append(getattr(data, attr))
4715 4715 self.__buffer2.append(data.azimuth)
4716 4716 self.__buffer3.append(data.elevation)
4717 4717 self.__profIndex += 1
4718 4718
4719 4719 return numpy.array(self.__buffer3)
4720 4720
4721 4721 def pushData(self, data):
4722 4722 '''
4723 4723 Return the accumulated data block, the number of profiles used, and the azimuth/elevation arrays
4724 4724 Affected : self.__profIndex (reset to 0)
4725 4725 '''
4726 4726
4727 4727 data_360 = numpy.array(self.__buffer).transpose(1, 0, 2)
4728 4728 data_p = numpy.array(self.__buffer2)
4729 4729 data_e = numpy.array(self.__buffer3)
4730 4730 n = self.__profIndex
4731 4731
4732 4732 self.__buffer = []
4733 4733 self.__buffer2 = []
4734 4734 self.__buffer3 = []
4735 4735 self.__profIndex = 0
4736 4736 return data_360, n, data_p, data_e
4737 4737
4738 4738
4739 4739 def byProfiles(self,dataOut):
4740 4740
4741 4741 self.__dataReady = False
4742 4742 data_360 = []
4743 4743 data_p = None
4744 4744 data_e = None
4745 4745
4746 4746 elevations = self.putData(data=dataOut, attr = self.attr)
4747 4747
4748 4748 if self.__profIndex > 1:
4749 4749 case_flag = self.checkcase(elevations)
4750 4750
4751 4751 if case_flag == 0: # ascending
4752 4752
4753 4753 if len(self.__buffer) == 2: # still ascending
4754 4754 # Drop the previous sample to free the buffer and compare the current sample with the next one
4755 4755 self.__buffer.pop(0) # Erase first data
4756 4756 self.__buffer2.pop(0)
4757 4757 self.__buffer3.pop(0)
4758 4758 self.__profIndex -= 1
4759 4759 else: # it was descending and has started ascending again
4760 4760 # Drop the last sample
4761 4761 self.__buffer.pop() # Erase last data
4762 4762 self.__buffer2.pop()
4763 4763 self.__buffer3.pop()
4764 4764 data_360, n, data_p, data_e = self.pushData(data=dataOut)
4765 4765
4766 4766 self.__dataReady = True
4767 4767
4768 4768 return data_360, data_p, data_e
4769 4769
4770 4770
4771 4771 def blockOp(self, dataOut, datatime= None):
4772 4772 if self.__initime == None:
4773 4773 self.__initime = datatime
4774 4774 data_360, data_p, data_e = self.byProfiles(dataOut)
4775 4775 self.__lastdatatime = datatime
4776 4776
4777 4777 avgdatatime = self.__initime
4778 4778 if self.n==1:
4779 4779 avgdatatime = datatime
4780 4780 deltatime = datatime - self.__lastdatatime
4781 4781 self.__initime = datatime
4782 4782 return data_360, avgdatatime, data_p, data_e
4783 4783
4784 4784 def checkcase(self, data_ele):
4785 4785 #print(data_ele)
4786 4786 start = data_ele[-2]
4787 4787 end = data_ele[-1]
4788 4788 diff_angle = (end-start)
4789 4789 len_ang=len(data_ele)
4790 4790
4791 4791 if diff_angle > 0: # ascending
4792 4792 return 0
4793 4793
4794 4794 def run(self, dataOut, attr_data='dataPP_POWER',**kwargs):
4795 4795 #print("BLOCK 360 HERE WE GO MOMENTOS")
4796 4796 #print("Block 360")
4797 4797 dataOut.attr_data = attr_data
4798 4798
4799 4799 if not self.isConfig:
4800 4800 self.setup(dataOut = dataOut, attr = attr_data ,**kwargs)
4801 4801 self.isConfig = True
4802 4802
4803 4803 data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.utctime)
4804 4804
4805 4805 dataOut.flagNoData = True
4806 4806
4807 4807 if self.__dataReady:
4808 4808 setattr(dataOut, attr_data, data_360 )
4809 4809 dataOut.data_azi = data_p
4810 4810 dataOut.data_ele = data_e
4811 4811 dataOut.utctime = avgdatatime
4812 4812 dataOut.flagNoData = False
4813 4813
4814 4814 return dataOut
4815 4815
4816 4816 class Block360_vRF4(Operation):
4817 4817 '''Same as Block360_vRF3, but supports both scan modes: azimuth/PPI (flagMode=1) and elevation/RHI (flagMode=0).
4818 4818 '''
4819 4819 isConfig = False
4820 4820 __profIndex = 0
4821 4821 __initime = None
4822 4822 __lastdatatime = None
4823 4823 __buffer = None
4824 4824 __dataReady = False
4825 4825 n = None
4826 4826 __nch = 0
4827 4827 __nHeis = 0
4828 4828 index = 0
4829 4829 mode = None
4830 4830
4831 4831 def __init__(self,**kwargs):
4832 4832 Operation.__init__(self,**kwargs)
4833 4833
4834 4834 def setup(self, dataOut, attr):
4835 4835 '''
4836 4836 attr: name of the dataOut attribute to accumulate into the scan block
4837 4837 '''
4838 4838 self.__initime = None
4839 4839 self.__lastdatatime = 0
4840 4840 self.__dataReady = False
4841 4841 self.__buffer = 0
4842 4842 self.__buffer_1D = 0
4843 4843 self.index = 0
4844 4844 self.__nch = dataOut.nChannels
4845 4845 self.__nHeis = dataOut.nHeights
4846 4846
4847 4847 self.attr = attr
4848 4848
4849 4849 self.__buffer = []
4850 4850 self.__buffer2 = []
4851 4851 self.__buffer3 = []
4852 4852
4853 4853 def putData(self, data, attr, flagMode):
4854 4854 '''
4855 4855 Add a profile to the __buffer and increase __profIndex by one
4856 4856 '''
4857 4857
4858 4858 self.__buffer.append(getattr(data, attr))
4859 4859 self.__buffer2.append(data.azimuth)
4860 4860 self.__buffer3.append(data.elevation)
4861 4861 self.__profIndex += 1
4862 4862
4863 4863 if flagMode == 1: #'AZI'
4864 4864 return numpy.array(self.__buffer2)
4865 4865 elif flagMode == 0: #'ELE'
4866 4866 return numpy.array(self.__buffer3)
4867 4867
4868 def pushData(self, data,flagMode):
4868 def pushData(self, data,flagMode,case_flag):
4869 4869 '''
4870 4870 Return the accumulated data block, the number of profiles used, and the azimuth/elevation arrays
4871 4871 Affected : self.__profIndex (reset to 0)
4872 4872 '''
4873 4873
4874 4874 data_360 = numpy.array(self.__buffer).transpose(1, 0, 2)
4875 4875 data_p = numpy.array(self.__buffer2)
4876 4876 data_e = numpy.array(self.__buffer3)
4877 4877 n = self.__profIndex
4878 4878
4879 4879 self.__buffer = []
4880 4880 self.__buffer2 = []
4881 4881 self.__buffer3 = []
4882 4882 self.__profIndex = 0
4883 4883
4884 4884 if flagMode == 1 and case_flag == 0: # 'AZI' and a full rotation was completed
4885 self.putData(data=dataOut, attr = self.attr, flagMode=flagMode)
4885 self.putData(data=data, attr = self.attr, flagMode=flagMode)
4886 4886
4887 4887 return data_360, n, data_p, data_e
4888 4888
4889 4889
4890 4890 def byProfiles(self,dataOut,flagMode):
4891 4891
4892 4892 self.__dataReady = False
4893 4893 data_360 = []
4894 4894 data_p = None
4895 4895 data_e = None
4896 4896
4897 4897 angles = self.putData(data=dataOut, attr = self.attr, flagMode=flagMode)
4898 4898
4899 4899 if self.__profIndex > 1:
4900 4900 case_flag = self.checkcase(angles,flagMode)
4901 4901
4902 4902 if flagMode == 1: #'AZI':
4903 4903 if case_flag == 0: # a full rotation was completed
4904 4904 self.__buffer.pop() #Erase last data
4905 4905 self.__buffer2.pop()
4906 4906 self.__buffer3.pop()
4907 data_360,n,data_p,data_e = self.pushData(data=dataOut,flagMode=flagMode)
4907 data_360,n,data_p,data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4908 4908
4909 4909 self.__dataReady = True
4910 4910
4911 4911 elif flagMode == 0: #'ELE'
4912 4912
4913 4913 if case_flag == 0: # ascending
4914 4914
4915 4915 if len(self.__buffer) == 2: # still ascending
4916 4916 # Drop the previous sample to free the buffer and compare the current sample with the next one
4917 4917 self.__buffer.pop(0) # Erase first data
4918 4918 self.__buffer2.pop(0)
4919 4919 self.__buffer3.pop(0)
4920 4920 self.__profIndex -= 1
4921 4921 else: # it was descending and has started ascending again
4922 4922 # Drop the last sample
4923 4923 self.__buffer.pop() # Erase last data
4924 4924 self.__buffer2.pop()
4925 4925 self.__buffer3.pop()
4926 data_360, n, data_p, data_e = self.pushData(data=dataOut,flagMode=flagMode)
4926 data_360, n, data_p, data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4927 4927
4928 4928 self.__dataReady = True
4929 4929
4930 4930 return data_360, data_p, data_e
4931 4931
4932 4932
4933 4933 def blockOp(self, dataOut, flagMode, datatime= None):
4934 4934 if self.__initime == None:
4935 4935 self.__initime = datatime
4936 4936 data_360, data_p, data_e = self.byProfiles(dataOut,flagMode)
4937 4937 self.__lastdatatime = datatime
4938 4938
4939 4939 avgdatatime = self.__initime
4940 4940 if self.n==1:
4941 4941 avgdatatime = datatime
4942 4942 deltatime = datatime - self.__lastdatatime
4943 4943 self.__initime = datatime
4944 4944 return data_360, avgdatatime, data_p, data_e
4945 4945
4946 4946 def checkcase(self, angles, flagMode):
4947 4947
4948 4948 if flagMode == 1: #'AZI'
4949 4949 start = angles[-2]
4950 4950 end = angles[-1]
4951 4951 diff_angle = (end-start)
4952 4952
4953 4953 if diff_angle < 0: # the azimuth wrapped: a full rotation was completed
4954 4954 return 0
4955 4955
4956 4956 elif flagMode == 0: #'ELE'
4957 4957
4958 4958 start = angles[-2]
4959 4959 end = angles[-1]
4960 4960 diff_angle = (end-start)
4961 4961
4962 4962 if diff_angle > 0: # ascending
4963 4963 return 0
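# Illustration of the 'AZI' branch above: the azimuth is assumed to grow
# monotonically within a rotation and wrap at 360 deg, e.g.
#   ..., 358.2, 359.4, 0.6  -> diff_angle = 0.6 - 359.4 < 0 -> rotation complete (returns 0)
# The 'ELE' branch uses the same ascending test as Block360_vRF3.checkcase().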
4964 4964
4965 4965 def run(self, dataOut, attr_data='dataPP_POWER', axis=None,**kwargs):
4966 4966
4967 4967 dataOut.attr_data = attr_data
4968 4968
4969 4969 dataOut.flagMode = axis[0] # Provisional: this should come from the header
4970 4970
4971 4971 if not self.isConfig:
4972 4972 self.setup(dataOut = dataOut, attr = attr_data ,**kwargs)
4973 4973 self.isConfig = True
4974 4974
4975 4975 data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.flagMode, dataOut.utctime)
4976 4976
4977 4977 dataOut.flagNoData = True
4978 4978
4979 4979 if self.__dataReady:
4980 4980 setattr(dataOut, attr_data, data_360 )
4981 4981 dataOut.data_azi = data_p
4982 4982 dataOut.data_ele = data_e
4983 4983 dataOut.utctime = avgdatatime
4984 4984 dataOut.flagNoData = False
4985 4985
4986 4986 return dataOut
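# A hypothetical processing-chain sketch for Block360_vRF4 (assumed schainpy
# Project/addOperation/addParameter pattern; names, paths and formats are
# placeholders to adapt to the actual experiment script). Note that axis is
# read as axis[0] inside run(), so it is expected to arrive as a list like [1].
#
#   op_ped = proc_unit.addOperation(name='PedestalInformation', optype='other')
#   op_ped.addParameter(name='path', value='/path/to/position/files')
#
#   op_blk = proc_unit.addOperation(name='Block360_vRF4', optype='other')
#   op_blk.addParameter(name='attr_data', value='dataPP_POWER')
#   op_blk.addParameter(name='axis', value='1', format='intlist')  # 1 = azimuth (PPI), 0 = elevation (RHI)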