##// END OF EJS Templates
Madrigal 3 reading/writing task #1154
jespinoza -
r1070:d6c1e5e8fd34
parent child
Show More
@@ -1,580 +1,632
1 1 '''
2 2 Created on Aug 1, 2017
3 3
4 4 @author: Juan C. Espinoza
5 5 '''
6 6
7 7 import os
8 8 import sys
9 9 import time
10 10 import json
11 11 import glob
12 12 import datetime
13 13
14 14 import numpy
15 15 import h5py
16 16
17 try:
18 import madrigal
19 import madrigal.cedar
20 except:
21 print 'You should install "madrigal library" module if you want to read/write Madrigal data'
22
23 17 from schainpy.model.io.jroIO_base import JRODataReader
24 18 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation
25 19 from schainpy.model.data.jrodata import Parameters
26 20 from schainpy.utils import log
27 21
22 try:
23 import madrigal.cedar
24 except:
25 log.warning(
26 'You should install "madrigal library" module if you want to read/write Madrigal data'
27 )
28 28
29 29 DEF_CATALOG = {
30 30 'principleInvestigator': 'Marco Milla',
31 31 'expPurpose': None,
32 'expMode': None,
33 32 'cycleTime': None,
34 33 'correlativeExp': None,
35 34 'sciRemarks': None,
36 35 'instRemarks': None
37 36 }
38 37 DEF_HEADER = {
39 38 'kindatDesc': None,
40 39 'analyst': 'Jicamarca User',
41 40 'comments': None,
42 41 'history': None
43 42 }
44 43 MNEMONICS = {
45 44 10: 'jro',
46 45 11: 'jbr',
47 46 840: 'jul',
48 47 13: 'jas',
49 48 1000: 'pbr',
50 49 1001: 'hbr',
51 50 1002: 'obr',
52 51 }
53 52
54 53 UT1970 = datetime.datetime(1970, 1, 1) - datetime.timedelta(seconds=time.timezone)
55 54
56 55 def load_json(obj):
57 56 '''
58 57 Parse json as string instead of unicode
59 58 '''
60 59
61 60 if isinstance(obj, str):
62 61 iterable = json.loads(obj)
62 else:
63 iterable = obj
63 64
64 65 if isinstance(iterable, dict):
65 66 return {str(k): load_json(v) if isinstance(v, dict) else str(v) if isinstance(v, unicode) else v
66 67 for k, v in iterable.items()}
67 68 elif isinstance(iterable, (list, tuple)):
68 69 return [str(v) if isinstance(v, unicode) else v for v in iterable]
69 70
70 71 return iterable
71 72
72 73
73 74 class MADReader(JRODataReader, ProcessingUnit):
74 75
75 76 def __init__(self, **kwargs):
76 77
77 78 ProcessingUnit.__init__(self, **kwargs)
78 79
79 80 self.dataOut = Parameters()
80 81 self.counter_records = 0
81 82 self.nrecords = None
82 83 self.flagNoMoreFiles = 0
83 84 self.isConfig = False
84 85 self.filename = None
85 86 self.intervals = set()
86 87
87 88 def setup(self,
88 89 path=None,
89 90 startDate=None,
90 91 endDate=None,
91 92 format=None,
92 93 startTime=datetime.time(0, 0, 0),
93 94 endTime=datetime.time(23, 59, 59),
94 95 **kwargs):
95 96
96 self.started = True
97 97 self.path = path
98 98 self.startDate = startDate
99 99 self.endDate = endDate
100 100 self.startTime = startTime
101 101 self.endTime = endTime
102 102 self.datatime = datetime.datetime(1900,1,1)
103 103 self.oneDDict = load_json(kwargs.get('oneDDict',
104 104 "{\"GDLATR\":\"lat\", \"GDLONR\":\"lon\"}"))
105 105 self.twoDDict = load_json(kwargs.get('twoDDict',
106 106 "{\"GDALT\": \"heightList\"}"))
107 107 self.ind2DList = load_json(kwargs.get('ind2DList',
108 108 "[\"GDALT\"]"))
109 109 if self.path is None:
110 110 raise ValueError, 'The path is not valid'
111 111
112 112 if format is None:
113 113 raise ValueError, 'The format is not valid choose simple or hdf5'
114 114 elif format.lower() in ('simple', 'txt'):
115 115 self.ext = '.txt'
116 116 elif format.lower() in ('cedar',):
117 117 self.ext = '.001'
118 118 else:
119 119 self.ext = '.hdf5'
120 120
121 121 self.search_files(self.path)
122 122 self.fileId = 0
123 123
124 124 if not self.fileList:
125 125 raise Warning, 'There is no files matching these date in the folder: {}. \n Check startDate and endDate'.format(path)
126 126
127 127 self.setNextFile()
128 128
129 129 def search_files(self, path):
130 130 '''
131 131 Searching for madrigal files in path
 132 132 Creating a list of files to process included in [startDate,endDate]
133 133
134 134 Input:
135 135 path - Path to find files
136 136 '''
137 137
138 print 'Searching files {} in {} '.format(self.ext, path)
138 log.log('Searching files {} in {} '.format(self.ext, path), 'MADReader')
139 139 foldercounter = 0
140 140 fileList0 = glob.glob1(path, '*{}'.format(self.ext))
141 141 fileList0.sort()
142 142
143 143 self.fileList = []
144 144 self.dateFileList = []
145 145
146 146 startDate = self.startDate - datetime.timedelta(1)
147 147 endDate = self.endDate + datetime.timedelta(1)
148 148
149 149 for thisFile in fileList0:
150 150 year = thisFile[3:7]
151 151 if not year.isdigit():
152 152 continue
153 153
154 154 month = thisFile[7:9]
155 155 if not month.isdigit():
156 156 continue
157 157
158 158 day = thisFile[9:11]
159 159 if not day.isdigit():
160 160 continue
161 161
162 162 year, month, day = int(year), int(month), int(day)
163 163 dateFile = datetime.date(year, month, day)
164 164
165 165 if (startDate > dateFile) or (endDate < dateFile):
166 166 continue
167 167
168 168 self.fileList.append(thisFile)
169 169 self.dateFileList.append(dateFile)
170 170
171 171 return
172 172
173 173 def parseHeader(self):
174 174 '''
175 175 '''
176 176
177 177 self.output = {}
178 178 self.version = '2'
179 179 s_parameters = None
180 180 if self.ext == '.txt':
181 181 self.parameters = [s.strip().lower() for s in self.fp.readline().strip().split(' ') if s]
182 182 elif self.ext == '.hdf5':
183 183 metadata = self.fp['Metadata']
184 184 data = self.fp['Data']['Array Layout']
185 185 if 'Independent Spatial Parameters' in metadata:
186 186 s_parameters = [s[0].lower() for s in metadata['Independent Spatial Parameters']]
187 187 self.version = '3'
188 188 one = [s[0].lower() for s in data['1D Parameters']['Data Parameters']]
189 189 one_d = [1 for s in one]
190 190 two = [s[0].lower() for s in data['2D Parameters']['Data Parameters']]
191 191 two_d = [2 for s in two]
192 192 self.parameters = one + two
193 193 self.parameters_d = one_d + two_d
194 194
195 195 log.success('Parameters found: {}'.format(','.join(self.parameters)),
196 196 'MADReader')
197 197 if s_parameters:
198 198 log.success('Spatial parameters: {}'.format(','.join(s_parameters)),
199 199 'MADReader')
200 200
201 201 for param in self.oneDDict.keys():
202 202 if param.lower() not in self.parameters:
203 print('\x1b[33m[Warning]\x1b[0m Parameter \x1b[1;32m{}\x1b[0m not found will be ignored'.format(
204 param
205 ))
203 log.warning(
204 'Parameter {} not found will be ignored'.format(
205 param),
206 'MADReader')
206 207 self.oneDDict.pop(param, None)
207 208
208 209 for param, value in self.twoDDict.items():
209 210 if param.lower() not in self.parameters:
210 print('\x1b[33m[Warning]\x1b[0m Parameter \x1b[1;32m{}\x1b[0m not found will be ignored'.format(
211 param
212 ))
211 log.warning(
212 'Parameter {} not found, it will be ignored'.format(
213 param),
214 'MADReader')
213 215 self.twoDDict.pop(param, None)
214 216 continue
215 217 if isinstance(value, list):
216 218 if value[0] not in self.output:
217 219 self.output[value[0]] = []
218 220 self.output[value[0]].append(None)
219 221
220 222 def parseData(self):
221 223 '''
222 224 '''
223 225
224 226 if self.ext == '.txt':
225 227 self.data = numpy.genfromtxt(self.fp, missing_values=('missing'))
226 228 self.nrecords = self.data.shape[0]
227 229 self.ranges = numpy.unique(self.data[:,self.parameters.index(self.ind2DList[0].lower())])
228 230 elif self.ext == '.hdf5':
229 231 self.data = self.fp['Data']['Array Layout']
230 232 self.nrecords = len(self.data['timestamps'].value)
231 233 self.ranges = self.data['range'].value
232 234
233 235 def setNextFile(self):
234 236 '''
235 237 '''
236 238
237 239 file_id = self.fileId
238 240
239 241 if file_id == len(self.fileList):
240 print '\nNo more files in the folder'
241 print 'Total number of file(s) read : {}'.format(self.fileId)
242 log.success('No more files', 'MADReader')
242 243 self.flagNoMoreFiles = 1
243 244 return 0
244 245
245 print('\x1b[32m[Info]\x1b[0m Opening: {}'.format(
246 self.fileList[file_id]
247 ))
246 log.success(
247 'Opening: {}'.format(self.fileList[file_id]),
248 'MADReader'
249 )
250
248 251 filename = os.path.join(self.path, self.fileList[file_id])
249 252
250 253 if self.filename is not None:
251 254 self.fp.close()
252 255
253 256 self.filename = filename
254 257 self.filedate = self.dateFileList[file_id]
255 258
256 259 if self.ext=='.hdf5':
257 260 self.fp = h5py.File(self.filename, 'r')
258 261 else:
259 262 self.fp = open(self.filename, 'rb')
260 263
261 264 self.parseHeader()
262 265 self.parseData()
263 266 self.sizeOfFile = os.path.getsize(self.filename)
264 267 self.counter_records = 0
265 268 self.flagIsNewFile = 0
266 269 self.fileId += 1
267 270
268 271 return 1
269 272
270 273 def readNextBlock(self):
271 274
272 275 while True:
273
276 self.flagDiscontinuousBlock = 0
274 277 if self.flagIsNewFile:
275 278 if not self.setNextFile():
276 279 return 0
277 280
278 281 self.readBlock()
279 282
280 283 if (self.datatime < datetime.datetime.combine(self.startDate, self.startTime)) or \
281 284 (self.datatime > datetime.datetime.combine(self.endDate, self.endTime)):
282 print "\x1b[32m[Reading]\x1b[0m Record No. %d/%d -> %s \x1b[33m[Skipping]\x1b[0m" %(
285 log.warning(
286 'Reading Record No. {}/{} -> {} [Skipping]'.format(
283 287 self.counter_records,
284 288 self.nrecords,
285 self.datatime.ctime())
289 self.datatime.ctime()),
290 'MADReader')
286 291 continue
287 292 break
288 293
289 print "\x1b[32m[Reading]\x1b[0m Record No. %d/%d -> %s" %(
294 log.log(
295 'Reading Record No. {}/{} -> {}'.format(
290 296 self.counter_records,
291 297 self.nrecords,
292 self.datatime.ctime())
298 self.datatime.ctime()),
299 'MADReader')
293 300
294 301 return 1
295 302
296 303 def readBlock(self):
297 304 '''
298 305 '''
299 306 dum = []
300 307 if self.ext == '.txt':
301 308 dt = self.data[self.counter_records][:6].astype(int)
302 309 self.datatime = datetime.datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
303 310 while True:
304 311 dt = self.data[self.counter_records][:6].astype(int)
305 312 datatime = datetime.datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
306 313 if datatime == self.datatime:
307 314 dum.append(self.data[self.counter_records])
308 315 self.counter_records += 1
309 316 if self.counter_records == self.nrecords:
310 317 self.flagIsNewFile = True
311 318 break
312 319 continue
313 320 self.intervals.add((datatime-self.datatime).seconds)
321 if datatime.date() > self.datatime.date():
322 self.flagDiscontinuousBlock = 1
314 323 break
315 324 elif self.ext == '.hdf5':
316 325 datatime = datetime.datetime.utcfromtimestamp(
317 326 self.data['timestamps'][self.counter_records])
318 327 nHeights = len(self.ranges)
319 328 for n, param in enumerate(self.parameters):
320 329 if self.parameters_d[n] == 1:
321 330 dum.append(numpy.ones(nHeights)*self.data['1D Parameters'][param][self.counter_records])
322 331 else:
323 332 if self.version == '2':
324 333 dum.append(self.data['2D Parameters'][param][self.counter_records])
325 334 else:
326 335 tmp = self.data['2D Parameters'][param].value.T
327 336 dum.append(tmp[self.counter_records])
328 337 self.intervals.add((datatime-self.datatime).seconds)
338 if datatime.date()>self.datatime.date():
339 self.flagDiscontinuousBlock = 1
329 340 self.datatime = datatime
330 341 self.counter_records += 1
331 342 if self.counter_records == self.nrecords:
332 343 self.flagIsNewFile = True
333 344
334 345 self.buffer = numpy.array(dum)
335 346 return
336 347
337 348 def set_output(self):
338 349 '''
339 350 Storing data from buffer to dataOut object
340 351 '''
341 352
342 353 parameters = [None for __ in self.parameters]
343 354
344 355 for param, attr in self.oneDDict.items():
345 356 x = self.parameters.index(param.lower())
346 357 setattr(self.dataOut, attr, self.buffer[0][x])
347 358
348 359 for param, value in self.twoDDict.items():
349 360 x = self.parameters.index(param.lower())
350 361 if self.ext == '.txt':
351 362 y = self.parameters.index(self.ind2DList[0].lower())
352 363 ranges = self.buffer[:,y]
353 364 if self.ranges.size == ranges.size:
354 365 continue
355 366 index = numpy.where(numpy.in1d(self.ranges, ranges))[0]
356 367 dummy = numpy.zeros(self.ranges.shape) + numpy.nan
357 368 dummy[index] = self.buffer[:,x]
358 369 else:
359
360 370 dummy = self.buffer[x]
361 371
362 372 if isinstance(value, str):
363 373 if value not in self.ind2DList:
364 374 setattr(self.dataOut, value, dummy.reshape(1,-1))
365 375 elif isinstance(value, list):
366 376 self.output[value[0]][value[1]] = dummy
367 377 parameters[value[1]] = param
368 378
369 379 for key, value in self.output.items():
370 380 setattr(self.dataOut, key, numpy.array(value))
371 381
372 382 self.dataOut.parameters = [s for s in parameters if s]
373 383 self.dataOut.heightList = self.ranges
374 384 self.dataOut.utctime = (self.datatime - UT1970).total_seconds()
375 385 self.dataOut.utctimeInit = self.dataOut.utctime
376 386 self.dataOut.paramInterval = min(self.intervals)
377 387 self.dataOut.useLocalTime = False
378 388 self.dataOut.flagNoData = False
379 self.dataOut.started = self.started
389 self.dataOut.nrecords = self.nrecords
390 self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock
380 391
381 392 def getData(self):
382 393 '''
383 394 Storing data from databuffer to dataOut object
384 395 '''
385 396 if self.flagNoMoreFiles:
386 397 self.dataOut.flagNoData = True
387 print 'No file left to process'
398 log.error('No file left to process', 'MADReader')
388 399 return 0
389 400
390 401 if not self.readNextBlock():
391 402 self.dataOut.flagNoData = True
392 403 return 0
393 404
394 405 self.set_output()
395 406
396 407 return 1
397 408
398 409
399 class MAD2Writer(Operation):
410 class MADWriter(Operation):
400 411
401 412 missing = -32767
402 ext = '.dat'
403 413
404 414 def __init__(self, **kwargs):
405 415
406 416 Operation.__init__(self, **kwargs)
407 417 self.dataOut = Parameters()
408 418 self.path = None
409 self.dataOut = None
419 self.fp = None
410 420
411 def run(self, dataOut, path, oneDDict, ind2DList='[]', twoDDict='{}', metadata='{}', **kwargs):
421 def run(self, dataOut, path, oneDDict, ind2DList='[]', twoDDict='{}',
422 metadata='{}', format='cedar', **kwargs):
412 423 '''
413 424 Inputs:
414 425 path - path where files will be created
415 426 oneDDict - json of one-dimensional parameters in record where keys
416 427 are Madrigal codes (integers or mnemonics) and values the corresponding
417 428 dataOut attribute e.g: {
418 429 'gdlatr': 'lat',
419 430 'gdlonr': 'lon',
420 431 'gdlat2':'lat',
421 432 'glon2':'lon'}
422 433 ind2DList - list of independent spatial two-dimensional parameters e.g:
 423 434 ['heightList']
424 435 twoDDict - json of two-dimensional parameters in record where keys
425 436 are Madrigal codes (integers or mnemonics) and values the corresponding
 426 437 dataOut attribute if multidimensional array specify as tuple
427 438 ('attr', pos) e.g: {
428 439 'gdalt': 'heightList',
429 440 'vn1p2': ('data_output', 0),
430 441 'vn2p2': ('data_output', 1),
431 442 'vn3': ('data_output', 2),
432 443 'snl': ('data_SNR', 'db')
433 444 }
434 445 metadata - json of madrigal metadata (kinst, kindat, catalog and header)
435 446 '''
436 447 if not self.isConfig:
437 self.setup(dataOut, path, oneDDict, ind2DList, twoDDict, metadata, **kwargs)
448 self.setup(path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs)
438 449 self.isConfig = True
439 450
451 self.dataOut = dataOut
440 452 self.putData()
441 453 return
442 454
443 def setup(self, dataOut, path, oneDDict, ind2DList, twoDDict, metadata, **kwargs):
455 def setup(self, path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs):
444 456 '''
445 457 Configure Operation
446 458 '''
447 459
448 self.dataOut = dataOut
449 self.nmodes = self.dataOut.nmodes
450 460 self.path = path
451 461 self.blocks = kwargs.get('blocks', None)
452 462 self.counter = 0
453 463 self.oneDDict = load_json(oneDDict)
454 464 self.twoDDict = load_json(twoDDict)
455 465 self.ind2DList = load_json(ind2DList)
456 466 meta = load_json(metadata)
457 467 self.kinst = meta.get('kinst')
458 468 self.kindat = meta.get('kindat')
459 469 self.catalog = meta.get('catalog', DEF_CATALOG)
460 470 self.header = meta.get('header', DEF_HEADER)
471 if format == 'cedar':
472 self.ext = '.dat'
473 self.extra_args = {}
474 elif format == 'hdf5':
475 self.ext = '.hdf5'
476 self.extra_args = {'ind2DList': self.ind2DList}
461 477
462 return
478 self.keys = [k.lower() for k in self.twoDDict]
479 if 'range' in self.keys:
480 self.keys.remove('range')
481 if 'gdalt' in self.keys:
482 self.keys.remove('gdalt')
463 483
464 484 def setFile(self):
465 485 '''
466 486 Create new cedar file object
467 487 '''
468 488
469 489 self.mnemonic = MNEMONICS[self.kinst] #TODO get mnemonic from madrigal
470 date = datetime.datetime.utcfromtimestamp(self.dataOut.utctime)
490 date = datetime.datetime.fromtimestamp(self.dataOut.utctime)
471 491
472 filename = '%s%s_%s%s' % (self.mnemonic,
492 filename = '{}{}{}'.format(self.mnemonic,
473 493 date.strftime('%Y%m%d_%H%M%S'),
474 self.dataOut.mode,
475 494 self.ext)
476 495
477 496 self.fullname = os.path.join(self.path, filename)
478 497
479 498 if os.path.isfile(self.fullname) :
480 print "Destination path '%s' already exists. Previous file deleted. " %self.fullname
499 log.warning(
500 'Destination path {} already exists. Previous file deleted.'.format(
501 self.fullname),
502 'MADWriter')
481 503 os.remove(self.fullname)
482 504
483 505 try:
484 print '[Writing] creating file : %s' % (self.fullname)
485 self.cedarObj = madrigal.cedar.MadrigalCedarFile(self.fullname, True)
506 log.success(
507 'Creating file: {}'.format(self.fullname),
508 'MADWriter')
509 self.fp = madrigal.cedar.MadrigalCedarFile(self.fullname, True)
486 510 except ValueError, e:
487 print '[Error]: Impossible to create a cedar object with "madrigal.cedar.MadrigalCedarFile" '
511 log.error(
512 'Impossible to create a cedar object with "madrigal.cedar.MadrigalCedarFile"',
513 'MADWriter')
488 514 return
489 515
490 516 return 1
491 517
492 518 def writeBlock(self):
493 519 '''
494 520 Add data records to cedar file taking data from oneDDict and twoDDict
495 521 attributes.
496 522 Allowed parameters in: parcodes.tab
497 523 '''
498 524
499 startTime = datetime.datetime.utcfromtimestamp(self.dataOut.utctime)
525 startTime = datetime.datetime.fromtimestamp(self.dataOut.utctime)
500 526 endTime = startTime + datetime.timedelta(seconds=self.dataOut.paramInterval)
501 nrows = len(getattr(self.dataOut, self.ind2DList))
527 heights = self.dataOut.heightList
528
529 if self.ext == '.dat':
530 invalid = numpy.isnan(self.dataOut.data_output)
531 self.dataOut.data_output[invalid] = self.missing
532 out = {}
533 for key, value in self.twoDDict.items():
534 key = key.lower()
535 if isinstance(value, str):
536 if 'db' in value.lower():
537 tmp = getattr(self.dataOut, value.replace('_db', ''))
538 SNRavg = numpy.average(tmp, axis=0)
539 tmp = 10*numpy.log10(SNRavg)
540 else:
541 tmp = getattr(self.dataOut, value)
542 out[key] = tmp.flatten()
543 elif isinstance(value, (tuple, list)):
544 attr, x = value
545 data = getattr(self.dataOut, attr)
546 out[key] = data[int(x)]
547
548 a = numpy.array([out[k] for k in self.keys])
549 nrows = numpy.array([numpy.isnan(a[:, x]).all() for x in range(len(heights))])
550 index = numpy.where(nrows == False)[0]
502 551
503 552 rec = madrigal.cedar.MadrigalDataRecord(
504 553 self.kinst,
505 554 self.kindat,
506 555 startTime.year,
507 556 startTime.month,
508 557 startTime.day,
509 558 startTime.hour,
510 559 startTime.minute,
511 560 startTime.second,
512 561 startTime.microsecond/10000,
513 562 endTime.year,
514 563 endTime.month,
515 564 endTime.day,
516 565 endTime.hour,
517 566 endTime.minute,
518 567 endTime.second,
519 568 endTime.microsecond/10000,
520 569 self.oneDDict.keys(),
521 570 self.twoDDict.keys(),
522 nrows
571 len(index),
572 **self.extra_args
523 573 )
524 574
525 575 # Setting 1d values
526 576 for key in self.oneDDict:
527 577 rec.set1D(key, getattr(self.dataOut, self.oneDDict[key]))
528 578
529 579 # Setting 2d values
530 invalid = numpy.isnan(self.dataOut.data_output)
531 self.dataOut.data_output[invalid] = self.missing
532 out = {}
533 for key, value in self.twoDDict.items():
534 if isinstance(value, str):
535 out[key] = getattr(self.dataOut, value)
536 elif isinstance(value, tuple):
537 attr, x = value
538 if isinstance(x, (int, float)):
539 out[key] = getattr(self.dataOut, attr)[int(x)]
540 elif x.lower()=='db':
541 tmp = getattr(self.dataOut, attr)
542 SNRavg = numpy.average(tmp, axis=0)
543 out[key] = 10*numpy.log10(SNRavg)
544
545 for n in range(nrows):
580 nrec = 0
581 for n in index:
546 582 for key in out:
547 rec.set2D(key, n, out[key][n])
548
549 self.cedarObj.append(rec)
550 self.cedarObj.dump()
551 print '[Writing] Record No. {} (mode {}).'.format(
552 self.counter,
553 self.dataOut.mode
554 )
583 rec.set2D(key, nrec, out[key][n])
584 nrec += 1
585
586 self.fp.append(rec)
587 if self.ext == '.hdf5' and self.counter % 500 == 0 and self.counter > 0:
588 self.fp.dump()
589 if self.counter % 10 == 0 and self.counter > 0:
590 log.log(
591 'Writing {} records'.format(
592 self.counter),
593 'MADWriter')
555 594
556 595 def setHeader(self):
557 596 '''
 558 597 Create and add catalog and header to cedar file
559 598 '''
560 599
600 log.success('Closing file {}'.format(self.fullname), 'MADWriter')
601
602 if self.ext == '.dat':
603 self.fp.write()
604 else:
605 self.fp.dump()
606 self.fp.close()
607
561 608 header = madrigal.cedar.CatalogHeaderCreator(self.fullname)
562 609 header.createCatalog(**self.catalog)
563 610 header.createHeader(**self.header)
564 611 header.write()
565 612
566 613 def putData(self):
567 614
568 615 if self.dataOut.flagNoData:
569 616 return 0
570 617
618 if self.dataOut.flagDiscontinuousBlock or self.counter == self.blocks:
619 if self.counter > 0:
620 self.setHeader()
621 self.counter = 0
622
571 623 if self.counter == 0:
572 624 self.setFile()
573 625
574 if self.counter <= self.dataOut.nrecords:
575 626 self.writeBlock()
576 627 self.counter += 1
577 628
578 if self.counter == self.dataOut.nrecords or self.counter == self.blocks:
629 def close(self):
630
631 if self.counter > 0:
579 632 self.setHeader()
580 self.counter = 0
General Comments 0
You need to be logged in to leave comments. Login now