##// END OF EJS Templates
v2.9.2 :: Add 'download_files_advance' API function
eynilupu -
r12:76348ccca4d9
parent child
Show More
@@ -0,0 +1,228
1 #from ckanapi.datapackage import populate_schema_from_datastore
2 from ckanapi.cli import workers, dump
3 from ckanapi.cli.utils import pretty_json, completion_stats, compact_json, quiet_int_pipe
4 from datetime import datetime
5 import sys
6 import json
7 import os
8 import requests
9 import six
10
11 if sys.version_info.major == 3:
12 from urllib.parse import urlparse
13 else:
14 import urlparse
15
# Download stream chunk size: 100 KiB per read when streaming resource files.
DL_CHUNK_SIZE = 100 * 1024
# Fixed: removed a stray `print()` that emitted a blank line every time this
# module was imported (debug residue).
19
def dump_things_change(ckan, thing, arguments, worker_pool=None, stdout=None, stderr=None, **kwargs):
    """Dump CKAN objects through parallel worker subprocesses, optionally building datapackages.

    Variant of ckanapi's ``dump_things``: before dispatching to the workers it
    also asks the remote for each dataset's filtered resource list (via the
    custom ``url_resources`` action) and hands that list to
    ``create_datapackage_change`` so only those files are downloaded.

    :param ckan: RemoteCKAN-like client (used here for ``url_resources``).
    :param thing: kind of object being dumped; forwarded to the worker command line.
    :param arguments: docopt-style options dict ('--worker', '--processes',
        '--datapackages', '--quiet', '--apikey', '--remote', '--insecure',
        'ID_OR_NAME', ...).
    :param worker_pool, stdout, stderr: injectable for testing; default to
        ``workers.worker_pool`` and the process's binary stdio streams.
    :returns: None on success, 1 on worker/pipe failure, 2 on interrupt, or the
        exception instance raised while fetching the filtered URL lists.
    """
    if worker_pool is None:
        worker_pool = workers.worker_pool
    if stdout is None:
        # Prefer the underlying binary buffer (Python 3); fall back to the stream itself.
        stdout = getattr(sys.__stdout__, 'buffer', sys.__stdout__)
    if stderr is None:
        stderr = getattr(sys.stderr, 'buffer', sys.stderr)

    if arguments['--worker']:
        # Child mode: this process is one of the pool workers; do its share and exit.
        return dump.dump_things_worker(ckan, thing, arguments)
    # NOTE(review): disabled logging support kept from upstream ckanapi.
    '''
    log = None
    if arguments['--log']:
        log = open(arguments['--log'], 'a')
    '''
    jsonl_output = stdout
    if arguments['--datapackages']:
        # Datapackage mode writes files to disk; discard the JSON-lines stream.
        jsonl_output = open(os.devnull, 'wb')

    names = arguments['ID_OR_NAME']

    # Accept either plain names/ids or full record dicts.
    if names and isinstance(names[0], dict):
        names = [rec.get('name',rec.get('id')) for rec in names]
    '''
    if arguments['--datapackages']:
        arguments['--datastore-fields'] = True
    '''
    #----------------------------#
    # Per-dataset server-side filtered resource lists, keyed by dataset name.
    # 'url_resources' is a custom server API; assumed to return the resources
    # eligible for download -- confirm against the server implementation.
    filtered_urls = {}
    for name in names:
        try:
            response = getattr(ckan.action, 'url_resources')(id=name, **kwargs)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
            # the exception object is returned instead of raised (module convention).
            _, exc_value, _ = sys.exc_info()
            return exc_value
        filtered_urls[name] = response
    #----------------------------#

    cmd = dump._worker_command_line(thing, arguments)
    processes = int(arguments['--processes'])
    if hasattr(ckan, 'parallel_limit'):
        # Respect the server-advertised cap on parallel requests.
        processes = min(processes, ckan.parallel_limit)
    stats = completion_stats(processes)
    # Feed each name to the pool as one numbered JSON line.
    pool = worker_pool(cmd, processes, enumerate(compact_json(n) + b'\n' for n in names))

    # Workers may finish out of order; buffer records until the next sequential
    # one is available so output order matches input order.
    results = {}
    expecting_number = 0
    with quiet_int_pipe() as errors:
        for job_ids, finished, result in pool:
            if not result:
                # A worker died without producing output.
                return 1
            timestamp, error, record = json.loads(result.decode('utf-8'))
            results[finished] = record

            if not arguments['--quiet']:
                stderr.write('** Finished: {0} | Job IDs: {1} | Next Report: {2} | Error: {3} | Dataset Name: {4}\n'.format(
                    finished,
                    job_ids,
                    next(stats),
                    error,
                    record.get('name', '') if record else '',
                ).encode('utf-8'))
            '''
            if log:
                log.write(compact_json([
                    timestamp,
                    finished,
                    error,
                    record.get('name', '') if record else None,
                ]) + b'\n')
            '''
            datapackages_path = arguments['--datapackages']
            if datapackages_path:
                # Build the on-disk datapackage for this record using only the
                # resources the server-side filter selected.
                create_datapackage_change(record, filtered_urls[record.get('name', '')], datapackages_path, stderr, arguments['--apikey'], arguments['--remote'], arguments['--insecure'])
            while expecting_number in results:
                record = results.pop(expecting_number)
                if record:
                    jsonl_output.write(compact_json(record, sort_keys=True) + b'\n')
                expecting_number += 1
    if 'pipe' in errors:
        return 1
    if 'interrupt' in errors:
        return 2
103
def create_datapackage_change(record, filtered_url, base_path, stderr, apikey, host_url, insecure):
    """Materialize one dataset record as a local datapackage directory.

    Creates ``<base_path>/<dataset-name>/data/``, downloads every resource of
    *record* that (a) is not an API-style resource, (b) appears in
    *filtered_url* (the server-side filtered list of downloadable resources),
    and (c) has a non-empty URL, then writes a ``datapackage.json`` descriptor.

    :param record: CKAN dataset dict; mutated in place (gains 'path', and each
        downloaded resource gains 'path').
    :param filtered_url: iterable of ``{'name': ..., 'url': ...}`` dicts
        selecting which resources to download.
    :param base_path: directory under which the datapackage directory is created.
    :param stderr: binary stream for download error messages.
    :param apikey, host_url, insecure: forwarded to ``create_resource_change``.
    :returns: (datapackage_dir, datapackage_dict, path_to_datapackage_json)
    """
    resource_formats_to_ignore = ['API', 'api']
    #----------------------------------------#
    # Pick a directory name that does not collide with an existing one.
    datapackage_dir = name_no_repetition(record.get('name', ''), base_path)
    #----------------------------------------#
    os.makedirs(os.path.join(datapackage_dir, 'data'))
    record['path'] = datapackage_dir

    ckan_resources = []
    for resource in record.get('resources', []):
        if resource['format'] in resource_formats_to_ignore:
            continue

        # Only download resources the server-side filter selected.
        # (idiom fix: `X not in Y` instead of `not X in Y`)
        if {'name': resource['name'], 'url': resource['url']} not in filtered_url:
            continue

        # Skip resources without a usable URL (robust to '' and None).
        if not resource['url']:
            continue

        filename = name_no_repetition(resource['name'], os.path.join(datapackage_dir, 'data'), 'resource')
        resource['path'] = os.path.join(datapackage_dir, 'data', filename)

        cres = create_resource_change(resource, stderr, apikey, host_url, insecure)
        if not cres:
            # HTTP error while downloading: leave this resource out of the package.
            continue
        ckan_resources.append(resource)

    dataset = dict(record, resources=ckan_resources)
    datapackage = dataset_to_datapackage_change(dataset)

    json_path = os.path.join(datapackage_dir, 'datapackage.json')
    with open(json_path, 'wb') as out:
        out.write(pretty_json(datapackage))

    return datapackage_dir, datapackage, json_path
149
def create_resource_change(resource, stderr, apikey, host_url, insecure):
    """Download one resource file to ``resource['path']``.

    Rewrites the resource URL when it points at the production host
    ('www.igp.gob.pe') but the dump targets a different remote, so the file is
    fetched from *host_url* instead.

    :returns: the (possibly URL-rewritten) resource dict on success or after a
        connection-level error (best-effort), ``False`` when the server
        answered with an HTTP error status.
    """
    # ---------- REPLACE URL --------- #
    # NOTE(review): under Python 2 the module-level `import urlparse` binds the
    # module, not the function, so these calls would fail there -- confirm this
    # code path is Python 3 only.
    host_parts = urlparse(host_url)
    url_parts = urlparse(resource['url'])
    if host_parts.netloc != 'www.igp.gob.pe' and url_parts.netloc == 'www.igp.gob.pe':
        resource['url'] = resource['url'].replace(url_parts.scheme + '://' + url_parts.netloc,
                                                  host_parts.scheme + '://' + host_parts.netloc)
    #----------------------------------#
    try:
        r = requests.get(resource['url'], headers={'Authorization': apikey}, stream=True, verify=not insecure)
        try:
            #---------------------------------------#
            try:
                r.raise_for_status()
            except requests.exceptions.HTTPError:
                return False
            #---------------------------------------#
            with open(resource['path'], 'wb') as f:
                for chunk in r.iter_content(chunk_size=DL_CHUNK_SIZE):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)
        finally:
            # Fixed: release the HTTP connection even on partial downloads;
            # the response was previously never closed.
            r.close()
    except requests.ConnectionError:
        stderr.write('URL {0} refused connection. The resource will not be downloaded\n'.format(resource['url']).encode('utf-8'))
    except requests.exceptions.RequestException as e:
        stderr.write('{0}\n'.format(str(e.args[0]) if len(e.args) > 0 else '').encode('utf-8'))
    except Exception as e:
        # Fixed: this message was missing its trailing newline, unlike the
        # sibling error branches above.
        stderr.write('{0}\n'.format(str(e.args[0]) if len(e.args) > 0 else '').encode('utf-8'))
    return resource
176
def dataset_to_datapackage_change(dataset_dict):
    """Convert a CKAN dataset dict into a minimal datapackage descriptor.

    The descriptor carries name/id/path plus a human-readable 'last_update'
    derived from 'metadata_modified'; resources, when present, are converted
    individually.
    """
    modified = datetime.strptime(dataset_dict['metadata_modified'],
                                 "%Y-%m-%dT%H:%M:%S.%f")
    descriptor = {
        'name': dataset_dict['name'],
        'id': dataset_dict['id'],
        'path': dataset_dict['path'],
        'last_update': modified.strftime("%d-%b-%Y %I.%M %p"),
    }

    ckan_resources = dataset_dict.get('resources')
    if ckan_resources:
        descriptor['resources'] = [
            convert_to_datapackage_resource_change(res) for res in ckan_resources
        ]
    return descriptor
188
def convert_to_datapackage_resource_change(resource_dict):
    """Map a CKAN resource dict onto a datapackage resource entry.

    Copies 'id', 'name' and 'path' when they are present and truthy. A string
    'schema' is decoded from JSON (falling back to the raw string on invalid
    JSON); a dict 'schema' is passed through unchanged.
    """
    entry = {}

    for field in ('id', 'name', 'path'):
        if resource_dict.get(field):
            entry[field] = resource_dict[field]

    schema = resource_dict.get('schema')
    if isinstance(schema, six.string_types):
        try:
            entry['schema'] = json.loads(schema)
        except ValueError:
            # Not valid JSON: keep the original string as-is.
            entry['schema'] = schema
    elif isinstance(schema, dict):
        entry['schema'] = schema

    return entry
211
def name_no_repetition(name, dir, option=''):
    """Return a non-colliding name (or full path) for *name* under *dir*.

    Tries ``name``, then ``(1)name``, ``(2)name``, ... until a candidate does
    not exist on disk. With ``option='resource'`` only the bare candidate name
    is returned; otherwise the full joined path.

    NOTE: the parameter ``dir`` shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    attempt = 0
    while True:
        candidate = name if attempt == 0 else '(' + str(attempt) + ')' + name
        if not os.path.exists(os.path.join(dir, candidate)):
            if option == 'resource':
                return candidate
            return os.path.join(dir, candidate)
        attempt = attempt + 1
1 NO CONTENT: modified file, binary diff hidden
@@ -1,935 +1,1022
1 1 from ckanapi import RemoteCKAN
2 2 from datetime import datetime
3 3 from tqdm import tqdm
4 from CKAN_JRO import logic_download
4 5 #from ckanapi.errors import NotAuthorized, NotFound, ValidationError, SearchQueryError, SearchError, CKANAPIError, ServerIncompatibleError
5 6 import sys
6 7 import platform
7 8 import os
8 9 import tempfile
9 10 import shutil
10 11 import zipfile
11 12 import concurrent.futures
12 13 import requests
13 14 import json
14 15 #import pathlib
15 16 import uuid
16 17
17 18 if sys.version_info.major == 3:
18 19 from urllib.parse import urlparse
19 20 else:
20 21 import urlparse
21 22
22 23 class JROAPI():
23 24 """
24 25 FINALIDAD:
25 26 Script para administrar y obtener la data del repositorio por medio de APIs.
26 27
27 28 REQUISITOS PREVIOS:
28 29 - Paso 1: Tener "pip [Python 2]" o "pip3 [Python 3]" instalado:
29 30 - Paso 2: Instalar lo siguiente como administrador:
30 31 En Python 2
31 32 - pip install ckanapi==4.5
32 33 - pip install requests
33 34 - pip install futures
34 35 - pip install tqdm
35 36 En Python > 3
36 37 - pip3 install ckanapi==4.5
37 38 - pip3 install requests
38 39 - pip3 install tqdm
39 40
40 41 FUNCIONES DISPONIBLES:
41 42 - action
42 43 - upload_file
43 44 - upload_multiple_files
44 45 - upload_multiple_files_advance
45 46 - show
46 47 - search
47 48 - create
48 49 - patch
49 50 - delete
50 51 - download_files
51 52
52 53 EJEMPLOS:
53 54 #1:
54 55 with JROAPI('http://demo.example.com', Authorization='#########') as <access_name>:
55 56 ... some operation(s) ...
56 57 #2:
57 58 <access_name> = JROAPI('http://example.com', Authorization='#########')
58 59 ... some operation(s) ...
59 60 <access_name>.ckan.close()
60 61
61 62 REPORTAR ALGUN PROBLEMA:
62 63 Debe enviar un correo a eynilupu@igp.gob.pe detallando los siguientes pasos:
63 64 1) Correo para contactarlo
64 65 2) Descripcion del problema
65 66 3) ¿En que paso o seccion encontro el problema?
66 67 4) ¿Cual era el resultado que usted esperaba?
67 68 """
68 69 def __init__(self, url, Authorization=None, secure=True):
69 70 #-------- Check Secure -------#
70 71 self.verify = secure
71 72 if not secure and isinstance(secure, bool):
72 73 session = requests.Session()
73 74 session.verify = False
74 75 else:
75 76 session = None
76 77 #------------------------------#
77 78 self.url = url
78 79 ua = 'CKAN_JRO/2.9.2 (+'+str(self.url)+')'
79 80 #ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
80 81 self.ckan = RemoteCKAN(self.url, apikey=Authorization, user_agent=ua, session=session)
81 82 #self.ckan = RemoteCKAN(self.url, apikey=Authorization)
82 83 self.Authorization = Authorization
83 84 # Change for --> self.separator = os.sep
84 85 if platform.system() == 'Windows':
85 86 self.separator = '\\'
86 87 else:
87 88 self.separator = '/'
88 89
89 90 self.chunk_size = 1024
90 91 self.list = []
91 92 self.dict = {}
92 93 self.str = ''
93 94 self.check = 1
94 95 self.cont = 0
95 96
96 97 def __enter__(self):
97 98 return self
98 99
99 100 def __exit__(self, *args):
100 101 self.ckan.close()
101 102
102 103 def action(self, action, **kwargs):
103 104 """
104 105 FINALIDAD:
105 106 Funcion para llamar a las APIs disponibles
106 107
107 108 APIs DISPONIBLES:
108 109 CONSULTAR: "GUIA DE SCRIPT.pdf"
109 110
110 111 EJEMPLO:
111 112 <access_name>.action(<consuming API>, param_1 = <class 'param_1'>, ...)
112 113 """
113 114 #--------------- CASE: PACKAGE SEARCH ---------------#
114 115 if kwargs is not None:
115 116 if action == 'package_search':
116 117 self.list = ['facet_mincount', 'facet_limit', 'facet_field']
117 118 for facet in self.list:
118 119 if facet in kwargs:
119 120 kwargs[facet.replace('_', '.')] = kwargs[facet]
120 121 kwargs.pop(facet)
121 122 #----------------------------------------------------#
122 123 try:
123 124 return getattr(self.ckan.action, action)(**kwargs)
124 125 except:
125 126 _, exc_value, _ = sys.exc_info()
126 127 return exc_value
127 128
128 129 def upload_file(self, dataset_id, file_path, file_date, file_type, **kwargs):
129 130 # Agregar si es interruptido por teclado
130 131 '''
131 132 FINALIDAD:
132 133 Funcion para subir un unico archivo al repositorio del ROJ.
133 134
134 135 PARAMETROS DISPONIBLES:
135 136 CONSULTAR: "GUIA DE SCRIPT.pdf"
136 137
137 138 ESTRUCTURA:
138 139 <access_name>.upload_file(dataset_id = <class 'str'>, file_date = <class 'str'>, file_path = <class 'str'>, file_type = <class 'str'>, param_1 = <class 'param_1'>, ...)
139 140 '''
140 141 self.list = ['package_id', 'upload', 'voc_file_type', 'name'] #file_date
141 142 for key1, value1 in kwargs.items():
142 143 if not key1 in self.list:
143 144 self.dict[key1] = value1
144 145
145 146 #---------------------------#
146 147 if not 'others' in kwargs:
147 148 self.dict['others'] = ''
148 149 else:
149 150 if isinstance(kwargs['others'], list):
150 151 self.dict['others'] = json.dumps(kwargs['others'])
151 152 #---------------------------#
152 153
153 154 if not os.path.isfile(file_path):
154 155 return 'File "%s" not exist' % (file_path)
155 156
156 157 #if not 'format' in self.dict:
157 158 # self.str = ''.join(pathlib.Path(file_path).suffixes)
158 159 # if len(self.str) > 0:
159 160 # self.dict['format'] = self.str.upper()[1:]
160 161
161 162 #-------------------------PACKAGE SHOW-----------------------#
162 163 try:
163 164 dataset_show = getattr(self.ckan.action, 'package_show')(id=dataset_id)['resources']
164 165 except:
165 166 _, exc_value, _ = sys.exc_info()
166 167 print('ERROR obtaining metadata dataset:: Use the "print" for more information')
167 168 return exc_value
168 169
169 170 resources_name = []
170 171 for u in dataset_show:
171 172 resources_name.append(u['name'].lower())
172 173
173 174 if os.path.basename(file_path).lower() in resources_name:
174 175 return 'ERROR:: "%s" file already exist in this dataset' % (os.path.basename(file_path))
175 176 #------------------------------------------------------------#
176 177
177 178 try:
178 179 return getattr(self.ckan.action, 'resource_create')(package_id=dataset_id, file_date=file_date, upload=open(file_path, 'rb'), voc_file_type=file_type, name=os.path.basename(file_path), **self.dict)
179 180 except:
180 181 _, exc_value, _ = sys.exc_info()
181 182 return exc_value
182 183
    def upload_multiple_files_advance(self, dataset_id, path_files, file_date, file_type, max_size=100, max_count=500, ignore_repetition=False, **kwargs):
        # TODO: handle a keyboard interrupt cleanly during the upload
        '''
        PURPOSE:
            Upload multiple files to the ROJ repository in size/count-bounded
            blocks using the `package_revise` API (one request per block).

        AVAILABLE PARAMETERS:
            SEE: "GUIA DE SCRIPT.pdf"

        STRUCTURE:
            <access_name>.upload_multiple_files_advance(dataset_id = <class 'str'>, path_files = <class 'list of strings'>, file_date = <class 'str'>, file_type = <class 'str'>, param_1 = <class 'param_1'>, ...)
        '''
        #-------------------------PACKAGE SHOW-----------------------#
        # Fetch the dataset's current resources so duplicates can be rejected.
        try:
            dataset_show = getattr(self.ckan.action, 'package_show')(id=dataset_id)['resources']
        except:
            _, exc_value, _ = sys.exc_info()
            print('ERROR obtaining metadata dataset:: Use the "print" for more information')
            return exc_value
        #------------------------------------------------------------#
        # Duplicate check is case-insensitive on resource names.
        resources_name = []
        for u in dataset_show:
            resources_name.append(u['name'].lower())
        #------------------------------------------------------------#
        # Extra kwargs (except the reserved keys) are applied to every resource.
        # NOTE(review): self.dict is shared instance state and is not cleared
        # here, so values can persist from earlier calls -- confirm intended.
        self.list = ['package_id', 'upload', 'voc_file_type', 'name']
        for key1, value1 in kwargs.items():
            if not key1 in self.list:
                self.dict[key1] = value1
        #------------------------------------------------------------#
        if not 'others' in kwargs:
            self.dict['others'] = ''
        else:
            if isinstance(kwargs['others'], list):
                self.dict['others'] = json.dumps(kwargs['others'])
        #------------------------------------------------------------#
        # Collect {name, size, open-file} entries for every file to upload.
        total_list = []
        #---------------CASE : "path" or "path_list"-----------------#
        if type(path_files) is list:
            if len(path_files) != 0:
                path_files.sort()
                for u in path_files:
                    if os.path.isfile(u):
                        if os.path.basename(u).lower() in resources_name:
                            if not ignore_repetition:
                                return 'ERROR:: "%s" file already exist in this dataset' % (os.path.basename(u))
                            print('WARRING:: "'+ str(os.path.basename(u)) +'" file was ignored because already exist in this dataset')
                        else:
                            total_list.append({'name':os.path.basename(u), 'size': os.stat(u).st_size, 'upload':open(u, 'rb')})
                    else:
                        return 'File "%s" does not exist' % (u)
            else:
                return 'ERROR:: "path_list is empty"'

        elif type(path_files) is str:
            # A single string is treated as a directory: upload every file in it.
            if os.path.isdir(path_files):
                path_order = [f for f in os.listdir(path_files) if os.path.isfile(os.path.join(path_files, f))]
                path_order.sort()
                if path_order:
                    for name in path_order:
                        if name.lower() in resources_name:
                            if not ignore_repetition:
                                return 'ERROR:: "%s" file already exist in this dataset' % (name)
                            print('WARRING:: "'+ name +'" file was ignored because already exist in this dataset')
                        else:
                            total_list.append({'name':name, 'size': os.stat(os.path.join(path_files, name)).st_size, 'upload':open(os.path.join(path_files, name), 'rb')})
                else:
                    return "ERROR:: There aren't files in this directory"
            else:
                return 'ERROR:: Directory "%s" does not exist' % (path_files)
        else:
            return 'ERROR:: "path_files" must be a str or list'
        #------------------------------------------------------------#
        # Build the `match` filter for package_revise: by id when dataset_id is
        # a valid UUID4, otherwise by name.
        try:
            uuid.UUID(str(dataset_id), version=4)
            package_id_or_name = '"id": "' + str(dataset_id) + '"'
        except ValueError:
            package_id_or_name = '"name": "' + str(dataset_id) + '"'
        #------------------------------------------------------------#
        # Partition the files into blocks, each bounded by max_size (MB) total
        # and max_count files; a file larger than max_size aborts the upload.
        blocks = [[]]
        size_file = 0
        count_file = 0
        inter_num = 0
        for value in total_list:
            if value['size'] > 1024 * 1024 * float(max_size):
                return 'ERROR:: The size of the "%s" file is %sMB aprox, please change "max_size" value' % (value['name'], str(round(value['size']/(1024 * 1024), 2)))
            if not 1 <= int(max_count) <= 999:
                return 'ERROR:: The count of the number of files must be between 1 and 999, please change "max_count" value'

            size_file = size_file + value['size']
            count_file = count_file + 1
            if size_file <= 1024 * 1024 * float(max_size) and count_file <= int(max_count):
                del value['size']
                blocks[inter_num].append(value)
            else:
                # Current block is full: start a new one with this file.
                inter_num = inter_num + 1
                size_file = value['size']
                count_file = 1
                blocks.append([])
                del value['size']
                blocks[inter_num].append(value)
        #------------------------------------------------------------#
        if len(blocks[0]) > 0:
            print('BLOCK(S) IN TOTAL:: {}'.format(len(blocks)))
            for count1, block in enumerate(blocks):
                print('---- BLOCK N°{} ----'.format(count1 + 1))
                resource_extend = []
                files_dict = {}
                for count2, value2 in enumerate(block):
                    value2['file_date'] = file_date
                    value2['voc_file_type'] = file_type
                    value2.update(self.dict)

                    #if not 'format' in value2:
                    #    format = ''.join(pathlib.Path(value2['name']).suffixes)
                    #    if len(format) > 0:
                    #        value2['format'] = format.upper()[1:]

                    # package_revise file keys use negative indices relative to
                    # the end of the extended resources list.
                    files_dict['update__resources__-'+ str(len(block)-count2) +'__upload'] = (value2['name'], value2['upload'])
                    del value2['upload']
                    resource_extend.append(value2)

                print('BLOCK N°{} :: "{}" file(s) found >> uploading'.format(count1 + 1, len(block)))
                try:
                    result = self.ckan.call_action(
                        'package_revise',
                        {'match': '{'+ str(package_id_or_name) +'}', 'update__resources__extend': json.dumps(resource_extend)},
                        files=files_dict
                    )
                    print('BLOCK N°{} :: Uploaded file(s) successfully'.format(count1 + 1))
                    # Only the last block's server response is returned.
                    if len(blocks) == count1 + 1:
                        return result
                except:
                    print('ERROR :: Use the "print" for more information')
                    _, exc_value, _ = sys.exc_info()
                    return exc_value
        else:
            return "ERROR:: No file(s) found to upload"
320 321
    def upload_multiple_files(self, dataset_id, path_files, date_files, type_files, ignore_repetition=False, **kwargs):
        # TODO: handle a keyboard interrupt cleanly during the upload
        '''
        PURPOSE:
            Upload multiple files to the ROJ repository, one `resource_create`
            call per file. List-valued parameters are distributed per file;
            scalar parameters are shared by every file.

        AVAILABLE PARAMETERS:
            SEE: "GUIA DE SCRIPT.pdf"

        STRUCTURE:
            <access_name>.upload_multiple_files(dataset_id = <class 'str'>, path_files = <class 'str'> or <class 'list of strings'>, date_files = <class 'str'> or <class 'list of strings'>, type_files = <class 'str'> or <class 'list of strings'>, param_1 = <class 'param_1'>, ...)
        '''
        #-------------------------PACKAGE SHOW-----------------------#
        # Fetch the dataset's current resources so duplicates can be rejected.
        try:
            dataset_show = getattr(self.ckan.action, 'package_show')(id=dataset_id)['resources']
        except:
            _, exc_value, _ = sys.exc_info()
            print('ERROR obtaining metadata dataset:: Use the "print" for more information')
            return exc_value
        #------------------------------------------------------------#
        # Duplicate check is case-insensitive on resource names.
        resources_name = []
        for u in dataset_show:
            resources_name.append(u['name'].lower())
        #------------------------------------------------------------#

        # params_dict: per-file (list) values; params_no_dict: shared values.
        params_dict = {'upload':[], 'name':[]}
        #if not 'format' in kwargs:
        #    params_dict.update({'format':[]})
        #---------------CASE : "path" or "path_list"-----------------#
        if type(path_files) is list:
            if len(path_files) != 0:
                path_files.sort()
                for u in path_files:
                    if os.path.isfile(u):
                        if os.path.basename(u).lower() in resources_name:
                            if not ignore_repetition:
                                return 'ERROR:: "%s" file already exist in this dataset' % (os.path.basename(u))
                            print('WARRING:: "'+ str(os.path.basename(u)) +'" file was ignored because already exist in this dataset')
                        else:
                            params_dict['upload'].append(open(u, 'rb'))
                            params_dict['name'].append(os.path.basename(u))
                            #if not 'format' in kwargs:
                            #    format = ''.join(pathlib.Path(u).suffixes)
                            #    if len(format) > 0:
                            #        params_dict['format'].append(format.upper()[1:])
                            #    else:
                            #        params_dict['format'].append('')
                    else:
                        return 'File "%s" does not exist' % (u)
            else:
                return 'ERROR:: "path_list is empty"'
        elif type(path_files) is str:
            # A single string is treated as a directory: upload every file in it.
            if os.path.isdir(path_files):
                path_order = [f for f in os.listdir(path_files) if os.path.isfile(os.path.join(path_files, f))]
                path_order.sort()
                if path_order:
                    for name in path_order:
                        if name.lower() in resources_name:
                            if not ignore_repetition:
                                return 'ERROR:: "%s" file already exist in this dataset' % (name)
                            print('WARRING:: "'+ str(name) +'" file was ignored because already exist in this dataset')
                        else:
                            params_dict['upload'].append(open(os.path.join(path_files, name), 'rb'))
                            params_dict['name'].append(name)
                            #if not 'format' in kwargs:
                            #    format = ''.join(pathlib.Path(name).suffixes)
                            #    if len(format) > 0:
                            #        params_dict['format'].append(format.upper()[1:])
                            #    else:
                            #        params_dict['format'].append('')
                else:
                    return "ERROR:: There aren't files in this directory"
            else:
                return 'ERROR:: Directory "%s" does not exist' % (path_files)
        else:
            return 'ERROR:: "path_files" must be a str or list'
        #------------------------------------------------------------#
        params_no_dict = {'package_id': dataset_id}
        # date/type may be one value for all files, or one value per file.
        if type(date_files) is list:
            params_dict['file_date'] = date_files
        else:
            params_no_dict['file_date'] = date_files

        if type(type_files) is list:
            params_dict['voc_file_type'] = type_files
        else:
            params_no_dict['voc_file_type'] = type_files

        for key1, value1 in kwargs.items():
            if not key1 in params_dict and not key1 in params_no_dict and key1 != 'others':
                if type(value1) is list:
                    params_dict[key1] = value1
                else:
                    params_no_dict[key1] = value1
        #------------------------------------------#
        if not 'others' in kwargs:
            params_no_dict['others'] = ''
        else:
            # tuple -> one JSON value per file; list -> one shared JSON value.
            if isinstance(kwargs['others'], tuple):
                params_dict['others'] = [json.dumps(w) for w in kwargs['others']]
            elif isinstance(kwargs['others'], list):
                params_no_dict['others'] = json.dumps(kwargs['others'])
            elif isinstance(kwargs['others'], str):
                params_no_dict['others'] = kwargs['others']
            else:
                return 'ERROR:: "others" must be a tuple, list or str'
        #------------------------------------------#
        # Every per-file list must have the same length as the file list.
        len_params_dict = []
        for value2 in params_dict.values():
            len_params_dict.append(len(value2))

        if len(list(set(len_params_dict))) > 1:
            return 'ERROR:: All lists must be the same length: %s' % (len(params_dict['name']))
        #------------------------------------------------------------#
        print('"{}" file(s) found >> uploading'.format(len(params_dict['name'])))
        # NOTE(review): self.list is shared instance state and is not cleared
        # here, so results from earlier calls accumulate -- confirm intended.
        for v in range(len(params_dict['name'])):
            try:
                # Merge the v-th per-file values with the shared values.
                send = {}
                for key_dict, value_dict in params_dict.items():
                    send[key_dict] = value_dict[v]
                for key_no_dict, value_no_dict in params_no_dict.items():
                    send[key_no_dict] = value_no_dict

                self.list.append(getattr(self.ckan.action, 'resource_create')(**send))
                print('File #{} :: "{}" was uploaded successfully'.format(v+1, params_dict['name'][v]))
            except:
                # Keep going: record the failure and continue with the next file.
                _, exc_value, _ = sys.exc_info()
                self.list.append(exc_value)
                print('File #{} :: Error uploading "{}" file'.format(v+1, params_dict['name'][v]))
        return self.list
        #------------------------------------------------------------#
452 453
453 454 def show(self, type_option, id, **kwargs):
454 455 '''
455 456 FINALIDAD:
456 457 Funcion personalizada para una busqueda en especifico.
457 458
458 459 PARAMETROS DISPONIBLES:
459 460 CONSULTAR: "GUIA DE SCRIPT.pdf"
460 461
461 462 ESTRUCTURA:
462 463 <access_name>.show(type_option = <class 'str'>, id = <class 'str'>, param_1 = <class 'param_1'>, ...)
463 464 '''
464 465 if type(type_option) is str:
465 466 try:
466 467 if type_option == 'dataset':
467 468 return getattr(self.ckan.action, 'package_show')(id=id, **kwargs)
468 469 elif type_option == 'resource':
469 470 return getattr(self.ckan.action, 'resource_show')(id=id, **kwargs)
470 471 elif type_option == 'project':
471 472 return getattr(self.ckan.action, 'organization_show')(id=id, **kwargs)
472 473 elif type_option == 'collaborator':
473 474 return getattr(self.ckan.action, 'package_collaborator_list_for_user')(id=id, **kwargs)
474 475 elif type_option == 'member':
475 476 return getattr(self.ckan.action, 'organization_list_for_user')(id=id, **kwargs)
476 477 elif type_option == 'vocabulary':
477 478 return getattr(self.ckan.action, 'vocabulary_show')(id=id, **kwargs)
478 479 elif type_option == 'tag':
479 480 if not 'vocabulary_id' in kwargs:
480 481 print('Missing "vocabulary_id" value: assume it is a free tag')
481 482 return getattr(self.ckan.action, 'tag_show')(id=id, **kwargs)
482 483 elif type_option == 'user':
483 484 return getattr(self.ckan.action, 'user_show')(id=id, **kwargs)
484 485 elif type_option == 'job':
485 486 return getattr(self.ckan.action, 'job_show')(id=id, **kwargs)
486 487 else:
487 488 return 'ERROR:: "type_option = %s" is not accepted' % (type_option)
488 489 except:
489 490 _, exc_value, _ = sys.exc_info()
490 491 return exc_value
491 492 else:
492 493 return 'ERROR:: "type_option" must be a str'
493 494
    def search(self, type_option, query=None, **kwargs):
        '''
        PURPOSE:
            Custom search over datasets, resources or tags matching criteria.

        AVAILABLE PARAMETERS:
            SEE: "GUIA DE SCRIPT.pdf"

        STRUCTURE:
            <access_name>.search(type_option = <class 'str'>, query = <class 'dict'>, param_1 = <class 'param_1'>, ...)
        '''
        # NOTE(review): self.dict and self.list are shared instance state and
        # are not cleared here, so filters can persist across calls -- confirm.
        if type(type_option) is str:
            try:
                if type_option == 'dataset':
                    key_replace = ['fq', 'fq_list', 'include_private']
                    key_point = ['facet_mincount', 'facet_limit', 'facet_field']
                    for key1, value1 in kwargs.items():
                        if not key1 in key_replace:
                            # Facet options use Solr's dotted names.
                            if key1 in key_point:
                                self.dict[key1.replace('_', '.')] = value1
                            else:
                                self.dict[key1] = value1

                    if query is not None:
                        if type(query) is dict:
                            # Each query key becomes an fq_list filter clause.
                            self.dict['fq_list'] = []
                            #NUM_RESOURCES_MIN / NUM_RESOURCES_MAX
                            #----------------------------------------------------#
                            # Date fields are validated as exact YYYY-MM-DD strings.
                            if 'dataset_start_date' in query:
                                if type(query['dataset_start_date']) is str:
                                    try:
                                        datetime.strptime(query['dataset_start_date'], '%Y-%m-%d')
                                        if len(query['dataset_start_date']) != 10:
                                            return '"dataset_start_date", must be: <YYYY-MM-DD>'
                                        self.dict['fq_list'].append('dataset_start_date:"'+query['dataset_start_date']+'"')
                                        self.list.append('dataset_start_date')
                                    except:
                                        return '"dataset_start_date" incorrect: "%s"' % (query['dataset_start_date'])
                                else:
                                    return '"dataset_start_date" must be <str>'
                            #----------------------------------------------------#
                            if 'dataset_end_date' in query:
                                if type(query['dataset_end_date']) is str:
                                    try:
                                        datetime.strptime(query['dataset_end_date'], '%Y-%m-%d')
                                        if len(query['dataset_end_date']) != 10:
                                            return '"dataset_end_date", must be: <YYYY-MM-DD>'

                                        if 'dataset_start_date' in query:
                                            if query['dataset_start_date'] > query['dataset_end_date']:
                                                return '"dataset_end_date" must be greater than "dataset_start_date"'

                                        self.dict['fq_list'].append('dataset_end_date:"'+query['dataset_end_date']+'"')
                                        self.list.append('dataset_end_date')
                                    except:
                                        return '"dataset_end_date" incorrect: "%s"' % (query['dataset_end_date'])
                                else:
                                    return '"dataset_end_date" must be <str>'
                            #----------------------------------------------------#
                            # Remaining query keys (not already handled above)
                            # become generic field:"value" filters.
                            for key, value in query.items():
                                if value is not None and not key in self.list:
                                    self.dict['fq_list'].append(str(key)+':"'+str(value)+'"')
                        else:
                            return '"query" must be <dict>'

                    return getattr(self.ckan.action, 'package_search')(include_private=True, **self.dict)

                elif type_option == 'resource':
                    for key1, value1 in kwargs.items():
                        if key1 != 'fields':
                            self.dict[key1] = value1

                    if query is not None:
                        if type(query) is dict:
                            #----------------------------------------------------#
                            if 'file_date_min' in query:
                                if type(query['file_date_min']) is str:
                                    try:
                                        datetime.strptime(query['file_date_min'], '%Y-%m-%d')
                                        if len(query['file_date_min']) != 10:
                                            return '"file_date_min", must be: <YYYY-MM-DD>'
                                    except:
                                        return '"file_date_min" incorrect: "%s"' % (query['file_date_min'])
                                else:
                                    return '"file_date_min" must be <str>'
                            #----------------------------------------------------#
                            if 'file_date_max' in query:
                                if type(query['file_date_max']) is str:
                                    try:
                                        datetime.strptime(query['file_date_max'], '%Y-%m-%d')
                                        if len(query['file_date_max']) != 10:
                                            return '"file_date_max", must be: <YYYY-MM-DD>'

                                        if 'file_date_min' in query:
                                            if query['file_date_min'] > query['file_date_max']:
                                                return '"file_date_max" must be greater than "file_date_min"'
                                    except:
                                        return '"file_date_max" incorrect: "%s"' % (query['file_date_max'])
                                else:
                                    return '"file_date_max" must be <str>'
                            #----------------------------------------------------#
                            self.dict['query'] = query
                        else:
                            return '"query" must be <dict>'
                    return getattr(self.ckan.action, 'resources_search')(**self.dict)

                elif type_option == 'tag':
                    for key1, value1 in kwargs.items():
                        if key1 != 'fields':
                            self.dict[key1] = value1

                    if not 'vocabulary_id' in kwargs:
                        print('Missing "vocabulary_id" value: tags that don’t belong to any vocabulary')
                    else:
                        print('Only tags that belong to "{}" vocabulary'.format(kwargs['vocabulary_id']))

                    if query is not None:
                        if type(query) is dict:
                            if 'search' in query:
                                if type(query['search']) is list or type(query['search']) is str:
                                    self.dict['query'] = query['search']
                                else:
                                    return '"search" must be <list> or <str>'
                        else:
                            return '"query" must be <dict>'
                    return getattr(self.ckan.action, 'tag_search')(**self.dict)

                else:
                    return 'ERROR:: "type_option = %s" is not accepted' % (type_option)

            except:
                # Module convention: return the exception object instead of raising.
                _, exc_value, _ = sys.exc_info()
                return exc_value
        else:
            return 'ERROR:: "type_option" must be <str>'
629 630
def create(self, type_option, select=None, **kwargs):
    '''
    Create a CKAN entity of the requested kind.

    Parameters
    ----------
    type_option : str
        One of: 'dataset', 'project', 'member', 'collaborator',
        'vocabulary', 'tag', 'user', 'views'.
    select : str, optional
        Only used when type_option == 'views': 'resource' or 'dataset'.
    **kwargs
        Forwarded verbatim to the underlying CKAN action.

    Returns
    -------
    The CKAN action response on success, an error string on bad
    arguments, or the raised exception instance (errors are returned,
    never raised).
    '''
    if type(type_option) is not str:
        return 'ERROR:: "type_option" must be <str>'
    # One-to-one mappings from type_option to a CKAN action name.
    simple_actions = {
        'dataset': 'package_create',
        'project': 'organization_create',
        'member': 'organization_member_create',
        'collaborator': 'package_collaborator_create',
        'vocabulary': 'vocabulary_create',
        'tag': 'tag_create',
        'user': 'user_create',
    }
    try:
        if type_option in simple_actions:
            return getattr(self.ckan.action, simple_actions[type_option])(**kwargs)
        if type_option == 'views':
            if select == 'resource':
                # Drop the 'package' argument before delegating.
                self.list = ['package']
                self.dict.update({k: v for k, v in kwargs.items() if k not in self.list})
                return getattr(self.ckan.action, 'resource_create_default_resource_views')(**self.dict)
            if select == 'dataset':
                return getattr(self.ckan.action, 'package_create_default_resource_views')(**kwargs)
            return 'ERROR:: "select = %s" is not accepted' % (select)
        return 'ERROR:: "type_option = %s" is not accepted' % (type_option)
    except:
        _, exc_value, _ = sys.exc_info()
        return exc_value
675 676
def patch(self, type_option, **kwargs):
    '''
    Partially update an existing CKAN entity.

    Parameters
    ----------
    type_option : str
        One of: 'dataset', 'project', 'resource', 'member', 'collaborator'.
    **kwargs
        Forwarded verbatim to the underlying CKAN action.

    Returns
    -------
    The CKAN action response, an error string on bad arguments, or the
    raised exception instance (errors are returned, never raised).
    '''
    if type(type_option) is not str:
        return 'ERROR:: "type_option" must be <str>'
    # NOTE(review): 'member' and 'collaborator' reuse the *_create actions,
    # presumably relying on their upsert semantics in CKAN — confirm.
    action_by_type = {
        'dataset': 'package_patch',
        'project': 'organization_patch',
        'resource': 'resource_patch',
        'member': 'organization_member_create',
        'collaborator': 'package_collaborator_create',
    }
    try:
        if type_option not in action_by_type:
            return 'ERROR:: "type_option = %s" is not accepted' % (type_option)
        return getattr(self.ckan.action, action_by_type[type_option])(**kwargs)
    except:
        _, exc_value, _ = sys.exc_info()
        return exc_value
706 707
def delete(self, type_option, select=None, **kwargs):
    '''
    Delete and/or purge a CKAN entity.

    Parameters
    ----------
    type_option : str
        'dataset' and 'project' additionally require select; 'resource',
        'vocabulary', 'tag' and 'user' are deleted directly.
    select : str, optional
        'delete' (soft) or 'purge' (permanent); only meaningful for
        'dataset' and 'project'.
    **kwargs
        Forwarded verbatim to the underlying CKAN action.

    Returns
    -------
    The CKAN action response, an error string on bad arguments, or the
    raised exception instance (errors are returned, never raised).
    '''
    if type(type_option) is not str:
        return 'ERROR:: "type_option" must be <str>'
    try:
        # Kinds that distinguish soft delete from permanent purge.
        two_step = {
            'dataset': {'delete': 'package_delete', 'purge': 'dataset_purge'},
            'project': {'delete': 'organization_delete', 'purge': 'organization_purge'},
        }
        # Kinds deleted with a single action, no select needed.
        direct = {
            'resource': 'resource_delete',
            'vocabulary': 'vocabulary_delete',
            'tag': 'tag_delete',
            'user': 'user_delete',
        }
        if type_option in two_step:
            if select is None:
                return 'ERROR:: "select" must not be "None"'
            action = two_step[type_option].get(select)
            if action is None:
                return 'ERROR:: "select = %s" is not accepted' % (select)
            return getattr(self.ckan.action, action)(**kwargs)
        if type_option in direct:
            return getattr(self.ckan.action, direct[type_option])(**kwargs)
        return 'ERROR:: "type_option = %s" is not accepted' % (type_option)
    except:
        _, exc_value, _ = sys.exc_info()
        return exc_value
755 756
def f_status_note(self, total, result, path):
    '''
    Write "status_note.txt" in *path*, summarizing which requested files
    were downloaded and which failed.

    Parameters
    ----------
    total : dict
        {'name': [...]} — every file name that was requested.
    result : dict
        {'name': [...]} — file names actually present on disk.
    path : str
        Destination directory, expected to end with the path separator.
    '''
    # Fix: the original opened the file twice ('w' then rebound the same
    # variable with 'a'), leaking the first handle, and never closed the
    # second. A single context-managed 'w' handle has the same effect
    # (truncate, then write) without the leak.
    failed = len(total['name']) - len(result['name'])
    with open(path + 'status_note.txt', 'w') as file_txt:
        file_txt.write('DOWNLOADED FILE(S): "%s"' % (len(result['name'])))
        file_txt.write('' + os.linesep)
        for u in result['name']:
            file_txt.write(' - ' + u + os.linesep)
        file_txt.write('' + os.linesep)

        file_txt.write('FAILED FILE(S): "%s"' % (failed))
        file_txt.write('' + os.linesep)
        if failed != 0:
            for u in total['name']:
                if not u in result['name']:
                    file_txt.write(' - ' + u + os.linesep)
        else:
            file_txt.write(' "None"' + os.linesep)
774 775
def f_name(self, name_dataset, ext, tempdir):
    '''
    Return a file/dir name that does not clash with an existing entry in
    *tempdir*: "name.ext", then "name(1).ext", "name(2).ext", ...

    Relies on (and mutates) instance state: self.check is the loop flag,
    self.cont the probe counter, self.str the chosen name. Callers are
    expected to reset these between uses.
    '''
    while self.check:
        self.str = ''
        if self.cont == 0:
            # First pass: try the bare name.
            if not os.path.exists(tempdir + name_dataset + ext):
                self.check = self.check * 0  # falsify the loop flag
                self.str = name_dataset + ext
            else:
                self.str = name_dataset + '(' + str(self.cont + 1) + ')' + ext
        else:
            # Subsequent passes: probe the numbered variant.
            numbered = name_dataset + '(' + str(self.cont) + ')' + ext
            if not os.path.exists(tempdir + numbered):
                self.check = self.check * 0
                self.str = numbered
        self.cont = self.cont + 1
    return self.str
790 791
def f_zipdir(self, path, ziph, zip_name):
    '''
    Add every file under *path* to the already-open ZipFile *ziph*.

    Archive entries are stored relative to the parent of *path*, so the
    zip keeps the top-level folder name. *zip_name* is only used for the
    progress message.
    '''
    base = os.path.join(path, '..')
    for root, _, files in os.walk(path):
        print('.....')
        print('Creating: "{}" >>'.format(zip_name))
        for filename in tqdm(iterable=files, total=len(files)):
            abs_path = os.path.join(root, filename)
            ziph.write(abs_path, os.path.relpath(abs_path, base))
        print('Created >>')
799 800
def download_by_step(self, response, tempdir_name):
    '''
    Download a single resource into *tempdir_name*.

    Parameters
    ----------
    response : dict
        Resource record; reads 'url' and 'name' (may rewrite 'url' in place).
    tempdir_name : str
        Destination directory, expected to end with the path separator.

    Network errors are deliberately swallowed: the caller later compares
    the files present on disk against the requested names to detect
    failures.
    '''
    try:
        # ---------- REPLACE URL --------- #
        # When the client endpoint is NOT www.igp.gob.pe but the stored
        # resource URL points there, rewrite scheme+host so the download
        # goes through the configured endpoint instead of the public one.
        if urlparse(self.url).netloc != 'www.igp.gob.pe' and urlparse(response['url']).netloc == 'www.igp.gob.pe':
            response['url'] = response['url'].replace(urlparse(response['url']).scheme + '://' + urlparse(response['url']).netloc,
                                                      urlparse(self.url).scheme + '://' + urlparse(self.url).netloc)
        #----------------------------------#
        # Stream the body in chunks (self.chunk_size) to keep memory bounded.
        # Non-200 responses are silently skipped — no file is created.
        with requests.get(response['url'], stream=True, headers={'Authorization': self.Authorization}, verify=self.verify) as resp:
            if resp.status_code == 200:
                with open(tempdir_name+response['name'], 'wb') as file:
                    for chunk in resp.iter_content(chunk_size = self.chunk_size):
                        if chunk:  # filter out keep-alive chunks
                            file.write(chunk)
    except requests.exceptions.RequestException:
        # Best-effort by design: failures surface as missing files.
        pass
815 816
def download_files(self, **kwargs):
    '''
    Download all existing files of a dataset into a local folder,
    optionally zipped, with an optional status summary.

    Local options (consumed here, not forwarded to CKAN):
        zip         (bool) pack the downloaded folder into a .zip (default False)
        status_note (bool) write a status_note.txt summary (default False)
        path        (str)  existing, writable destination dir (default: cwd)
    All remaining kwargs (e.g. id) are forwarded to the 'url_resources'
    action. Returns a status string, or the raised exception instance.
    '''
    dict_local = {}
    #----------------------------------------------#
    if 'zip' in kwargs:
        if type(kwargs['zip']) is not bool:
            return 'ERROR:: "zip" must be: <class "bool">'
        else:
            dict_local['zip'] = kwargs['zip']
    else:
        dict_local['zip'] = False
    #----------------------------------------------#
    if 'status_note' in kwargs:
        if type(kwargs['status_note']) is not bool:
            return 'ERROR:: "status_note" must be: <class "bool">'
        else:
            dict_local['status_note'] = kwargs['status_note']
    else:
        dict_local['status_note'] = False
    #----------------------------------------------#
    if 'path' in kwargs:
        if type(kwargs['path']) is str:
            if os.path.isdir(kwargs['path']) == False:
                return 'ERROR:: "path" does not exist'
            else:
                # Normalize: ensure the path ends with the OS separator.
                if kwargs['path'][-1:] != self.separator:
                    dict_local['path'] = kwargs['path']+self.separator
                else:
                    dict_local['path'] = kwargs['path']

                # Probe write permission by creating and removing a temp file.
                txt = dict_local['path']+datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")+'.txt'
                if int(platform.python_version()[0]) == 3:
                    try:
                        file_txt = open(txt, 'w')
                        file_txt.close()
                        os.remove(txt)
                    except PermissionError:
                        return 'ERROR:: Access denied, you are not authorized to write files: "%s"' % (dict_local['path'])
                else:
                    # Python 2 has no PermissionError; catch everything.
                    try:
                        file_txt = open(txt, 'w')
                        file_txt.close()
                        os.remove(txt)
                    except:
                        return 'ERROR:: Access denied, you are not authorized to write files: "%s"' % (dict_local['path'])
        else:
            return 'ERROR:: "path" must be: <class "str">'
    else:
        dict_local['path'] = ''
    #----------------------------------------------#
    # Everything that is not a local option is forwarded to CKAN.
    for key, value in kwargs.items():
        if not key in dict_local:
            self.dict[key] = value
    try:
        response = getattr(self.ckan.action, 'url_resources')(**self.dict)
    except:
        _, exc_value, _ = sys.exc_info()
        return exc_value

    if len(response) != 0:
        #--------------TEMP PATH---------------#
        if dict_local['zip']:
            # Download into a temp dir first; zip later, then remove it.
            # NOTE(review): assumes kwargs['id'] is present — confirm the
            # 'url_resources' action always requires it.
            tempdir = tempfile.mkdtemp(prefix=kwargs['id']+'-')+self.separator
            os.mkdir(tempdir+kwargs['id'])
            dir_name = tempdir + kwargs['id'] + self.separator
        else:
            # Create a non-clashing folder named after the dataset id.
            dir = self.f_name(kwargs['id'], '', dict_local['path'])
            os.mkdir(dict_local['path'] + dir)
            dir_name = dict_local['path'] + dir + self.separator
        #-----------DOWNLOAD FILES-------------#
        print('.....')
        print('Downloading "{}" file(s) >>'.format(len(response)))
        name_total = {'name': []}
        # Concurrent best-effort downloads; failures show up as files
        # missing from dir_name (see download_by_step).
        with concurrent.futures.ThreadPoolExecutor() as executor:
            for u in tqdm(iterable=response, total=len(response)):
                name_total['name'].append(u['name'])
                executor.submit(self.download_by_step, u, dir_name)
        # Compare what landed on disk against what was requested.
        name_check = {}
        name_check['name'] = [f for f in os.listdir(dir_name) if os.path.isfile(os.path.join(dir_name, f))]
        print('"{}" downloaded file(s) successfully >>'.format(len(name_check['name'])))
        #--------------------------------------#
        if len(name_check['name']) != 0:
            #----------Status Note---------#
            if dict_local['status_note']:
                print('.....')
                print('Creating: "status_note.txt" >>')
                self.f_status_note(name_total, name_check, dir_name)
                print('Created>>')
            #----------ZIP CREATE----------#
            if dict_local['zip']:
                zip_name = self.f_name(kwargs['id'], '.zip', dict_local['path'])
                ziph = zipfile.ZipFile(dict_local['path'] + zip_name, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
                self.f_zipdir(dir_name, ziph, zip_name)
                ziph.close()
                #Delete Temporal Path
                if os.path.exists(tempdir[:-1]):
                    shutil.rmtree(tempdir[:-1])
            #------------------------------#
            print('.....')
            return 'DOWNLOAD FINISHED'
        else:
            #Delete Temporal Path
            if dict_local['zip']:
                if os.path.exists(tempdir[:-1]):
                    shutil.rmtree(tempdir[:-1])
            else:
                if os.path.exists(dir_name[:-1]):
                    shutil.rmtree(dir_name[:-1])
            return 'NO FILES WERE DOWNLOADED'
    else:
        return 'FILES NOT FOUND'
937
def download_files_advance(self, id_or_name, processes=1, path=os.path.expanduser("~"), **kwargs):
    '''
    Advanced download of every file belonging to one or more datasets,
    delegated to the parallel dump machinery (logic_download).

    Parameters
    ----------
    id_or_name : str or list
        Dataset id(s) or name(s) to download.
    processes : int
        Number of parallel worker processes; must be >= 1.
    path : str
        Existing, writable destination directory (default: user home).
    **kwargs
        Extra options forwarded to logic_download.dump_things_change().

    Returns
    -------
    An error string on invalid arguments, otherwise the result of
    logic_download.dump_things_change().
    '''
    #------------------ PATH ----------------------#
    if isinstance(path, str):
        if os.path.isdir(path):
            if not path.endswith(os.sep):
                path = path + os.sep
            # Probe write permission by creating and deleting a temp file.
            test_txt = path + datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")+'.txt'
            try:
                file_txt = open(test_txt, 'w')
                file_txt.close()
                os.remove(test_txt)
            except:
                return 'ERROR:: Access denied, you are not authorized to write files: "%s"' % (path)
        else:
            return 'ERROR:: "path" does not exist'
    else:
        return 'ERROR:: "path" must be: <class "str">'

    #------------------ PROCESSES -----------------#
    # Fix: also reject values < 1 — the original accepted 0 or negative
    # ints, which become int(arguments['--processes']) downstream and
    # produce an empty/broken worker pool.
    if not isinstance(processes, int) or processes < 1:
        return 'ERROR:: "processes" must be: <class "int">'

    #------------------ ID OR NAME ----------------#
    if isinstance(id_or_name, str):
        id_or_name = [id_or_name]
    elif isinstance(id_or_name, list):
        id_or_name = list(map(str, id_or_name))
    else:
        return 'ERROR:: dataset "id_or_name" must be: <class "str" or "list">'
    #----------------------------------------------#
    # docopt-style argument dict expected by ckanapi's CLI dump helpers.
    arguments = {
        '--apikey': self.Authorization,
        '--ckan-user': None,
        '--config': None,
        '--datapackages': path,  # destination for the downloaded files
        '--datastore-fields': False,
        '--get-request': False,
        '--insecure': not self.verify,
        # NOTE(review): machine-specific hard-coded path; appears unused
        # (log handling is commented out in dump_things_change) — confirm
        # before removing.
        '--log': '/home/soporte/DUMP/download.txt',
        '--processes': str(processes),
        '--quiet': False,
        '--remote': self.url,
        '--worker': False,
        'ID_OR_NAME': id_or_name,
        'datasets': True,
        'dump': True,
    }
    return logic_download.dump_things_change(self.ckan, 'datasets', arguments, **kwargs)
@@ -1,12 +1,12
1 1 # encoding: utf-8
2 2 from setuptools import setup
3 3
4 4 setup(
5 5 name = "CKAN_JRO",
6 6 version = "2.9.2.0",
7 7 description = "Data Repository - JRO",
8 8 author = "Edson Ynilupu Mattos",
9 9 author_email = "eynilupu@igp.gob.pe",
10 url = "",
10 url = "http://intranet.igp.gob.pe:8082/DATABASES/ckanext-jro/api-cliente",
11 11 packages = ["CKAN_JRO"]
12 12 ) No newline at end of file
General Comments 0
You need to be logged in to leave comments. Login now