##// END OF EJS Templates
Update restart_reception and Add restart_pedestal
eynilupu -
r443:8c926b45a918
parent child
Show More
@@ -0,0 +1,200
1 from django.core.management.base import BaseCommand
2 from apps.main.models import Experiment
3 from django.shortcuts import get_object_or_404
4 import os, fnmatch
5 import time
6 from datetime import datetime
7 import h5py
8 import numpy as np
9
class Command(BaseCommand):
    """
    Check pedestal acquisition each 10 minutes.
    Example:
        manage.py restart_pedestal
    """
    def handle(self, *args, **options):
        # Delegate to the module-level routine that inspects the running
        # experiment and restarts pedestal acquisition when it looks stalled.
        restart_pedestal(self)
22
def check_experiment():
    """Return True when at least one experiment is running (status == 2)."""
    # exists() issues a cheap EXISTS query instead of fetching and
    # counting every matching row.
    return Experiment.objects.filter(status=2).exists()
28
def pedestal_start(self, id_exp):
    """Start pedestal acquisition for *id_exp* if it is still the running experiment.

    Writes a confirmation message through self.stdout.
    """
    # Only act when the currently running experiment (status == 2) is the
    # same one we were asked to restart — it may have changed meanwhile.
    running = Experiment.objects.filter(status=2).first()
    if running is not None and running.pk == id_exp:
        exp = get_object_or_404(Experiment, pk=id_exp)
        # Restarted acquisitions are tagged with an -R@<timestamp> suffix.
        name = '{}-R@{}'.format(exp.name, datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))
        exp.pedestal.start_device(name_experiment=name)
        self.stdout.write(f'"{exp.name}" experiment: Pedestal acquisition was restarted')
41
def pedestal_stop(self, id_exp):
    """Stop pedestal acquisition for *id_exp* if it is still the running experiment.

    Writes a confirmation message through self.stdout.
    """
    # Only act when the currently running experiment (status == 2) is the
    # same one we were asked to stop — it may have changed meanwhile.
    running = Experiment.objects.filter(status=2).first()
    if running is not None and running.pk == id_exp:
        exp = get_object_or_404(Experiment, pk=id_exp)
        exp.pedestal.stop_device()
        self.stdout.write(f'"{exp.name}" experiment: Pedestal acquisition "{exp.name}" was stopped')
53
def hdf5_list_content(get_file):
    """Return the dataset contents as a Python list when it is a non-empty
    1-D table; otherwise return False.

    *get_file* is any array-like with .ndim and .shape (h5py dataset or
    numpy array).
    """
    # Check dimensionality before touching shape[0]: a scalar (0-dim)
    # dataset has shape == (), so shape[0] would raise IndexError.
    if get_file.ndim == 1 and get_file.shape[0] >= 1:
        # tolist() converts elements to native Python floats.
        return np.array(get_file).tolist()
    return False
64
def hdf5_read(file):
    """Read the last 10 elevation/azimuth speed samples from an HDF5 file.

    Returns {"ele_speed": ..., "azi_speed": ...} where each value is a list
    of up to 10 floats, or False when the dataset is absent or not a
    non-empty 1-D table.
    """
    dataspeed = {"ele_speed": False, "azi_speed": False}

    # Open the file once for both datasets instead of once per key.
    with h5py.File(file, 'r') as hdf:
        for key in dataspeed:
            get = hdf.get('Data'+'/'+key)
            if get is not None:
                content = hdf5_list_content(get)
                # hdf5_list_content returns False for non-1-D/empty data;
                # slicing False would raise TypeError, so guard first.
                if content:
                    # Keep only the 10 most recent samples.
                    dataspeed[key] = content[-10:]

    return dataspeed
76
def count_data(last_position):
    """Count 'pos@*.h5' files in *last_position* and pick a stable file to read.

    Returns (count, path): *count* is the number of matching files and
    *path* is the second-to-last match in sorted order (the newest file may
    still be being written), or False when fewer than two matches exist.
    """
    pattern = "pos@*.h5"

    matches = [os.path.join(last_position, entry)
               for entry in sorted(os.listdir(last_position))
               if fnmatch.fnmatch(entry, pattern)]

    # Prefer the second-to-last file: the most recent one may be incomplete.
    list_data = matches[-2] if len(matches) > 1 else False

    return len(matches), list_data
95
def response_data(datadir, old_path_datetime, old_position, new_position):
    """Sample the pos@*.h5 file count in the newest position directory twice,
    ~65 seconds apart, to detect whether new files are being produced.

    Returns (path_position, path_datetime, old_position, new_position,
    read_files): path_position is False when <datadir>/position does not
    exist; read_files is the second-to-last pos@*.h5 path from count_data
    (or False).
    """
    path_position = True
    path_datetime = False
    read_files = False

    rootdir = os.path.join(datadir, 'position')
    if os.path.isdir(rootdir):
        # Newest hourly subdirectory (named %Y-%m-%dT%H-00-00), or False.
        path_datetime = path_data(os.path.join(datadir, 'position'))

    if path_datetime:
        if not old_path_datetime or path_datetime != old_path_datetime:
            # First sample, wait for the writer to produce new files,
            # then sample again so the caller can compare counts.
            old_position, read_files = count_data(path_datetime)
            time.sleep(65)
            new_position, read_files = count_data(path_datetime)
        else:
            # Same directory as the previous call: just wait, the caller's
            # earlier counts remain the baseline for comparison.
            time.sleep(65)
    else:
        path_position = False

    return path_position, path_datetime, old_position, new_position, read_files
116
def path_data(rootdir):
    """Return the newest subdirectory of *rootdir* named like %Y-%m-%dT%H-00-00.

    Subdirectories whose basename does not parse with that format are
    ignored. Returns False when no matching subdirectory exists.
    """
    list_ = []
    for it in os.scandir(rootdir):
        if it.is_dir():
            try:
                # Keep only directories named with the hourly datetime format.
                datetime.strptime(os.path.basename(it.path), "%Y-%m-%dT%H-00-00")
                list_.append(it.path)
            except ValueError:
                pass

    # The fixed-width datetime names sort lexicographically in chronological
    # order, so the first entry after a reverse sort is the newest.
    list_ = sorted(list_, reverse=True)
    return list_[0] if list_ else False
132
def check_count(datadir):
    """Decide whether pedestal data is being produced under *datadir*.

    Samples the newest position directory (up to two rounds via
    response_data) and inspects the last recorded axis speeds.

    Returns (path_position, path_datetime, validation, speed):
    validation is True when the file count grew between samples; speed
    flags each axis whose recent samples exceed 0.1.
    """
    old_numbers = 0
    new_numbers = 0
    validation = False
    path_datetime = False
    speed = {"ele_speed": False, "azi_speed": False}

    path_position, path_datetime, old_numbers, new_numbers, read_files = response_data(datadir, path_datetime, old_numbers, new_numbers)

    for u in range(2):
        if new_numbers > old_numbers:
            # New files appeared between the two samples: acquisition is
            # writing data, so check whether the pedestal is actually moving.
            validation = True

            data = hdf5_read(read_files)
            for key, value in data.items():
                try:
                    # Axis counts as moving when any of its last samples
                    # exceeds 0.1 (units not shown here — TODO confirm).
                    if not max(data[key]) <= 0.1:
                        speed[key] = True
                except:
                    # data[key] may be False (dataset missing/unreadable);
                    # max() then raises and the axis stays flagged False.
                    pass
            break
        else:
            if u < 1:
                # One retry: sample the directory again before giving up.
                path_position, path_datetime, old_numbers, new_numbers, read_files = response_data(datadir, path_datetime, old_numbers, new_numbers)

    return path_position, path_datetime, validation, speed
159
def restart_pedestal(self):
    """Check pedestal data production for the running experiment and restart
    pedestal acquisition when no new files, or no axis movement, is seen.

    Progress and diagnostic messages are written through self.stdout.
    """
    if check_experiment():

        all_status = Experiment.objects.filter(status=2)
        id_exp = all_status[0].pk
        datadir_exp = all_status[0].reception_rx.datadir
        # Map the NAS path as seen on the host to the container mount point,
        # and drop the rawdata suffix to reach the experiment root.
        datadir_exp = datadir_exp.replace(os.environ.get('EXPOSE_NAS', '/DATA_RM/DATA'), '/data')
        datadir_exp = datadir_exp.replace('/rawdata', '')

        path_position, path_datetime, validation, speed = check_count(datadir_exp)
        if path_position:
            # Execute the process
            if validation:
                # New position files appeared: acquisition itself is alive.
                self.stdout.write(f'Acquisition pedestal is running')
                if speed['ele_speed'] or speed['azi_speed']:
                    self.stdout.write(f'Pedestal speeds on Azimuth and Elevation are running')
                else:
                    # Files exist but neither axis shows speed above 0.1:
                    # the pedestal looks stalled, so cycle it.
                    for key, value in speed.items():
                        if not value:
                            self.stdout.write(f'Speed on {key} is <= 0.1, retry')

                    pedestal_stop(self, id_exp)
                    time.sleep(14)
                    #pedestal_reset(self, id_exp)
                    #time.sleep(2)
                    pedestal_start(self, id_exp)

            else:
                # No file increment was observed; report why, then cycle.
                if not path_datetime:
                    self.stdout.write(f'No such directory with datetime format "%Y-%m-%dT%H-00-00", retry!')
                else:
                    self.stdout.write(f'No file increment, retry')

                pedestal_stop(self, id_exp)
                time.sleep(14)
                #pedestal_reset(self, id_exp)
                #time.sleep(2)
                pedestal_start(self, id_exp)

        else:
            # <datadir>/position itself is missing — nothing to check against.
            self.stdout.write(f'No such directory: position, fail!')
@@ -1,38 +1,39
1 1 #General settings
2 TZ=America/Lima
2 3 LC_ALL=C.UTF-8
3 4 SIRM_SITE=<SIRM SITE>
4 5 PROC_SITE=<PROC SITE>
5 6 CAM_SITE=<CAM SITE>
6 7 SCHAIN_SITE=<SCHAIN SITE>
7 8 GENERAL_PORT=<GENERAL PORT>
8 9 BROKER_URL=<BROKER SITE>
9 10 SOPHY_TOPIC=<SOPHY TOPIC>
10 11 TXA_SITE=<IP TXA>
11 12 TXB_SITE=<IP TXB>
12 13 SIRM_MAX_UPLOAD_SIZE_MB=<SIZE MB>
13 14
14 15 #Pedestal - az offset
15 16 AZ_OFFSET=<AZ OFFSET>
16 17
17 18 #Postgres settings
18 19 POSTGRES_PORT_5432_TCP_ADDR=sirm-postgres
19 20 POSTGRES_PORT_5432_TCP_PORT=5432
20 21 DB_NAME=radarsys
21 22 DB_USER=docker
22 23 DB_PASSWORD=docker
23 24 PGDATA=/var/lib/postgresql/data
24 25
25 26 #Volumes - path
26 27 EXPOSE_SIRM=./volumes/sirm
27 28 EXPOSE_PROC=./volumes/proc
28 EXPOSE_SCHAIN=./volumes/schain
29 29 EXPOSE_CAM=/path/to/cam
30 EXPOSE_SCHAIN=./volumes/schain
30 31 EXPOSE_NAS=/path/to/nas_data
31 32 EXPOSE_PGDATA=/path/to/pg_data
32 33 EXPOSE_CERTS=/path/to/certs
33 34 EXPOSE_DHPARAM=/path/to/dhparam
34 35
35 36 #Superuser settings
36 SIRM_USER=admin
37 SIRM_PASSWORD=soporte
38 SIRM_EMAIL=admin@igp.gob.pe No newline at end of file
37 SIRM_USER=*****
38 SIRM_PASSWORD=*******
39 SIRM_EMAIL=*****@igp.gob.pe No newline at end of file
@@ -1,269 +1,272
1 1 # docker-compose up -d --build
2 2 version: '3'
3 3
4 4 volumes:
5 5 sirm_web:
6 6 name: sirm_web
7 7 driver: local
8 8 driver_opts:
9 9 type: "none"
10 10 o: "bind"
11 11 device: "${EXPOSE_SIRM}"
12 12 sirm_pgdata:
13 13 name: sirm_pgdata
14 14 driver: local
15 15 driver_opts:
16 16 type: "none"
17 17 o: "bind"
18 18 device: "${EXPOSE_PGDATA}"
19 19 sirm_certs:
20 20 name: sirm_certs
21 21 driver: local
22 22 driver_opts:
23 23 type: "none"
24 24 o: "bind"
25 25 device: "${EXPOSE_CERTS}"
26 26 sirm_dhparam:
27 27 name: sirm_dhparam
28 28 driver: local
29 29 driver_opts:
30 30 type: "none"
31 31 o: "bind"
32 32 device: "${EXPOSE_DHPARAM}"
33 33 sirm_proc:
34 34 name: sirm_proc
35 35 driver: local
36 36 driver_opts:
37 37 type: "none"
38 38 o: "bind"
39 39 device: "${EXPOSE_PROC}"
40 40 sirm_nas:
41 41 name: sirm_nas
42 42 driver: local
43 43 driver_opts:
44 44 type: "none"
45 45 o: "bind"
46 46 device: "${EXPOSE_NAS}"
47 47 sirm_cam:
48 48 name: sirm_cam
49 49 driver: local
50 50 driver_opts:
51 51 type: "none"
52 52 o: "bind"
53 53 device: "${EXPOSE_CAM}"
54 54 sirm_schain:
55 55 name: sirm_schain
56 56 driver: local
57 57 driver_opts:
58 58 type: "none"
59 59 o: "bind"
60 60 device: "${EXPOSE_SCHAIN}"
61 61
62 62 services:
63 63 sirm-nginx-proxy:
64 64 container_name: sirm-nginx-proxy
65 65 restart: always
66 66 build:
67 67 context: ./images/
68 68 dockerfile: nginx-proxy/Dockerfile
69 69 args:
70 70 - SIRM_MAX_UPLOAD_SIZE_MB=${SIRM_MAX_UPLOAD_SIZE_MB}
71 71 depends_on:
72 72 - sirm-web
73 73 networks:
74 74 - frontend_sirm
75 75 - backend_sirm
76 76 ports:
77 77 - 0.0.0.0:${GENERAL_PORT}:80
78 78 volumes:
79 79 - /var/run/docker.sock:/tmp/docker.sock:ro
80 80 - sirm_certs:/etc/nginx/certs:ro
81 81 - sirm_dhparam:/etc/nginx/dhparam
82 82 logging:
83 83 driver: "json-file"
84 84 options:
85 85 max-size: "12m"
86 86
87 87 sirm-web:
88 88 container_name: 'sirm-web'
89 89 restart: always
90 90 build:
91 91 context: .
92 92 environment:
93 93 - LC_ALL=${LC_ALL}
94 94 - DB_USER=${DB_USER}
95 95 - DB_NAME=${DB_NAME}
96 96 - DB_PASSWORD=${DB_PASSWORD}
97 97 - POSTGRES_PORT_5432_TCP_ADDR=${POSTGRES_PORT_5432_TCP_ADDR}
98 98 - POSTGRES_PORT_5432_TCP_PORT=${POSTGRES_PORT_5432_TCP_PORT}
99 99 - EXPOSE_NAS=${EXPOSE_NAS}
100 100 - PROC_SITE=${PROC_SITE}
101 - SCHAIN_SITE=${SCHAIN_SITE}
101 102 - SIRM_USER=${SIRM_USER}
102 103 - SIRM_PASSWORD=${SIRM_PASSWORD}
103 104 - SIRM_EMAIL=${SIRM_EMAIL}
104 105 - AZ_OFFSET=${AZ_OFFSET}
105 106 - VIRTUAL_HOST=${SIRM_SITE}
106 107 volumes:
107 108 - 'sirm_web:/workspace/sirm'
108 109 - 'sirm_nas:/data'
109 110 depends_on:
110 111 - sirm-postgres
111 112 networks:
112 113 - frontend_sirm
113 114 - backend_sirm
114 115 labels:
115 116 ofelia.enabled: "true"
116 ofelia.job-exec.adq-exp.schedule: "@every 10s"
117 ofelia.job-exec.adq-exp.command: "python manage.py adq_exp"
118 ofelia.job-exec.restart-exp.schedule: "0 0 0/17 ? * *"
119 ofelia.job-exec.restart-exp.command: "python manage.py restart_exp"
117 ofelia.job-exec.restart-reception.schedule: "0 1/5 * * * *"
118 ofelia.job-exec.restart-reception.command: "python manage.py restart_reception"
119 ofelia.job-exec.restart-pedestal.schedule: "0 2/10 * * * *"
120 ofelia.job-exec.restart-pedestal.command: "python manage.py restart_pedestal"
121 ofelia.job-exec.restart-experiment.schedule: "0 0 5 * * *"
122 ofelia.job-exec.restart-experiment.command: "python manage.py restart_experiment"
120 123 logging:
121 124 driver: "json-file"
122 125 options:
123 126 max-size: "12m"
124 127
125 128 sirm-job:
126 129 container_name: 'sirm-job'
127 130 image: mcuadros/ofelia:latest
128 131 depends_on:
129 132 - sirm-web
130 133 networks:
131 134 - frontend_sirm
132 135 - backend_sirm
133 136 command: daemon --docker
134 137 volumes:
135 138 - /var/run/docker.sock:/var/run/docker.sock:ro
136 139 logging:
137 140 driver: "json-file"
138 141 options:
139 142 max-size: "12m"
140 143
141 144 sirm-postgres:
142 145 container_name: 'sirm-postgres'
143 146 restart: always
144 147 build:
145 148 context: ./images/
146 149 dockerfile: postgres/Dockerfile
147 150 args:
148 151 - PGDATA=${PGDATA}
149 152 environment:
150 153 - LC_ALL=${LC_ALL}
151 154 - DB_USER=${DB_USER}
152 155 - DB_NAME=${DB_NAME}
153 156 - DB_PASSWORD=${DB_PASSWORD}
154 157 - POSTGRES_PORT_5432_TCP_ADDR=${POSTGRES_PORT_5432_TCP_ADDR}
155 158 - POSTGRES_PORT_5432_TCP_PORT=${POSTGRES_PORT_5432_TCP_PORT}
156 159 volumes:
157 160 - sirm_pgdata:/var/lib/postgresql/data
158 161 networks:
159 162 - backend_sirm
160 163 logging:
161 164 driver: "json-file"
162 165 options:
163 166 max-size: "12m"
164 167
165 168 sirm-proc:
166 169 container_name: 'sirm-proc'
167 170 restart: always
168 171 build:
169 172 context: ./volumes/proc/
170 173 environment:
171 174 - BROKER_URL=${BROKER_URL}
172 175 - SOPHY_TOPIC=${SOPHY_TOPIC}
173 176 - TXA_SITE=${TXA_SITE}
174 177 - TXB_SITE=${TXB_SITE}
175 178 - SCHAIN_SITE=${SCHAIN_SITE}
176 179 - VIRTUAL_HOST=${PROC_SITE}
177 180 volumes:
178 181 - 'sirm_proc:/app'
179 182 - 'sirm_nas:/data'
180 183 networks:
181 184 - frontend_sirm
182 185 logging:
183 186 driver: "json-file"
184 187 options:
185 188 max-size: "12m"
186 189
187 190 sirm-monitor:
188 191 container_name: 'sirm-monitor'
189 192 restart: always
190 193 image: 'sirm_sirm-proc'
191 194 command: ["python", "monitor.py"]
192 195 environment:
193 196 - BROKER_URL=${BROKER_URL}
194 197 - TXA_SITE=${TXA_SITE}
195 198 - TXB_SITE=${TXB_SITE}
196 199 volumes:
197 200 - 'sirm_proc:/app'
198 201 - 'sirm_nas:/data'
199 202 networks:
200 203 - frontend_sirm
201 204 depends_on:
202 205 - sirm-proc
203 206 logging:
204 207 driver: "json-file"
205 208 options:
206 209 max-size: "12m"
207 210
208 211 sirm-acq:
209 212 container_name: 'sirm-acq'
210 213 restart: always
211 214 image: 'sirm_sirm-proc'
212 215 command: ["python", "acq.py"]
213 216 environment:
214 217 - BROKER_URL=${BROKER_URL}
215 218 - TXA_SITE=${TXA_SITE}
216 219 - TXB_SITE=${TXB_SITE}
217 220 - PROC_SITE=${PROC_SITE}
218 221 volumes:
219 222 - 'sirm_proc:/app'
220 223 - 'sirm_nas:/data'
221 224 networks:
222 225 - frontend_sirm
223 226 depends_on:
224 227 - sirm-proc
225 228 logging:
226 229 driver: "json-file"
227 230 options:
228 231 max-size: "12m"
229 232
230 233 sirm-cam:
231 234 container_name: 'sirm-cam'
232 235 image: bkjaya1952/ivms4200-v2.8.2.2_ml-linux
233 236 restart: always
234 237 environment:
235 238 - VIRTUAL_HOST=${CAM_SITE}
236 239 volumes:
237 240 - 'sirm_cam:/root/.wine/drive_c/iVMS-4200'
238 241 networks:
239 242 - frontend_sirm
240 243 logging:
241 244 driver: "json-file"
242 245 options:
243 246 max-size: "12m"
244 247
245 248 sirm-schain:
246 249 container_name: 'sirm-schain'
247 250 restart: always
248 251 build:
249 252 context: ./volumes/schain/
250 253 environment:
251 254 - BROKER_URL=${BROKER_URL}
252 255 - BACKEND=Agg
253 256 - TZ=${TZ}
254 257 - VIRTUAL_HOST=${SCHAIN_SITE}
255 258 volumes:
256 259 - 'sirm_nas:/data'
257 260 - 'sirm_schain:/app'
258 261 networks:
259 262 - frontend_sirm
260 263 logging:
261 264 driver: "json-file"
262 265 options:
263 266 max-size: "12m"
264 267
265 268 networks:
266 269 frontend_sirm:
267 270 name: frontend_sirm
268 271 backend_sirm:
269 272 name: backend_sirm No newline at end of file
@@ -1,15 +1,16
1 1 Django==4.0.3
2 2 django-bootstrap4==22.1
3 3 psycopg2-binary==2.9.3
4 4 django-polymorphic==3.1.0
5 5 gunicorn==20.1.0
6 6 requests==2.27.1
7 7 backports.zoneinfo==0.2.1
8 8 asgiref==3.5.0
9 9 sqlparse==0.4.2
10 10 beautifulsoup4==4.10.0
11 11 idna==3.3
12 12 urllib3==1.26.9
13 13 charset-normalizer==2.0.12
14 14 certifi==2021.10.8
15 soupsieve==2.3.1 No newline at end of file
15 soupsieve==2.3.1
16 h5py==3.7.0 No newline at end of file
@@ -1,43 +1,43
1 1 from django.core.management.base import BaseCommand
2 2 from apps.main.models import Experiment
3 3 from django.shortcuts import get_object_or_404
4 4 import time
5 5
6 6 class Command(BaseCommand):
7 7 """
8 Restart experiment each 06 hours.
8 Restart experiment every night at 05:00 am.
9 9 Example:
10 manage.py restart_exp --pk=1
10 manage.py restart_experiment
11 11 """
12 12 def handle(self, *args, **options):
13 13 restart_experiment(self)
14 14
def check_experiment():
    """Return True when at least one experiment is running (status == 2)."""
    # exists() issues a cheap EXISTS query instead of fetching and
    # counting every matching row.
    return Experiment.objects.filter(status=2).exists()
20 20
def experiment_start(self, id_exp):
    """Start experiment *id_exp* again if it was stopped for a restart."""
    experiment = get_object_or_404(Experiment, pk=id_exp)
    # Only experiments that are not currently running and that were
    # stopped in restart mode ('res') are brought back up.
    if experiment.status == 2 or experiment.mode_stop != 'res':
        return
    experiment.status = experiment.start()
    experiment.save()
    self.stdout.write(f'Experiment "{experiment.name}" was restarted')
27 27
def experiment_stop(self, id_exp):
    """Stop experiment *id_exp* and flag it as stopped-for-restart."""
    exp = get_object_or_404(Experiment, pk=id_exp)
    # Stop only experiments in an active-like state (2, 4 or 5).
    if exp.status in (2, 4, 5):
        exp.status = exp.stop()
        # 'res' marks the stop as restart-initiated so experiment_start
        # knows it is allowed to bring the experiment back up.
        exp.mode_stop = 'res'
        exp.save()
        self.stdout.write(f'Experiment "{exp.name}" was stopped')
35 35
def restart_experiment(self):
    """Stop the running experiment, wait 15 s, then start it again."""
    if not check_experiment():
        return
    running = Experiment.objects.filter(status=2)
    experiment_id = running[0].pk

    experiment_stop(self, experiment_id)
    # Give the devices time to settle before starting again.
    time.sleep(15)
    experiment_start(self, experiment_id)
@@ -1,108 +1,153
1 1 from django.core.management.base import BaseCommand
2 2 from apps.main.models import Experiment
3 3 from django.shortcuts import get_object_or_404
4 4 import os, fnmatch
5 5 import time
6 6 from datetime import datetime
7 import requests
7 8
8 9 class Command(BaseCommand):
9 10 """
10 Check data acquisition each 10 seconds.
11 Check data acquisition each 05 minutes.
11 12 Example:
12 manage.py adq_exp
13 manage.py restart_reception
13 14 """
14 15 def handle(self, *args, **options):
15 restart_experiment(self)
16 #start = time.time()
17 time.sleep(15)
18 restart_acquisition(self)
19 #end = time.time()
20 #self.stdout.write(f'TIME: "{end - start}"')
16 21
def check_experiment():
    """Return True when at least one experiment is running (status == 2)."""
    # exists() issues a cheap EXISTS query instead of fetching and
    # counting every matching row.
    return Experiment.objects.filter(status=2).exists()
22 27
def acquisition_start(self, id_exp):
    """Restart RX data acquisition for *id_exp* and bounce the schain service.

    Only acts when *id_exp* is still the running experiment (status == 2).
    """
    all_status = Experiment.objects.filter(status=2)
    check_id = False

    if len(all_status) > 0:
        check_id = all_status[0].pk

    # Guard: the running experiment may have changed since the caller
    # captured id_exp.
    if check_id and check_id == id_exp:
        exp = get_object_or_404(Experiment, pk=id_exp)
        # Restarted acquisitions are tagged with an -R@<timestamp> suffix.
        name = '{}-R@{}'.format(exp.name, datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))
        exp.reception_rx.start_device(name_experiment = name, restart = True)
        self.stdout.write(f'"{exp.name}" experiment: Data acquisition was restarted')
        self.stdout.write(f'Restarting schain...')

        # Stop/start the schain processing service over HTTP so it picks up
        # the restarted acquisition. NOTE(review): the responses are never
        # checked — presumably best-effort; confirm failures are acceptable.
        r = requests.get('http://'+os.environ.get('SCHAIN_SITE', 'sophy-schain')+'/stop')
        time.sleep(1)
        r = requests.post('http://'+os.environ.get('SCHAIN_SITE', 'sophy-schain')+'/start', json={'name': exp.name})
35 45
def acquisition_stop(self, id_exp):
    """Stop RX data acquisition for *id_exp* if it is still the running experiment.

    Writes a confirmation message through self.stdout.
    """
    # Only act when the currently running experiment (status == 2) is the
    # same one we were asked to stop — it may have changed meanwhile.
    running = Experiment.objects.filter(status=2).first()
    if running is not None and running.pk == id_exp:
        exp = get_object_or_404(Experiment, pk=id_exp)
        exp.reception_rx.stop_device()
        self.stdout.write(f'"{exp.name}" experiment: Data acquisition "{exp.name}" was stopped')
47 57
48 def count_data(datadir):
58 def count_data(last_channel):
49 59 pattern = "rf@*.h5"
50 rawdata = {'ch0': 0, 'ch1': 0}
60 count = 0
61 list_channel = os.listdir(last_channel)
62
63 for entry in sorted(list_channel):
64 if fnmatch.fnmatch(entry, pattern):
65 count = count + 1
66 return count
51 67
52 for key, value in rawdata.items():
53 last_channel = path_data(os.path.join(datadir, key))
54 if last_channel:
55 list_channel = os.listdir(last_channel)
56 for entry in sorted(list_channel):
57 if fnmatch.fnmatch(entry, pattern):
58 rawdata[key] = rawdata[key] + 1
59 return rawdata
def response_data(datadir, old_channel, old_rawdata, new_rawdata, search):
    """Sample the rf@*.h5 file count for each requested channel twice,
    1 second apart, to detect whether new files are being produced.

    *search* lists the channel keys ('ch0'/'ch1') to actually sample.
    Returns (path_channels, channel, old_rawdata, new_rawdata):
    path_channels[key] is False when <datadir>/<key> does not exist;
    channel[key] is the newest hourly subdirectory path (or False).
    """
    path_channels = {'ch0': True, 'ch1': True}
    channel = {'ch0': False, 'ch1': False}

    for key, value in path_channels.items():
        rootdir = os.path.join(datadir, key)
        if os.path.isdir(rootdir):
            # Newest hourly subdirectory (named %Y-%m-%dT%H-00-00), or False.
            channel[key] = path_data(os.path.join(datadir, key))
            if key in search:
                if channel[key]:
                    if not old_channel[key] or channel[key] != old_channel[key]:
                        # Two samples 1 s apart so the caller can compare counts.
                        old_rawdata[key] = count_data(channel[key])
                        time.sleep(1)
                        new_rawdata[key] = count_data(channel[key])
                    else:
                        # Same directory as before: just wait; the caller's
                        # previous counts remain the baseline.
                        time.sleep(1)
        else:
            path_channels[key] = False

    return path_channels, channel, old_rawdata, new_rawdata
60 88
def path_data(rootdir):
    """Return the newest subdirectory of *rootdir* named like %Y-%m-%dT%H-00-00.

    Subdirectories whose basename does not parse with that format are
    ignored. Returns False when no matching subdirectory exists.
    """
    list_ = []
    for it in os.scandir(rootdir):
        if it.is_dir():
            try:
                # Keep only directories named with the hourly datetime format.
                datetime.strptime(os.path.basename(it.path), "%Y-%m-%dT%H-00-00")
                list_.append(it.path)
            except ValueError:
                pass

    # The fixed-width datetime names sort lexicographically in chronological
    # order, so the first entry after a reverse sort is the newest.
    list_ = sorted(list_, reverse=True)
    return list_[0] if list_ else False
76 104
77 def check_count(datadir, old_numbers):
78 diff = {}
79 numbers = count_data(datadir)
105 def check_count(datadir):
106 old_numbers = {'ch0': 0, 'ch1': 0}
107 new_numbers = {'ch0': 0, 'ch1': 0}
108 validation = {'ch0': False, 'ch1': False}
109 channel = {'ch0': False, 'ch1': False}
80 110
81 for key, value in numbers.items():
82 if old_numbers[key] > numbers[key]:
83 old_numbers[key] = 0
84 diff[key] = numbers[key] - old_numbers[key]
85 return numbers, diff
111 path_channels, channel, old_numbers, new_numbers = response_data(datadir, channel, old_numbers, new_numbers, ['ch0', 'ch1'])
86 112
87 def restart_experiment(self):
88 old_numbers={'ch0': 0, 'ch1': 0}
89 for count in range(5):
90 time.sleep(1)
91 if check_experiment():
92 all_status = Experiment.objects.filter(status=2)
93 id_exp = all_status[0].pk
94 datadir_exp = all_status[0].reception_rx.datadir
95 datadir_exp = datadir_exp.replace(os.environ.get('EXPOSE_NAS', '/DATA_RM/DATA'), '/data')
96
97 old_numbers, diff = check_count(datadir_exp, old_numbers)
98 if diff['ch0'] > 0 and diff['ch1'] > 0:
99 self.stdout.write(f'Data acquisition is running')
113 for key, value in validation.items():
114 for _ in range(5):
115 if new_numbers[key] > old_numbers[key]:
116 validation[key] = True
117 break
100 118 else:
101 count = count + 1
102 if count == 5:
103 acquisition_stop(self, id_exp)
104 time.sleep(1)
105 acquisition_start(self, id_exp)
106 old_numbers={'ch0': 0, 'ch1': 0}
119 path_channels, channel, old_numbers, new_numbers = response_data(datadir, channel, old_numbers, new_numbers, [key])
120
121 return path_channels, channel, validation
122
123 def restart_acquisition(self):
124 if check_experiment():
125 all_status = Experiment.objects.filter(status=2)
126 id_exp = all_status[0].pk
127 datadir_exp = all_status[0].reception_rx.datadir
128 datadir_exp = datadir_exp.replace(os.environ.get('EXPOSE_NAS', '/DATA_RM/DATA'), '/data')
129
130 path_channels, channel, validation = check_count(datadir_exp)
131
132 if path_channels['ch0'] and path_channels['ch1']:
133 # Execute the process
134 if validation['ch0'] and validation['ch1']:
135 self.stdout.write(f'Data acquisition is running')
136 else:
137 if not channel['ch0'] or not channel['ch1']:
138 for key, value in channel.items():
139 if not value:
140 self.stdout.write(f'No such directory with datetime format "%Y-%m-%dT%H-00-00": channel["{key}"], retry!')
107 141 else:
108 self.stdout.write(f'An error ocurred while trying to read data acquisition, Retry!') No newline at end of file
142 for key, value in validation.items():
143 if not value:
144 self.stdout.write(f'No file increment: channel["{key}"]')
145
146 acquisition_stop(self, id_exp)
147 time.sleep(3)
148 acquisition_start(self, id_exp)
149
150 else:
151 for key, value in path_channels.items():
152 if not value:
153 self.stdout.write(f'No such directory: channel["{key}"], fail!') No newline at end of file
General Comments 0
You need to be logged in to leave comments. Login now