##// END OF EJS Templates
Implementation of voltage simulation for estimating correlations (some things still need to be fixed)
yamaro -
r25:26
parent child
Show More
@@ -1,527 +1,572
1 1 #!/usr/bin/env python
2 2
3 3 #----------------------------------------------------------
4 4 # Original MATLAB code developed by Brian Harding
5 5 # Rewritten in Python by Yolian Amaro
6 6 # Python version 2.7
7 7 # May 15, 2014
8 8 # Jicamarca Radio Observatory
9 9 #----------------------------------------------------------
10 10
11 11 import time
12 12 import matplotlib.pyplot as plt
13 13 from scipy.optimize import root
14 from scipy.optimize import newton_krylov
14 15 from scipy.stats import nanmean
15 16
16 17 from y_hysell96 import *
17 18 from deb4_basis import *
18 19 from modelf import *
19 20 from irls_dn2 import *
20 21
21 22
22 23 #-------------------------------------------------------------------------------------------------
23 24 # Set parameters
24 25 #-------------------------------------------------------------------------------------------------
25 26
26 27 ## Calculate Forward Model
27 28 lambda1 = 6.0
28 29 k = 2*np.pi/lambda1
29 30
30 31 ## Magnetic Declination
31 32 dec = -1.24
32 33
33 34 ## Loads Jicamarca antenna positions
34 35 antpos = np.loadtxt("antpos.txt", comments="#", delimiter=";", unpack=False)
35 36
36 37 ## rx and ry -- for plotting purposes
37 38 rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
38 39 ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )
39 40
40 41 ## Plot of antenna positions
41 42 plt.figure(1)
42 43 plt.plot(rx, ry, 'ro')
43 44 plt.title("Jicamarca Antenna Positions")
44 45 plt.xlabel("rx")
45 46 plt.ylabel("ry")
46 47 plt.grid(True)
47 48 plt.plot(rx, rx, 'b-')
48 49 plt.draw()
49 50
50 51 ## Jicamarca is nominally at a 45 degree angle
51 52 theta = 45 - dec;
52 53
53 54 ## Rotation matrix from antenna coord to magnetic coord (East North) -> 45 degrees
54 55 theta_rad = np.radians(theta) # trig functions take radians as argument
55 56 val1 = float( np.cos(theta_rad) )
56 val2 = float( np.sin(theta_rad) )
57 val3 = float( -1*np.sin(theta_rad))
57 val2 = float( -1*np.sin(theta_rad))
58 val3 = float( np.sin(theta_rad) )
58 59 val4 = float( np.cos(theta_rad) )
59 60
60 61 # Rotation matrix from antenna coord to magnetic coord (East North)
61 R = np.array( [[val1, val3], [val2, val4]] )
62 R = np.array( [[val1, val2], [val3, val4]] )
62 63
63 64 # Rotate antenna positions to magnetic coord.
64 65 AR = np.dot(R.T, antpos)
65 66
66 plt.plot(AR[0], 0*AR[1], 'yo')
67 plt.title("Jicamarca Antenna Positions")
67 plt.plot(AR[0], 0*AR[1], 'yo') # antenna positions only considering magnetic East-West component (x, 0)
68 plt.title("Jicamarca Antenna Positions")
68 69 plt.xlabel("rx")
69 70 plt.ylabel("ry")
70 71 plt.grid(True)
71 72 plt.plot(rx, 0*rx, 'g-')
72 73 plt.draw()
73 74
74 print antpos
75 print "\n"
76 print AR
77 75
78 76 # Only take the East component
79 77 r = AR[0,:]
80 78 r.sort()
81
82 # ---NEW---
83 r1 = AR[1,:] # longitudes / distances
84 d = 40 # separation of antennas in meters
85 theta_w = 15 # angle between antenna and point of observation in degrees
86 # arg = k*(r + d*np.cos(theta_w)) # this is the exponent argument in the corr integral
87 # --------
88 79
89 80
90 81 # Truth model (high and low resolution)
91 82 Nt = (1024.0)*(16.0) # number of pixels in truth image: high resolution
92 83 thbound = 9.0/180*np.pi # the width of the domain in angle space
93 84 thetat = np.linspace(-thbound, thbound,Nt) # image domain
94 85 thetat = thetat.T # transpose
95 86 Nr = (256.0) # number of pixels in reconstructed image: low res
96 87 thetar = np.linspace(-thbound, thbound,Nr) # reconstruction domain
97 88 thetar = thetar.T # transpose
98 89
99 90
100 91 #-------------------------------------------------------------------------------------------------
101 92 # Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and background constant b.
102 93 #-------------------------------------------------------------------------------------------------
103 94
104 95 # Triple Gaussian
105 96 # a = np.array([3, 5, 2]);
106 97 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi, 7.0/180*np.pi]);
107 98 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi, 0.3/180*np.pi]);
108 99 # b = 0; # background
109 100
110 101 # Double Gaussian
111 102 # a = np.array([3, 5]);
112 103 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi]);
113 104 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi]);
114 105 # b = 0; # background
115 106
116 107 # Single Gaussian
117 108 a = np.array( [3] ) # amplitude(s)
118 109 mu = np.array( [-3.0/180*np.pi] ) # center
119 110 sig = np.array( [2.0/180*np.pi] ) # width
120 111 b = 0 # background constant
121 112
122 113 # Empty matrices for factors
123 114 fact = np.zeros(shape=(Nt,1))
124 115 factr = np.zeros(shape=(Nr,1))
125 116
126 # Constructing Gaussian model
117 # Constructing Gaussian model - brightness function (?)
127 118 for i in range(0, a.size):
128 119 temp = (-(thetat-mu[i])**2/(sig[i]**2))
129 120 tempr = (-(thetar-mu[i])**2/(sig[i]**2))
130 121 for j in range(0, temp.size):
131 122 fact[j] = fact[j] + a[i]*np.exp(temp[j]);
132 123 for m in range(0, tempr.size):
133 124 factr[m] = factr[m] + a[i]*np.exp(tempr[m]);
134 125 fact = fact + b;
135 126 factr = factr + b;
136 127
137 128
138 129 # #-------------------------------------------------------------------------------------------------
139 130 # # Model for f: Square pulse
140 131 # #-------------------------------------------------------------------------------------------------
141 132 # for j in range(0, fact.size):
142 133 # if (theta > -5.0/180*np.pi and theta < 2.0/180*np.pi):
143 134 # fact[j] = 0
144 135 # else:
145 136 # fact[j] = 1
146 137 # for k in range(0, factr.size):
147 138 # if (thetar[k] > -5.0/180*np.pi and thetar[k] < 2/180*np.pi):
148 139 # factr[k] = 0
149 140 # else:
150 141 # factr[k] = 1
151 142
152 143 # #-------------------------------------------------------------------------------------------------
153 144 # # Model for f: Triangle pulse
154 145 # #-------------------------------------------------------------------------------------------------
155 146 # mu = -1.0/180*np.pi;
156 147 # sig = 5.0/180*np.pi;
157 148 # wind1 = theta > mu-sig and theta < mu;
158 149 # wind2 = theta < mu+sig and theta > mu;
159 150 # fact = wind1 * (theta - (mu - sig));
160 151 # factr = wind1 * (thetar - (mu - sig));
161 152 # fact = fact + wind2 * (-(theta-(mu+sig)));
162 153 # factr = factr + wind2 * (-(thetar-(mu+sig)));
163 154 #
164 155 # fact = np.divide(fact, (np.sum(fact)*2*thbound/Nt)); # normalize to integral(f)==1
165 156
166 157
167 158 I = sum(fact)[0];
168 159 fact = fact/I; # normalize to sum(f)==1
169 160 factr = factr/I; # normalize to sum(f)==1
170 161
171 162 # Plot Gaussian pulse(s)
172 163 plt.figure(2)
173 plt.plot(thetat, fact, 'r--')
164 plt.plot(thetat, fact, 'bo')
174 165 plt.plot(thetar, factr, 'ro')
175 plt.title("Truth models")
166 plt.title("Image and Reconstruction Domain")
176 167 plt.ylabel("Amplitude")
177 plt.legend(('High res','Low res'), loc='upper right')
168 plt.legend(('High res (img domain)','Low res (reconstruction domain)'), loc='upper right')
178 169 plt.draw()
179 170
180 171
181 172 #-------------------------------------------------------------------------------------------------
182 173 # Control the type and number of inversions with:
183 174 # SNRdBvec: the SNRs that will be used.
184 175 # NN: the number of trials for each SNR
185 176 #-------------------------------------------------------------------------------------------------
186 177
187 178 #SNRdBvec = np.linspace(5,20,10);
188 179 SNRdBvec = np.array([15]); # 15 dB
189 180 NN = 1; # number of trials at each SNR
190 181
191 182 # Statistics simulation (correlation, root mean square error)
192 183 corr = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
193 184 corrc = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
194 185 rmse = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
195 186
196 187 # For each SNR and trial
197 188 for snri in range(0, SNRdBvec.size):
198 189 SNRdB = SNRdBvec[snri];
199 190 SNR = 10**(SNRdB/10.0);
200 191
201 192 for Ni in range(0, NN):
193
194 # -------------------------------------- Voltages estimation----------------------------------------
195 v = np.zeros(shape=(r.size,1), dtype=object); # voltages matrix
196 R = np.zeros(shape=(r.size, r.size), dtype='complex128'); # corr matrix
197 mu1, sigma1 = 0, 1 # mean and standard deviation
198 num = 1
199
200 for t in range (0, 10):
201 num = t+1
202 print num
203 n1 = np.random.normal(mu1, sigma1, [Nt,1]) # random variable
204 n2 = np.random.normal(mu1, sigma1, [Nt,1]) # random variable
205
206 rho = (n1 + n2*1j)*np.sqrt(fact/2) # rho.size is Nt = 16834
207 # fact is size (Nt, 1) -> column vector
208 thetat = thetat.reshape(Nt,1) # thetat is now (Nt, 1)
209
210 for i in range(0, r.size):
211 v[i] = sum((rho)*np.exp(-1j*k*((r[i])*np.cos(thetat)))); # v is sum ( (Nt, 1)) = 1 value
212
213 # Computing correlations
214 for i1 in range(0, r.size):
215 for i2 in range(0,r.size):
216 R[i1,i2] = R[i1,i2] + (v[i1][0]*v[i2][0].conjugate())/ (np.sqrt(np.abs(v[i1][0])**2 * np.abs(v[i2][0])**2));
217
218 R = R / num
219 print "R from voltages"
220 print R
221
222 plt.figure(3);
223 plt.pcolor(abs(R));
224 plt.colorbar();
225 plt.title("Antenna Correlations")
226 plt.draw()
227 plt.show()
228
229 #--------------------------------------------------------------------------------------------------
230
202 231 # Calculate cross-correlation matrix (Fourier components of image)
203 232 # This is an inefficient way to do this.
204
233
234 # MATLAB CORR::: R(i1,i2) = fact.' * exp(j*k*(r(i1)-r(i2))*sin(thetat));
235
205 236 R = np.zeros(shape=(r.size, r.size), dtype=object);
206
237
238 thetat = thetat.reshape(Nt,)
207 239 for i1 in range(0, r.size):
208 240 for i2 in range(0,r.size):
209 R[i1,i2] = np.dot(fact.T, np.exp(1j*k*np.dot((r[i1]-r[i2]),np.sin(thetat))))
210 R[i1,i2] = sum(R[i1,i2])
211
241 R[i1,i2] = sum(np.dot(fact.T, np.exp(1j*k*((r[i1]-r[i2])*np.sin(thetat)))))
242
243 print "R w/o voltages\n", R
244 # plt.figure(3);
245 # plt.pcolor(abs(R));
246 # plt.colorbar();
247 # plt.title("Antenna Correlations")
248 # plt.xlabel("Antenna")
249 # plt.ylabel("Antenna")
250 # plt.draw()
251 # plt.show()
252
253
212 254 # Add uncertainty
213 255 # This is an ad-hoc way of adding "noise". It models some combination of
214 256 # receiver noise and finite integration times. We could use a more
215 257 # advanced model (like in Yu et al 2000) in the future.
216 258
217 259 # This is a way of adding noise while maintaining the
218 260 # positive-semi-definiteness of the matrix.
219 261
220 262 U = linalg.cholesky(R.astype(complex), lower=False); # U'*U = R
221 263
222 264 sigma_noise = (np.linalg.norm(U,'fro')/SNR);
223 265
224 266 nz = np.multiply((sigma_noise * (np.random.randn(U.shape[0]) + 1j*np.random.randn(U.shape[0]))/np.sqrt(2)), (abs(U) > 0).astype(float));
225 267
226 268 Unz = U + nz;
227 269 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
228 plt.figure(3);
229 plt.pcolor(abs(R));
230 plt.colorbar();
270 # plt.figure(3);
271 # plt.pcolor(abs(R));
272 # plt.colorbar();
231 273
232 274 #-------------------------------------------------------------------------------------------------
233 275 # Fourier Inversion
234 276 #-------------------------------------------------------------------------------------------------
235 277 f_fourier = np.zeros(shape=(Nr,1), dtype=complex);
236 278
237 279 for i in range(0, thetar.size):
238 280 th = thetar[i];
239 w = np.exp(1j*k*np.dot(r,np.sin(th)));
240 temp = np.dot(w.T.conj(),U)
241 f_fourier[i] = np.dot(temp, w);
281 w = np.exp(1j*k*np.dot(r,np.sin(th)));
282 f_fourier[i] = np.dot(np.dot(w.T.conj(),U) , w);
242 283
243 284 f_fourier = f_fourier.real; # get rid of numerical imaginary noise
244 285
245 286
246 287 #-------------------------------------------------------------------------------------------------
247 288 # Capon Inversion
248 289 #-------------------------------------------------------------------------------------------------
249 290 f_capon = np.zeros(shape=(Nr,1));
250 291
251 292 tic_capon = time.time();
252 293
253 294 for i in range(0, thetar.size):
254 295 th = thetar[i];
255 296 w = np.exp(1j*k*np.dot(r,np.sin(th)));
256 297 f_capon[i] = 1/ ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real
257 298
258 299 toc_capon = time.time()
259 300 elapsed_time_capon = toc_capon - tic_capon;
260 301
261 302 f_capon = f_capon.real; # get rid of numerical imaginary noise
262 303
263 304
264 305 #-------------------------------------------------------------------------------------------------
265 306 # MaxEnt Inversion
266 307 #-------------------------------------------------------------------------------------------------
267 308
268 309 # Create the appropriate sensing matrix (split into real and imaginary # parts)
269 310 M = (r.size-1)*(r.size);
270 311 Ht = np.zeros(shape=(M,Nt)); # "true" sensing matrix
271 312 Hr = np.zeros(shape=(M,Nr)); # approximate sensing matrix for reconstruction
272 313
273 314 # Need to re-index our measurements from matrix R into vector g
274 315 g = np.zeros(shape=(M,1));
275 316 gnz = np.zeros(shape=(M,1)); # noisy version of g
276 317
277 318 # Triangular indexing to perform this re-indexing
278 319 T = np.ones(shape=(r.size,r.size));
279 320 [i1v,i2v] = np.where(np.triu(T,1) > 0); # converts linear to triangular indexing
280 321
281 322 # Build H
282 323 for i1 in range(0, r.size):
283 324 for i2 in range(i1+1, r.size):
284 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward 1->27
325 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward (1->27)
285 326 idx1 = 2*idx;
286 327 idx2 = 2*idx+1;
287 328 Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
288 329 Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
289 330 Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
290 331 Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
291 332 g[idx1] = (R[i1,i2]).real*Nr/Nt;
292 333 g[idx2] = (R[i1,i2]).imag*Nr/Nt;
293 334 gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
294 335 gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;
295 336
296 337 # Inversion
297 338 F = Nr/Nt; # normalization
298 339 sigma = 1; # set to 1 because the difference is accounted for in G
299 340
300 341 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
301 342 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
302 343
303 344
304 345 # Whitened solution
305 346 def myfun(lambda1):
306 347 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
307 348
308 349
309 350 tic_maxEnt = time.time(); # start time maxEnt
310 351
311 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14);
352 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14); # krylov
353 # lambda1 = newton_krylov(myfun,lambda0, method='lgmres', x_tol=1e-14)
312 354
313 355 toc_maxEnt = time.time()
314 356 elapsed_time_maxent = toc_maxEnt - tic_maxEnt;
315 357
316 358 # Solution
317 359 lambda1 = lambda1.x;
318 360
319 361 f_maxent = modelf(lambda1, Hr, F);
320 362
321 363 #------ what is this needed for?-------
322 ystar = myfun(lambda1);
323 Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
324 ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
325 es = np.dot(Hr, f_maxent) - gnz;
326 chi2 = np.sum((es/sigma)**2);
364 # ystar = myfun(lambda1);
365 # Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
366 # ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
367 # es = np.dot(Hr, f_maxent) - gnz;
368 # chi2 = np.sum((es/sigma)**2);
327 369 #--------------------------------------
328 370
329 371
330 372 #-------------------------------------------------------------------------------------------------
331 373 # CS inversion using Iteratively Reweighted Least Squares (IRLS)
332 374 #-------------------------------------------------------------------------------------------------
333 375 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
334 376
335 377 Psi = deb4_basis(Nr);
336 378
337 379 # add "sum to 1" constraint
338 380 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
339 381 g2 = np.concatenate( (gnz, np.array([[Nr/Nt]])), axis=0 );
340 382
341 383 tic_cs = time.time();
342 384
343 385 # Inversion
344 386 s = irls_dn2(np.dot(H2,Psi),g2,0.5,G);
345 387
346 388 toc_cs = time.time()
347 389 elapsed_time_cs = toc_cs - tic_cs;
348 390
349 391 # Brightness function
350 392 f_cs = np.dot(Psi,s);
351 393
352 394
353 395 print "shapes:"
354 396 print "Psi ", Psi.shape
355 397 print "s ", s.shape
356 398 print "f_CS ", f_cs.shape
399
357 400
358 # Plot
359 plt.figure(4)
360 plt.plot(thetar,f_cs,'r.-');
361 plt.plot(thetat,fact,'k-');
362 plt.title('Reconstruction with Compressed Sensing')
401 # # Plot
402 # plt.figure(4)
403 # plt.plot(thetar,f_cs,'r.-');
404 # plt.plot(thetat,fact,'k-');
405 # plt.title('Reconstruction with Compressed Sensing')
363 406
364 407
365 408 #-------------------------------------------------------------------------------------------------
366 409 # Scaling and shifting
367 410 # (Only necessary for Capon solution)
368 411 #-------------------------------------------------------------------------------------------------
369 412 f_capon = f_capon/np.max(f_capon)*np.max(fact);
370 413
371 414
372 415 #-------------------------------------------------------------------------------------------------
373 416 # Analyze stuff
374 417 #-------------------------------------------------------------------------------------------------
375 418
376 419 # Calculate MSE
377 420 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
378 421 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
379 422 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
380 423 rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));
381 424
382 425
383 426 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
384 427 relrmse_capon = rmse_capon / np.linalg.norm(fact);
385 428 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
386 429 relrmse_cs = rmse_cs / np.linalg.norm(fact);
387 430
388 431
389 432 # Calculate correlation
390 433 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
391 434 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
392 435 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
393 436 corr_cs = np.dot(f_cs.T.conj(),factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));
394 437
395 438
396 439 # Calculate centered correlation
397 440 f0 = factr - np.mean(factr);
398 441 f1 = f_fourier - np.mean(f_fourier);
399
400 442 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
401 443 f1 = f_capon - np.mean(f_capon);
402 444 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
403 445 f1 = f_maxent - np.mean(f_maxent);
404 446 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
405 447 f1 = f_cs - np.mean(f_cs);
406 448 corrc_cs = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
407 449
408 450
409 451 #-------------------------------------------------------------------------------------------------
410 452 # Plot stuff
411 453 #-------------------------------------------------------------------------------------------------
412 454
413 455 #---- Capon----#
414 456 plt.figure(5)
415 457 plt.subplot(3, 1, 1)
416 458 plt.plot(180/np.pi*thetar, f_capon, 'r', label='Capon')
417 459 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
418 460 plt.ylabel('Power (arbitrary units)')
419 461 plt.legend(loc='upper right')
420 462
421 463 # formatting y-axis
422 464 locs,labels = plt.yticks()
423 465 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
424 466 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
425 467
426 468
427 469 #---- MaxEnt----#
428 470 plt.subplot(3, 1, 2)
429 471 plt.plot(180/np.pi*thetar, f_maxent, 'r', label='MaxEnt')
430 472 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
431 473 plt.ylabel('Power (arbitrary units)')
432 474 plt.legend(loc='upper right')
433 475
434 476 # formatting y-axis
435 477 locs,labels = plt.yticks()
436 478 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
437 479 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
438 480
439 481
440 482 #---- Compressed Sensing----#
441 483 plt.subplot(3, 1, 3)
442 484 plt.plot(180/np.pi*thetar, f_cs, 'r', label='CS')
443 485 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
444 486 plt.ylabel('Power (arbitrary units)')
445 487 plt.legend(loc='upper right')
446 488
447 489 # formatting y-axis
448 490 locs,labels = plt.yticks()
449 491 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
450 492 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
451 493
452 494 # # Store Results
453 495 corr[0, snri, Ni] = corr_fourier;
454 496 corr[1, snri, Ni] = corr_capon;
455 497 corr[2, snri, Ni] = corr_maxent;
456 498 corr[3, snri, Ni] = corr_cs;
457 499
458 500 rmse[0,snri,Ni] = relrmse_fourier;
459 501 rmse[1,snri,Ni] = relrmse_capon;
460 502 rmse[2,snri,Ni] = relrmse_maxent;
461 503 rmse[3,snri,Ni] = relrmse_cs;
462 504
463 505 corrc[0,snri,Ni] = corrc_fourier;
464 506 corrc[1,snri,Ni] = corrc_capon;
465 507 corrc[2,snri,Ni] = corrc_maxent;
466 508 corrc[3,snri,Ni] = corrc_cs;
467 509
468 510 print "--------Time performace--------"
469 511 print 'Capon:\t', elapsed_time_capon, 'sec';
470 512 print 'Maxent:\t',elapsed_time_maxent, 'sec';
471 513 print 'CS:\t',elapsed_time_cs, 'sec\n';
472 514
473 515 print (NN*(snri)+Ni+1), '/', (SNRdBvec.size*NN), '\n';
474 516
475 517
476 518 #-------------------------------------------------------------------------------------------------
477 519 # Analyze and plot statistics
478 520 #-------------------------------------------------------------------------------------------------
479 521
480 522 metric = corr; # set this to rmse, corr, or corrc
481 523
482 524 # Remove outliers (this part was experimental and wasn't used in the paper)
483 525 # nsig = 3;
484 526 # for i = 1:4
485 527 # for snri = 1:length(SNRdBvec)
486 528 # av = mean(met(i,snri,:));
487 529 # s = std(met(i,snri,:));
488 530 # idx = abs(met(i,snri,:) - av) > nsig*s;
489 531 # met(i,snri,idx) = nan;
490 532 # if sum(idx)>0
491 533 # fprintf('i=%i, snr=%i, %i/%i pts removed\n',...
492 534 # i,round(SNRdBvec(snri)),sum(idx),length(idx));
493 535 # end
494 536 # end
495 537 # end
496 538
497 539
498 540 # Avg ignoring NaNs
499 541 ave = np.zeros(shape=(4))
500 542
501 543 print metric
502
503 ave[0] = nanmean(metric[0,:,:]); # Fourier
504 ave[1] = nanmean(metric[1,:,:]); # Capon
505 ave[2] = nanmean(metric[2,:,:]); # MaxEnt
506 ave[3] = nanmean(metric[3,:,:]); # Compressed Sensing
544 print metric.shape
545
546 ave[0] = nanmean(metric[0,0,:]); # Fourier
547 ave[1] = nanmean(metric[1,0,:]); # Capon
548 ave[2] = nanmean(metric[2,0,:]); # MaxEnt
549 ave[3] = nanmean(metric[3,0,:]); # Compressed Sensing
550
551 print ave
507 552
508 553 # Plot based on chosen metric
509 554 plt.figure(6);
510 555 f = plt.scatter(SNRdBvec, ave[0], marker='+', color='b', s=60); # Fourier
511 556 c = plt.scatter(SNRdBvec, ave[1], marker='o', color= 'g', s=60); # Capon
512 557 me= plt.scatter(SNRdBvec, ave[2], marker='s', color= 'c', s=60); # MaxEnt
513 558 cs= plt.scatter(SNRdBvec, ave[3], marker='*', color='r', s=60); # Compressed Sensing
514 559
515 560 plt.legend((f,c,me,cs),('Fourier','Capon', 'MaxEnt', 'Comp. Sens.'),scatterpoints=1, loc='upper right')
516 561 plt.xlabel('SNR')
517 562 plt.ylabel('Correlation with Truth')
518 563
519 564 print "--------Correlations--------"
520 565 print "Fourier:", ave[0]
521 566 print "Capon:\t", ave[1]
522 567 print "MaxEnt:\t", ave[2]
523 568 print "CS:\t", ave[3]
524 569
525 570 plt.show()
526 571
527 572
@@ -1,65 +1,65
1 1 '''
2 2 Created on May 29, 2014
3 3
4 4 @author: Yolian Amaro
5 5 '''
6 6
7 7 import numpy as np
8 8
9 9 def dualfilt1():
10 10
11 11 # Kingsbury Q-filters for the dual-tree complex DWT
12 12 #
13 13 # USAGE:
14 14 # [af, sf] = dualfilt1
15 15 # OUTPUT:
16 16 # af{i}, i = 1,2 - analysis filters for tree i
17 17 # sf{i}, i = 1,2 - synthesis filters for tree i
18 18 # note: af{2} is the reverse of af{1}
19 19 # REFERENCE:
20 20 # N. G. Kingsbury, "A dual-tree complex wavelet
21 21 # transform with improved orthogonality and symmetry
22 22 # properties", Proceedings of the IEEE Int. Conf. on
23 23 # Image Proc. (ICIP), 2000
24 24 # See dualtree
25 25 #
26 26 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
27 27 # http://taco.poly.edu/WaveletSoftware/
28 28
29 # These cofficients are rounded to 8 decimal places.
29 # These coefficients are rounded to 8 decimal places.
30 30
31 31 a1 = np.array([
32 32 [ 0.03516384000000, 0],
33 33 [ 0, 0],
34 34 [-0.08832942000000, -0.11430184000000],
35 35 [ 0.23389032000000, 0],
36 36 [ 0.76027237000000, 0.58751830000000],
37 37 [ 0.58751830000000, -0.76027237000000],
38 38 [ 0, 0.23389032000000],
39 39 [-0.11430184000000, 0.08832942000000],
40 40 [ 0, 0],
41 41 [ 0, -0.03516384000000]
42 42 ]);
43 43
44 44 a2 = np.array([
45 45 [ 0, -0.03516384000000],
46 46 [ 0, 0],
47 47 [-0.11430184000000, 0.08832942000000],
48 48 [ 0, 0.23389032000000],
49 49 [ 0.58751830000000, -0.76027237000000],
50 50 [ 0.76027237000000, 0.58751830000000],
51 51 [ 0.23389032000000, 0],
52 52 [ -0.08832942000000, -0.11430184000000],
53 53 [ 0, 0],
54 54 [ 0.03516384000000, 0]
55 55 ]);
56 56
57 57 af = np.array([ [a1,a2] ], dtype=object)
58 58
59 59 s1 = a1[::-1]
60 60 s2 = a2[::-1]
61 61
62 62 sf = np.array([ [s1,s2] ], dtype=object)
63 63
64 64
65 65 return af, sf
@@ -1,52 +1,52
1 1 '''
2 2 Created on May 30, 2014
3 3
4 4 @author: Yolian Amaro
5 5 '''
6 6
7 7 from irls_dn import *
8 8 from scipy.optimize import brentq
9 9
10 10 def irls_dn2(A,b,p,G):
11 11
12 12 # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G (0 < p <= 1)
13 13
14 14 # What this function actually does is finds the lambda1 so that the solution
15 15 # to the following problem satisfies ||A*u-b||_2^2 <= G:
16 16 # Minimize lambda1*||u||_p + ||A*u-b||_2
17 17
18 18 # Start with a large lambda1, and do a line search until fidelity <= G.
19 19 # (Inversions with large lambda1 are really fast anyway).
20 20
21 21 # Then spin up fsolve to localize the root even better
22 22
23 23 # Line Search
24 24
25 25 alpha = 2.0; # Line search parameter
26 26 lambda1 = 1e5; # What's a reasonable but safe initial guess?
27 27 u = irls_dn(A,b,p,lambda1);
28 28
29 29
30 30 fid = norm(np.dot(A,u)-b)**2;
31 31
32 32 print '----------------------------------\n';
33 33
34 34 while (fid >= G):
35 35 lambda1 = lambda1 / alpha; # Balance between speed and accuracy
36 36 u = irls_dn(A,b,p,lambda1);
37 37 fid = norm(np.dot(A,u)-b)**2;
38 38 print 'lambda = %2e \t' % lambda1, '||A*u-b||^2 = %.1f' % fid;
39 39
40 # Refinement using fzero/ brentq
40 # Refinement using brentq (equiv to fzero in matlab)
41 41 lambda0 = np.array([lambda1,lambda1*alpha]); # interval with zero-crossing
42 42
43 43 def myfun(lambda1):
44 44 return norm(np.dot(A, irls_dn(A,b,p,lambda1)) - b)**2 - G;
45 45
46 46 # Find zero-crossing at given interval (lambda1, lambda1*alpha)
47 47 lambda1 = brentq(myfun, lambda0[0], lambda0[1], xtol=0.01*lambda1)
48 48
49 49 u = irls_dn(A,b,p,lambda1);
50 50
51 51 return u;
52 52
General Comments 0
You need to be logged in to leave comments. Login now