Implementation of voltage simulation for estimating correlations (some things still need to be fixed)
yamaro
r25:26
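This revision adds a Monte Carlo voltage simulation: each trial draws a random complex field whose variance follows the model brightness function, forms antenna voltages by summing that field with the steering phase for each antenna position, and accumulates normalized cross products into a correlation matrix. A condensed, self-contained sketch of that idea follows (illustrative wavelength, antenna positions and brightness model; variable names mirror the script, and the per-snapshot normalization and cos() steering phase follow the committed code):

import numpy as np

k = 2 * np.pi / 6.0                                  # wavenumber for lambda = 6 m
r = np.array([0.0, 40.0, 80.0, 120.0])               # antenna positions (m), illustrative
thetat = np.linspace(-9.0, 9.0, 1024) * np.pi / 180  # angular grid (rad)
fact = np.exp(-(thetat + 3 * np.pi / 180)**2 / (2.0 * np.pi / 180)**2)
fact = fact / fact.sum()                             # brightness function, sum(f) == 1

R = np.zeros((r.size, r.size), dtype=complex)
trials = 10
for t in range(trials):
    n1 = np.random.normal(size=thetat.size)
    n2 = np.random.normal(size=thetat.size)
    rho = (n1 + 1j * n2) * np.sqrt(fact / 2)         # random field realization
    v = np.array([np.sum(rho * np.exp(-1j * k * ri * np.cos(thetat))) for ri in r])
    R += np.outer(v, v.conj()) / np.abs(np.outer(v, v.conj()))   # per-snapshot normalization
R = R / trials
print(np.round(np.abs(R), 3))

A possible alternative estimator that normalizes averaged cross products by averaged powers is sketched after the voltage block in the diff below.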
@@ -1,527 +1,572
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 #----------------------------------------------------------
3 #----------------------------------------------------------
4 # Original MATLAB code developed by Brian Harding
4 # Original MATLAB code developed by Brian Harding
5 # Rewritten in Python by Yolian Amaro
5 # Rewritten in Python by Yolian Amaro
6 # Python version 2.7
6 # Python version 2.7
7 # May 15, 2014
7 # May 15, 2014
8 # Jicamarca Radio Observatory
8 # Jicamarca Radio Observatory
9 #----------------------------------------------------------
9 #----------------------------------------------------------
10
10
11 import time
11 import time
12 import matplotlib.pyplot as plt
12 import matplotlib.pyplot as plt
13 from scipy.optimize import root
13 from scipy.optimize import root
14 from scipy.optimize import newton_krylov
14 from scipy.stats import nanmean
15 from scipy.stats import nanmean
15
16
16 from y_hysell96 import *
17 from y_hysell96 import *
17 from deb4_basis import *
18 from deb4_basis import *
18 from modelf import *
19 from modelf import *
19 from irls_dn2 import *
20 from irls_dn2 import *
20
21
21
22
22 #-------------------------------------------------------------------------------------------------
23 #-------------------------------------------------------------------------------------------------
23 # Set parameters
24 # Set parameters
24 #-------------------------------------------------------------------------------------------------
25 #-------------------------------------------------------------------------------------------------
25
26
26 ## Calculate Forward Model
27 ## Calculate Forward Model
27 lambda1 = 6.0
28 lambda1 = 6.0
28 k = 2*np.pi/lambda1
29 k = 2*np.pi/lambda1
29
30
30 ## Magnetic Declination
31 ## Magnetic Declination
31 dec = -1.24
32 dec = -1.24
32
33
33 ## Loads Jicamarca antenna positions
34 ## Loads Jicamarca antenna positions
34 antpos = np.loadtxt("antpos.txt", comments="#", delimiter=";", unpack=False)
35 antpos = np.loadtxt("antpos.txt", comments="#", delimiter=";", unpack=False)
35
36
36 ## rx and ry -- for plotting purposes
37 ## rx and ry -- for plotting purposes
37 rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
38 rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
38 ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )
39 ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )
39
40
40 ## Plot of antenna positions
41 ## Plot of antenna positions
41 plt.figure(1)
42 plt.figure(1)
42 plt.plot(rx, ry, 'ro')
43 plt.plot(rx, ry, 'ro')
43 plt.title("Jicamarca Antenna Positions")
44 plt.title("Jicamarca Antenna Positions")
44 plt.xlabel("rx")
45 plt.xlabel("rx")
45 plt.ylabel("ry")
46 plt.ylabel("ry")
46 plt.grid(True)
47 plt.grid(True)
47 plt.plot(rx, rx, 'b-')
48 plt.plot(rx, rx, 'b-')
48 plt.draw()
49 plt.draw()
49
50
50 ## Jicamarca is nominally at a 45 degree angle
51 ## Jicamarca is nominally at a 45 degree angle
51 theta = 45 - dec;
52 theta = 45 - dec;
52
53
53 ## Rotation matrix from antenna coord to magnetic coord (East North) -> 45 degrees
54 ## Rotation matrix from antenna coord to magnetic coord (East North) -> 45 degrees
54 theta_rad = np.radians(theta) # trig functions take radians as argument
55 theta_rad = np.radians(theta) # trig functions take radians as argument
55 val1 = float( np.cos(theta_rad) )
56 val1 = float( np.cos(theta_rad) )
56 val2 = float( np.sin(theta_rad) )
57 val2 = float( -1*np.sin(theta_rad))
57 val3 = float( -1*np.sin(theta_rad))
58 val3 = float( np.sin(theta_rad) )
58 val4 = float( np.cos(theta_rad) )
59 val4 = float( np.cos(theta_rad) )
59
60
60 # Rotation matrix from antenna coord to magnetic coord (East North)
61 # Rotation matrix from antenna coord to magnetic coord (East North)
61 R = np.array( [[val1, val3], [val2, val4]] )
62 R = np.array( [[val1, val2], [val3, val4]] )
62
63
63 # Rotate antenna positions to magnetic coord.
64 # Rotate antenna positions to magnetic coord.
64 AR = np.dot(R.T, antpos)
65 AR = np.dot(R.T, antpos)
65
66
66 plt.plot(AR[0], 0*AR[1], 'yo')
67 plt.plot(AR[0], 0*AR[1], 'yo') # antenna positions only considering magnetic East-West component (x, 0)
67 plt.title("Jicamarca Antenna Positions")
68 plt.title("Jicamarca Antenna Positions")
68 plt.xlabel("rx")
69 plt.xlabel("rx")
69 plt.ylabel("ry")
70 plt.ylabel("ry")
70 plt.grid(True)
71 plt.grid(True)
71 plt.plot(rx, 0*rx, 'g-')
72 plt.plot(rx, 0*rx, 'g-')
72 plt.draw()
73 plt.draw()
73
74
74 print antpos
75 print "\n"
76 print AR
77
75
78 # Only take the East component
76 # Only take the East component
79 r = AR[0,:]
77 r = AR[0,:]
80 r.sort()
78 r.sort()
81
82 # ---NEW---
83 r1 = AR[1,:] # longitudes / distances
84 d = 40 # separation of antennas in meters
85 theta_w = 15 # angle between antenna and point of observation in degrees
86 # arg = k*(r + d*np.cos(theta_w)) # this is the exponent argument in the corr integral
87 # --------
88
79
89
80
90 # Truth model (high and low resolution)
81 # Truth model (high and low resolution)
91 Nt = (1024.0)*(16.0) # number of pixels in truth image: high resolution
82 Nt = (1024.0)*(16.0) # number of pixels in truth image: high resolution
92 thbound = 9.0/180*np.pi # the width of the domain in angle space
83 thbound = 9.0/180*np.pi # the width of the domain in angle space
93 thetat = np.linspace(-thbound, thbound,Nt) # image domain
84 thetat = np.linspace(-thbound, thbound,Nt) # image domain
94 thetat = thetat.T # transpose
85 thetat = thetat.T # transpose
95 Nr = (256.0) # number of pixels in reconstructed image: low res
86 Nr = (256.0) # number of pixels in reconstructed image: low res
96 thetar = np.linspace(-thbound, thbound,Nr) # reconstruction domain
87 thetar = np.linspace(-thbound, thbound,Nr) # reconstruction domain
97 thetar = thetar.T # transpose
88 thetar = thetar.T # transpose
98
89
99
90
100 #-------------------------------------------------------------------------------------------------
91 #-------------------------------------------------------------------------------------------------
101 # Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and background constant b.
92 # Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and background constant b.
102 #-------------------------------------------------------------------------------------------------
93 #-------------------------------------------------------------------------------------------------
103
94
104 # Triple Gaussian
95 # Triple Gaussian
105 # a = np.array([3, 5, 2]);
96 # a = np.array([3, 5, 2]);
106 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi, 7.0/180*np.pi]);
97 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi, 7.0/180*np.pi]);
107 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi, 0.3/180*np.pi]);
98 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi, 0.3/180*np.pi]);
108 # b = 0; # background
99 # b = 0; # background
109
100
110 # Double Gaussian
101 # Double Gaussian
111 # a = np.array([3, 5]);
102 # a = np.array([3, 5]);
112 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi]);
103 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi]);
113 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi]);
104 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi]);
114 # b = 0; # background
105 # b = 0; # background
115
106
116 # Single Gaussian
107 # Single Gaussian
117 a = np.array( [3] ) # amplitude(s)
108 a = np.array( [3] ) # amplitude(s)
118 mu = np.array( [-3.0/180*np.pi] ) # center
109 mu = np.array( [-3.0/180*np.pi] ) # center
119 sig = np.array( [2.0/180*np.pi] ) # width
110 sig = np.array( [2.0/180*np.pi] ) # width
120 b = 0 # background constant
111 b = 0 # background constant
121
112
122 # Empty matrices for factors
113 # Empty matrices for factors
123 fact = np.zeros(shape=(Nt,1))
114 fact = np.zeros(shape=(Nt,1))
124 factr = np.zeros(shape=(Nr,1))
115 factr = np.zeros(shape=(Nr,1))
125
116
126 # Constructing Gaussian model
117 # Constructing Gaussian model - brightness function (?)
127 for i in range(0, a.size):
118 for i in range(0, a.size):
128 temp = (-(thetat-mu[i])**2/(sig[i]**2))
119 temp = (-(thetat-mu[i])**2/(sig[i]**2))
129 tempr = (-(thetar-mu[i])**2/(sig[i]**2))
120 tempr = (-(thetar-mu[i])**2/(sig[i]**2))
130 for j in range(0, temp.size):
121 for j in range(0, temp.size):
131 fact[j] = fact[j] + a[i]*np.exp(temp[j]);
122 fact[j] = fact[j] + a[i]*np.exp(temp[j]);
132 for m in range(0, tempr.size):
123 for m in range(0, tempr.size):
133 factr[m] = factr[m] + a[i]*np.exp(tempr[m]);
124 factr[m] = factr[m] + a[i]*np.exp(tempr[m]);
134 fact = fact + b;
125 fact = fact + b;
135 factr = factr + b;
126 factr = factr + b;
136
127
137
128
138 # #-------------------------------------------------------------------------------------------------
129 # #-------------------------------------------------------------------------------------------------
139 # # Model for f: Square pulse
130 # # Model for f: Square pulse
140 # #-------------------------------------------------------------------------------------------------
131 # #-------------------------------------------------------------------------------------------------
141 # for j in range(0, fact.size):
132 # for j in range(0, fact.size):
142 # if (theta > -5.0/180*np.pi and theta < 2.0/180*np.pi):
133 # if (theta > -5.0/180*np.pi and theta < 2.0/180*np.pi):
143 # fact[j] = 0
134 # fact[j] = 0
144 # else:
135 # else:
145 # fact[j] = 1
136 # fact[j] = 1
146 # for k in range(0, factr.size):
137 # for k in range(0, factr.size):
147 # if (thetar[k] > -5.0/180*np.pi and thetar[k] < 2/180*np.pi):
138 # if (thetar[k] > -5.0/180*np.pi and thetar[k] < 2/180*np.pi):
148 # factr[k] = 0
139 # factr[k] = 0
149 # else:
140 # else:
150 # factr[k] = 1
141 # factr[k] = 1
151
142
152 # #-------------------------------------------------------------------------------------------------
143 # #-------------------------------------------------------------------------------------------------
153 # # Model for f: Triangle pulse
144 # # Model for f: Triangle pulse
154 # #-------------------------------------------------------------------------------------------------
145 # #-------------------------------------------------------------------------------------------------
155 # mu = -1.0/180*np.pi;
146 # mu = -1.0/180*np.pi;
156 # sig = 5.0/180*np.pi;
147 # sig = 5.0/180*np.pi;
157 # wind1 = theta > mu-sig and theta < mu;
148 # wind1 = theta > mu-sig and theta < mu;
158 # wind2 = theta < mu+sig and theta > mu;
149 # wind2 = theta < mu+sig and theta > mu;
159 # fact = wind1 * (theta - (mu - sig));
150 # fact = wind1 * (theta - (mu - sig));
160 # factr = wind1 * (thetar - (mu - sig));
151 # factr = wind1 * (thetar - (mu - sig));
161 # fact = fact + wind2 * (-(theta-(mu+sig)));
152 # fact = fact + wind2 * (-(theta-(mu+sig)));
162 # factr = factr + wind2 * (-(thetar-(mu+sig)));
153 # factr = factr + wind2 * (-(thetar-(mu+sig)));
163 #
154 #
164 # fact = np.divide(fact, (np.sum(fact)*2*thbound/Nt)); # normalize to integral(f)==1
155 # fact = np.divide(fact, (np.sum(fact)*2*thbound/Nt)); # normalize to integral(f)==1
165
156
166
157
167 I = sum(fact)[0];
158 I = sum(fact)[0];
168 fact = fact/I; # normalize to sum(f)==1
159 fact = fact/I; # normalize to sum(f)==1
169 factr = factr/I; # normalize to sum(f)==1
160 factr = factr/I; # normalize to sum(f)==1
170
161
171 # Plot Gaussian pulse(s)
162 # Plot Gaussian pulse(s)
172 plt.figure(2)
163 plt.figure(2)
173 plt.plot(thetat, fact, 'r--')
164 plt.plot(thetat, fact, 'bo')
174 plt.plot(thetar, factr, 'ro')
165 plt.plot(thetar, factr, 'ro')
175 plt.title("Truth models")
166 plt.title("Image and Reconstruction Domain")
176 plt.ylabel("Amplitude")
167 plt.ylabel("Amplitude")
177 plt.legend(('High res','Low res'), loc='upper right')
168 plt.legend(('High res (img domain)','Low res (reconstruction domain)'), loc='upper right')
178 plt.draw()
169 plt.draw()
179
170
180
171
181 #-------------------------------------------------------------------------------------------------
172 #-------------------------------------------------------------------------------------------------
182 # Control the type and number of inversions with:
173 # Control the type and number of inversions with:
183 # SNRdBvec: the SNRs that will be used.
174 # SNRdBvec: the SNRs that will be used.
184 # NN: the number of trials for each SNR
175 # NN: the number of trials for each SNR
185 #-------------------------------------------------------------------------------------------------
176 #-------------------------------------------------------------------------------------------------
186
177
187 #SNRdBvec = np.linspace(5,20,10);
178 #SNRdBvec = np.linspace(5,20,10);
188 SNRdBvec = np.array([15]); # 15 dB
179 SNRdBvec = np.array([15]); # 15 dB
189 NN = 1; # number of trials at each SNR
180 NN = 1; # number of trials at each SNR
190
181
191 # Statistics simulation (correlation, root mean square error)
182 # Statistics simulation (correlation, root mean square error)
192 corr = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
183 corr = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
193 corrc = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
184 corrc = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
194 rmse = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
185 rmse = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
195
186
196 # For each SNR and trial
187 # For each SNR and trial
197 for snri in range(0, SNRdBvec.size):
188 for snri in range(0, SNRdBvec.size):
198 SNRdB = SNRdBvec[snri];
189 SNRdB = SNRdBvec[snri];
199 SNR = 10**(SNRdB/10.0);
190 SNR = 10**(SNRdB/10.0);
200
191
201 for Ni in range(0, NN):
192 for Ni in range(0, NN):
193
194 # -------------------------------------- Voltages estimation----------------------------------------
195 v = np.zeros(shape=(r.size,1), dtype=object); # voltages matrix
196 R = np.zeros(shape=(r.size, r.size), dtype='complex128'); # corr matrix
197 mu1, sigma1 = 0, 1 # mean and standard deviation
198 num = 1
199
200 for t in range (0, 10):
201 num = t+1
202 print num
203 n1 = np.random.normal(mu1, sigma1, [Nt,1]) # random variable
204 n2 = np.random.normal(mu1, sigma1, [Nt,1]) # random variable
205
206 rho = (n1 + n2*1j)*np.sqrt(fact/2) # rho.size is Nt = 16384
207 # fact is size (Nt, 1) -> column vector
208 thetat = thetat.reshape(Nt,1) # thetat is now (Nt, 1)
209
210 for i in range(0, r.size):
211 v[i] = sum((rho)*np.exp(-1j*k*((r[i])*np.cos(thetat)))); # v is sum ( (Nt, 1)) = 1 value
212
213 # Computing correlations
214 for i1 in range(0, r.size):
215 for i2 in range(0,r.size):
216 R[i1,i2] = R[i1,i2] + (v[i1][0]*v[i2][0].conjugate())/ (np.sqrt(np.abs(v[i1][0])**2 * np.abs(v[i2][0])**2));
217
218 R = R / num
219 print "R from voltages"
220 print R
221
222 plt.figure(3);
223 plt.pcolor(abs(R));
224 plt.colorbar();
225 plt.title("Antenna Correlations")
226 plt.draw()
227 plt.show()
228
229 #--------------------------------------------------------------------------------------------------
230
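Note on the block above: normalizing each snapshot's cross product by its own magnitude averages unit-magnitude phasors, so only phase information survives. Since the commit message says some things remain to be fixed, a common alternative coherence estimator is sketched here as a possibility (an assumption about the intent, not the committed method): average the cross products and the powers separately, then normalize.

import numpy as np

def coherence(snapshots):
    # snapshots: complex array of shape (num_snapshots, num_antennas)
    num_ant = snapshots.shape[1]
    Racc = np.zeros((num_ant, num_ant), dtype=complex)
    Pacc = np.zeros(num_ant)
    for v in snapshots:
        Racc += np.outer(v, v.conj())    # accumulate <v_i v_j*>
        Pacc += np.abs(v)**2             # accumulate <|v_i|^2>
    return Racc / np.sqrt(np.outer(Pacc, Pacc))

vs = np.random.randn(10, 8) + 1j * np.random.randn(10, 8)   # toy snapshots: 10 trials, 8 antennas
print(np.round(np.abs(coherence(vs)), 2))                   # unit diagonal, |entries| <= 1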
202 # Calculate cross-correlation matrix (Fourier components of image)
231 # Calculate cross-correlation matrix (Fourier components of image)
203 # This is an inefficient way to do this.
232 # This is an inefficient way to do this.
204
233
234 # MATLAB CORR::: R(i1,i2) = fact.' * exp(j*k*(r(i1)-r(i2))*sin(thetat));
235
205 R = np.zeros(shape=(r.size, r.size), dtype=object);
236 R = np.zeros(shape=(r.size, r.size), dtype=object);
206
237
238 thetat = thetat.reshape(Nt,)
207 for i1 in range(0, r.size):
239 for i1 in range(0, r.size):
208 for i2 in range(0,r.size):
240 for i2 in range(0,r.size):
209 R[i1,i2] = np.dot(fact.T, np.exp(1j*k*np.dot((r[i1]-r[i2]),np.sin(thetat))))
241 R[i1,i2] = sum(np.dot(fact.T, np.exp(1j*k*((r[i1]-r[i2])*np.sin(thetat)))))
210 R[i1,i2] = sum(R[i1,i2])
242
211
243 print "R w/o voltages\n", R
244 # plt.figure(3);
245 # plt.pcolor(abs(R));
246 # plt.colorbar();
247 # plt.title("Antenna Correlations")
248 # plt.xlabel("Antenna")
249 # plt.ylabel("Antenna")
250 # plt.draw()
251 # plt.show()
252
253
212 # Add uncertainty
254 # Add uncertainty
213 # This is an ad-hoc way of adding "noise". It models some combination of
255 # This is an ad-hoc way of adding "noise". It models some combination of
214 # receiver noise and finite integration times. We could use a more
256 # receiver noise and finite integration times. We could use a more
215 # advanced model (like in Yu et al 2000) in the future.
257 # advanced model (like in Yu et al 2000) in the future.
216
258
217 # This is a way of adding noise while maintaining the
259 # This is a way of adding noise while maintaining the
218 # positive-semi-definiteness of the matrix.
260 # positive-semi-definiteness of the matrix.
219
261
220 U = linalg.cholesky(R.astype(complex), lower=False); # U'*U = R
262 U = linalg.cholesky(R.astype(complex), lower=False); # U'*U = R
221
263
222 sigma_noise = (np.linalg.norm(U,'fro')/SNR);
264 sigma_noise = (np.linalg.norm(U,'fro')/SNR);
223
265
224 nz = np.multiply((sigma_noise * (np.random.randn(U.shape[0]) + 1j*np.random.randn(U.shape[0]))/np.sqrt(2)), (abs(U) > 0).astype(float));
266 nz = np.multiply((sigma_noise * (np.random.randn(U.shape[0]) + 1j*np.random.randn(U.shape[0]))/np.sqrt(2)), (abs(U) > 0).astype(float));
225
267
226 Unz = U + nz;
268 Unz = U + nz;
227 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
269 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
228 plt.figure(3);
270 # plt.figure(3);
229 plt.pcolor(abs(R));
271 # plt.pcolor(abs(R));
230 plt.colorbar();
272 # plt.colorbar();
231
273
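Note on the noise model above: the perturbation is added to the Cholesky factor U rather than to R itself, so the noisy matrix Rnz = (U + nz)^H (U + nz) is a Gram matrix and is positive semidefinite by construction. A small standalone check of that property (synthetic matrix, unrelated to the data above):

import numpy as np
from scipy import linalg

np.random.seed(1)
A = np.random.randn(5, 5) + 1j * np.random.randn(5, 5)
R0 = np.dot(A.conj().T, A)                    # Hermitian positive-definite test matrix
U = linalg.cholesky(R0, lower=False)          # U^H U = R0
nz = 0.1 * (np.random.randn(5, 5) + 1j * np.random.randn(5, 5))
Rnz = np.dot((U + nz).conj().T, U + nz)
print(np.min(np.linalg.eigvalsh(Rnz)))        # non-negative up to round-off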
232 #-------------------------------------------------------------------------------------------------
274 #-------------------------------------------------------------------------------------------------
233 # Fourier Inversion
275 # Fourier Inversion
234 #-------------------------------------------------------------------------------------------------
276 #-------------------------------------------------------------------------------------------------
235 f_fourier = np.zeros(shape=(Nr,1), dtype=complex);
277 f_fourier = np.zeros(shape=(Nr,1), dtype=complex);
236
278
237 for i in range(0, thetar.size):
279 for i in range(0, thetar.size):
238 th = thetar[i];
280 th = thetar[i];
239 w = np.exp(1j*k*np.dot(r,np.sin(th)));
281 w = np.exp(1j*k*np.dot(r,np.sin(th)));
240 temp = np.dot(w.T.conj(),U)
282 f_fourier[i] = np.dot(np.dot(w.T.conj(),U) , w);
241 f_fourier[i] = np.dot(temp, w);
242
283
243 f_fourier = f_fourier.real; # get rid of numerical imaginary noise
284 f_fourier = f_fourier.real; # get rid of numerical imaginary noise
244
285
245
286
246 #-------------------------------------------------------------------------------------------------
287 #-------------------------------------------------------------------------------------------------
247 # Capon Inversion
288 # Capon Inversion
248 #-------------------------------------------------------------------------------------------------
289 #-------------------------------------------------------------------------------------------------
249 f_capon = np.zeros(shape=(Nr,1));
290 f_capon = np.zeros(shape=(Nr,1));
250
291
251 tic_capon = time.time();
292 tic_capon = time.time();
252
293
253 for i in range(0, thetar.size):
294 for i in range(0, thetar.size):
254 th = thetar[i];
295 th = thetar[i];
255 w = np.exp(1j*k*np.dot(r,np.sin(th)));
296 w = np.exp(1j*k*np.dot(r,np.sin(th)));
256 f_capon[i] = 1/ ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real
297 f_capon[i] = 1/ ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real
257
298
258 toc_capon = time.time()
299 toc_capon = time.time()
259 elapsed_time_capon = toc_capon - tic_capon;
300 elapsed_time_capon = toc_capon - tic_capon;
260
301
261 f_capon = f_capon.real; # get rid of numerical imaginary noise
302 f_capon = f_capon.real; # get rid of numerical imaginary noise
262
303
263
304
264 #-------------------------------------------------------------------------------------------------
305 #-------------------------------------------------------------------------------------------------
265 # MaxEnt Inversion
306 # MaxEnt Inversion
266 #-------------------------------------------------------------------------------------------------
307 #-------------------------------------------------------------------------------------------------
267
308
268 # Create the appropriate sensing matrix (split into real and imaginary # parts)
309 # Create the appropriate sensing matrix (split into real and imaginary # parts)
269 M = (r.size-1)*(r.size);
310 M = (r.size-1)*(r.size);
270 Ht = np.zeros(shape=(M,Nt)); # "true" sensing matrix
311 Ht = np.zeros(shape=(M,Nt)); # "true" sensing matrix
271 Hr = np.zeros(shape=(M,Nr)); # approximate sensing matrix for reconstruction
312 Hr = np.zeros(shape=(M,Nr)); # approximate sensing matrix for reconstruction
272
313
273 # Need to re-index our measurements from matrix R into vector g
314 # Need to re-index our measurements from matrix R into vector g
274 g = np.zeros(shape=(M,1));
315 g = np.zeros(shape=(M,1));
275 gnz = np.zeros(shape=(M,1)); # noisy version of g
316 gnz = np.zeros(shape=(M,1)); # noisy version of g
276
317
277 # Triangular indexing to perform this re-indexing
318 # Triangular indexing to perform this re-indexing
278 T = np.ones(shape=(r.size,r.size));
319 T = np.ones(shape=(r.size,r.size));
279 [i1v,i2v] = np.where(np.triu(T,1) > 0); # converts linear to triangular indexing
320 [i1v,i2v] = np.where(np.triu(T,1) > 0); # converts linear to triangular indexing
280
321
281 # Build H
322 # Build H
282 for i1 in range(0, r.size):
323 for i1 in range(0, r.size):
283 for i2 in range(i1+1, r.size):
324 for i2 in range(i1+1, r.size):
284 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward 1->27
325 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward (1->27)
285 idx1 = 2*idx;
326 idx1 = 2*idx;
286 idx2 = 2*idx+1;
327 idx2 = 2*idx+1;
287 Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
328 Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
288 Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
329 Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
289 Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
330 Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
290 Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
331 Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
291 g[idx1] = (R[i1,i2]).real*Nr/Nt;
332 g[idx1] = (R[i1,i2]).real*Nr/Nt;
292 g[idx2] = (R[i1,i2]).imag*Nr/Nt;
333 g[idx2] = (R[i1,i2]).imag*Nr/Nt;
293 gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
334 gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
294 gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;
335 gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;
295
336
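Note on the re-indexing above: N antennas give N*(N-1)/2 distinct antenna pairs, and each complex correlation contributes one real and one imaginary row, hence M = (N-1)*N rows in g and H. A quick standalone check of the triangular indexing (N = 8 as used here):

import numpy as np

N = 8
M = (N - 1) * N
i1v, i2v = np.where(np.triu(np.ones((N, N)), 1) > 0)
print(len(i1v))                           # 28 distinct antenna pairs
print(M)                                  # 56 rows after the real/imaginary split
idx = np.where((i1v == 0) & (i2v == 3))[0]
print(np.hstack((2 * idx, 2 * idx + 1)))  # the two row indices assigned to the (0, 3) pair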
296 # Inversion
337 # Inversion
297 F = Nr/Nt; # normalization
338 F = Nr/Nt; # normalization
298 sigma = 1; # set to 1 because the difference is accounted for in G
339 sigma = 1; # set to 1 because the difference is accounted for in G
299
340
300 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
341 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
301 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
342 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
302
343
303
344
304 # Whitened solution
345 # Whitened solution
305 def myfun(lambda1):
346 def myfun(lambda1):
306 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
347 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
307
348
308
349
309 tic_maxEnt = time.time(); # start time maxEnt
350 tic_maxEnt = time.time(); # start time maxEnt
310
351
311 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14);
352 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14); # krylov
353 # lambda1 = newton_krylov(myfun,lambda0, method='lgmres', x_tol=1e-14)
312
354
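Note on the solver call above: root(..., method='krylov') and the commented newton_krylov call end up in the same Newton-Krylov machinery in SciPy. A toy illustration of the two call patterns on a small nonlinear system (unrelated to the imaging problem; tolerances are illustrative):

import numpy as np
from scipy.optimize import root, newton_krylov

def F(x):
    return np.array([x[0] + 0.5 * np.cos(x[1]) - 1.0,
                     x[1] + 0.5 * np.sin(x[0]) - 1.0])

sol = root(F, np.zeros(2), method='krylov', tol=1e-10)   # same call pattern as above
x_root = sol.x
x_nk = newton_krylov(F, np.zeros(2), f_tol=1e-10)        # the commented alternative
print(np.allclose(x_root, x_nk, atol=1e-6))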
313 toc_maxEnt = time.time()
355 toc_maxEnt = time.time()
314 elapsed_time_maxent = toc_maxEnt - tic_maxEnt;
356 elapsed_time_maxent = toc_maxEnt - tic_maxEnt;
315
357
316 # Solution
358 # Solution
317 lambda1 = lambda1.x;
359 lambda1 = lambda1.x;
318
360
319 f_maxent = modelf(lambda1, Hr, F);
361 f_maxent = modelf(lambda1, Hr, F);
320
362
321 #------ what is this needed for?-------
363 #------ what is this needed for?-------
322 ystar = myfun(lambda1);
364 # ystar = myfun(lambda1);
323 Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
365 # Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
324 ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
366 # ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
325 es = np.dot(Hr, f_maxent) - gnz;
367 # es = np.dot(Hr, f_maxent) - gnz;
326 chi2 = np.sum((es/sigma)**2);
368 # chi2 = np.sum((es/sigma)**2);
327 #--------------------------------------
369 #--------------------------------------
328
370
329
371
330 #-------------------------------------------------------------------------------------------------
372 #-------------------------------------------------------------------------------------------------
331 # CS inversion using Iteratively Reweighted Least Squares (IRLS)
373 # CS inversion using Iteratively Reweighted Least Squares (IRLS)
332 #-------------------------------------------------------------------------------------------------
374 #-------------------------------------------------------------------------------------------------
333 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
375 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
334
376
335 Psi = deb4_basis(Nr);
377 Psi = deb4_basis(Nr);
336
378
337 # add "sum to 1" constraint
379 # add "sum to 1" constraint
338 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
380 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
339 g2 = np.concatenate( (gnz, np.array([[Nr/Nt]])), axis=0 );
381 g2 = np.concatenate( (gnz, np.array([[Nr/Nt]])), axis=0 );
340
382
341 tic_cs = time.time();
383 tic_cs = time.time();
342
384
343 # Inversion
385 # Inversion
344 s = irls_dn2(np.dot(H2,Psi),g2,0.5,G);
386 s = irls_dn2(np.dot(H2,Psi),g2,0.5,G);
345
387
346 toc_cs = time.time()
388 toc_cs = time.time()
347 elapsed_time_cs = toc_cs - tic_cs;
389 elapsed_time_cs = toc_cs - tic_cs;
348
390
349 # Brightness function
391 # Brightness function
350 f_cs = np.dot(Psi,s);
392 f_cs = np.dot(Psi,s);
351
393
352
394
353 print "shapes:"
395 print "shapes:"
354 print "Psi ", Psi.shape
396 print "Psi ", Psi.shape
355 print "s ", s.shape
397 print "s ", s.shape
356 print "f_CS ", f_cs.shape
398 print "f_CS ", f_cs.shape
399
357
400
358 # Plot
401 # # Plot
359 plt.figure(4)
402 # plt.figure(4)
360 plt.plot(thetar,f_cs,'r.-');
403 # plt.plot(thetar,f_cs,'r.-');
361 plt.plot(thetat,fact,'k-');
404 # plt.plot(thetat,fact,'k-');
362 plt.title('Reconstruction with Compressed Sensing')
405 # plt.title('Reconstruction with Compressed Sensing')
363
406
364
407
365 #-------------------------------------------------------------------------------------------------
408 #-------------------------------------------------------------------------------------------------
366 # Scaling and shifting
409 # Scaling and shifting
367 # (Only necessary for Capon solution)
410 # (Only necessary for Capon solution)
368 #-------------------------------------------------------------------------------------------------
411 #-------------------------------------------------------------------------------------------------
369 f_capon = f_capon/np.max(f_capon)*np.max(fact);
412 f_capon = f_capon/np.max(f_capon)*np.max(fact);
370
413
371
414
372 #-------------------------------------------------------------------------------------------------
415 #-------------------------------------------------------------------------------------------------
373 # Analyze stuff
416 # Analyze stuff
374 #-------------------------------------------------------------------------------------------------
417 #-------------------------------------------------------------------------------------------------
375
418
376 # Calculate MSE
419 # Calculate MSE
377 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
420 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
378 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
421 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
379 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
422 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
380 rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));
423 rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));
381
424
382
425
383 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
426 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
384 relrmse_capon = rmse_capon / np.linalg.norm(fact);
427 relrmse_capon = rmse_capon / np.linalg.norm(fact);
385 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
428 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
386 relrmse_cs = rmse_cs / np.linalg.norm(fact);
429 relrmse_cs = rmse_cs / np.linalg.norm(fact);
387
430
388
431
389 # Calculate correlation
432 # Calculate correlation
390 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
433 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
391 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
434 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
392 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
435 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
393 corr_cs = np.dot(f_cs.T.conj(),factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));
436 corr_cs = np.dot(f_cs.T.conj(),factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));
394
437
395
438
396 # Calculate centered correlation
439 # Calculate centered correlation
397 f0 = factr - np.mean(factr);
440 f0 = factr - np.mean(factr);
398 f1 = f_fourier - np.mean(f_fourier);
441 f1 = f_fourier - np.mean(f_fourier);
399
400 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
442 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
401 f1 = f_capon - np.mean(f_capon);
443 f1 = f_capon - np.mean(f_capon);
402 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
444 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
403 f1 = f_maxent - np.mean(f_maxent);
445 f1 = f_maxent - np.mean(f_maxent);
404 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
446 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
405 f1 = f_cs - np.mean(f_cs);
447 f1 = f_cs - np.mean(f_cs);
406 corrc_cs = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
448 corrc_cs = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
407
449
408
450
409 #-------------------------------------------------------------------------------------------------
451 #-------------------------------------------------------------------------------------------------
410 # Plot stuff
452 # Plot stuff
411 #-------------------------------------------------------------------------------------------------
453 #-------------------------------------------------------------------------------------------------
412
454
413 #---- Capon----#
455 #---- Capon----#
414 plt.figure(5)
456 plt.figure(5)
415 plt.subplot(3, 1, 1)
457 plt.subplot(3, 1, 1)
416 plt.plot(180/np.pi*thetar, f_capon, 'r', label='Capon')
458 plt.plot(180/np.pi*thetar, f_capon, 'r', label='Capon')
417 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
459 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
418 plt.ylabel('Power (arbitrary units)')
460 plt.ylabel('Power (arbitrary units)')
419 plt.legend(loc='upper right')
461 plt.legend(loc='upper right')
420
462
421 # formatting y-axis
463 # formatting y-axis
422 locs,labels = plt.yticks()
464 locs,labels = plt.yticks()
423 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
465 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
424 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
466 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
425
467
426
468
427 #---- MaxEnt----#
469 #---- MaxEnt----#
428 plt.subplot(3, 1, 2)
470 plt.subplot(3, 1, 2)
429 plt.plot(180/np.pi*thetar, f_maxent, 'r', label='MaxEnt')
471 plt.plot(180/np.pi*thetar, f_maxent, 'r', label='MaxEnt')
430 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
472 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
431 plt.ylabel('Power (arbitrary units)')
473 plt.ylabel('Power (arbitrary units)')
432 plt.legend(loc='upper right')
474 plt.legend(loc='upper right')
433
475
434 # formatting y-axis
476 # formatting y-axis
435 locs,labels = plt.yticks()
477 locs,labels = plt.yticks()
436 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
478 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
437 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
479 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
438
480
439
481
440 #---- Compressed Sensing----#
482 #---- Compressed Sensing----#
441 plt.subplot(3, 1, 3)
483 plt.subplot(3, 1, 3)
442 plt.plot(180/np.pi*thetar, f_cs, 'r', label='CS')
484 plt.plot(180/np.pi*thetar, f_cs, 'r', label='CS')
443 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
485 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
444 plt.ylabel('Power (arbitrary units)')
486 plt.ylabel('Power (arbitrary units)')
445 plt.legend(loc='upper right')
487 plt.legend(loc='upper right')
446
488
447 # formatting y-axis
489 # formatting y-axis
448 locs,labels = plt.yticks()
490 locs,labels = plt.yticks()
449 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
491 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
450 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
492 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
451
493
452 # # Store Results
494 # # Store Results
453 corr[0, snri, Ni] = corr_fourier;
495 corr[0, snri, Ni] = corr_fourier;
454 corr[1, snri, Ni] = corr_capon;
496 corr[1, snri, Ni] = corr_capon;
455 corr[2, snri, Ni] = corr_maxent;
497 corr[2, snri, Ni] = corr_maxent;
456 corr[3, snri, Ni] = corr_cs;
498 corr[3, snri, Ni] = corr_cs;
457
499
458 rmse[0,snri,Ni] = relrmse_fourier;
500 rmse[0,snri,Ni] = relrmse_fourier;
459 rmse[1,snri,Ni] = relrmse_capon;
501 rmse[1,snri,Ni] = relrmse_capon;
460 rmse[2,snri,Ni] = relrmse_maxent;
502 rmse[2,snri,Ni] = relrmse_maxent;
461 rmse[3,snri,Ni] = relrmse_cs;
503 rmse[3,snri,Ni] = relrmse_cs;
462
504
463 corrc[0,snri,Ni] = corrc_fourier;
505 corrc[0,snri,Ni] = corrc_fourier;
464 corrc[1,snri,Ni] = corrc_capon;
506 corrc[1,snri,Ni] = corrc_capon;
465 corrc[2,snri,Ni] = corrc_maxent;
507 corrc[2,snri,Ni] = corrc_maxent;
466 corrc[3,snri,Ni] = corrc_cs;
508 corrc[3,snri,Ni] = corrc_cs;
467
509
468 print "--------Time performance--------"
510 print "--------Time performance--------"
469 print 'Capon:\t', elapsed_time_capon, 'sec';
511 print 'Capon:\t', elapsed_time_capon, 'sec';
470 print 'Maxent:\t',elapsed_time_maxent, 'sec';
512 print 'Maxent:\t',elapsed_time_maxent, 'sec';
471 print 'CS:\t',elapsed_time_cs, 'sec\n';
513 print 'CS:\t',elapsed_time_cs, 'sec\n';
472
514
473 print (NN*(snri)+Ni+1), '/', (SNRdBvec.size*NN), '\n';
515 print (NN*(snri)+Ni+1), '/', (SNRdBvec.size*NN), '\n';
474
516
475
517
476 #-------------------------------------------------------------------------------------------------
518 #-------------------------------------------------------------------------------------------------
477 # Analyze and plot statistics
519 # Analyze and plot statistics
478 #-------------------------------------------------------------------------------------------------
520 #-------------------------------------------------------------------------------------------------
479
521
480 metric = corr; # set this to rmse, corr, or corrc
522 metric = corr; # set this to rmse, corr, or corrc
481
523
482 # Remove outliers (this part was experimental and wasn't used in the paper)
524 # Remove outliers (this part was experimental and wasn't used in the paper)
483 # nsig = 3;
525 # nsig = 3;
484 # for i = 1:4
526 # for i = 1:4
485 # for snri = 1:length(SNRdBvec)
527 # for snri = 1:length(SNRdBvec)
486 # av = mean(met(i,snri,:));
528 # av = mean(met(i,snri,:));
487 # s = std(met(i,snri,:));
529 # s = std(met(i,snri,:));
488 # idx = abs(met(i,snri,:) - av) > nsig*s;
530 # idx = abs(met(i,snri,:) - av) > nsig*s;
489 # met(i,snri,idx) = nan;
531 # met(i,snri,idx) = nan;
490 # if sum(idx)>0
532 # if sum(idx)>0
491 # fprintf('i=%i, snr=%i, %i/%i pts removed\n',...
533 # fprintf('i=%i, snr=%i, %i/%i pts removed\n',...
492 # i,round(SNRdBvec(snri)),sum(idx),length(idx));
534 # i,round(SNRdBvec(snri)),sum(idx),length(idx));
493 # end
535 # end
494 # end
536 # end
495 # end
537 # end
496
538
497
539
498 # Avg ignoring NaNs
540 # Avg ignoring NaNs
499 ave = np.zeros(shape=(4))
541 ave = np.zeros(shape=(4))
500
542
501 print metric
543 print metric
502
544 print metric.shape
503 ave[0] = nanmean(metric[0,:,:]); # Fourier
545
504 ave[1] = nanmean(metric[1,:,:]); # Capon
546 ave[0] = nanmean(metric[0,0,:]); # Fourier
505 ave[2] = nanmean(metric[2,:,:]); # MaxEnt
547 ave[1] = nanmean(metric[1,0,:]); # Capon
506 ave[3] = nanmean(metric[3,:,:]); # Compressed Sensing
548 ave[2] = nanmean(metric[2,0,:]); # MaxEnt
549 ave[3] = nanmean(metric[3,0,:]); # Compressed Sensing
550
551 print ave
507
552
508 # Plot based on chosen metric
553 # Plot based on chosen metric
509 plt.figure(6);
554 plt.figure(6);
510 f = plt.scatter(SNRdBvec, ave[0], marker='+', color='b', s=60); # Fourier
555 f = plt.scatter(SNRdBvec, ave[0], marker='+', color='b', s=60); # Fourier
511 c = plt.scatter(SNRdBvec, ave[1], marker='o', color= 'g', s=60); # Capon
556 c = plt.scatter(SNRdBvec, ave[1], marker='o', color= 'g', s=60); # Capon
512 me= plt.scatter(SNRdBvec, ave[2], marker='s', color= 'c', s=60); # MaxEnt
557 me= plt.scatter(SNRdBvec, ave[2], marker='s', color= 'c', s=60); # MaxEnt
513 cs= plt.scatter(SNRdBvec, ave[3], marker='*', color='r', s=60); # Compressed Sensing
558 cs= plt.scatter(SNRdBvec, ave[3], marker='*', color='r', s=60); # Compressed Sensing
514
559
515 plt.legend((f,c,me,cs),('Fourier','Capon', 'MaxEnt', 'Comp. Sens.'),scatterpoints=1, loc='upper right')
560 plt.legend((f,c,me,cs),('Fourier','Capon', 'MaxEnt', 'Comp. Sens.'),scatterpoints=1, loc='upper right')
516 plt.xlabel('SNR')
561 plt.xlabel('SNR')
517 plt.ylabel('Correlation with Truth')
562 plt.ylabel('Correlation with Truth')
518
563
519 print "--------Correlations--------"
564 print "--------Correlations--------"
520 print "Fourier:", ave[0]
565 print "Fourier:", ave[0]
521 print "Capon:\t", ave[1]
566 print "Capon:\t", ave[1]
522 print "MaxEnt:\t", ave[2]
567 print "MaxEnt:\t", ave[2]
523 print "CS:\t", ave[3]
568 print "CS:\t", ave[3]
524
569
525 plt.show()
570 plt.show()
526
571
527
572
@@ -1,65 +1,65
1 '''
1 '''
2 Created on May 29, 2014
2 Created on May 29, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 import numpy as np
7 import numpy as np
8
8
9 def dualfilt1():
9 def dualfilt1():
10
10
11 # Kingsbury Q-filters for the dual-tree complex DWT
11 # Kingsbury Q-filters for the dual-tree complex DWT
12 #
12 #
13 # USAGE:
13 # USAGE:
14 # [af, sf] = dualfilt1
14 # [af, sf] = dualfilt1
15 # OUTPUT:
15 # OUTPUT:
16 # af{i}, i = 1,2 - analysis filters for tree i
16 # af{i}, i = 1,2 - analysis filters for tree i
17 # sf{i}, i = 1,2 - synthesis filters for tree i
17 # sf{i}, i = 1,2 - synthesis filters for tree i
18 # note: af{2} is the reverse of af{1}
18 # note: af{2} is the reverse of af{1}
19 # REFERENCE:
19 # REFERENCE:
20 # N. G. Kingsbury, "A dual-tree complex wavelet
20 # N. G. Kingsbury, "A dual-tree complex wavelet
21 # transform with improved orthogonality and symmetry
21 # transform with improved orthogonality and symmetry
22 # properties", Proceedings of the IEEE Int. Conf. on
22 # properties", Proceedings of the IEEE Int. Conf. on
23 # Image Proc. (ICIP), 2000
23 # Image Proc. (ICIP), 2000
24 # See dualtree
24 # See dualtree
25 #
25 #
26 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
26 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
27 # http://taco.poly.edu/WaveletSoftware/
27 # http://taco.poly.edu/WaveletSoftware/
28
28
29 # These cofficients are rounded to 8 decimal places.
29 # These coefficients are rounded to 8 decimal places.
30
30
31 a1 = np.array([
31 a1 = np.array([
32 [ 0.03516384000000, 0],
32 [ 0.03516384000000, 0],
33 [ 0, 0],
33 [ 0, 0],
34 [-0.08832942000000, -0.11430184000000],
34 [-0.08832942000000, -0.11430184000000],
35 [ 0.23389032000000, 0],
35 [ 0.23389032000000, 0],
36 [ 0.76027237000000, 0.58751830000000],
36 [ 0.76027237000000, 0.58751830000000],
37 [ 0.58751830000000, -0.76027237000000],
37 [ 0.58751830000000, -0.76027237000000],
38 [ 0, 0.23389032000000],
38 [ 0, 0.23389032000000],
39 [-0.11430184000000, 0.08832942000000],
39 [-0.11430184000000, 0.08832942000000],
40 [ 0, 0],
40 [ 0, 0],
41 [ 0, -0.03516384000000]
41 [ 0, -0.03516384000000]
42 ]);
42 ]);
43
43
44 a2 = np.array([
44 a2 = np.array([
45 [ 0, -0.03516384000000],
45 [ 0, -0.03516384000000],
46 [ 0, 0],
46 [ 0, 0],
47 [-0.11430184000000, 0.08832942000000],
47 [-0.11430184000000, 0.08832942000000],
48 [ 0, 0.23389032000000],
48 [ 0, 0.23389032000000],
49 [ 0.58751830000000, -0.76027237000000],
49 [ 0.58751830000000, -0.76027237000000],
50 [ 0.76027237000000, 0.58751830000000],
50 [ 0.76027237000000, 0.58751830000000],
51 [ 0.23389032000000, 0],
51 [ 0.23389032000000, 0],
52 [ -0.08832942000000, -0.11430184000000],
52 [ -0.08832942000000, -0.11430184000000],
53 [ 0, 0],
53 [ 0, 0],
54 [ 0.03516384000000, 0]
54 [ 0.03516384000000, 0]
55 ]);
55 ]);
56
56
57 af = np.array([ [a1,a2] ], dtype=object)
57 af = np.array([ [a1,a2] ], dtype=object)
58
58
59 s1 = a1[::-1]
59 s1 = a1[::-1]
60 s2 = a2[::-1]
60 s2 = a2[::-1]
61
61
62 sf = np.array([ [s1,s2] ], dtype=object)
62 sf = np.array([ [s1,s2] ], dtype=object)
63
63
64
64
65 return af, sf
65 return af, sf
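A quick standalone check of the filters returned by dualfilt1() (this assumes the function is saved as dualfilt1.py so it can be imported; the property checked is the one stated in the header comment):

import numpy as np
from dualfilt1 import dualfilt1

af, sf = dualfilt1()
a1 = np.asarray(af[0][0], dtype=float)    # tree-1 analysis filters, shape (10, 2)
a2 = np.asarray(af[0][1], dtype=float)    # tree-2 analysis filters
s1 = np.asarray(sf[0][0], dtype=float)    # tree-1 synthesis filters
print(a1.shape)                           # (10, 2)
print(np.allclose(a2, a1[::-1]))          # af{2} is the reverse of af{1}
print(np.allclose(s1, a1[::-1]))          # synthesis filters are time-reversed analysis filters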
@@ -1,52 +1,52
1 '''
1 '''
2 Created on May 30, 2014
2 Created on May 30, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 from irls_dn import *
7 from irls_dn import *
8 from scipy.optimize import brentq
8 from scipy.optimize import brentq
9
9
10 def irls_dn2(A,b,p,G):
10 def irls_dn2(A,b,p,G):
11
11
12 # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G (0 < p <= 1)
12 # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G (0 < p <= 1)
13
13
14 # What this function actually does is finds the lambda1 so that the solution
14 # What this function actually does is finds the lambda1 so that the solution
15 # to the following problem satisfies ||A*u-b||_2^2 <= G:
15 # to the following problem satisfies ||A*u-b||_2^2 <= G:
16 # Minimize lambda1*||u||_p + ||A*u-b||_2
16 # Minimize lambda1*||u||_p + ||A*u-b||_2
17
17
18 # Start with a large lambda1, and do a line search until fidelity <= G.
18 # Start with a large lambda1, and do a line search until fidelity <= G.
19 # (Inversions with large lambda1 are really fast anyway).
19 # (Inversions with large lambda1 are really fast anyway).
20
20
21 # Then spin up fsolve to localize the root even better
21 # Then spin up fsolve to localize the root even better
22
22
23 # Line Search
23 # Line Search
24
24
25 alpha = 2.0; # Line search parameter
25 alpha = 2.0; # Line search parameter
26 lambda1 = 1e5; # What's a reasonable but safe initial guess?
26 lambda1 = 1e5; # What's a reasonable but safe initial guess?
27 u = irls_dn(A,b,p,lambda1);
27 u = irls_dn(A,b,p,lambda1);
28
28
29
29
30 fid = norm(np.dot(A,u)-b)**2;
30 fid = norm(np.dot(A,u)-b)**2;
31
31
32 print '----------------------------------\n';
32 print '----------------------------------\n';
33
33
34 while (fid >= G):
34 while (fid >= G):
35 lambda1 = lambda1 / alpha; # Balance between speed and accuracy
35 lambda1 = lambda1 / alpha; # Balance between speed and accuracy
36 u = irls_dn(A,b,p,lambda1);
36 u = irls_dn(A,b,p,lambda1);
37 fid = norm(np.dot(A,u)-b)**2;
37 fid = norm(np.dot(A,u)-b)**2;
38 print 'lambda = %2e \t' % lambda1, '||A*u-b||^2 = %.1f' % fid;
38 print 'lambda = %2e \t' % lambda1, '||A*u-b||^2 = %.1f' % fid;
39
39
40 # Refinement using fzero/ brentq
40 # Refinement using brentq (equiv to fzero in matlab)
41 lambda0 = np.array([lambda1,lambda1*alpha]); # interval with zero-crossing
41 lambda0 = np.array([lambda1,lambda1*alpha]); # interval with zero-crossing
42
42
43 def myfun(lambda1):
43 def myfun(lambda1):
44 return norm(np.dot(A, irls_dn(A,b,p,lambda1)) - b)**2 - G;
44 return norm(np.dot(A, irls_dn(A,b,p,lambda1)) - b)**2 - G;
45
45
46 # Find zero-crossing at given interval (lambda1, lambda1*alpha)
46 # Find zero-crossing at given interval (lambda1, lambda1*alpha)
47 lambda1 = brentq(myfun, lambda0[0], lambda0[1], xtol=0.01*lambda1)
47 lambda1 = brentq(myfun, lambda0[0], lambda0[1], xtol=0.01*lambda1)
48
48
49 u = irls_dn(A,b,p,lambda1);
49 u = irls_dn(A,b,p,lambda1);
50
50
51 return u;
51 return u;
52
52
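An illustrative way to exercise irls_dn2 on a small synthetic sparse-recovery problem (this assumes irls_dn2.py and its irls_dn dependency are importable; sizes, sparsity pattern and noise level are made up for the example):

import numpy as np
from irls_dn2 import irls_dn2

np.random.seed(0)
m, n = 30, 100
A = np.random.randn(m, n)
u_true = np.zeros((n, 1))
u_true[[5, 40, 77]] = [[1.0], [-0.5], [2.0]]       # sparse truth
noise = 0.01 * np.random.randn(m, 1)
b = np.dot(A, u_true) + noise
G = np.linalg.norm(noise)**2                       # fidelity target ||A*u - b||^2 <= G
u = irls_dn2(A, b, 0.5, G)                         # p = 0.5 promotes sparsity
print(np.flatnonzero(np.abs(u) > 0.1))             # ideally close to [5, 40, 77]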