Cleaned up code, fixed bugs/errors, and updated the plotting section.
yamaro
r23:24
@@ -1,63 +1,63
1 '''
1 '''
2 Created on May 26, 2014
2 Created on May 26, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 import pywt
7 #import pywt
8 import numpy as np
8 import numpy as np
9
9
10 def FSfarras():
10 def FSfarras():
11 #function [af, sf] = FSfarras
11 #function [af, sf] = FSfarras
12
12
13 # Farras filters organized for the dual-tree
13 # Farras filters organized for the dual-tree
14 # complex DWT.
14 # complex DWT.
15 #
15 #
16 # USAGE:
16 # USAGE:
17 # [af, sf] = FSfarras
17 # [af, sf] = FSfarras
18 # OUTPUT:
18 # OUTPUT:
19 # af{i}, i = 1,2 - analysis filters for tree i
19 # af{i}, i = 1,2 - analysis filters for tree i
20 # sf{i}, i = 1,2 - synthesis filters for tree i
20 # sf{i}, i = 1,2 - synthesis filters for tree i
21 # See farras, dualtree, dualfilt1.
21 # See farras, dualtree, dualfilt1.
22 #
22 #
23 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
23 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
24 # http://taco.poly.edu/WaveletSoftware/
24 # http://taco.poly.edu/WaveletSoftware/
25 #
25 #
26 # Translated to Python by Yolian Amaro
26 # Translated to Python by Yolian Amaro
27
27
28
28
29 a1 = np.array( [
29 a1 = np.array( [
30 [ 0, 0],
30 [ 0, 0],
31 [-0.08838834764832, -0.01122679215254],
31 [-0.08838834764832, -0.01122679215254],
32 [ 0.08838834764832, 0.01122679215254],
32 [ 0.08838834764832, 0.01122679215254],
33 [ 0.69587998903400, 0.08838834764832],
33 [ 0.69587998903400, 0.08838834764832],
34 [ 0.69587998903400, 0.08838834764832],
34 [ 0.69587998903400, 0.08838834764832],
35 [ 0.08838834764832, -0.69587998903400],
35 [ 0.08838834764832, -0.69587998903400],
36 [-0.08838834764832, 0.69587998903400],
36 [-0.08838834764832, 0.69587998903400],
37 [ 0.01122679215254, -0.08838834764832],
37 [ 0.01122679215254, -0.08838834764832],
38 [ 0.01122679215254, -0.08838834764832],
38 [ 0.01122679215254, -0.08838834764832],
39 [0, 0]
39 [0, 0]
40 ] );
40 ] );
41
41
42 a2 = np.array([
42 a2 = np.array([
43 [ 0.01122679215254, 0],
43 [ 0.01122679215254, 0],
44 [ 0.01122679215254, 0],
44 [ 0.01122679215254, 0],
45 [-0.08838834764832, -0.08838834764832],
45 [-0.08838834764832, -0.08838834764832],
46 [ 0.08838834764832, -0.08838834764832],
46 [ 0.08838834764832, -0.08838834764832],
47 [ 0.69587998903400, 0.69587998903400],
47 [ 0.69587998903400, 0.69587998903400],
48 [ 0.69587998903400, -0.69587998903400],
48 [ 0.69587998903400, -0.69587998903400],
49 [ 0.08838834764832, 0.08838834764832],
49 [ 0.08838834764832, 0.08838834764832],
50 [-0.08838834764832, 0.08838834764832],
50 [-0.08838834764832, 0.08838834764832],
51 [ 0, 0.01122679215254],
51 [ 0, 0.01122679215254],
52 [ 0, -0.01122679215254]
52 [ 0, -0.01122679215254]
53 ]);
53 ]);
54
54
55
55
56 af = np.array([ [a1,a2] ], dtype=object)
56 af = np.array([ [a1,a2] ], dtype=object)
57
57
58 s1 = a1[::-1]
58 s1 = a1[::-1]
59 s2 = a2[::-1]
59 s2 = a2[::-1]
60
60
61 sf = np.array([ [s1,s2] ], dtype=object)
61 sf = np.array([ [s1,s2] ], dtype=object)
62
62
63 return af, sf
63 return af, sf
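
A minimal usage sketch (not part of this commit), assuming FSfarras.py is importable from the working directory: it pulls out the tree-1 filters and checks that the synthesis filters are the time-reversed analysis filters, as the code above constructs them.

import numpy as np
from FSfarras import FSfarras

af, sf = FSfarras()
a1 = np.asarray(af[0][0], dtype=float)   # tree-1 analysis filters (10 taps x 2 bands)
s1 = np.asarray(sf[0][0], dtype=float)   # tree-1 synthesis filters
print a1.shape                           # (10, 2)
print np.allclose(s1, a1[::-1])          # True: synthesis = time-reversed analysis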
@@ -1,526 +1,496
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 #----------------------------------------------------------
3 #----------------------------------------------------------
4 # Original MATLAB code developed by Brian Harding
4 # Original MATLAB code developed by Brian Harding
5 # Rewritten in Python by Yolian Amaro
5 # Rewritten in Python by Yolian Amaro
6 # Python version 2.7
6 # Python version 2.7
7 # May 15, 2014
7 # May 15, 2014
8 # Jicamarca Radio Observatory
8 # Jicamarca Radio Observatory
9 #----------------------------------------------------------
9 #----------------------------------------------------------
10
10
11 import time
11 import time
12 import matplotlib.pyplot as plt
12 import matplotlib.pyplot as plt
13 from scipy.optimize import root
13 from scipy.optimize import root
14
14 from scipy.stats import nanmean
15 from y_hysell96 import*
15
16 from y_hysell96 import *
16 from deb4_basis import *
17 from deb4_basis import *
17 from modelf import *
18 from modelf import *
18 from irls_dn2 import *
19 from irls_dn2 import *
19 #from scipy.optimize import fsolve
20
20
21
21
22 #-------------------------------------------------------------------------------------------------
22 #-------------------------------------------------------------------------------------------------
23 # Set parameters
23 # Set parameters
24 #-------------------------------------------------------------------------------------------------
24 #-------------------------------------------------------------------------------------------------
25
25
26 ## Calculate Forward Model
26 ## Calculate Forward Model
27 lambda1 = 6.0
27 lambda1 = 6.0
28 k = 2*np.pi/lambda1
28 k = 2*np.pi/lambda1
29
29
30 ## Magnetic Declination
30 ## Magnetic Declination
31 dec = -1.24
31 dec = -1.24
32
32
33 ## Loads Jicamarca antenna positions
33 ## Loads Jicamarca antenna positions
34 antpos = np.loadtxt("antpos.txt", comments="#", delimiter=";", unpack=False)
34 antpos = np.loadtxt("antpos.txt", comments="#", delimiter=";", unpack=False)
35
35
36 ## rx and ry -- for plotting purposes
36 ## rx and ry -- for plotting purposes
37 rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
37 rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
38 ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )
38 ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )
39
39
40 ## Plot of antenna positions
40 ## Plot of antenna positions
41 plt.figure(1)
41 plt.figure(1)
42 plt.plot(rx, ry, 'ro')
42 plt.plot(rx, ry, 'ro')
43 plt.draw()
43 plt.draw()
44
44
45 ## Jicamarca is nominally at a 45 degree angle
45 ## Jicamarca is nominally at a 45 degree angle
46 theta = 45 - dec;
46 theta = 45 - dec;
47
47
48 ## Rotation matrix from antenna coord to magnetic coord (East North)
48 ## Rotation matrix from antenna coord to magnetic coord (East North)
49 theta_rad = np.radians(theta) # trig functions take radians as argument
49 theta_rad = np.radians(theta) # trig functions take radians as argument
50 val1 = float( np.cos(theta_rad) )
50 val1 = float( np.cos(theta_rad) )
51 val2 = float( np.sin(theta_rad) )
51 val2 = float( np.sin(theta_rad) )
52 val3 = float( -1*np.sin(theta_rad))
52 val3 = float( -1*np.sin(theta_rad))
53 val4 = float( np.cos(theta_rad) )
53 val4 = float( np.cos(theta_rad) )
54
54
55 # Rotation matrix from antenna coord to magnetic coord (East North)
55 # Rotation matrix from antenna coord to magnetic coord (East North)
56 R = np.array( [[val1, val3], [val2, val4]] )
56 R = np.array( [[val1, val3], [val2, val4]] )
57
57
58 # Rotate antenna positions to magnetic coord.
58 # Rotate antenna positions to magnetic coord.
59 AR = np.dot(R.T, antpos)
59 AR = np.dot(R.T, antpos)
60
60
61 # Only take the East component
61 # Only take the East component
62 r = AR[0,:]
62 r = AR[0,:]
63 r.sort()
63 r.sort()
64
64
65 # Truth model (high and low resolution)
65 # Truth model (high and low resolution)
66 Nt = (1024.0)*(16.0) # number of pixels in truth image: high resolution
66 Nt = (1024.0)*(16.0) # number of pixels in truth image: high resolution
67 thbound = 9.0/180*np.pi # the width of the domain in angle space
67 thbound = 9.0/180*np.pi # the width of the domain in angle space
68 thetat = np.linspace(-thbound, thbound,Nt) # image domain
68 thetat = np.linspace(-thbound, thbound,Nt) # image domain
69 thetat = thetat.T # transpose
69 thetat = thetat.T # transpose
70 Nr = (256.0) # number of pixels in reconstructed image: low res
70 Nr = (256.0) # number of pixels in reconstructed image: low res
71 thetar = np.linspace(-thbound, thbound,Nr) # reconstruction domain
71 thetar = np.linspace(-thbound, thbound,Nr) # reconstruction domain
72 thetar = thetar.T # transpose
72 thetar = thetar.T # transpose
73
73
74
74
75 #-------------------------------------------------------------------------------------------------
75 #-------------------------------------------------------------------------------------------------
76 # Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and background constant b.
76 # Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and background constant b.
77 #-------------------------------------------------------------------------------------------------
77 #-------------------------------------------------------------------------------------------------
78
78
79 # Triple Gaussian
79 # Triple Gaussian
80 # a = np.array([3, 5, 2]);
80 # a = np.array([3, 5, 2]);
81 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi, 7.0/180*np.pi]);
81 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi, 7.0/180*np.pi]);
82 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi, 0.3/180*np.pi]);
82 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi, 0.3/180*np.pi]);
83 # b = 0; # background
83 # b = 0; # background
84
84
85 # Double Gaussian
85 # Double Gaussian
86 # a = np.array([3, 5]);
86 # a = np.array([3, 5]);
87 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi]);
87 # mu = np.array([-5.0/180*np.pi, 2.0/180*np.pi]);
88 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi]);
88 # sig = np.array([2.0/180*np.pi, 1.5/180*np.pi]);
89 # b = 0; # background
89 # b = 0; # background
90
90
91 # Single Gaussian
91 # Single Gaussian
92 a = np.array( [3] )
92 a = np.array( [3] )
93 mu = np.array( [-3.0/180*np.pi] )
93 mu = np.array( [-3.0/180*np.pi] )
94 sig = np.array( [2.0/180*np.pi] )
94 sig = np.array( [2.0/180*np.pi] )
95 b = 0
95 b = 0
96
96
97 # Empty matrices for factors
97 # Empty matrices for factors
98 fact = np.zeros(shape=(Nt,1))
98 fact = np.zeros(shape=(Nt,1))
99 factr = np.zeros(shape=(Nr,1))
99 factr = np.zeros(shape=(Nr,1))
100
100
101 # Gaussian pulse(s) evaluated on the truth and reconstruction grids
101 # Gaussian pulse(s) evaluated on the truth and reconstruction grids
102 for i in range(0, a.size):
102 for i in range(0, a.size):
103 temp = (-(thetat-mu[i])**2/(sig[i]**2))
103 temp = (-(thetat-mu[i])**2/(sig[i]**2))
104 tempr = (-(thetar-mu[i])**2/(sig[i]**2))
104 tempr = (-(thetar-mu[i])**2/(sig[i]**2))
105 for j in range(0, temp.size):
105 for j in range(0, temp.size):
106 fact[j] = fact[j] + a[i]*np.exp(temp[j]);
106 fact[j] = fact[j] + a[i]*np.exp(temp[j]);
107 for m in range(0, tempr.size):
107 for m in range(0, tempr.size):
108 factr[m] = factr[m] + a[i]*np.exp(tempr[m]);
108 factr[m] = factr[m] + a[i]*np.exp(tempr[m]);
109
109
110 fact = fact + b;
110 fact = fact + b;
111 factr = factr + b;
111 factr = factr + b;
112
112
113 # #-------------------------------------------------------------------------------------------------
113 # #-------------------------------------------------------------------------------------------------
114 # # Model for f: Square pulse
114 # # Model for f: Square pulse
115 # #-------------------------------------------------------------------------------------------------
115 # #-------------------------------------------------------------------------------------------------
116 # for j in range(0, fact.size):
116 # for j in range(0, fact.size):
117 # if (theta > -5.0/180*np.pi and theta < 2.0/180*np.pi):
117 # if (theta > -5.0/180*np.pi and theta < 2.0/180*np.pi):
118 # fact[j] = 0
118 # fact[j] = 0
119 # else:
119 # else:
120 # fact[j] = 1
120 # fact[j] = 1
121 # for k in range(0, factr.size):
121 # for k in range(0, factr.size):
122 # if (thetar[k] > -5.0/180*np.pi and thetar[k] < 2/180*np.pi):
122 # if (thetar[k] > -5.0/180*np.pi and thetar[k] < 2/180*np.pi):
123 # factr[k] = 0
123 # factr[k] = 0
124 # else:
124 # else:
125 # factr[k] = 1
125 # factr[k] = 1
126
126
127 # #-------------------------------------------------------------------------------------------------
127 # #-------------------------------------------------------------------------------------------------
128 # # Model for f: Triangle pulse
128 # # Model for f: Triangle pulse
129 # #-------------------------------------------------------------------------------------------------
129 # #-------------------------------------------------------------------------------------------------
130 # mu = -1.0/180*np.pi;
130 # mu = -1.0/180*np.pi;
131 # sig = 5.0/180*np.pi;
131 # sig = 5.0/180*np.pi;
132 # wind1 = theta > mu-sig and theta < mu;
132 # wind1 = theta > mu-sig and theta < mu;
133 # wind2 = theta < mu+sig and theta > mu;
133 # wind2 = theta < mu+sig and theta > mu;
134 # fact = wind1 * (theta - (mu - sig));
134 # fact = wind1 * (theta - (mu - sig));
135 # factr = wind1 * (thetar - (mu - sig));
135 # factr = wind1 * (thetar - (mu - sig));
136 # fact = fact + wind2 * (-(theta-(mu+sig)));
136 # fact = fact + wind2 * (-(theta-(mu+sig)));
137 # factr = factr + wind2 * (-(thetar-(mu+sig)));
137 # factr = factr + wind2 * (-(thetar-(mu+sig)));
138
138
139
140 # fact = fact/(sum(fact)[0]*2*thbound/Nt); # normalize to integral(f)==1
139 # fact = fact/(sum(fact)[0]*2*thbound/Nt); # normalize to integral(f)==1
140
141
141
142 I = sum(fact)[0];
142 I = sum(fact)[0];
143 fact = fact/I; # normalize to sum(f)==1
143 fact = fact/I; # normalize to sum(f)==1
144 factr = factr/I; # normalize to sum(f)==1
144 factr = factr/I; # normalize to sum(f)==1
145
145
146 # Plot Gaussian pulse(s)
146 # Plot Gaussian pulse(s)
147 plt.figure(2)
147 plt.figure(2)
148 plt.plot(thetat, fact, 'r--')
148 plt.plot(thetat, fact, 'r--')
149 plt.plot(thetar, factr, 'ro')
149 plt.plot(thetar, factr, 'ro')
150 plt.draw()
150 plt.draw()
151
151
152
152
153 #-------------------------------------------------------------------------------------------------
153 #-------------------------------------------------------------------------------------------------
154 # Control the type and number of inversions with:
154 # Control the type and number of inversions with:
155 # SNRdBvec: the SNRs that will be used.
155 # SNRdBvec: the SNRs that will be used.
156 # NN: the number of trials for each SNR
156 # NN: the number of trials for each SNR
157 #-------------------------------------------------------------------------------------------------
157 #-------------------------------------------------------------------------------------------------
158
158
159 #SNRdBvec = np.linspace(5,20,10);
159 #SNRdBvec = np.linspace(5,20,10);
160 SNRdBvec = np.array([15]); # 15 dB
160 SNRdBvec = np.array([15]); # 15 dB
161 NN = 1; # number of trials at each SNR
161 NN = 1; # number of trials at each SNR
162
162
163 # Statistics simulation (correlation, root mean square)
163 # Statistics simulation (correlation, root mean square)
164 corr = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
164 corr = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
165 corrc = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
165 corrc = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
166 rmse = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
166 rmse = np.zeros(shape=(4,SNRdBvec.size,NN)); # (method, SNR, trial)
167
167
168 # For each SNR and trial
168 # For each SNR and trial
169 for snri in range(0, SNRdBvec.size):
169 for snri in range(0, SNRdBvec.size):
170 SNRdB = SNRdBvec[snri];
170 SNRdB = SNRdBvec[snri];
171 SNR = 10**(SNRdB/10.0);
171 SNR = 10**(SNRdB/10.0);
172
172
173 for Ni in range(0, NN):
173 for Ni in range(0, NN):
174 # Calculate cross-correlation matrix (Fourier components of image)
174 # Calculate cross-correlation matrix (Fourier components of image)
175 # This is an inefficient way to do this.
175 # This is an inefficient way to do this.
176
176
177 R = np.zeros(shape=(r.size, r.size), dtype=object);
177 R = np.zeros(shape=(r.size, r.size), dtype=object);
178
178
179 for i1 in range(0, r.size):
179 for i1 in range(0, r.size):
180 for i2 in range(0,r.size):
180 for i2 in range(0,r.size):
181 R[i1,i2] = np.dot(fact.T, np.exp(1j*k*np.dot((r[i1]-r[i2]),np.sin(thetat))))
181 R[i1,i2] = np.dot(fact.T, np.exp(1j*k*np.dot((r[i1]-r[i2]),np.sin(thetat))))
182 R[i1,i2] = sum(R[i1,i2])
182 R[i1,i2] = sum(R[i1,i2])
183
183
184 # Add uncertainty
184 # Add uncertainty
185 # This is an ad-hoc way of adding "noise". It models some combination of
185 # This is an ad-hoc way of adding "noise". It models some combination of
186 # receiver noise and finite integration times. We could use a more
186 # receiver noise and finite integration times. We could use a more
187 # advanced model (like in Yu et al 2000) in the future.
187 # advanced model (like in Yu et al 2000) in the future.
188
188
189 # This is a way of adding noise while maintaining the
189 # This is a way of adding noise while maintaining the
190 # positive-semi-definiteness of the matrix.
190 # positive-semi-definiteness of the matrix.
191
191
192 U = linalg.cholesky(R.astype(complex), lower=False); # U'*U = R
192 U = linalg.cholesky(R.astype(complex), lower=False); # U'*U = R
193
193
194 sigma_noise = (np.linalg.norm(U,'fro')/SNR);
194 sigma_noise = (np.linalg.norm(U,'fro')/SNR);
195
195
196 # temp1 = (-2*np.random.rand(U.shape[0], U.shape[1]) + 2)
196 # temp1 = (-2*np.random.rand(U.shape[0], U.shape[1]) + 2)
197 # temp2 = 1j*(-2*np.random.rand(U.shape[0], U.shape[1]) + 2)
197 # temp2 = 1j*(-2*np.random.rand(U.shape[0], U.shape[1]) + 2)
198 # temp3 = ((abs(U) > 0).astype(float)) # upper triangle of 1's
198 # temp3 = ((abs(U) > 0).astype(float)) # upper triangle of 1's
199 # temp4 = (sigma_noise * (temp1 + temp2))/np.sqrt(2.0)
199 # temp4 = (sigma_noise * (temp1 + temp2))/np.sqrt(2.0)
200 #
200 #
201 # nz = np.multiply(temp4,temp3)
201 # nz = np.multiply(temp4,temp3)
202
202
203 nz = np.multiply( sigma_noise * (np.random.randn(U.shape[0]) + 1j*np.random.randn(U.shape[0]))/np.sqrt(2) , (abs(U) > 0).astype(float));
203 nz = np.multiply( sigma_noise * (np.random.randn(U.shape[0]) + 1j*np.random.randn(U.shape[0]))/np.sqrt(2) , (abs(U) > 0).astype(float));
204
204
205 Unz = U + nz;
205 Unz = U + nz;
206 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
206 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
207 plt.figure(3);
207 plt.figure(3);
208 plt.pcolor(abs(Rnz));
208 plt.pcolor(abs(Rnz));
209 plt.colorbar();
209 plt.colorbar();
210
210
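
A small self-contained check (not in the script; the names below are hypothetical demo variables) of the property the comment above relies on: re-forming Rnz = (U + nz)^H (U + nz) always gives a positive semi-definite matrix, because it is a Gram matrix, no matter what the perturbation nz is. The demo perturbs the full factor rather than only its upper-triangular support, which is enough to illustrate the point.

import numpy as np

rng = np.random.RandomState(0)
B = rng.randn(5, 5) + 1j*rng.randn(5, 5)
R_demo = np.dot(B.conj().T, B)                    # an arbitrary Hermitian PSD matrix
U_demo = np.linalg.cholesky(R_demo).conj().T      # upper factor: U^H U = R
nz_demo = 0.1*(rng.randn(5, 5) + 1j*rng.randn(5, 5))
Rnz_demo = np.dot((U_demo + nz_demo).conj().T, U_demo + nz_demo)
print np.linalg.eigvalsh(Rnz_demo).min()          # non-negative up to roundoff: still PSD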
211 #-------------------------------------------------------------------------------------------------
211 #-------------------------------------------------------------------------------------------------
212 # Fourier Inversion
212 # Fourier Inversion
213 #-------------------------------------------------------------------------------------------------
213 #-------------------------------------------------------------------------------------------------
214 f_fourier = np.zeros(shape=(Nr,1), dtype=complex);
214 f_fourier = np.zeros(shape=(Nr,1), dtype=complex);
215
215
216 for i in range(0, thetar.size):
216 for i in range(0, thetar.size):
217 th = thetar[i];
217 th = thetar[i];
218 w = np.exp(1j*k*np.dot(r,np.sin(th)));
218 w = np.exp(1j*k*np.dot(r,np.sin(th)));
219 temp = np.dot(w.T.conj(),U)
219 temp = np.dot(w.T.conj(),U)
220 f_fourier[i] = np.dot(temp, w);
220 f_fourier[i] = np.dot(temp, w);
221
221
222 f_fourier = f_fourier.real; # get rid of numerical imaginary noise
222 f_fourier = f_fourier.real; # get rid of numerical imaginary noise
223
223
224
224
225 #-------------------------------------------------------------------------------------------------
225 #-------------------------------------------------------------------------------------------------
226 # Capon Inversion
226 # Capon Inversion
227 #-------------------------------------------------------------------------------------------------
227 #-------------------------------------------------------------------------------------------------
228 f_capon = np.zeros(shape=(Nr,1));
228 f_capon = np.zeros(shape=(Nr,1));
229
229
230 tic_capon = time.time();
230 tic_capon = time.time();
231
231
232 for i in range(0, thetar.size):
232 for i in range(0, thetar.size):
233 th = thetar[i];
233 th = thetar[i];
234 w = np.exp(1j*k*np.dot(r,np.sin(th)));
234 w = np.exp(1j*k*np.dot(r,np.sin(th)));
235 f_capon[i] = np.divide(1, ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real)
235 f_capon[i] = 1/ ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real
236
236
237 toc_capon = time.time()
237 toc_capon = time.time()
238
239 elapsed_time_capon = toc_capon - tic_capon;
238 elapsed_time_capon = toc_capon - tic_capon;
240
239
241 f_capon = f_capon.real; # get rid of numerical imaginary noise
240 f_capon = f_capon.real; # get rid of numerical imaginary noise
242
241
243
242
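
The Capon loop above is the minimum-variance estimator P(theta) = 1 / (w^H R^-1 w). A compact standalone sketch of the same computation (a hypothetical helper, not part of the commit); it could be called as capon_spectrum(Rnz, r, k, thetar) with the arrays defined above.

import numpy as np
from scipy import linalg

def capon_spectrum(Rnz, r, k, thetas):
    # evaluate 1 / (w^H R^-1 w) at each candidate angle
    p = np.zeros(len(thetas))
    for i, th in enumerate(thetas):
        w = np.exp(1j*k*r*np.sin(th))    # steering vector toward angle th
        p[i] = 1.0/np.real(np.dot(w.conj(), linalg.solve(Rnz, w)))
    return p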
244 #-------------------------------------------------------------------------------------------------
243 #-------------------------------------------------------------------------------------------------
245 # MaxEnt Inversion
244 # MaxEnt Inversion
246 #-------------------------------------------------------------------------------------------------
245 #-------------------------------------------------------------------------------------------------
247
246
248 # Create the appropriate sensing matrix (split into real and imaginary parts)
247 # Create the appropriate sensing matrix (split into real and imaginary parts)
249 M = (r.size-1)*(r.size);
248 M = (r.size-1)*(r.size);
250 Ht = np.zeros(shape=(M,Nt)); # "true" sensing matrix
249 Ht = np.zeros(shape=(M,Nt)); # "true" sensing matrix
251 Hr = np.zeros(shape=(M,Nr)); # approximate sensing matrix for reconstruction
250 Hr = np.zeros(shape=(M,Nr)); # approximate sensing matrix for reconstruction
252
251
253 # Need to re-index our measurements from matrix R into vector g
252 # Need to re-index our measurements from matrix R into vector g
254 g = np.zeros(shape=(M,1));
253 g = np.zeros(shape=(M,1));
255 gnz = np.zeros(shape=(M,1)); # noisy version of g
254 gnz = np.zeros(shape=(M,1)); # noisy version of g
256
255
257 # Triangular indexing to perform this re-indexing
256 # Triangular indexing to perform this re-indexing
258 T = np.ones(shape=(r.size,r.size));
257 T = np.ones(shape=(r.size,r.size));
259 [i1v,i2v] = np.where(np.triu(T,1) > 0); # converts linear to triangular indexing
258 [i1v,i2v] = np.where(np.triu(T,1) > 0); # converts linear to triangular indexing
260
259
261 # Build H
260 # Build H
262 for i1 in range(0, r.size):
261 for i1 in range(0, r.size):
263 for i2 in range(i1+1, r.size):
262 for i2 in range(i1+1, r.size):
264 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward
263 idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0]; # kind of awkward
265 idx1 = 2*idx;
264 idx1 = 2*idx;
266 idx2 = 2*idx+1;
265 idx2 = 2*idx+1;
267 Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
266 Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
268 Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
267 Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T.conj();
269 Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
268 Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
270 Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
269 Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T.conj()*Nr/Nt;
271 g[idx1] = (R[i1,i2]).real*Nr/Nt;
270 g[idx1] = (R[i1,i2]).real*Nr/Nt;
272 g[idx2] = (R[i1,i2]).imag*Nr/Nt;
271 g[idx2] = (R[i1,i2]).imag*Nr/Nt;
273 gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
272 gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
274 gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;
273 gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;
275
274
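
For reference, a tiny standalone illustration (4 hypothetical antennas) of the triangular re-indexing used above: np.where on the strict upper triangle lists every baseline (i1, i2) with i1 < i2 exactly once, in row-major order, which is exactly what the idx lookup inside the loop recovers.

import numpy as np

n = 4
i1v, i2v = np.where(np.triu(np.ones((n, n)), 1) > 0)
print zip(i1v, i2v)    # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]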
276 # Inversion
275 # Inversion
277 F = Nr/Nt; # normalization
276 F = Nr/Nt; # normalization
278 sigma = 1; # set to 1 because the difference is accounted for in G
277 sigma = 1; # set to 1 because the difference is accounted for in G
279
278
280 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
279 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
281 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
280 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
282
281
283
282
284 # Whitened solution
283 # Whitened solution
285 def myfun(lambda1):
284 def myfun(lambda1):
286 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
285 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
287
286
288
287
289 tic_maxEnt = time.time(); # start time maxEnt
288 tic_maxEnt = time.time(); # start time maxEnt
290
289
291 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14);
290 lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14);
292
291
293 toc_maxEnt = time.time()
292 toc_maxEnt = time.time()
294 elapsed_time_maxent = toc_maxEnt - tic_maxEnt;
293 elapsed_time_maxent = toc_maxEnt - tic_maxEnt;
295
294
296 # Solution
295 # Solution
297 lambda1 = lambda1.x;
296 lambda1 = lambda1.x;
298
297
299 f_maxent = modelf(lambda1, Hr, F);
298 f_maxent = modelf(lambda1, Hr, F);
300 ystar = myfun(lambda1);
299 ystar = myfun(lambda1);
301 Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
300 Lambda = np.sqrt(sum(lambda1**2*sigma**2)/(4*G));
302 ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
301 ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
303 es = np.dot(Hr, f_maxent) - gnz; # should be same as ep
302 es = np.dot(Hr, f_maxent) - gnz;
304 chi2 = np.sum((es/sigma)**2);
303 chi2 = np.sum((es/sigma)**2);
305
304
306
305
307 #-------------------------------------------------------------------------------------------------
306 #-------------------------------------------------------------------------------------------------
308 # CS inversion using Iteratively Reweighted Least Squares (IRLS)
307 # CS inversion using Iteratively Reweighted Least Squares (IRLS)
309 #-------------------------------------------------------------------------------------------------
308 #-------------------------------------------------------------------------------------------------
310 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
309 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
311
310
312 Psi = deb4_basis(Nr);
311 Psi = deb4_basis(Nr);
313
314 # ------------Remove this-------------------------------------------
315 # wavelet1 = pywt.Wavelet('db4')
316 # Phi, Psi, x = wavelet1.wavefun(level=3)
317 #-------------------------------------------------------------------
318
312
319 # add "sum to 1" constraint
313 # add "sum to 1" constraint
320 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
314 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
321 g2 = np.concatenate( (gnz, np.array([[Nr/Nt]])), axis=0 );
315 g2 = np.concatenate( (gnz, np.array([[Nr/Nt]])), axis=0 );
322
316
323 tic_cs = time.time();
317 tic_cs = time.time();
324
325
326 # plt.figure(4)
327 # plt.imshow(Psi)#, interpolation='nearest')
328 # #plt.xticks([]); plt.yticks([])
329 # plt.show()
330
318
331 # Inversion
319 # Inversion
332 s = irls_dn2(np.dot(H2,Psi),g2,0.5,G);
320 s = irls_dn2(np.dot(H2,Psi),g2,0.5,G);
333
321
334 #print s
322 toc_cs = time.time()
323 elapsed_time_cs = toc_cs - tic_cs;
335
324
336 # Brightness function
325 # Brightness function
337 f_cs = np.dot(Psi,s);
326 f_cs = np.dot(Psi,s);
338
327
339 toc_cs = time.time()
328
340 elapsed_time_cs = toc_cs - tic_cs;
341
329
342 # Plot
330 # Plot
343 plt.figure(4)
331 plt.figure(4)
344 plt.plot(thetar,f_cs,'r.-');
332 plt.plot(thetar,f_cs,'r.-');
345 plt.plot(thetat,fact,'k-');
333 plt.plot(thetat,fact,'k-');
346
334
347
335
348 #-------------------------------------------------------------------------------------------------
336 #-------------------------------------------------------------------------------------------------
349 # Scaling and shifting
337 # Scaling and shifting
350 # (Only necessary for Capon solution)
338 # (Only necessary for Capon solution)
351 #-------------------------------------------------------------------------------------------------
339 #-------------------------------------------------------------------------------------------------
352 f_capon = f_capon/np.max(f_capon)*np.max(fact);
340 f_capon = f_capon/np.max(f_capon)*np.max(fact);
353
341
354
342
355 #-------------------------------------------------------------------------------------------------
343 #-------------------------------------------------------------------------------------------------
356 # Analyze stuff
344 # Analyze stuff
357 #-------------------------------------------------------------------------------------------------
345 #-------------------------------------------------------------------------------------------------
358
346
359 # Calculate MSE
347 # Calculate MSE
360 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
348 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
361 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
349 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
362 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
350 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
363 rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));
351 rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));
364
352
365
353
366 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
354 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
367 relrmse_capon = rmse_capon / np.linalg.norm(fact);
355 relrmse_capon = rmse_capon / np.linalg.norm(fact);
368 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
356 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
369 relrmse_cs = rmse_cs / np.linalg.norm(fact);
357 relrmse_cs = rmse_cs / np.linalg.norm(fact);
370
358
371
359
372 # Calculate correlation
360 # Calculate correlation
373 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
361 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
374 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
362 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
375 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
363 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
376 corr_cs = np.dot(f_cs.T.conj(),factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));
364 corr_cs = np.dot(f_cs.T.conj(),factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));
377
365
378
366
379 # Calculate centered correlation
367 # Calculate centered correlation
380 f0 = factr - np.mean(factr);
368 f0 = factr - np.mean(factr);
381 f1 = f_fourier - np.mean(f_fourier);
369 f1 = f_fourier - np.mean(f_fourier);
382
370
383 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
371 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
384 f1 = f_capon - np.mean(f_capon);
372 f1 = f_capon - np.mean(f_capon);
385 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
373 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
386 f1 = f_maxent - np.mean(f_maxent);
374 f1 = f_maxent - np.mean(f_maxent);
387 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
375 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
388 f1 = f_cs - np.mean(f_cs);
376 f1 = f_cs - np.mean(f_cs);
389 corrc_cs = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
377 corrc_cs = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
390
378
391
379
392 #-------------------------------------------------------------------------------------------------
380 #-------------------------------------------------------------------------------------------------
393 # Plot stuff
381 # Plot stuff
394 #-------------------------------------------------------------------------------------------------
382 #-------------------------------------------------------------------------------------------------
395
383
396 #---- Capon----
384 #---- Capon----#
397 plt.figure(5)
385 plt.figure(5)
398 plt.subplot(3, 1, 1)
386 plt.subplot(3, 1, 1)
399 plt.plot(180/np.pi*thetar, f_capon, 'r', label='Capon')
387 plt.plot(180/np.pi*thetar, f_capon, 'r', label='Capon')
400 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
388 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
401 plt.ylabel('Power (arbitrary units)')
389 plt.ylabel('Power (arbitrary units)')
402 plt.legend(loc='upper right')
390 plt.legend(loc='upper right')
403
391
404 # formatting y-axis
392 # formatting y-axis
405 locs,labels = plt.yticks()
393 locs,labels = plt.yticks()
406 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
394 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
407 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
395 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
408
396
409
397
410 #---- MaxEnt----
398 #---- MaxEnt----#
411 plt.subplot(3, 1, 2)
399 plt.subplot(3, 1, 2)
412 plt.plot(180/np.pi*thetar, f_maxent, 'r', label='MaxEnt')
400 plt.plot(180/np.pi*thetar, f_maxent, 'r', label='MaxEnt')
413 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
401 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
414 plt.ylabel('Power (arbitrary units)')
402 plt.ylabel('Power (arbitrary units)')
415 plt.legend(loc='upper right')
403 plt.legend(loc='upper right')
416
404
417 # formatting y-axis
405 # formatting y-axis
418 locs,labels = plt.yticks()
406 locs,labels = plt.yticks()
419 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
407 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
420 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
408 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
421
409
422
410
423 #---- Compressed Sensing----
411 #---- Compressed Sensing----#
424 plt.subplot(3, 1, 3)
412 plt.subplot(3, 1, 3)
425 plt.plot(180/np.pi*thetar, f_cs, 'r', label='CS')
413 plt.plot(180/np.pi*thetar, f_cs, 'r', label='CS')
426 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
414 plt.plot(180/np.pi*thetat,fact, 'k--', label='Truth')
427 plt.ylabel('Power (arbitrary units)')
415 plt.ylabel('Power (arbitrary units)')
428 plt.legend(loc='upper right')
416 plt.legend(loc='upper right')
429
417
430 # formatting y-axis
418 # formatting y-axis
431 locs,labels = plt.yticks()
419 locs,labels = plt.yticks()
432 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
420 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
433 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
421 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
434
435
436 # # PLOT PARA COMPRESSED SENSING
437 # #
438 # # subplot(3,1,3);
439 # # plot(180/pi*thetar,f_cs,'r-');
440 # # hold on;
441 # # plot(180/pi*thetat,fact,'k--');
442 # # hold off;
443 # # ylim([min(f_cs) 1.1*max(fact)]);
444 # # # title(sprintf('rel. RMSE: #.2e\tCorr: #.3f Corrc: #.3f', relrmse_cs, corr_cs, corrc_cs));
445 # # # title 'Compressed Sensing - Debauchies Wavelets'
446 # # xlabel 'Degrees'
447 # # ylabel({'Power';'(arbitrary units)'})
448 # # legend('Comp. Sens.','Truth');
449 # #
450 # # # set(gcf,'Position',[749 143 528 881]); # CSL
451 # # # set(gcf,'Position',[885 -21 528 673]); # macbook
452 # # pause(0.01);
453
454
422
455 # # Store Results
423 # # Store Results
456 corr[0, snri, Ni] = corr_fourier;
424 corr[0, snri, Ni] = corr_fourier;
457 corr[1, snri, Ni] = corr_capon;
425 corr[1, snri, Ni] = corr_capon;
458 corr[2, snri, Ni] = corr_maxent;
426 corr[2, snri, Ni] = corr_maxent;
459 corr[3, snri, Ni] = corr_cs;
427 corr[3, snri, Ni] = corr_cs;
460
428
461 rmse[0,snri,Ni] = relrmse_fourier;
429 rmse[0,snri,Ni] = relrmse_fourier;
462 rmse[1,snri,Ni] = relrmse_capon;
430 rmse[1,snri,Ni] = relrmse_capon;
463 rmse[2,snri,Ni] = relrmse_maxent;
431 rmse[2,snri,Ni] = relrmse_maxent;
464 rmse[3,snri,Ni] = relrmse_cs;
432 rmse[3,snri,Ni] = relrmse_cs;
465
433
466 corrc[0,snri,Ni] = corrc_fourier;
434 corrc[0,snri,Ni] = corrc_fourier;
467 corrc[1,snri,Ni] = corrc_capon;
435 corrc[1,snri,Ni] = corrc_capon;
468 corrc[2,snri,Ni] = corrc_maxent;
436 corrc[2,snri,Ni] = corrc_maxent;
469 corrc[3,snri,Ni] = corrc_cs;
437 corrc[3,snri,Ni] = corrc_cs;
470
438
471
439 print "--------Time performace--------"
472 print 'Capon:\t', elapsed_time_capon, 'sec';
440 print 'Capon:\t', elapsed_time_capon, 'sec';
473 print 'Maxent:\t',elapsed_time_maxent, 'sec';
441 print 'Maxent:\t',elapsed_time_maxent, 'sec';
474 print 'CS:\t',elapsed_time_cs, 'sec';
442 print 'CS:\t',elapsed_time_cs, 'sec\n';
475
443
476 print (NN*(snri+1) + Ni), '/', (SNRdBvec.size*NN);
444 print (NN*(snri+1) + Ni), '/', (SNRdBvec.size*NN), '\n';
477
445
478 print corr
446
479
447 #-------------------------------------------------------------------------------------------------
480 print corr.shape
448 # Analyze and plot statistics
481
449 #-------------------------------------------------------------------------------------------------
482
450
483 ## Analyze and plot statistics
484
485 metric = corr; # set this to rmse, corr, or corrc
451 metric = corr; # set this to rmse, corr, or corrc
486
452
487 # Remove outliers (this part was experimental and wasn't used in the paper)
453 # Remove outliers (this part was experimental and wasn't used in the paper)
488 # nsig = 3;
454 # nsig = 3;
489 # for i = 1:4
455 # for i = 1:4
490 # for snri = 1:length(SNRdBvec)
456 # for snri = 1:length(SNRdBvec)
491 # av = mean(met(i,snri,:));
457 # av = mean(met(i,snri,:));
492 # s = std(met(i,snri,:));
458 # s = std(met(i,snri,:));
493 # idx = abs(met(i,snri,:) - av) > nsig*s;
459 # idx = abs(met(i,snri,:) - av) > nsig*s;
494 # met(i,snri,idx) = nan;
460 # met(i,snri,idx) = nan;
495 # if sum(idx)>0
461 # if sum(idx)>0
496 # fprintf('i=%i, snr=%i, %i/%i pts removed\n',...
462 # fprintf('i=%i, snr=%i, %i/%i pts removed\n',...
497 # i,round(SNRdBvec(snri)),sum(idx),length(idx));
463 # i,round(SNRdBvec(snri)),sum(idx),length(idx));
498 # end
464 # end
499 # end
465 # end
500 # end
466 # end
501
467
502 # Avg ignoring NaNs
468
503 def nanmean(data, **args):
469 # Avg ignoring NaNs
504 return numpy.ma.filled(numpy.ma.masked_array(data,numpy.isnan(data)).mean(**args), fill_value=numpy.nan)
470 ave = np.zeros(shape=(4))
505
471
506 # ave = np.zeros(shape=(4))
472 ave[0] = nanmean(metric[0,:,:]); # Fourier
507 #
473 ave[1] = nanmean(metric[1,:,:]); # Capon
508 # ave[0] = nanmean(metric, axis=0);
474 ave[2] = nanmean(metric[2,:,:]); # MaxEnt
509 # ave[1] = nanmean(metric, axis=1);
475 ave[3] = nanmean(metric[3,:,:]); # Compressed Sensing
510 # ave[2] = nanmean(metric, axis=2);
476
511 # ave[3] = nanmean(metric, axis=3);
477 # Plot based on chosen metric
512
513 #print ave
514 plt.figure(6);
478 plt.figure(6);
515 f = plt.scatter(SNRdBvec, corr[0], marker='+', color='b', s=60); # Fourier
479 f = plt.scatter(SNRdBvec, ave[0], marker='+', color='b', s=60); # Fourier
516 c = plt.scatter(SNRdBvec, corr[1], marker='o', color= 'c', s=60); # Capon
480 c = plt.scatter(SNRdBvec, ave[1], marker='o', color= 'g', s=60); # Capon
517 me= plt.scatter(SNRdBvec, corr[2], marker='s', color= 'y', s=60); # MaxEnt
481 me= plt.scatter(SNRdBvec, ave[2], marker='s', color= 'c', s=60); # MaxEnt
518 cs= plt.scatter(SNRdBvec, corr[3], marker='*', color='r', s=60); # Compressed Sensing
482 cs= plt.scatter(SNRdBvec, ave[3], marker='*', color='r', s=60); # Compressed Sensing
519
483
520
521 plt.legend((f,c,me,cs),('Fourier','Capon', 'MaxEnt', 'Comp. Sens.'),scatterpoints=1, loc='upper right')
484 plt.legend((f,c,me,cs),('Fourier','Capon', 'MaxEnt', 'Comp. Sens.'),scatterpoints=1, loc='upper right')
522 plt.xlabel('SNR')
485 plt.xlabel('SNR')
523 plt.ylabel('Correlation with Truth')
486 plt.ylabel('Correlation with Truth')
524
487
488 print "--------Correlations--------"
489 print "Fourier:", ave[0]
490 print "Capon:\t", ave[1]
491 print "MaxEnt:\t", ave[2]
492 print "CS:\t", ave[3]
493
525 plt.show()
494 plt.show()
526
495
496
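
Portability note on the new scipy.stats.nanmean import: that function was deprecated in later SciPy releases, and np.nanmean (NumPy 1.8+) is a drop-in replacement for the NaN-ignoring average taken above. A one-line sketch:

import numpy as np
print np.nanmean(np.array([1.0, np.nan, 3.0]))    # 2.0, the NaN is ignored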
@@ -1,41 +1,40
1 '''
1 '''
2 Created on May 26, 2014
2 Created on May 26, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 import numpy as np
8 from FSfarras import *
7 from FSfarras import *
9 from dualfilt1 import *
8 from dualfilt1 import *
10 from dualtree import *
9 from dualtree import *
11 from idualtree import *
10 from idualtree import *
12
11
13 # Daubechies 4 Wavelet
12 # Daubechies 4 Wavelet
14 def deb4_basis(N):
13 def deb4_basis(N):
15
14
16 Psi = np.zeros(shape=(N,2*N+1));
15 Psi = np.zeros(shape=(N,2*N+1));
17 idx = 0;
16 idx = 0;
18 J = 4;
17 J = 4;
19 [Faf, Fsf] = FSfarras();
18 [Faf, Fsf] = FSfarras();
20 [af, sf] = dualfilt1();
19 [af, sf] = dualfilt1();
21
20
22 # compute transform of zero vector
21 # compute transform of zero vector
23 x = np.zeros(shape=(1,N));
22 x = np.zeros(shape=(1,N));
24 w = dualtree(x, J, Faf, af);
23 w = dualtree(x, J, Faf, af);
25
24
26
25
27 # Uses both real and imaginary wavelets
26 # Uses both real and imaginary wavelets
28 for i in range (0, J+1):
27 for i in range (0, J+1):
29 for j in range (0, 2):
28 for j in range (0, 2):
30 for k in range (0, (w[i][j]).size):
29 for k in range (0, (w[i][j]).size):
31 w[i][j][0,k] = 1;
30 w[i][j][0,k] = 1;
32 y = idualtree(w, J, Fsf, sf);
31 y = idualtree(w, J, Fsf, sf);
33 w[i][j][0,k] = 0;
32 w[i][j][0,k] = 0;
34 # store it
33 # store it
35 Psi[:,idx] = y.T.conj();
34 Psi[:,idx] = y.T.conj();
36 idx = idx + 1;
35 idx = idx + 1;
37
36
38 # Add uniform vector (seems to be useful if there's a background)
37 # Add uniform vector (seems to be useful if there's a background)
39 Psi[:,2*N] = 1/np.sqrt(N);
38 Psi[:,2*N] = 1/np.sqrt(N);
40
39
41 return Psi
\ No newline at end of file
40 return Psi
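
A hypothetical usage sketch (not part of the commit), assuming the wavelet modules above are importable: build the dictionary for the 256-point reconstruction grid used in the main script and check its shape, 2*N wavelet atoms plus the uniform background column.

from deb4_basis import deb4_basis

Nr = 256
Psi = deb4_basis(Nr)
print Psi.shape    # should print (256, 513)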
@@ -1,94 +1,70
1 '''
1 '''
2 Created on May 27, 2014
2 Created on May 27, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 #from scipy.sparse import eye
8 from scipy import linalg
7 from scipy import linalg
9 import scipy.sparse as sps
8 import scipy.sparse as sps
10 import numpy as np
9 import numpy as np
11 from numpy.linalg import norm
10 from numpy.linalg import norm
12
11
13 def irls_dn(A,b,p,lambda1):
12 def irls_dn(A,b,p,lambda1):
14
13
15
14
16 # Minimize lambda*||u||_p + ||A*u-b||_2, 0 < p <= 1
15 # Minimize lambda*||u||_p + ||A*u-b||_2, 0 < p <= 1
17 # using Iterative Reweighted Least Squares
16 # using Iterative Reweighted Least Squares
18 # (see http://math.lanl.gov/Research/Publications/Docs/chartrand-2008-iteratively.pdf
17 # (see http://math.lanl.gov/Research/Publications/Docs/chartrand-2008-iteratively.pdf
19 # and http://web.eecs.umich.edu/~aey/sparse/sparse11.pdf)
18 # and http://web.eecs.umich.edu/~aey/sparse/sparse11.pdf)
20
19
21 # Note to self: I found that "warm-starting" didn't really help too much.
20 # Note to self: I found that "warm-starting" didn't really help too much.
22
21
23 [M,N] = A.shape;
22 [M,N] = A.shape;
24 # Initialize and precompute:
23 # Initialize and precompute:
25 eps = 1e-2; # damping parameter
24 eps = 1e-2; # damping parameter
26
25
27 [Q,R] = linalg.qr(A.T.conj(), mode='economic');
26 [Q,R] = linalg.qr(A.T.conj(), mode='economic');
28
27
29
28
30 c = linalg.solve(R.T.conj(),b); # will be used later also
29 c = linalg.solve(R.T.conj(),b); # will be used later also
31 u = np.dot(Q,c); # minimum 2-norm solution
30 u = np.dot(Q,c); # minimum 2-norm solution
32 I = sps.eye(M);
31 I = sps.eye(M);
33
32
34 # Temporary N x N matrix
33 # Temporary N x N matrix
35 temp = np.zeros(shape=(N,N))
34 temp = np.zeros(shape=(N,N))
36
37 #---------- not needed, defined above--------------
38 # Spacing of floating point numbers
39 #eps = np.spacing(1)
40 #--------------------------------------------------
41
35
42 # Loop until damping parameter is small enough
36 # Loop until damping parameter is small enough
43 while (eps > 1e-7):
37 while (eps > 1e-7):
44 epschange = 0;
38 epschange = 0;
45 # Loop until it's time to change eps
39 # Loop until it's time to change eps
46 while (not(epschange)):
40 while (not(epschange)):
47 # main loop
41 # main loop
48 # u_n = W*A'*(A*W*A'+ lambda*I)^-1 * b
42 # u_n = W*A'*(A*W*A'+ lambda*I)^-1 * b
49 # where W = diag(1/w)
43 # where W = diag(1/w)
50 # where w = (u.^2 + eps).^(p/2-1)
44 # where w = (u.^2 + eps).^(p/2-1)
51
45
52 # Update
46 # Update
53 w = (u**2 + eps)**(1-p/2.0);
47 w = (u**2 + eps)**(1-p/2.0);
54
55 # #---- Very inefficient- REMOVE THIS PART------
56 # k = 0
57 # # Sparse matrix
58 # for i in range (0, N):
59 # for j in range (0,N):
60 # if(i==j):
61 # temp[i,j] = w[k]
62 # k = k+1
63 #--------------------------------------------------
64
65 np.fill_diagonal(temp, w)
48 np.fill_diagonal(temp, w)
66 #-----------------------------------------------
49
67
68 # Compressed Sparse Matrix
50 # Compressed Sparse Matrix
69 W = sps.csr_matrix(temp); #Compressed Sparse Row matrix
51 W = sps.csr_matrix(temp); #Compressed Sparse Row matrix
70
52
71
72 WAT = W*A.T.conj();
53 WAT = W*A.T.conj();
73
74 #print "WAT", WAT.shape
75 #print "np.dot(A,WAT)", np.dot(A,WAT).shape
76 #print "np.dot(lambda1,I)", np.dot(lambda1,I).shape
77 #print "linalg.solve((np.dot(A,WAT) + np.dot(lambda1,I)), b)", linalg.solve((np.dot(A,WAT) + np.dot(lambda1,I)), b).shape
78
54
79 u_new = np.dot(WAT , linalg.solve((np.dot(A,WAT) + np.dot(lambda1,I)), b));
55 u_new = np.dot(WAT , linalg.solve((np.dot(A,WAT) + np.dot(lambda1,I)), b));
80
56
81 # See if this subproblem is converging
57 # See if this subproblem is converging
82 delu = norm(u_new-u)/norm(u);
58 delu = norm(u_new-u)/norm(u);
83 epschange = delu < (np.sqrt(eps)/100.0);
59 epschange = delu < (np.sqrt(eps)/100.0);
84
60
85 # Make update
61 # Make update
86 u = u_new;
62 u = u_new;
87
63
88
64
89 eps = eps/10.0; # decrease eps
65 eps = eps/10.0; # decrease eps
90
66
91 return u
67 return u
92
68
93
69
94
70
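
A self-contained sketch of the reweighting idea irls_dn implements, simplified to a fixed iteration budget and without the QR-based start; the names are hypothetical and it is illustrative rather than a replacement.

import numpy as np

def irls_lp_sketch(A, b, p=0.5, lam=1e-3, eps=1e-2, iters=60):
    # minimize lam*||u||_p + ||A*u - b||_2 via iteratively reweighted least squares
    M, N = A.shape
    u = np.dot(np.linalg.pinv(A), b)                # minimum 2-norm starting point
    for _ in range(iters):
        w = (u**2 + eps)**(1.0 - p/2.0)             # weights, W = diag(w)
        WAT = w[:, np.newaxis]*A.T                  # W * A^T
        u_new = np.dot(WAT, np.linalg.solve(np.dot(A, WAT) + lam*np.eye(M), b))
        if np.linalg.norm(u_new - u) < np.sqrt(eps)/100.0*np.linalg.norm(u):
            eps = eps/10.0                          # same damping schedule as above
        u = u_new
    return u

# Toy problem: 20 measurements of a 3-sparse, 50-dimensional vector
rng = np.random.RandomState(0)
A = rng.randn(20, 50)
x = np.zeros(50); x[[5, 17, 33]] = 1.0
u = irls_lp_sketch(A, np.dot(A, x))
print np.round(u[[5, 17, 33]], 2)    # should be close to the true spike amplitudes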
@@ -1,74 +1,53
1 '''
1 '''
2 Created on May 30, 2014
2 Created on May 30, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 from irls_dn import *
7 from irls_dn import *
8 from scipy.optimize import fsolve
8 from scipy.optimize import brentq
9 import numpy as np
10 from scipy.optimize import *
11 from dogleg import *
12 from numpy.linalg import norm
13
14 import matplotlib.pyplot as plt
15
9
16 def irls_dn2(A,b,p,G):
10 def irls_dn2(A,b,p,G):
17
11
18 # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G (0 < p <= 1)
12 # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G (0 < p <= 1)
19
13
20 # What this function actually does is find the lambda1 so that the solution
14 # What this function actually does is find the lambda1 so that the solution
21 # to the following problem satisfies ||A*u-b||_2^2 <= G:
15 # to the following problem satisfies ||A*u-b||_2^2 <= G:
22 # Minimize lambda1*||u||_p + ||A*u-b||_2
16 # Minimize lambda1*||u||_p + ||A*u-b||_2
23
17
24 # Start with a large lambda1, and do a line search until fidelity <= G.
18 # Start with a large lambda1, and do a line search until fidelity <= G.
25 # (Inversions with large lambda1 are really fast anyway).
19 # (Inversions with large lambda1 are really fast anyway).
26
20
27 # Then use a scalar root finder (brentq) to localize the root even better
21 # Then use a scalar root finder (brentq) to localize the root even better
28
22
29 # Line Search
23 # Line Search
30
24
31 alpha = 2.0; # Line search parameter
25 alpha = 2.0; # Line search parameter
32 lambda1 = 1e5; # What's a reasonable but safe initial guess?
26 lambda1 = 1e5; # What's a reasonable but safe initial guess?
33 u = irls_dn(A,b,p,lambda1);
27 u = irls_dn(A,b,p,lambda1);
34 #print "u\n", u
28
35
29
36 fid = norm(np.dot(A,u)-b)**2;
30 fid = norm(np.dot(A,u)-b)**2;
37
31
38 print '----------------------------------\n';
32 print '----------------------------------\n';
39
33
40 while (fid >= G):
34 while (fid >= G):
41 lambda1 = lambda1 / alpha; # Balance between speed and accuracy
35 lambda1 = lambda1 / alpha; # Balance between speed and accuracy
42 u = irls_dn(A,b,p,lambda1);
36 u = irls_dn(A,b,p,lambda1);
43 fid = norm(np.dot(A,u)-b)**2;
37 fid = norm(np.dot(A,u)-b)**2;
44 print 'lambda = %2e \t' % lambda1, '||A*u-b||^2 = %.1f\n' % fid;
38 print 'lambda = %2e \t' % lambda1, '||A*u-b||^2 = %.1f' % fid;
45 #print u
46
39
47 # Refinement using fzero/ brentq
40 # Refinement using fzero/ brentq
48 lambda0 = np.array([lambda1,lambda1*alpha]); # interval with zero-crossing
41
49
42 lambda0 = np.array([lambda1,lambda1*alpha]); # interval with zero-crossing
50
43
51 def myfun(lambda1):
44 def myfun(lambda1):
52 temp1 = np.dot(A, irls_dn(A,b,p,lambda1))
45 return norm(np.dot(A, irls_dn(A,b,p,lambda1)) - b)**2 - G;
53 temp2 = norm(temp1-b)
54 temp3 = temp2**2-G
55 #return np.linalg.norm(np.dot(A, irls_dn(A,b,p,lambda1)) - b)**2 - G;
56 return temp3
57
58 print "tolerancia=", 0.01*lambda1
59
46
60 #lambda1 = root(myfun, lambda1, method='krylov', tol=0.01*lambda1);
47 # Find zero-crossing at given interval (lambda1, lambda1*alpha)
61 #lambda1 = lambda1.x
62
63 print "lambda0[0]", lambda0[0]
64 print "lambda0[1]", lambda0[1]
65
66 lambda1 = brentq(myfun, lambda0[0], lambda0[1], xtol=0.01*lambda1)
48 lambda1 = brentq(myfun, lambda0[0], lambda0[1], xtol=0.01*lambda1)
67
68 print "lambda final=", lambda1
69
49
70 u = irls_dn(A,b,p,lambda1);
50 u = irls_dn(A,b,p,lambda1);
71
51
72
73 return u;
52 return u;
74
53
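
The control flow above, a coarse geometric line search on lambda1 followed by brentq on the bracketing interval, shown in isolation on a scalar toy function; fidelity_of is a hypothetical stand-in for ||A*u(lambda1) - b||^2, which grows as lambda1 grows.

from scipy.optimize import brentq

G = 3.0
def fidelity_of(lam):
    return lam    # toy stand-in: the data misfit increases with the penalty weight

alpha = 2.0
lam = 1e5
while fidelity_of(lam) >= G:
    lam = lam/alpha                    # coarse geometric line search
lam = brentq(lambda l: fidelity_of(l) - G, lam, lam*alpha, xtol=0.01*lam)
print lam                              # ~3.0, the boundary of the constraint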
@@ -1,64 +1,63
1 '''
1 '''
2 Created on Jun 5, 2014
2 Created on Jun 5, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 from multirate import *
7 from multirate import upfirdn
8 import numpy as np
9 from cshift import *
8 from cshift import *
10
9
11 def sfb(lo, hi, sf):
10 def sfb(lo, hi, sf):
12
11
13 # Synthesis filter bank
12 # Synthesis filter bank
14 #
13 #
15 # USAGE:
14 # USAGE:
16 # y = sfb(lo, hi, sf)
15 # y = sfb(lo, hi, sf)
17 # INPUT:
16 # INPUT:
18 # lo - low frequency input
17 # lo - low frequency input
19 # hi - high frequency input
18 # hi - high frequency input
20 # sf - synthesis filters
19 # sf - synthesis filters
21 # sf(:, 1) - lowpass filter (even length)
20 # sf(:, 1) - lowpass filter (even length)
22 # sf(:, 2) - highpass filter (even length)
21 # sf(:, 2) - highpass filter (even length)
23 # OUTPUT:
22 # OUTPUT:
24 # y - output signal
23 # y - output signal
25 # See also afb
24 # See also afb
26 #
25 #
27 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
26 # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
28 # http://taco.poly.edu/WaveletSoftware/
27 # http://taco.poly.edu/WaveletSoftware/
29
28
30 N = 2*lo.size;
29 N = 2*lo.size;
31 L = sf.size/2;
30 L = sf.size/2;
32
31
33 # Need to change format for upfirdn funct:
32 # Need to change format for upfirdn funct:
34 lo = lo.T.conj()
33 lo = lo.T.conj()
35 lo = lo.reshape(lo.size)
34 lo = lo.reshape(lo.size)
36
35
37 #print 'sfb hi', hi
36 #print 'sfb hi', hi
38
37
39 # Need to change format for upfirdn funct:
38 # Need to change format for upfirdn funct:
40 hi = hi.T.conj()
39 hi = hi.T.conj()
41 hi = hi.reshape(hi.size)
40 hi = hi.reshape(hi.size)
42
41
43 #hi = hi.reshape(1, hi.size)
42 #hi = hi.reshape(1, hi.size)
44
43
45
44
46 lo = upfirdn(lo, sf[:,0], 2, 1);
45 lo = upfirdn(lo, sf[:,0], 2, 1);
47 hi = upfirdn(hi, sf[:,1], 2, 1);
46 hi = upfirdn(hi, sf[:,1], 2, 1);
48
47
49 lo = lo[0:lo.size-1]
48 lo = lo[0:lo.size-1]
50 hi = hi[0:hi.size-1]
49 hi = hi[0:hi.size-1]
51
50
52 y = lo + hi;
51 y = lo + hi;
53 y[0:L-2] = y[0:L-2] + y[N+ np.arange(0,L-2)]; #CHECK IF ARANGE IS CORRECT
52 y[0:L-2] = y[0:L-2] + y[N+ np.arange(0,L-2)]; #CHECK IF ARANGE IS CORRECT
54 y = y[0:N];
53 y = y[0:N];
55
54
56 #print 'y en sbf\n', y.shape
55 #print 'y en sbf\n', y.shape
57
56
58 y = y.reshape(1, y.size)
57 y = y.reshape(1, y.size)
59 #print 'y en sbf\n', y.shape
58 #print 'y en sbf\n', y.shape
60
59
61 y = cshift(y, 1-L/2);
60 y = cshift(y, 1-L/2);
62
61
63 return y;
62 return y;
64
63
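
A rough numpy-only sketch of the synthesis step performed above, assuming real 1-D inputs: upsample each branch by two, filter with the matching synthesis filter, add, fold the periodic tail back, and circularly align. It is illustrative only; np.convolve stands in for the custom multirate.upfirdn and np.roll for cshift, and their length/sign conventions may differ slightly.

import numpy as np

def upsample2_filter(x, h):
    up = np.zeros(2*x.size)
    up[::2] = x                         # insert zeros between samples
    return np.convolve(up, h)           # length 2*len(x) + len(h) - 1

def sfb_sketch(lo, hi, sf):
    N = 2*lo.size
    L = sf.shape[0]
    y = upsample2_filter(lo, sf[:, 0])[:N+L-2] + upsample2_filter(hi, sf[:, 1])[:N+L-2]
    y[0:L-2] = y[0:L-2] + y[N:N+L-2]    # fold the periodic tail back onto the start
    y = y[0:N]
    return np.roll(y, 1 - L//2)         # stand-in for cshift(y, 1-L/2)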
@@ -1,32 +1,31
1 '''
1 '''
2 Created on May 22, 2014
2 Created on May 22, 2014
3
3
4 @author: Yolian Amaro
4 @author: Yolian Amaro
5 '''
5 '''
6
6
7 import numpy as np
8 from modelf import *
7 from modelf import *
9
8
10 def y_hysell96(lambda1,g,sigma,F,G,H):
9 def y_hysell96(lambda1,g,sigma,F,G,H):
11 # Y_HYSELL96 Implements set of nonlinear equations to solve Hysell96 MaxEnt
10 # Y_HYSELL96 Implements set of nonlinear equations to solve Hysell96 MaxEnt
12 # y(lambda) = 0
11 # y(lambda) = 0
13 # decision variables: lambda
12 # decision variables: lambda
14 # g: data
13 # g: data
15 # sigma: uncertainties (length of g)
14 # sigma: uncertainties (length of g)
16 # F: sum(f)
15 # F: sum(f)
17 # G: desired value for chi^2
16 # G: desired value for chi^2
18 # H: linear operator mapping image (f) to data (g)
17 # H: linear operator mapping image (f) to data (g)
19 # This function is a helper function that returns 0 when a value of lambda
18 # This function is a helper function that returns 0 when a value of lambda
20 # is chosen that satisfies the equations.
19 # is chosen that satisfies the equations.
21
20
22 # model for f
21 # model for f
23 f = modelf(lambda1, H,F);
22 f = modelf(lambda1, H,F);
24
23
25 # solve for Lambda and e
24 # solve for Lambda and e
26 Lambda = np.sqrt(np.sum(np.multiply(lambda1**2,sigma**2))/(4*G)); # positive root (right?)
25 Lambda = np.sqrt(np.sum(np.multiply(lambda1**2,sigma**2))/(4*G)); # positive root (right?)
27 e = np.multiply(-lambda1,sigma**2) / (2*Lambda);
26 e = np.multiply(-lambda1,sigma**2) / (2*Lambda);
28
27
29 # measurement equation
28 # measurement equation
30 y = g + e - np.dot(H, f);
29 y = g + e - np.dot(H, f);
31
30
32 return y
31 return y
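
y_hysell96 is written to be driven by a vector root finder, exactly as the main script does with root(..., method='krylov'). A self-contained sketch of that pattern, with a toy residual standing in for the Hysell96 equations:

import numpy as np
from scipy.optimize import root

def toy_residual(lam):
    # a smooth vector equation with a known root; stands in for y_hysell96(lam, g, sigma, F, G, H)
    return np.tanh(lam) - 0.25

lambda0 = 1e-5*np.ones(shape=(6, 1))    # same style of initial condition as the main script
sol = root(toy_residual, lambda0, method='krylov', tol=1e-12)
print sol.x                             # every entry close to arctanh(0.25) ~ 0.2554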