#!/usr/bin/env python

#----------------------------------------------------------
# Original MATLAB code developed by Brian Harding
# Rewritten in python by Yolian Amaro
# Python version 2.7
# May 15, 2014
# Jicamarca Radio Observatory
#----------------------------------------------------------

import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
import time
from y_hysell96 import *
from deb4_basis import *
from modelf import *
#from scipy.optimize import fsolve
from scipy.optimize import root
import pywt
from irls_dn2 import *

## Calculate Forward Model
lambda1 = 6.0
k = 2*math.pi/lambda1

## Calculate Magnetic Declination

# [~,~,dec] = igrf11magm(350e3, -11-56/60, -76-52/60, 2012); check this

# or compute it with the (commented) igrf11magm call above
dec = -1.24

# loads rx, ry (Jicamarca antenna positions) #this can be done with numpy.loadtxt()
rx = np.array( [[127.5000], [91.5000], [127.5000], [19.5000], [91.5000], [-127.5000], [-55.5000], [-220.8240]] )
ry = np.array( [[127.5000], [91.5000], [91.5000], [55.5000], [-19.5000], [-127.5000], [-127.5000], [-322.2940]] )

antpos = np.array( [[127.5000, 91.5000, 127.5000, 19.5000, 91.5000, -127.5000, -55.5000, -220.8240],
                    [127.5000, 91.5000, 91.5000, 55.5000, -19.5000, -127.5000, -127.5000, -322.2940]] )

plt.figure(1)
plt.plot(rx, ry, 'ro')
plt.draw()

# Jicamarca is nominally at a 45 degree angle
theta = 45 - dec;

# Rotation matrix from antenna coord to magnetic coord (East North)
theta_rad = math.radians(theta)   # trig functions take radians as argument
val1 = float( math.cos(theta_rad) )
val2 = float( math.sin(theta_rad) )
val3 = float( -1*math.sin(theta_rad) )
val4 = float( math.cos(theta_rad) )

# Assemble the 2x2 rotation matrix
R = np.array( [[val1, val3], [val2, val4]] );

# Rotate antenna positions to magnetic coord.
AR = np.dot(R.T, antpos);

# Only take the East component
r = AR[0,:]
r.sort()   # ROW VECTOR?

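# R above is the standard 2-D rotation by theta, so AR = np.dot(R.T, antpos) expresses the
# antenna positions in magnetic (east, north) coordinates; only the east component r is used
# in the 1-D imaging problem below, and sorting it orders the antennas by east position.
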
# Truth model (high and low resolution)
Nt = (1024.0)*(16.0);        # number of pixels in truth image: high resolution
thbound = 9.0/180*math.pi;   # the width of the domain in angle space
thetat = np.linspace(-thbound, thbound, Nt)   # image domain
thetat = np.transpose(thetat)                 # transpose # does this work?
Nr = (256.0);                # number of pixels in reconstructed image: low res
thetar = np.linspace(-thbound, thbound, Nr)   # reconstruction domain
thetar = np.transpose(thetar)                 # transpose # does this work?

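# Note: np.transpose() is a no-op on a 1-D array, so thetat and thetar remain 1-D vectors and
# the MATLAB-style transpose is kept only for readability. Newer NumPy releases also require an
# integer sample count in np.linspace, so Nt and Nr may need int() casts there.
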
# Model for f: Gaussian(s) with amplitudes a, centers mu, widths sig, and
# background constant b.

# Triple Gaussian
# a = np.array([3, 5, 2]);
# mu = np.array([-5.0/180*math.pi, 2.0/180*math.pi, 7.0/180*math.pi]);
# sig = np.array([2.0/180*math.pi, 1.5/180*math.pi, 0.3/180*math.pi]);
# b = 0;  # background

# Double Gaussian
# a = np.array([3, 5]);
# mu = np.array([-5.0/180*math.pi, 2.0/180*math.pi]);
# sig = np.array([2.0/180*math.pi, 1.5/180*math.pi]);
# b = 0;  # background

# Single Gaussian
a = np.array( [3] );
mu = np.array( [-3.0/180*math.pi] )
sig = np.array( [2.0/180*math.pi] )
b = 0;

fact = np.zeros(shape=(Nt,1));
factr = np.zeros(shape=(Nr,1));

for i in range(0, a.size):
    temp = (-(thetat-mu[i])**2/(sig[i]**2))
    tempr = (-(thetar-mu[i])**2/(sig[i]**2))
    for j in range(0, temp.size):
        fact[j] = fact[j] + a[i]*math.exp(temp[j]);
    for m in range(0, tempr.size):
        factr[m] = factr[m] + a[i]*math.exp(tempr[m]);
fact = fact + b;
factr = factr + b;

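# The loops above evaluate f(theta) = sum_i a[i]*exp(-(theta - mu[i])**2 / sig[i]**2) + b on both
# grids. A vectorized equivalent (sketch, not used here) would be:
#   fact  = sum(a[i]*np.exp(-(thetat - mu[i])**2/sig[i]**2) for i in range(a.size)) + b
#   factr = sum(a[i]*np.exp(-(thetar - mu[i])**2/sig[i]**2) for i in range(a.size)) + b
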
# # model for f: Square pulse
# for j in range(0, fact.size):
#     if (theta > -5.0/180*math.pi and theta < 2.0/180*math.pi):
#         fact[j] = 0
#     else:
#         fact[j] = 1
# for k in range(0, factr.size):
#     if (thetar[k] > -5.0/180*math.pi and thetar[k] < 2/180*math.pi):
#         fact[k] = 0
#     else:
#         fact[k] = 1
#
#
# # model for f: triangle pulse
# mu = -1.0/180*math.pi;
# sig = 5.0/180*math.pi;
# wind1 = theta > mu-sig and theta < mu;
# wind2 = theta < mu+sig and theta > mu;
# fact = wind1 * (theta - (mu - sig));
# factr = wind1 * (thetar - (mu - sig));
# fact = fact + wind2 * (-(theta-(mu+sig)));
# factr = factr + wind2 * (-(thetar-(mu+sig)));

# fact = fact/(sum(fact)[0]*2*thbound/Nt); # normalize to integral(f)==1
I = sum(fact)[0];
fact = fact/I;    # normalize to sum(f)==1
factr = factr/I;  # normalize to sum(f)==1
#plt.figure()
#plt.plot(thetat,fact,'r');
#plt.plot(thetar,factr,'k.');
#xlim([min(thetat) max(thetat)]);

#x = np.linspace(thetat.min(), thetat.max) ????
#for i in range(0, thetat.size):
plt.figure(2)
plt.plot(thetat, fact, 'r--')
plt.plot(thetar, factr, 'ro')
plt.draw()
# xlim([min(thetat) max(thetat)]);   # STILL NEEDS FIXING (use plt.xlim)

##
# Control the type and number of inversions with:
#   SNRdBvec: the SNRs that will be used.
#   NN: the number of trials for each SNR

#SNRdBvec = np.linspace(5,20,10);
SNRdBvec = np.array([15]);
NN = 1;   # number of trials at each SNR

# if using vector arguments should be: (4,SNRdBvec.size,NN)
corr = np.zeros(shape=(4,SNRdBvec.size,NN));    # (method, SNR, trial)
corrc = np.zeros(shape=(4,SNRdBvec.size,NN));   # (method, SNR, trial)
rmse = np.zeros(shape=(4,SNRdBvec.size,NN));    # (method, SNR, trial)

for snri in range(0, SNRdBvec.size):   # change 1 for SNRdBvec.size when using SNRdBvec as vector
    for Ni in range(0, NN):
        SNRdB = SNRdBvec[snri];
        SNR = 10**(SNRdB/10.0);

        # Calculate cross-correlation matrix (Fourier components of image)
        # This is an inefficient way to do this.
        R = np.zeros(shape=(r.size, r.size), dtype=object);

        for i1 in range(0, r.size):
            for i2 in range(0, r.size):
                R[i1,i2] = np.dot(fact.T, np.exp(1j*k*np.dot((r[i1]-r[i2]), np.sin(thetat))))
                R[i1,i2] = sum(R[i1,i2])

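        # Each R[i1,i2] is a discretized visibility integral,
        # sum over theta of f(theta)*exp(1j*k*(r[i1]-r[i2])*sin(theta)), i.e. the spatial
        # cross-correlation seen by the antenna pair (i1, i2) for the truth image fact.
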
        # Add uncertainty
        # This is an ad-hoc way of adding "noise". It models some combination of
        # receiver noise and finite integration times. We could use a more
        # advanced model (like in Yu et al 2000) in the future.

        # This is a way of adding noise while maintaining the
        # positive-semi-definiteness of the matrix.

        U = linalg.cholesky(R.astype(complex), lower=False);  # U'*U = R

        sigma_noise = (np.linalg.norm(U,'fro')/SNR);

        temp1 = (-1*np.random.rand(U.shape[0], U.shape[1]) + 0.5)
        temp2 = 1j*(-1*np.random.rand(U.shape[0], U.shape[1]) + 0.5)
        temp3 = ((abs(U) > 0).astype(float))   # upper triangle of 1's
        temp4 = (sigma_noise * (temp1 + temp2))/np.sqrt(2.0)

        nz = np.multiply(temp4, temp3)

        #---------------------- Delete this: --------------------------------------------
        #nz = ((abs(np.multiply(temp4, temp3)) > 0).astype(int))
        #nz = ((abs(np.dot(temp4, temp3)) > 0).astype(int))
        #nz = np.dot(np.dot(sigma_noise, (temp1 + temp2)/math.sqrt(2), temp3 ));
        #nz = np.dot(sigma_noise, (np.dot((np.random.rand(8,8) + j*np.random.rand(8,8))/math.sqrt(2.0) , (abs(U) > 0).astype(int))));
        #--------------------------------------------------------------------------------

        Unz = U + nz;
        Rnz = np.dot(Unz.T.conj(), Unz);   # the noisy version of R
        plt.figure(3);
        plt.pcolor(abs(Rnz));
        plt.colorbar();

        # Fourier Inversion ###################
        f_fourier = np.zeros(shape=(Nr,1), dtype=complex);

        for i in range(0, thetar.size):
            th = thetar[i];
            w = np.exp(1j*k*np.dot(r, np.sin(th)));

            temp = np.dot(w.T.conj(), U)

            f_fourier[i] = np.dot(temp, w);

        f_fourier = f_fourier.real;   # get rid of numerical imaginary noise

        #print f_fourier

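        # The loop above computes w.conj().T * U * w for each look direction. The conventional
        # Fourier (Bartlett) estimate would use the noisy covariance itself, w^H Rnz w, so the
        # use of the noise-free Cholesky factor U here may be intentional; worth double-checking.
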
        # Capon Inversion ######################

        f_capon = np.zeros(shape=(Nr,1));

        tic_capon = time.time();

        for i in range(0, thetar.size):
            th = thetar[i];
            w = np.exp(1j*k*np.dot(r, np.sin(th)));
            f_capon[i] = np.divide(1, ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real)

        toc_capon = time.time()

        elapsed_time_capon = toc_capon - tic_capon;

        f_capon = f_capon.real;   # get rid of numerical imaginary noise

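        # Capon (minimum-variance) estimate: f_capon(theta) = 1 / (w^H Rnz^-1 w); the inverse is
        # applied via linalg.solve(Rnz, w) rather than forming Rnz^-1 explicitly.
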
        # MaxEnt Inversion #####################

        # create the appropriate sensing matrix (split into real and imaginary parts)
        M = (r.size-1)*(r.size);
        Ht = np.zeros(shape=(M,Nt));   # "true" sensing matrix
        Hr = np.zeros(shape=(M,Nr));   # approximate sensing matrix for reconstruction

        # need to re-index our measurements from matrix R into vector g
        g = np.zeros(shape=(M,1));
        gnz = np.zeros(shape=(M,1));   # noisy version of g

        # triangular indexing to perform this re-indexing
        T = np.ones(shape=(r.size,r.size));
        [i1v,i2v] = np.where(np.triu(T,1) > 0);   # converts linear to triangular indexing

        # build H
        for i1 in range(0, r.size):
            for i2 in range(i1+1, r.size):
                idx = np.where(np.logical_and((i1==i1v), (i2==i2v)))[0];   # kind of awkward
                idx1 = 2*idx;   # because index starts at 0
                idx2 = 2*idx+1;
                Hr[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetar)).T;
                Hr[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetar)).T;
                Ht[idx1,:] = np.cos(k*(r[i1]-r[i2])*np.sin(thetat)).T*Nr/Nt;
                Ht[idx2,:] = np.sin(k*(r[i1]-r[i2])*np.sin(thetat)).T*Nr/Nt;
                g[idx1] = (R[i1,i2]).real*Nr/Nt;   # check this again later
                g[idx2] = (R[i1,i2]).imag*Nr/Nt;   # check again
                gnz[idx1] = (Rnz[i1,i2]).real*Nr/Nt;
                gnz[idx2] = (Rnz[i1,i2]).imag*Nr/Nt;

        # inversion
        F = Nr/Nt;   # normalization
        sigma = 1;   # set to 1 because the difference is accounted for in G

        ##### ADD *10 for consistency with old model, NEED TO VERIFY THIS!!!!? line below
        G = np.linalg.norm(g-gnz)**2;   # pretend we know in advance the actual value of chi^2

        tic_maxent = time.time();

        lambda0 = 1e-5*np.ones(shape=(M,1));   # initial condition (can be set to anything)

        toc_maxent = time.time()
        elapsed_time_maxent = toc_maxent - tic_maxent;

        # Whitened solution
        def myfun(lambda1):
            return y_hysell96(lambda1,gnz,sigma,F,G,Hr);

        tic_maxEnt = time.time();

        #sol1 = fsolve(myfun,lambda0.ravel(), args=(), xtol=1e-14, maxfev=100000);
        lambda1 = root(myfun,lambda0, method='krylov', tol=1e-14);

        #print lambda1
        #print lambda1.x

        lambda1 = lambda1.x;

        toc_maxEnt = time.time();
        f_maxent = modelf(lambda1, Hr, F);
        ystar = myfun(lambda1);
        Lambda = np.sqrt(sum(lambda1**2.*sigma**2)/(4*G));
        ep = np.multiply(-lambda1,sigma**2)/ (2*Lambda);
        es = np.dot(Hr, f_maxent) - gnz;   # should be same as ep
        chi2 = np.sum((es/sigma)**2);


        # CS inversion using irls ########################

        # (Use Nr, thetar, gnz, and Hr from MaxEnt above)

        Psi = deb4_basis(Nr);   ###### REPLACED BY LINE BELOW (?)

        # REMOVE THIS?--------------------------------
        #wavelet1 = pywt.Wavelet('db4')
        #Phi, Psi, x = wavelet1.wavefun(level=3)
        # --------------------------------------------

        # add "sum to 1" constraint
        H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
        N_temp = np.array([[Nr/Nt]]);
        g2 = np.concatenate( (gnz, N_temp), axis=0 );
        H2 = H2.T.conj();

        print 'H2 shape', H2.shape
        print 'Psi shape', Psi.shape

        s = irls_dn2(H2*Psi, g2, 0.5, G);
        # f_cs = Psi*s;
        #
        # # plot
        # plot(thetar,f_cs,'r.-');
        # hold on;
        # plot(thetat,fact,'k-');
        # hold off;


        # # # Scaling and shifting
        # # # Only necessary for capon solution

        f_capon = f_capon/np.max(f_capon)*np.max(fact);


        ### analyze stuff ######################
        # calculate MSE
        rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
        rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
        rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
        #rmse_cs = np.sqrt(np.mean((f_cs - factr)**2));

        relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
        relrmse_capon = rmse_capon / np.linalg.norm(fact);
        relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
        #relrmse_cs = rmse_cs / np.linalg.norm(fact);

        # To perform the dot products the matrices must be aligned; this is done below
        # inside the np.dot() calls via .T.conj()
        #f_fourier = f_fourier.T.conj()
        #f_capon = f_capon.T.conj()
        #f_maxent = f_maxent.T.conj()

        #factr = factr.T.conj()

        # calculate correlation
        corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
        corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
        corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
        #corr_cs = np.dot(f_cs,factr) / (np.linalg.norm(f_cs)*np.linalg.norm(factr));

        # calculate centered correlation
        f0 = factr - np.mean(factr);
        f1 = f_fourier - np.mean(f_fourier);
        corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
        f1 = f_capon - np.mean(f_capon);
        corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
        f1 = f_maxent - np.mean(f_maxent);
        corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
        #f1 = f_cs - np.mean(f_cs);
        #corrc_cs = np.dot(f0,f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));


        # # # plot stuff #########################

        #---- Capon----
        plt.figure(4)
        plt.subplot(2, 1, 1)
        plt.plot(180/math.pi*thetar, f_capon, 'r', label='Capon')
        plt.plot(180/math.pi*thetat, fact, 'k--', label='Truth')
        plt.ylabel('Power (arbitrary units)')
        plt.legend(loc='upper right')

        # formatting y-axis
        locs,labels = plt.yticks()
        plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
        plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)


        #---- MaxEnt----
        plt.subplot(2, 1, 2)
        plt.plot(180/math.pi*thetar, f_maxent, 'r', label='MaxEnt')
        plt.plot(180/math.pi*thetat, fact, 'k--', label='Truth')
        plt.ylabel('Power (arbitrary units)')
        plt.legend(loc='upper right')

        # formatting y-axis
        locs,labels = plt.yticks()
        plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
        plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)

        plt.show()


        # # PLOT FOR COMPRESSED SENSING
        # #
        # # subplot(3,1,3);
        # # plot(180/pi*thetar,f_cs,'r-');
        # # hold on;
        # # plot(180/pi*thetat,fact,'k--');
        # # hold off;
        # # ylim([min(f_cs) 1.1*max(fact)]);
        # # # title(sprintf('rel. RMSE: #.2e\tCorr: #.3f Corrc: #.3f', relrmse_cs, corr_cs, corrc_cs));
        # # # title 'Compressed Sensing - Daubechies Wavelets'
        # # xlabel 'Degrees'
        # # ylabel({'Power';'(arbitrary units)'})
        # # legend('Comp. Sens.','Truth');
        # #
        # # # set(gcf,'Position',[749 143 528 881]); # CSL
        # # # set(gcf,'Position',[885 -21 528 673]); # macbook
        # # pause(0.01);


        # # Store Results
        corr[0, snri, Ni] = corr_fourier;
        corr[1, snri, Ni] = corr_capon;
        corr[2, snri, Ni] = corr_maxent;
        #corr[3, snri, Ni] = corr_cs;

        rmse[0,snri,Ni] = relrmse_fourier;
        rmse[1,snri,Ni] = relrmse_capon;
        rmse[2,snri,Ni] = relrmse_maxent;
        #rmse[3,snri,Ni] = relrmse_cs;

        corrc[0,snri,Ni] = corrc_fourier;
        corrc[1,snri,Ni] = corrc_capon;
        corrc[2,snri,Ni] = corrc_maxent;
        #corrc[3,snri,Ni] = corrc_cs;


        print 'Capon:\t', elapsed_time_capon, 'sec';
        print 'Maxent:\t', elapsed_time_maxent, 'sec';
        #print 'CS:\t%3.3f sec\n', elapsed_time_cs;

        print (NN*(snri+1) + Ni), '/', (SNRdBvec.size*NN);

print corr