r17:18 by yamaro: Partial implementation of the wavelet transform. Implemented functions: dualfilt1, FSfarras, irls_dn (partial), irls_dn2 (partial), deb4_basis (partial).
@@ -0,0 +1,65 @@ FSfarras.py
'''
Created on May 26, 2014

@author: Yolian Amaro
'''

import numpy as np

def FSfarras():

    # Farras filters organized for the dual-tree complex DWT.
    #
    # USAGE:
    #    af, sf = FSfarras()
    # OUTPUT:
    #    af[0], af[1] - analysis filters for tree 1 and tree 2
    #    sf[0], sf[1] - synthesis filters for tree 1 and tree 2
    # See farras, dualtree, dualfilt1.
    #
    # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
    # http://taco.poly.edu/WaveletSoftware/
    #
    # Translated to Python by Yolian Amaro

    a1 = np.array([
        [ 0,                 0               ],
        [-0.08838834764832, -0.01122679215254],
        [ 0.08838834764832,  0.01122679215254],
        [ 0.69587998903400,  0.08838834764832],
        [ 0.69587998903400,  0.08838834764832],
        [ 0.08838834764832, -0.69587998903400],
        [-0.08838834764832,  0.69587998903400],
        [ 0.01122679215254, -0.08838834764832],
        [ 0.01122679215254, -0.08838834764832],
        [ 0,                 0               ]
    ])

    a2 = np.array([
        [ 0.01122679215254,  0               ],
        [ 0.01122679215254,  0               ],
        [-0.08838834764832, -0.08838834764832],
        [ 0.08838834764832, -0.08838834764832],
        [ 0.69587998903400,  0.69587998903400],
        [ 0.69587998903400, -0.69587998903400],
        [ 0.08838834764832,  0.08838834764832],
        [-0.08838834764832,  0.08838834764832],
        [ 0,                 0.01122679215254],
        [ 0,                -0.01122679215254]
    ])

    # Analysis filters for the two trees (Python-list analog of the MATLAB cell array af{1}, af{2})
    af = [a1, a2]

    # Synthesis filters: time-reversed analysis filters
    s1 = a1[::-1]
    s2 = a2[::-1]
    sf = [s1, s2]

    return af, sf
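A quick sanity check on the returned filters, as a sketch. It assumes the list-based af/sf return shown above and that the function lives in a module named FSfarras.py: each analysis filter column of an orthonormal filter bank should have unit energy, and the synthesis filters are just the time-reversed analysis filters.

import numpy as np
from FSfarras import FSfarras

af, sf = FSfarras()
print(np.sum(af[0]**2, axis=0))          # both columns should be close to 1.0
print(np.allclose(sf[0], af[0][::-1]))   # synthesis = time-reversed analysis -> True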
@@ -0,0 +1,38 @@ deb4_basis.py
'''
Created on May 26, 2014

@author: Yolian Amaro
'''

import numpy as np
from FSfarras import FSfarras
from dualfilt1 import dualfilt1

def deb4_basis(N):
    # Builds a Daubechies-4 dual-tree wavelet basis matrix (partial implementation).

    Psi = np.zeros(shape=(N, 2*N+1))
    idx = 0     # next free column of Psi (0-based; the MATLAB original starts at 1)

    J = 4
    Faf, Fsf = FSfarras()
    af, sf = dualfilt1()

    # TODO: translate the rest of the MATLAB code (requires dualtree/idualtree):
    #
    #   # compute transform of zero vector
    #   x = zeros(1,N)
    #   w = dualtree(x, J, Faf, af)
    #
    #   # uses both real and imaginary wavelets
    #   for i in range(1, J+1):
    #       for j in range(1, 2):
    #           for k in range(1, w[i][j].size):
    #               w[i][j][k] = 1
    #               y = idualtree(w, J, Fsf, sf)
    #               w[i][j][k] = 0
    #               # store it
    #               Psi[:, idx] = y.T.conj()
    #               idx = idx + 1
    #
    #   # add uniform vector (seems to be useful if there's a background)
    #   Psi[:, 2*N] = 1/np.sqrt(N)

    return Psi
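Until dualtree/idualtree are translated, a rough stand-in for the same construction can be sketched with pywt (already a dependency of the main script): push a unit impulse through the inverse DWT for every coefficient position and collect the results as columns. This uses the ordinary single-tree db4 transform, so it is only an illustration of the idea, not the dual-tree basis the MATLAB code produces; the function name, column count, and normalization are assumptions.

import numpy as np
import pywt

def db4_synthesis_basis(N, level=4):
    # Decompose a zero vector once, just to learn the coefficient layout
    coeffs = pywt.wavedec(np.zeros(N), 'db4', level=level)
    cols = []
    for c in coeffs:
        for k in range(len(c)):
            c[k] = 1.0                       # unit impulse in one coefficient
            y = pywt.waverec(coeffs, 'db4')  # corresponding synthesis vector
            c[k] = 0.0
            cols.append(y[:N])               # waverec may return N or N+1 samples
    cols.append(np.ones(N) / np.sqrt(N))     # uniform "background" column, as in the MATLAB comment
    return np.column_stack(cols)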
@@ -0,0 +1,65 @@ dualfilt1.py
'''
Created on May 29, 2014

@author: Yolian Amaro
'''

import numpy as np

def dualfilt1():

    # Kingsbury Q-filters for the dual-tree complex DWT
    #
    # USAGE:
    #    af, sf = dualfilt1()
    # OUTPUT:
    #    af[0], af[1] - analysis filters for tree 1 and tree 2
    #    sf[0], sf[1] - synthesis filters for tree 1 and tree 2
    #    note: af[1] is the reverse of af[0]
    # REFERENCE:
    #    N. G. Kingsbury, "A dual-tree complex wavelet
    #    transform with improved orthogonality and symmetry
    #    properties", Proceedings of the IEEE Int. Conf. on
    #    Image Proc. (ICIP), 2000
    # See dualtree
    #
    # WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
    # http://taco.poly.edu/WaveletSoftware/

    # These coefficients are rounded to 8 decimal places.

    a1 = np.array([
        [ 0.03516384000000,  0               ],
        [ 0,                 0               ],
        [-0.08832942000000, -0.11430184000000],
        [ 0.23389032000000,  0               ],
        [ 0.76027237000000,  0.58751830000000],
        [ 0.58751830000000, -0.76027237000000],
        [ 0,                 0.23389032000000],
        [-0.11430184000000,  0.08832942000000],
        [ 0,                 0               ],
        [ 0,                -0.03516384000000]
    ])

    a2 = np.array([
        [ 0,                -0.03516384000000],
        [ 0,                 0               ],
        [-0.11430184000000,  0.08832942000000],
        [ 0,                 0.23389032000000],
        [ 0.58751830000000, -0.76027237000000],
        [ 0.76027237000000,  0.58751830000000],
        [ 0.23389032000000,  0               ],
        [-0.08832942000000, -0.11430184000000],
        [ 0,                 0               ],
        [ 0.03516384000000,  0               ]
    ])

    # Analysis filters for the two trees (Python-list analog of the MATLAB cell array af{1}, af{2})
    af = [a1, a2]

    # Synthesis filters: time-reversed analysis filters
    s1 = a1[::-1]
    s2 = a2[::-1]
    sf = [s1, s2]

    return af, sf
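To make "analysis filters for tree i" concrete, here is a minimal sketch of one level of dual-tree analysis filtering: plain linear convolution followed by downsampling by 2. The real dualtree routine from the WaveletSoftware package also handles circular extension and delays, which are omitted here. It assumes the list-based return above, module names matching the function names, and the usual column convention (column 0 lowpass, column 1 highpass).

import numpy as np
from FSfarras import FSfarras
from dualfilt1 import dualfilt1

def analysis_step(x, filt):
    # filt[:, 0]: lowpass, filt[:, 1]: highpass; keep every second sample
    lo = np.convolve(x, filt[:, 0])[::2]
    hi = np.convolve(x, filt[:, 1])[::2]
    return lo, hi

Faf, Fsf = FSfarras()   # first-stage filters
af, sf = dualfilt1()    # filters for the remaining stages

x = np.random.randn(64)
lo1, hi1 = analysis_step(x, Faf[0])   # tree 1 (real part)
lo2, hi2 = analysis_step(x, Faf[1])   # tree 2 (imaginary part)
print(lo1.shape)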
@@ -0,0 +1,80 @@ irls_dn.py
'''
Created on May 27, 2014

@author: Yolian Amaro
'''

from scipy import linalg
import scipy.sparse as sps
import numpy as np

def irls_dn(A, b, p, lambda1):

    # Minimize lambda1*||u||_p + ||A*u-b||_2, 0 < p <= 1,
    # using Iterative Reweighted Least Squares
    # (see http://math.lanl.gov/Research/Publications/Docs/chartrand-2008-iteratively.pdf
    # and http://web.eecs.umich.edu/~aey/sparse/sparse11.pdf)

    # Note to self: I found that "warm-starting" didn't really help too much.

    M, N = A.shape

    # Initialize and precompute:
    eps = 1e-2                                     # damping parameter

    # Economy-size QR of A' is used to get the minimum 2-norm solution of A*u = b
    Q, R = linalg.qr(A.T.conj(), mode='economic')
    c = linalg.solve(R.T.conj(), b)                # will be used later also
    u = np.dot(Q, c)                               # minimum 2-norm solution
    I = np.eye(M)                                  # identity for the M x M solve below

    # Loop until damping parameter is small enough
    while eps > 1e-7:
        epschange = False
        # Loop until it's time to change eps
        while not epschange:
            # main loop
            #   u_n = W*A'*(A*W*A' + lambda1*I)^-1 * b
            #   where W = diag(1/w)
            #   and   w = (u.^2 + eps).^(p/2-1)

            # Update the weights (this is 1/w, i.e. the diagonal of W)
            w = (u**2 + eps)**(1 - p/2)

            # Sparse diagonal weight matrix W = diag(1/w)
            W = sps.diags(w.ravel(), 0, format='csr')

            WAT = np.asarray(W.dot(A.T.conj()))    # W*A'
            u_new = np.dot(WAT, linalg.solve(np.dot(A, WAT) + lambda1*I, b))

            # See if this subproblem is converging
            delu = np.linalg.norm(u_new - u) / np.linalg.norm(u)
            epschange = delu < (np.sqrt(eps)/100)

            # Make update
            u = u_new

        eps = eps/10                               # decrease eps
        # Print info
        print 'eps =', eps

    return u
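A minimal usage sketch for irls_dn on a toy sparse-recovery problem. The sizes, seed, sparsity pattern, and lambda1 below are illustrative assumptions, not values from this project.

import numpy as np
from irls_dn import irls_dn

np.random.seed(0)
M, N = 30, 80
A = np.random.randn(M, N)
u_true = np.zeros((N, 1))
u_true[[5, 17, 42]] = [[1.0], [-2.0], [0.5]]   # 3-sparse ground truth
b = np.dot(A, u_true)

u_hat = irls_dn(A, b, 0.5, 1e-4)
print(np.linalg.norm(u_hat - u_true))          # should be small if recovery succeeds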
@@ -0,0 +1,50 @@ irls_dn2.py
'''
Created on May 27, 2014

@author: Yolian Amaro
'''

import numpy as np
from irls_dn import *

def irls_dn2(A, b, p, G):

    # Minimize ||u||_p subject to ||A*u-b||_2^2 <= G  (0 < p <= 1)

    # What this function actually does is find the lambda1 so that the solution
    # to the following problem satisfies ||A*u-b||_2^2 <= G:
    #   Minimize lambda1*||u||_p + ||A*u-b||_2

    # Start with a large lambda1 and do a line search until fidelity <= G.
    # (Inversions with large lambda1 are really fast anyway.)
    # Then spin up a root finder (MATLAB's fzero) to localize the root even better.

    # Line search
    alpha = 2                 # line search parameter
    lambda1 = 1e5             # what's a reasonable but safe initial guess?
    u = irls_dn(A, b, p, lambda1)

    # TODO: the line search and fzero refinement below are not translated yet:
    #
    #   fid = norm(A*u-b)^2;
    #   while fid >= G
    #       lambda1 = lambda1 / alpha;    # balance between speed and accuracy
    #       u = irls_dn(A,b,p,lambda1);
    #       fid = norm(A*u-b)^2;
    #       fprintf('lambda1 = %2e \t ||A*u-b||^2 = %.1f\n', lambda1, fid);
    #
    #   # Refinement using fzero
    #   lambda10 = [lambda1 lambda1*alpha];   # interval with zero-crossing
    #   f = @(lambda1) norm(A*irls_dn(A,b,p,lambda1) - b)^2 - G;
    #   opts = optimset('fzero');
    #   opts.Display = 'none';
    #   opts.TolX = 0.01*lambda1;
    #   lambda1 = fzero(f, lambda10, opts);
    #   u = irls_dn(A,b,p,lambda1);

    return u
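A hedged sketch of the untranslated part: the lambda1 line search plus a root refinement, using scipy.optimize.brentq as a stand-in for MATLAB's fzero. The function name irls_dn2_sketch is illustrative; this is one way the commented block could look, not the author's final code.

import numpy as np
from scipy.optimize import brentq
from irls_dn import irls_dn

def irls_dn2_sketch(A, b, p, G):
    alpha = 2.0
    lambda1 = 1e5
    u = irls_dn(A, b, p, lambda1)
    fid = np.linalg.norm(np.dot(A, u) - b)**2

    # Shrink lambda1 until the data-fidelity constraint is met
    while fid >= G:
        lambda1 = lambda1 / alpha
        u = irls_dn(A, b, p, lambda1)
        fid = np.linalg.norm(np.dot(A, u) - b)**2
        print('lambda1 = %.2e   ||A*u-b||^2 = %.1f' % (lambda1, fid))

    # Refine: find lambda1 where ||A*u-b||^2 - G crosses zero.
    # Assumes the loop above ran at least once, so [lambda1, lambda1*alpha] brackets the root.
    f = lambda lam: np.linalg.norm(np.dot(A, irls_dn(A, b, p, lam)) - b)**2 - G
    lambda1 = brentq(f, lambda1, lambda1*alpha, xtol=0.01*lambda1)

    return irls_dn(A, b, p, lambda1)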
@@ -9,20 +9,17 @@
9 9 #----------------------------------------------------------
10 10
11 11 import math
12 #import cmath
13 #import scipy
14 #import matplotlib
15 12 import numpy as np
16 #from numpy import linalg
17 13 import matplotlib.pyplot as plt
18 14 from scipy import linalg
19 15 import time
20 16 from y_hysell96 import*
21 17 from deb4_basis import *
22 18 from modelf import *
23 from scipy.optimize import fsolve
19 #from scipy.optimize import fsolve
24 20 from scipy.optimize import root
25 21 import pywt
22 from irls_dn2 import *
26 23
27 24
28 25 ## Calculate Forward Model
@@ -138,8 +135,9 @@
138 135 I = sum(fact)[0];
139 136 fact = fact/I; # normalize to sum(f)==1
140 137 factr = factr/I; # normalize to sum(f)==1
141 #plot(thetat,fact,'r'); hold on;
142 #plot(thetar,factr,'k.'); hold off;
138 #plt.figure()
139 #plt.plot(thetat,fact,'r');
140 #plt.plot(thetar,factr,'k.');
143 141 #xlim([min(thetat) max(thetat)]);
144 142
145 143 #x = np.linspace(thetat.min(), thetat.max) ????
@@ -197,13 +195,14 @@
197 195 temp4 = (sigma_noise * (temp1 + temp2))/np.sqrt(2.0)
198 196
199 197 nz = np.multiply(temp4, temp3)
200
198
199 #---------------------- Remove this: ------------------------------------------
200 #nz = ((abs(np.multiply(temp4, temp3)) > 0).astype(int))
201 201 #nz = ((abs(np.dot(temp4, temp3)) > 0).astype(int))
202
203
204 202 #nz = np.dot(np.dot(sigma_noise, (temp1 + temp2)/math.sqrt(2), temp3 ));
205 203 #nz = np.dot(sigma_noise, (np.dot((np.random.rand(8,8) + j*np.random.rand(8,8))/math.sqrt(2.0) , (abs(U) > 0).astype(int))));
206
204 #--------------------------------------------------------------------------------
205
207 206 Unz = U + nz;
208 207 Rnz = np.dot(Unz.T.conj(),Unz); # the noisy version of R
209 208 plt.figure(3);
@@ -230,16 +229,17 @@
230 229
231 230 f_capon = np.zeros(shape=(Nr,1));
232 231
233 #tic_capon = time.time();
232 tic_capon = time.time();
234 233
235 234 for i in range(0, thetar.size):
236 235 th = thetar[i];
237 236 w = np.exp(1j*k*np.dot(r,np.sin(th)));
238 237 f_capon[i] = np.divide(1, ( np.dot( w.T.conj(), (linalg.solve(Rnz,w)) ) ).real)
239 238
240 #toc_capon = time.time()
241
242 # elapsed_time_capon = toc_capon - tic_capon;
239
240 toc_capon = time.time()
241
242 elapsed_time_capon = toc_capon - tic_capon;
243 243
244 244 f_capon = f_capon.real; # get rid of numerical imaginary noise
245 245
@@ -278,10 +278,15 @@
278 278 sigma = 1; # set to 1 because the difference is accounted for in G
279 279
280 280 ##### ADD *10 for consistency with old model, NEED TO VERIFY THIS!!!!? line below
281 G = np.linalg.norm(g-gnz)**2; # pretend we know in advance the actual value of chi^2
281 G = np.linalg.norm(g-gnz)**2 ; # pretend we know in advance the actual value of chi^2
282
283 tic_maxent = time.time();
282 284
283 285 lambda0 = 1e-5*np.ones(shape=(M,1)); # initial condition (can be set to anything)
284 286
287 toc_maxent = time.time()
288 elapsed_time_maxent = toc_maxent - tic_maxent;
289
285 290 # Whitened solution
286 291 def myfun(lambda1):
287 292 return y_hysell96(lambda1,gnz,sigma,F,G,Hr);
@@ -304,24 +309,28 @@
304 309 es = np.dot(Hr, f_maxent) - gnz; # should be same as ep
305 310 chi2 = np.sum((es/sigma)**2);
306 311
307
308
312
309 313 # CS inversion using irls ########################
310 314
311 315 # (Use Nr, thetar, gnz, and Hr from MaxEnt above)
312 316
313 #Psi = deb4_basis(Nr); ###### REPLACED BY LINE BELOW (?)
314
315 wavelet1 = pywt.Wavelet('db4')
316 Phi, Psi, x = wavelet1.wavefun(level=3)
317 Psi = deb4_basis(Nr); ###### REPLACED BY LINE BELOW (?)
318
319 # REMOVE THIS?--------------------------------
320 #wavelet1 = pywt.Wavelet('db4')
321 #Phi, Psi, x = wavelet1.wavefun(level=3)
322 # --------------------------------------------
317 323
318 324 # add "sum to 1" constraint
319 325 H2 = np.concatenate( (Hr, np.ones(shape=(1,Nr))), axis=0 );
320 326 N_temp = np.array([[Nr/Nt]]);
321 327 g2 = np.concatenate( (gnz, N_temp), axis=0 );
322
323
324 #s = irls_dn2(H2*Psi,g2,0.5,G);
328 H2 = H2.T.conj();
329
330 print 'H2 shape', H2.shape
331 print 'Psi shape', Psi.shape
332
333 s = irls_dn2(H2*Psi,g2,0.5,G);
325 334 # f_cs = Psi*s;
326 335 #
327 336 # # plot
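For reference, a hedged sketch of where this step seems headed, based on the commented MATLAB lines (s = irls_dn2(H2*Psi,g2,0.5,G) and f_cs = Psi*s). In NumPy these products have to be matrix products (np.dot) rather than the elementwise *, and the shapes only line up if H2 keeps its (M+1, Nr) orientation, i.e. without the extra transpose; the _sk names are illustrative and the whole block is an assumption to verify, not the author's final code.

# Sketch only: assumes Hr is (M, Nr), Psi is (Nr, K), gnz is (M, 1)
H2_sk = np.concatenate((Hr, np.ones(shape=(1, Nr))), axis=0)   # add the "sum to 1" row
g2_sk = np.concatenate((gnz, np.array([[Nr/Nt]])), axis=0)
s_sk = irls_dn2(np.dot(H2_sk, Psi), g2_sk, 0.5, G)             # sparse coefficients in the Psi basis
f_cs = np.dot(Psi, s_sk)                                       # back to the angular domain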
@@ -331,100 +340,129 @@
331 340 # hold off;
332 341
333 342
334 # # # Scaling and shifting
335 # # # Only necessary for capon solution
336
337
338 f_capon = f_capon/np.max(f_capon)*np.max(fact);
339
340
341 ### analyze stuff ######################
342 # calculate MSE
343 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
344 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
345 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
346 #rmse_cs = np.sqrt(np.mean((f_cs - factr).^2));
343 # # # Scaling and shifting
344 # # # Only necessary for capon solution
347 345
348 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
349 relrmse_capon = rmse_capon / np.linalg.norm(fact);
350 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
351 #relrmse_cs = rmse_cs / np.norm(fact);
352
353 factr = factr.T.conj()
354
355 # calculate correlation
356 corr_fourier = np.dot(f_fourier,factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
357 corr_capon = np.dot(f_capon,factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
358 corr_maxent = np.dot(f_maxent,factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
359 #corr_cs = np.dot(f_cs,factr) / (norm(f_cs)*norm(factr));
360
361 # calculate centered correlation
362 f0 = factr - np.mean(factr);
363 f1 = f_fourier - np.mean(f_fourier);
364 corrc_fourier = np.dot(f0,f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
365 f1 = f_capon - np.mean(f_capon);
366 corrc_capon = np.dot(f0,f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
367 f1 = f_maxent - np.mean(f_maxent);
368 corrc_maxent = np.dot(f0,f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
369 #f1 = f_cs - mean(f_cs);
370 #corrc_cs = dot(f0,f1) / (norm(f0)*norm(f1));
371
372
373
374 # # # plot stuff #########################
375
376 #---- Capon----
377 plt.figure(4)
378 plt.subplot(2, 1, 1)
379 plt.plot(180/math.pi*thetar, f_capon, 'r', label='Capon')
380 plt.plot(180/math.pi*thetat,fact, 'k--', label='Truth')
381 plt.ylabel('Power (arbitrary units)')
382 plt.legend(loc='upper right')
383
384 # formatting y-axis
385 locs,labels = plt.yticks()
386 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
387 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
388
389
390 #---- MaxEnt----
391 plt.subplot(2, 1, 2)
392 plt.plot(180/math.pi*thetar, f_maxent, 'r', label='MaxEnt')
393 plt.plot(180/math.pi*thetat,fact, 'k--', label='Truth')
394 plt.ylabel('Power (arbitrary units)')
395 plt.legend(loc='upper right')
396
397 # formatting y-axis
398 locs,labels = plt.yticks()
399 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
400 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
401
402 plt.show()
403
404
405 # # subplot(3,1,2);
406 # # plot(180/pi*thetar,f_maxent,'r-');
407 # # hold on;
408 # # plot(180/pi*thetat,fact,'k--');
409 # # hold off;
410 # # ylim([min(f_cs) 1.1*max(fact)]);
411 # # # title(sprintf('rel. RMSE: #.2e\tCorr: #.3f Corrc: #.3f', relrmse_maxent, corr_maxent, corrc_maxent));
412 # # ylabel({'Power';'(arbitrary units)'})
413 # # # title 'Maximum Entropy Method'
414 # # legend('MaxEnt','Truth');
415 # #
416 # # subplot(3,1,3);
417 # # plot(180/pi*thetar,f_cs,'r-');
418 # # hold on;
419 # # plot(180/pi*thetat,fact,'k--');
420 # # hold off;
421 # # ylim([min(f_cs) 1.1*max(fact)]);
422 # # # title(sprintf('rel. RMSE: #.2e\tCorr: #.3f Corrc: #.3f', relrmse_cs, corr_cs, corrc_cs));
423 # # # title 'Compressed Sensing - Debauchies Wavelets'
424 # # xlabel 'Degrees'
425 # # ylabel({'Power';'(arbitrary units)'})
426 # # legend('Comp. Sens.','Truth');
427 # #
428 # # # set(gcf,'Position',[749 143 528 881]); # CSL
429 # # # set(gcf,'Position',[885 -21 528 673]); # macbook
430 # # pause(0.01);
346
347 f_capon = f_capon/np.max(f_capon)*np.max(fact);
348
349
350 ### analyze stuff ######################
351 # calculate MSE
352 rmse_fourier = np.sqrt(np.mean((f_fourier - factr)**2));
353 rmse_capon = np.sqrt(np.mean((f_capon - factr)**2));
354 rmse_maxent = np.sqrt(np.mean((f_maxent - factr)**2));
355 #rmse_cs = np.sqrt(np.mean((f_cs - factr).^2));
356
357
358 relrmse_fourier = rmse_fourier / np.linalg.norm(fact);
359 relrmse_capon = rmse_capon / np.linalg.norm(fact);
360 relrmse_maxent = rmse_maxent / np.linalg.norm(fact);
361 #relrmse_cs = rmse_cs / np.norm(fact);
362
363 # Alignment of the matrices for the dot products is handled below, inside the np.dot() calls (via .T.conj())
364
365
366 #f_fourier = f_fourier.T.conj()
367 #f_capon = f_capon.T.conj()
368 #f_maxent = f_maxent.T.conj()
369
370 #factr = factr.T.conj()
371
372 # calculate correlation
373
374 corr_fourier = np.dot(f_fourier.T.conj(),factr) / (np.linalg.norm(f_fourier)*np.linalg.norm(factr));
375 corr_capon = np.dot(f_capon.T.conj(),factr) / (np.linalg.norm(f_capon)*np.linalg.norm(factr));
376 corr_maxent = np.dot(f_maxent.T.conj(),factr) / (np.linalg.norm(f_maxent)*np.linalg.norm(factr));
377 #corr_cs = np.dot(f_cs,factr) / (norm(f_cs)*norm(factr));
378
379
380 # calculate centered correlation
381 f0 = factr - np.mean(factr);
382 f1 = f_fourier - np.mean(f_fourier);
383
384 corrc_fourier = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
385 f1 = f_capon - np.mean(f_capon);
386 corrc_capon = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
387 f1 = f_maxent - np.mean(f_maxent);
388 corrc_maxent = np.dot(f0.T.conj(),f1) / (np.linalg.norm(f0)*np.linalg.norm(f1));
389 #f1 = f_cs - mean(f_cs);
390 #corrc_cs = dot(f0,f1) / (norm(f0)*norm(f1));
391
392
393
394 # # # plot stuff #########################
395
396 #---- Capon----
397 plt.figure(4)
398 plt.subplot(2, 1, 1)
399 plt.plot(180/math.pi*thetar, f_capon, 'r', label='Capon')
400 plt.plot(180/math.pi*thetat,fact, 'k--', label='Truth')
401 plt.ylabel('Power (arbitrary units)')
402 plt.legend(loc='upper right')
403
404 # formatting y-axis
405 locs,labels = plt.yticks()
406 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
407 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
408
409
410 #---- MaxEnt----
411 plt.subplot(2, 1, 2)
412 plt.plot(180/math.pi*thetar, f_maxent, 'r', label='MaxEnt')
413 plt.plot(180/math.pi*thetat,fact, 'k--', label='Truth')
414 plt.ylabel('Power (arbitrary units)')
415 plt.legend(loc='upper right')
416
417 # formatting y-axis
418 locs,labels = plt.yticks()
419 plt.yticks(locs, map(lambda x: "%.1f" % x, locs*1e4))
420 plt.text(0.0, 1.01, '1e-4', fontsize=10, transform = plt.gca().transAxes)
421
422 plt.show()
423
424
425 # # PLOT FOR COMPRESSED SENSING
426 # #
427 # # subplot(3,1,3);
428 # # plot(180/pi*thetar,f_cs,'r-');
429 # # hold on;
430 # # plot(180/pi*thetat,fact,'k--');
431 # # hold off;
432 # # ylim([min(f_cs) 1.1*max(fact)]);
433 # # # title(sprintf('rel. RMSE: #.2e\tCorr: #.3f Corrc: #.3f', relrmse_cs, corr_cs, corrc_cs));
434 # # # title 'Compressed Sensing - Daubechies Wavelets'
435 # # xlabel 'Degrees'
436 # # ylabel({'Power';'(arbitrary units)'})
437 # # legend('Comp. Sens.','Truth');
438 # #
439 # # # set(gcf,'Position',[749 143 528 881]); # CSL
440 # # # set(gcf,'Position',[885 -21 528 673]); # macbook
441 # # pause(0.01);
442
443
444 # # Store Results
445 corr[0, snri, Ni] = corr_fourier;
446 corr[1, snri, Ni] = corr_capon;
447 corr[2, snri, Ni] = corr_maxent;
448 #corr[3, snri, Ni] = corr_cs;
449
450 rmse[0,snri,Ni] = relrmse_fourier;
451 rmse[1,snri,Ni] = relrmse_capon;
452 rmse[2,snri,Ni] = relrmse_maxent;
453 #rmse[3,snri,Ni] = relrmse_cs;
454
455 corrc[0,snri,Ni] = corrc_fourier;
456 corrc[1,snri,Ni] = corrc_capon;
457 corrc[2,snri,Ni] = corrc_maxent;
458 #corrc[3,snri,Ni] = corrc_cs;
459
460
461 print 'Capon:\t', elapsed_time_capon, 'sec';
462 print 'Maxent:\t',elapsed_time_maxent, 'sec';
463 #print 'CS:\t%3.3f sec\n',elapsed_time_cs;
464
465 print (NN*(snri+1) + Ni), '/', (SNRdBvec.size*NN);
466
467 print corr