% MATLAB/Octave
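% Adaptive equalization of a fixed FIR channel: LMS, stochastic gradient (SG)
% and RLS equalizers are trained on known symbols, and their learning curves
% are compared against the Wiener (MMSE) solution.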
clear all; close all; clc
%% Initialization
% channel parameters
sigmaS = 1; %signal power
sigmaN = 0.01; %noise power
% CSI (channel state information):
channel = [0.722-1j*0.779; -0.257-1j*0.722; -0.789-1j*1.862];
M = 5; % equalizer length (number of taps)
% step sizes
mu_LMS = [0.01,0.07];
mu_SG = [0.01,0.07];
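% note: LMS converges in the mean for 0 < mu < 2/lambda_max(Rxx);
% both step sizes above satisfy this bound for the channel used here.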
NS = 1000; %number of symbols
NEnsembles = 1000; %number of ensembles
%% Compute Rxx and p
%the maximum index of channel taps (l=0,1...L):
L = length(channel) - 1;
H = convmtx(channel, M-L); %channel matrix (Toeplitz structure)
Rnn = sigmaN*eye(M); % the noise covariance matrix
% the received-signal covariance matrix:
Rxx = sigmaS*(H*H') + Rnn;
%the cross-correlation vector
%between the tap-input vector and the desired response:
p = sigmaS*H(:,1);
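% (column n of the Hankel data matrix below is [s(n); s(n+1); ...; s(n+M-L-1)],
% so E{x(n)*conj(s(n))} = sigmaS*H(:,1); the noise term is independent of s(n))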
% An anonymous function to calculate MSE(w) for a weight vector w
calc_MSE = @(w) real(w'*Rxx*w - w'*p - p'*w + sigmaS);
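% derivation: J(w) = E|s(n) - w'*x(n)|^2 = sigmaS - w'*p - p'*w + w'*Rxx*w,
% a quadratic surface whose unique minimum is the Wiener solution w = Rxx\p.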
%% Adaptive Equalization
N_test = 2;
MSE_LMS = zeros(NEnsembles, NS, N_test);
MSE_SG = zeros(NEnsembles, NS, N_test);
MSE_RLS = zeros(NEnsembles, NS, N_test);
for nEnsemble = 1:NEnsembles
    % training symbols (BPSK):
    symbols = sigmaS*sign(randn(1,NS));
    % received noisy symbols:
    X = H*hankel(symbols(1:M-L),[symbols(M-L:end),zeros(1,M-L-1)]) + ...
        sqrt(sigmaN)*(randn(M,NS)+1j*randn(M,NS))/sqrt(2);
    for n_mu = 1:N_test
        w_LMS = zeros(M,1);
        w_SG = zeros(M,1);
        p_SG = zeros(M,1);
        R_SG = zeros(M);
        for n = 1:NS
            % LMS - Least Mean Squares
            e = symbols(n) - w_LMS'*X(:,n);
            w_LMS = w_LMS + mu_LMS(n_mu)*X(:,n)*conj(e);
            MSE_LMS(nEnsemble,n,n_mu) = calc_MSE(w_LMS);
            % SG - stochastic gradient with sample estimates of Rxx and p
            R_SG = 1/n*((n-1)*R_SG + X(:,n)*X(:,n)');
            p_SG = 1/n*((n-1)*p_SG + X(:,n)*conj(symbols(n)));
            w_SG = w_SG + mu_SG(n_mu)*(p_SG - R_SG*w_SG);
            MSE_SG(nEnsemble,n,n_mu) = calc_MSE(w_SG);
        end
    end
    % RLS - Recursive Least Squares
    lambda_RLS = [0.8; 1]; % forgetting factors
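    % lambda = 1 yields the standard growing-window least-squares solution;
    % lambda < 1 weights recent samples more heavily.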
    for n_lambda = 1:length(lambda_RLS)
        % initialize the RLS state
        delta = 1; % regularization of the initial inverse correlation estimate
        w_RLS = zeros(M,1);
        P = eye(M)/delta; % running estimate of the inverse input correlation matrix
        PI = zeros(M,1);
        K = zeros(M,1); % gain vector
        for n = 1:NS
            % the recursive RLS update
            PI = P*X(:,n);
            K = PI/(lambda_RLS(n_lambda) + X(:,n)'*PI);
            ee = symbols(n) - w_RLS'*X(:,n);
            w_RLS = w_RLS + K*conj(ee);
            MSE_RLS(nEnsemble,n,n_lambda) = calc_MSE(w_RLS);
            P = P/lambda_RLS(n_lambda) - K/lambda_RLS(n_lambda)*X(:,n)'*P;
        end
    end
end
MSE_LMS_1 = mean(MSE_LMS(:,:,1));
MSE_LMS_2 = mean(MSE_LMS(:,:,2));
MSE_SG_1 = mean(MSE_SG(:,:,1));
MSE_SG_2 = mean(MSE_SG(:,:,2));
MSE_RLS_1 = mean(MSE_RLS(:,:,1));
MSE_RLS_2 = mean(MSE_RLS(:,:,2));
n = 1:NS;
m = [1 3 6 10 30 60 100 300 600 1000];
figure(1)
loglog(m, MSE_LMS_1(m),'x','linewidth',2, 'color','blue');
hold on;
loglog(m, MSE_LMS_2(m),'o','linewidth',2, 'color','blue');
loglog(m, MSE_SG_1(m),'x','linewidth',2, 'color','red');
loglog(m, MSE_SG_2(m),'o','linewidth',2, 'color','red');
loglog(m, MSE_RLS_1(m),'x','linewidth',2, 'color','green');
loglog(m, MSE_RLS_2(m),'o','linewidth',2, 'color','green');
wopt = Rxx\p; % Wiener solution
MSEopt = calc_MSE(wopt);
% plot the Wiener floor before the solid curves, so that the 7th legend
% entry ('Wiener solution') attaches to the correct line:
loglog(n, MSEopt*ones(size(n)), 'color','black','linewidth',2);
loglog(n, MSE_LMS_1(n),'linewidth',2, 'color','blue');
loglog(n, MSE_LMS_2(n),'linewidth',2, 'color','blue');
loglog(n, MSE_SG_1(n),'linewidth',2, 'color','red');
loglog(n, MSE_SG_2(n),'linewidth',2, 'color','red');
loglog(n, MSE_RLS_1(n),'linewidth',2, 'color','green');
loglog(n, MSE_RLS_2(n),'linewidth',2, 'color','green');
grid on
xlabel('n');
ylabel('Mean-Square Error');
title('LMS, SG, RLS')
legend(['LMS, \mu=' num2str(mu_LMS(1))],['LMS, \mu=' num2str(mu_LMS(2))],...
['SG, \mu=' num2str(mu_SG(1))],['SG, \mu=' num2str(mu_SG(2))],...
['RLS, \lambda=' num2str(lambda_RLS(1))],['RLS, \lambda=' num2str(lambda_RLS(2))],...
'Wiener solution')
% MATLAB/Octave
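% Time-varying variant of the script above: the channel switches from channel1
% to channel2 after the first NS1 training symbols, so that the tracking
% behaviour of LMS, SG and RLS can be compared.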
clear all; close all; clc
%% Initialization
% channel parameters
sigmaS = 1; %signal power
sigmaN = 0.01; %noise power
% CSI (channel state information):
% the channel for the transmission of the first NS1 training symbols
channel1 = [0.722 - 0.779i; -0.257 - 0.722i; -0.789 - 1.862i];
% the channel for the transmission of the next NS2 training symbols
channel2 = [-0.831 - 0.661i;-1.071 - 0.961i; -0.551 - 0.311i];
M = 5; % equalizer length (number of taps)
% step sizes
mu_LMS = [0.01,0.07];
mu_SG = [0.01,0.07];
% symbols / ensembles
NS1 = 500;
NS2 = 500;
NS = NS1+NS2;
NEnsembles = 1000; %number of ensembles
%% Compute Rxx and p
%the maximum index of channel taps (l=0,1...L):
L = length(channel1) - 1;
% Anonymous functions:
calc_Rxx = @(channel) ...
    sigmaS*(convmtx(channel, M-L)*convmtx(channel, M-L)') + sigmaN*eye(M);
calc_p = @(channel) sigmaS*convmtx(channel, M-L)*[1; zeros(M-L-1, 1)];
Rxx = zeros(M,M,2);
p = zeros(M,2);
Rxx(:,:,1) = calc_Rxx(channel1);
Rxx(:,:,2) = calc_Rxx(channel2);
p(:,1) = calc_p(channel1);
p(:,2) = calc_p(channel2);
% An anonymous function to calculate MSE(w) for a weight vector w under channel ch
calc_MSE = @(w, ch) real(w'*Rxx(:,:,ch)*w - w'*p(:, ch) - p(:, ch)'*w + sigmaS);
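% (the MSE is evaluated against the error surface of whichever channel is
% active at time n, so the reference jumps at n = NS1)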
%% Adaptive Equalization
N_test = 2;
MSE_LMS = zeros(NEnsembles, NS, N_test);
MSE_SG = zeros(NEnsembles, NS, N_test);
MSE_RLS = zeros(NEnsembles, NS, N_test);
for nEnsemble = 1:NEnsembles
    % training symbols (BPSK):
    symbols1 = sigmaS*sign(randn(1,NS1));
    symbols2 = sigmaS*sign(randn(1,NS2));
    % received noisy symbols:
    X1 = convmtx(channel1, M-L)*hankel(symbols1(1:M-L),[symbols1(M-L:end),zeros(1,M-L-1)]) + ...
        sqrt(sigmaN)*(randn(M,NS1)+1j*randn(M,NS1))/sqrt(2);
    X2 = convmtx(channel2, M-L)*hankel(symbols2(1:M-L),[symbols2(M-L:end),zeros(1,M-L-1)]) + ...
        sqrt(sigmaN)*(randn(M,NS2)+1j*randn(M,NS2))/sqrt(2);
    X = [X1, X2];
    symbols = [symbols1, symbols2];
    for n_mu = 1:N_test
        w_LMS = zeros(M,1);
        w_SG = zeros(M,1);
        p_SG = zeros(M,1);
        R_SG = zeros(M);
        for n = 1:NS
            if n <= NS1, curh = 1; else curh = 2; end
            % LMS - Least Mean Squares
            e = symbols(n) - w_LMS'*X(:,n);
            w_LMS = w_LMS + mu_LMS(n_mu)*X(:,n)*conj(e);
            MSE_LMS(nEnsemble,n,n_mu) = calc_MSE(w_LMS, curh);
            % SG - stochastic gradient with sample estimates of Rxx and p
            R_SG = 1/n*((n-1)*R_SG + X(:,n)*X(:,n)');
            p_SG = 1/n*((n-1)*p_SG + X(:,n)*conj(symbols(n)));
            w_SG = w_SG + mu_SG(n_mu)*(p_SG - R_SG*w_SG);
            MSE_SG(nEnsemble,n,n_mu) = calc_MSE(w_SG, curh);
        end
    end
    % RLS - Recursive Least Squares
    lambda_RLS = [0.8; 1]; % forgetting factors
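    % lambda < 1 discounts old samples exponentially, which lets RLS track the
    % channel switch at n = NS1; lambda = 1 corresponds to a growing window
    % that never forgets the first channel.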
    for n_lambda = 1:length(lambda_RLS)
        % initialize the RLS state
        delta = 1; % regularization of the initial inverse correlation estimate
        w_RLS = zeros(M,1);
        P = eye(M)/delta; % running estimate of the inverse input correlation matrix
        PI = zeros(M,1);
        K = zeros(M,1); % gain vector
        for n = 1:NS
            if n <= NS1, curh = 1; else curh = 2; end
            % the recursive RLS update
            PI = P*X(:,n);
            K = PI/(lambda_RLS(n_lambda) + X(:,n)'*PI);
            ee = symbols(n) - w_RLS'*X(:,n);
            w_RLS = w_RLS + K*conj(ee);
            MSE_RLS(nEnsemble,n,n_lambda) = calc_MSE(w_RLS, curh);
            P = P/lambda_RLS(n_lambda) - K/lambda_RLS(n_lambda)*X(:,n)'*P;
        end
    end
end
%% Wiener Solution
MSE_Wiener(1:NS1) = calc_MSE(Rxx(:,:,1)\p(:,1),1);
MSE_Wiener(NS1+1:NS) = calc_MSE(Rxx(:,:,2)\p(:,2),2);
MSE_LMS_1 = mean(MSE_LMS(:,:,1));
MSE_LMS_2 = mean(MSE_LMS(:,:,2));
MSE_SG_1 = mean(MSE_SG(:,:,1));
MSE_SG_2 = mean(MSE_SG(:,:,2));
MSE_RLS_1 = mean(MSE_RLS(:,:,1));
MSE_RLS_2 = mean(MSE_RLS(:,:,2));
figure(1)
n = 1:NS;
m = [2 4 6 10 30 60 100 300 600 1000];
semilogy(m, MSE_LMS_1(m),'+','linewidth',2, 'color','blue');
hold on;
semilogy(m, MSE_LMS_2(m),'o','linewidth',2, 'color','blue');
semilogy(m, MSE_SG_1(m),'+','linewidth',2, 'color','red');
semilogy(m, MSE_SG_2(m),'o','linewidth',2, 'color','red');
semilogy(m, MSE_RLS_1(m),'+','linewidth',2, 'color','green');
semilogy(m, MSE_RLS_2(m),'o','linewidth',2, 'color','green');
semilogy(n, MSE_Wiener(n), 'color','black','linewidth',2);
semilogy(n, MSE_LMS_1(n),'linewidth',2, 'color','blue');
semilogy(n, MSE_LMS_2(n),'linewidth',2, 'color','blue');
semilogy(n, MSE_SG_1(n),'linewidth',2, 'color','red');
semilogy(n, MSE_SG_2(n),'linewidth',2, 'color','red');
semilogy(n, MSE_RLS_1(n),'linewidth',2, 'color','green');
semilogy(n, MSE_RLS_2(n),'linewidth',2, 'color','green');
grid on
xlabel('n');
ylabel('MSE');
title(['LMS, SG, RLS, \sigma_N= ' num2str(sigmaN) ', \sigma_S= '...
num2str(sigmaS) ', M= ' num2str(M) ', L= ' num2str(L) ]);
legend(['LMS, \mu=' num2str(mu_LMS(1))],['LMS, \mu=' num2str(mu_LMS(2))],...
    ['SG, \mu=' num2str(mu_SG(1))],['SG, \mu=' num2str(mu_SG(2))],...
    ['RLS, \lambda=' num2str(lambda_RLS(1))],['RLS, \lambda=' ...
    num2str(lambda_RLS(2))],'Wiener solution','Location','northwest');
axis([0 NS 0.002 1])
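# Python/NumPy companion script: steepest descent on the same MSE surface,
# with several fixed step sizes and with the per-iteration optimal step size,
# compared against the Wiener solution.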
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
def convmtx(h, n):
    # Toeplitz convolution matrix, equivalent to MATLAB's convmtx(h(:), n)
    return toeplitz(np.hstack([h, np.zeros(n-1)]), np.hstack([h[0], np.zeros(n-1)]))
def MSE_calc(sigmaS, R, p, w):
    # J(w) = sigmaS - w^H p - p^H w + w^H R w
    w = w.reshape(w.shape[0], 1)
    wH = np.conj(w).reshape(1, w.shape[0])
    p = p.reshape(p.shape[0], 1)
    pH = np.conj(p).reshape(1, p.shape[0])
    MSE = sigmaS - np.dot(wH, p) - np.dot(pH, w) + np.dot(np.dot(wH, R), w)
    return MSE[0, 0]
def mu_opt_calc(gamma, R):
    gamma = gamma.reshape(gamma.shape[0], 1)
    gammaH = np.conj(gamma).reshape(1, gamma.shape[0])
    mu_opt = np.dot(gammaH, gamma) / np.dot(np.dot(gammaH, R), gamma)
    return mu_opt[0, 0]
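# For the quadratic surface J, the cost along the descent direction
# g = p - R*w, i.e. J(w + mu*g), is quadratic in mu and is minimized by
# mu_opt = (g^H g)/(g^H R g) (exact line search), which mu_opt_calc computes.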
M = 5 # equalizer length (number of taps)
h = np.array([0.722-1j*0.779, -0.257-1j*0.722, -0.789-1j*1.862])
L = len(h)-1 # channel memory (maximum tap index)
H = convmtx(h, M-L)
sigmaS = 1 # the desired signal's (s(n)) power
sigmaN = 0.01 # the noise's (n(n)) power
# The correlation matrix of the received signal:
# Rxx = E{x(n)x(n)^{H}}, where ^{H} means Hermitian
Rxx = sigmaS*(H @ np.conj(H).T) + sigmaN*np.identity(M)
# The cross-correlation vector between the tap-input vector x(n) and the desired response s(n):
p = (sigmaS)*H[:,0]
p = p.reshape((len(p), 1))
# Solution of the Wiener-Hopf equation:
wopt = np.linalg.solve(Rxx, p)
MSEopt = MSE_calc(sigmaS, Rxx, p, wopt)
# Steepest descent algorithm testing:
coeff = np.array([1, 0.9, 0.5, 0.2, 0.1])
lambda_max = np.max(np.linalg.eigvalsh(Rxx)) # Rxx is Hermitian, so its eigenvalues are real
mus = 2/lambda_max*coeff # different step sizes
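# Steepest descent converges only for 0 < mu < 2/lambda_max, so the coeff = 1
# curve sits exactly on the stability boundary and is not expected to converge.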
N_steps = 100
MSE = np.empty((len(mus), N_steps), dtype=complex)
for mu_idx, mu in enumerate(mus):
    w = np.zeros((M,1), dtype=complex)
    for N_i in range(N_steps):
        w = w - mu*(np.dot(Rxx, w) - p)
        MSE[mu_idx, N_i] = MSE_calc(sigmaS, Rxx, p, w)
MSEoptmu = np.empty((1, N_steps), dtype=complex)
w = np.zeros((M,1), dtype=complex)
for N_i in range(N_steps):
    gamma = p - np.dot(Rxx, w)
    mu_opt = mu_opt_calc(gamma, Rxx)
    w = w - mu_opt*(np.dot(Rxx, w) - p)
    MSEoptmu[:, N_i] = MSE_calc(sigmaS, Rxx, p, w)
x = [i for i in range(1, N_steps+1)]
plt.figure(figsize=(5, 4), dpi=300)
for idx, item in enumerate(coeff):
    if item == 1:
        item = ''
    plt.loglog(x, np.abs(MSE[idx, :]), label=r'$\mu = ' + str(item) + r'\mu_{max}$')
plt.loglog(x, np.abs(MSEoptmu[0, :]), label=r'$\mu = \mu_{opt}$')
plt.loglog(x, np.abs(MSEopt)*np.ones(len(x)), label='Wiener solution')
plt.grid(True)
plt.xlabel('Number of steps')
plt.ylabel('Mean-Square Error')
plt.title('Steepest descent')
plt.legend(loc='best')
plt.minorticks_on()
plt.grid(which='major')
plt.grid(which='minor', linestyle=':')
plt.savefig('SD.png')
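# A small sanity check (a hypothetical extra, not part of the original gist):
# with the exact-line-search step size, the final iterate should be close to
# the Wiener solution, and the final MSE close to MSEopt.
print('||w - wopt|| =', np.linalg.norm(w - wopt))
print('final MSE = %.6f vs. Wiener MSE = %.6f' % (np.real(MSEoptmu[0, -1]), np.real(MSEopt)))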