
I have a simple nonlinear function y = x.^2, where x and y are n-dimensional vectors and the square is taken element-wise. I would like to use a Matlab autoencoder to approximate y with a lower-dimensional vector. The problem is that the reconstructed y is distorted even when the dimension of the low-dimensional space is set to n-1. My training data looks like the following, and this is a typical result reconstructed from the low-dimensional space. My Matlab code is shown below.

%% Training data
inputSize=100;
hiddenSize1 = 80;

epo=1000;
dataNum=6000;
rng(123);
y=rand(2,dataNum);
xTrain=zeros(inputSize,dataNum);
for i=1:dataNum
    xTrain(:,i)=linspace(y(1,i),y(2,i),inputSize).^2;
end
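% (Added illustration, not part of the original post: the figures referenced
% above are not reproduced here, so plot a few columns of xTrain to show the
% kind of curves the autoencoder has to reproduce.)
figure; plot(xTrain(:,1:5)); title('example training samples (before scaling)');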

%scaling the data to roughly [-1,1]
for i=1:inputSize
    meanX=0.5; %mean(xTrain(i,:));
    sd=max(xTrain(i,:))-min(xTrain(i,:));
    xTrain(i,:) = (xTrain(i,:)- meanX)./sd;
end
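% (Added sanity check, not part of the original post: with meanX fixed at 0.5
% the rows end up only roughly centred rather than exactly in [-1,1]; the
% built-in mapminmax(xTrain) would map each row exactly onto [-1,1].)
fprintf('scaled xTrain range: [%g, %g]\n', min(xTrain(:)), max(xTrain(:)));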

%% Training the first Autoencoder

% Create the network. 
autoenc1 = feedforwardnet(hiddenSize1);
autoenc1.trainFcn = 'trainscg';
autoenc1.trainParam.epochs = epo;

% Do not use process functions at the input or output
autoenc1.inputs{1}.processFcns = {};
autoenc1.outputs{2}.processFcns = {};

% Set the transfer function for both layers to the hyperbolic tangent sigmoid (tansig)
autoenc1.layers{1}.transferFcn = 'tansig';
autoenc1.layers{2}.transferFcn = 'tansig';

% Use all of the data for training
autoenc1.divideFcn = 'dividetrain';
autoenc1.performFcn = 'mae';
%% Train the autoencoder
autoenc1 = train(autoenc1,xTrain,xTrain);
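% (Added check, not part of the original post: measure how well the full
% 100-80-100 network reproduces its input, using the configured 'mae'
% performance function, before it is split into encoder and decoder halves.)
xReconFull = autoenc1(xTrain);
fprintf('training MAE of autoenc1: %g\n', perform(autoenc1, xTrain, xReconFull));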
%%
% Create an empty network
autoEncoder = network;

% Set the number of inputs and layers
autoEncoder.numInputs = 1;
autoEncoder.numLayers = 1;

% Connect the 1st (and only) layer to the 1st input, and also connect the
% 1st layer to the output
autoEncoder.inputConnect(1,1) = 1;
autoEncoder.outputConnect = 1;

% Add a connection for a bias term to the first layer
autoEncoder.biasConnect = 1;

% Set the size of the input and the 1st layer
autoEncoder.inputs{1}.size = inputSize;
autoEncoder.layers{1}.size = hiddenSize1;

% Use the hyperbolic tangent sigmoid transfer function for the first layer
autoEncoder.layers{1}.transferFcn = 'tansig';

% Copy the weights and biases from the first layer of the trained
% autoencoder to this network
autoEncoder.IW{1,1} = autoenc1.IW{1,1};
autoEncoder.b{1,1} = autoenc1.b{1,1};


%%
% generate the features
feat1 = autoEncoder(xTrain);
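% (Added consistency check, not part of the original post: assuming no input
% processing functions are active on the hand-built network, feat1 should
% equal the hidden-layer activations of the full autoencoder.)
featCheck = tansig(bsxfun(@plus, autoenc1.IW{1,1}*xTrain, autoenc1.b{1}));
fprintf('max encoder copy error: %g\n', max(abs(feat1(:) - featCheck(:))));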

%%
% Create an empty network
autoDecoder = network;

% Set the number of inputs and layers
autoDecoder.numInputs = 1;
autoDecoder.numLayers = 1;

% Connect the 1st (and only) layer to the 1st input, and also connect the
% 1st layer to the output
autoDecoder.inputConnect(1,1) = 1;
autoDecoder.outputConnect(1) = 1;

% Add a connection for a bias term to the first layer
autoDecoder.biasConnect(1) = 1;

% Set the size of the input and the 1st layer
autoDecoder.inputs{1}.size = hiddenSize1;
autoDecoder.layers{1}.size = inputSize;

% Use the hyperbolic tangent sigmoid transfer function for the first layer
autoDecoder.layers{1}.transferFcn = 'tansig';

% Copy the weights and biases from the second layer of the trained
% autoencoder to this network

autoDecoder.IW{1,1} = autoenc1.LW{2,1};
autoDecoder.b{1,1} = autoenc1.b{2,1};

%% Reconstruction
desired=xTrain(:,50);
input=feat1(:,50);
output = autoDecoder(input);

figure
plot(output)
hold on
plot(desired,'r')
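% (Added summary, not part of the original post: push every code vector back
% through the decoder and report the mean absolute reconstruction error over
% the whole scaled training set, to quantify the distortion beyond one sample.)
xHat = autoDecoder(feat1);
fprintf('mean absolute reconstruction error: %g\n', mean(abs(xHat(:) - xTrain(:))));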
