%%______________________________________________________________________________
%% This file is the main file demonstrating a 1-D curve example using GP regression
%% Created by Kai Zhou, 7/31/2019
%%____________________________________________________________________________________
clc
clear
%%% Section 1_________________________________________________________________________
%%% Load data: for demonstration purposes, one 1-D analytical curve is used as
%%% the model to be captured
%%% In real cases, load the true data here (see the commented sketch below)
nSamp = 1000;
DataInp = linspace(0,1,nSamp);
DataInp = DataInp';
Dataoutp = 5*abs(0.25-DataInp.^2)+4*abs(0.3-DataInp.^3)+0.5*abs(0.5-DataInp.^4).*sin(DataInp*2*pi*10);
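%%% A minimal sketch of the real-data load step mentioned above (the file name
%%% and column layout are hypothetical):
% T = readmatrix('my_data.csv'); % column 1: input, column 2: output
% DataInp  = T(:,1);
% Dataoutp = T(:,2);
% nSamp    = size(T,1);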
global numInp % declared global here, presumably shared with the GP routines
numInp = 1; % number of input features
numOup = 1; % number of output features - the single-response GP requires this to be 1
DataInp_ori = DataInp;
Dataoutp_ori = Dataoutp;
%%%__________________________________________________________________________________
%%% Section 2_________________________________________________________________________
%%% Data pre-processing in order to facilitate learning process
dataProcessMethod = 2; % preprocessing method selector for DataPreProcess
para1 = zeros(1,numInp);
para2 = zeros(1,numInp);
for i1 = 1:numInp
    [DataInp(:,i1),para1(i1),para2(i1)] = DataPreProcess(DataInp(:,i1),dataProcessMethod);
end
dataProcessMethod = 2;
[Dataoutp,outpMin,outpMax] = DataPreProcess(Dataoutp,dataProcessMethod);
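%%% For reference, a minimal sketch of min-max scaling to [0,1] - an assumption
%%% about what method 2 computes, inferred from the outpMin/outpMax return
%%% values (see DataPreProcess.m for the actual implementation):
% x_scaled = (x - min(x)) ./ (max(x) - min(x));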
%%%___________________________________________________________________________________
%%% Section 3__________________________________________________________________________
%%% Split training and testing datasets
%%% Uniform sampling is used here; in real cases, random sampling via the
%%% randperm function is preferred (see the commented sketch below)
%%% In this example, 50 samples (5%) are used for training and the remaining
%%% 950 for testing - adjust this split to see how performance changes
train_ind = floor(linspace(1,nSamp,0.05*nSamp));
test_ind = setdiff(1:nSamp,train_ind);
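%%% Alternative random split, as recommended above for real cases (a sketch,
%%% commented out so the demo stays deterministic):
% train_ind = sort(randperm(nSamp, floor(0.05*nSamp)));
% test_ind  = setdiff(1:nSamp, train_ind);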
dataInpTrain = DataInp(train_ind,:);
dataOupTrain = Dataoutp(train_ind,:);
dataInpTest = DataInp(test_ind,:);
dataOupTest = Dataoutp(test_ind,:);
%%%___________________________________________________________________________________
%%% Section 4__________________________________________________________________________
%%% Train model and do prediction
[dataOupTest_pred, dataOupTest_cov, hyper_opt] = SR_GPR(dataInpTrain,dataOupTrain,dataInpTest);
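%%% For reference, the standard single-response GP posterior (presumably what
%%% SR_GPR computes; hyper_opt holds the optimized kernel hyperparameters):
%%%   mean: K(X*,X) * inv(K(X,X) + sigma_n^2*I) * y
%%%   cov : K(X*,X*) - K(X*,X) * inv(K(X,X) + sigma_n^2*I) * K(X,X*)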
%%%____________________________________________________________________________________
%%% Section 5__________________________________________________________________________
%%% Map datasets back to original space
dataProcessMethod = 2;
for i1 = 1:numInp
    dataInpTrain(:,i1) = DataPreProcessBack(dataInpTrain(:,i1),para1(i1),para2(i1),dataProcessMethod);
    dataInpTest(:,i1) = DataPreProcessBack(dataInpTest(:,i1),para1(i1),para2(i1),dataProcessMethod);
end
dataProcessMethod = 2;
dataOupTrain = DataPreProcessBack(dataOupTrain,outpMin,outpMax,dataProcessMethod);
dataOupTest = DataPreProcessBack(dataOupTest,outpMin,outpMax,dataProcessMethod);
dataOupTest_pred_temp = DataPreProcessBack(dataOupTest_pred,outpMin,outpMax,dataProcessMethod);
% Rescale the predictive covariance back to the original output space using the
% point-wise ratio between back-transformed and normalized predictions
% (this ratio is a constant scale factor only when the transform has no offset)
for i1 = 1:length(test_ind)
    for i2 = i1:length(test_ind)
        dataOupTest_cov(i1,i2) = dataOupTest_cov(i1,i2)*dataOupTest_pred_temp(i1)*dataOupTest_pred_temp(i2)/dataOupTest_pred(i1)/dataOupTest_pred(i2);
        dataOupTest_cov(i2,i1) = dataOupTest_cov(i1,i2); % keep the matrix symmetric
    end
end
dataOupTest_pred = dataOupTest_pred_temp;
%%%_____________________________________________________________________________________
%%% Section 6__________________________________________________________________________
%%% Result visualization
figure(1)
subplot(3,1,1)
plot(DataInp_ori,Dataoutp_ori,'LineWidth',2);
hold on
plot(dataInpTrain,dataOupTrain,'d','LineWidth',2,...
'MarkerSize',2)
legend('Actual Model', 'Training Data Points')
set(gca, "FontSize", 13)
xlabel('Input','FontSize',14)
ylabel('Output','FontSize',14)
title('Actual Model with Training Data Points','FontSize',14)
subplot(3,1,2)
Dataoutp_r = zeros(nSamp,numOup); % reassembled outputs over all samples
Dataoutp_r_cov = zeros(nSamp,1); % predictive variance (zero at training points)
Dataoutp_r(train_ind) = dataOupTrain;
Dataoutp_r(test_ind) = dataOupTest_pred;
Dataoutp_r_cov(test_ind) = diag(dataOupTest_cov);
xx = [DataInp_ori', fliplr(DataInp_ori')]; % polygon x: forward, then reversed
yy1 = Dataoutp_r + 1.96.*sqrt(Dataoutp_r_cov); % upper 95% bound (1.96 sigma, matching the legend)
yy2 = Dataoutp_r - 1.96.*sqrt(Dataoutp_r_cov); % lower 95% bound
yy = [yy1',fliplr(yy2')]; % polygon y: upper bound, then reversed lower bound
fill(xx,yy,[200/255,200/255,200/255]); % shaded confidence band
hold on
plot(DataInp_ori,Dataoutp_r,'r','LineWidth',2);
plot(dataInpTrain,dataOupTrain,'kd','LineWidth',2,'MarkerSize',2)
xlabel('Input','FontSize',14)
ylabel('Output','FontSize',14)
title('Predicted Outputs','FontSize',14)
legend('95% confidence interval','Predicted Outputs/Mean','Training Data Points')
set(gca,'FontSize',13)
subplot(3,1,3)
predErr = abs(Dataoutp_r-Dataoutp_ori)./abs(Dataoutp_ori)*100; % relative error in percent (avoid shadowing the built-in error function)
plot(DataInp_ori,predErr,'-+','LineWidth',2)
set(gca,'FontSize',13)
xlabel('Input','FontSize',14)
ylabel('Error(%)','FontSize',14)
title('Prediction Error','FontSize',14)
%%%_____________________________________________________________________________________