min f(x) = x1^2 + x2^2 - 2*x1 + 1   s.t.   g(x) = -x2 + 3 <= 0
你好同学,内点法和外点法的程序前人都已经做了,你根据这个改一下就行:
外点法:
% Exterior Penalty optimization Problem
% This method helps to convert the Constrained optimization problem into
% unconstrained then applied Golden Section algorithm to solve
% unconstrained problem
% Coded based on algorithm is given in SS Rao Optimization book
% Algorithm is given in SS Rao book
% Coded by : Narayan Das Ahirwar
% Contact me: ndahirwar93@gmail.com
% Matlab functions used: syms (for defining the variables), fmincon,
% gradient (computes the gradient of the objective function with respect to the defined variables),
% subs (substitutes the new point [x,y] in place of the previous point and evaluates),
% solve (solves the equation with respect to the variables),
% VPA (Variable precision arithmetic for numerically evaluates each element
% of the double matrix and results you will get in sym type)
%%
clc
clear
format long
% Function Definition (Enter your Function here):
syms x;
% Objective function for the constrained optimization problem
func = (x-1)^2;
% Constraints, written in standard form g(x) <= 0:
g1(1) = 2-x;   % 2 - x <= 0  (i.e. x >= 2)
g1(2) = x-4;   % x - 4 <= 0  (i.e. x <= 4)
% Tolerances
eps1 = 0.001;  % convergence tolerance on the relative objective change
conv = 1;      % initialize the convergence criterion
% Initial Guess (Choose Initial Guesses):
i = 1;
rk(1) = 0.001; % initial (small) penalty parameter; grows 10x per outer iteration
q = 2;         % penalty exponent
k = 1;         % outer (penalty) iteration counter
% Convert the constrained problem into an unconstrained one.
% The EXTERIOR penalty must act only where a constraint is VIOLATED
% (g > 0). The bracket operator max(0,g) is expressed symbolically as
% (g + |g|)/2 so that feasible points are not penalized (the original
% used g^q, which wrongly penalizes satisfied constraints too).
f = func + rk(1)*(((g1(1)+abs(g1(1)))/2)^q + ((g1(2)+abs(g1(2)))/2)^q);
x_1(k,i) = -2; x_2(k,i) = 2; % lower/upper bound of the golden-section interval
accuracy = 1/100;
N = 6;         % maximum number of golden-section experiments per outer iteration
gamma = 1.618; % golden ratio
while conv > eps1
    % Rebuild the penalized objective with the CURRENT penalty parameter
    % rk(k). The original built f once with rk(1), so the rk update at the
    % bottom of this loop never affected the subproblem being solved.
    f = func + rk(k)*(((g1(1)+abs(g1(1)))/2)^q + ((g1(2)+abs(g1(2)))/2)^q);
    %% Golden-section method on the current unconstrained subproblem
    for i = 1:N
        L0 = x_2(k,i)-x_1(k,i);   % current interval length
        L_star = L0/gamma^2;      % golden-section reduction step
        x1 = x_1(k,i)+L_star;     % interior trial point (left)
        x2 = x_2(k,i)-L_star;     % interior trial point (right)
        % Order the bracket using temporaries: the original overwrote
        % x_1(k,i) with the min BEFORE computing the max, so when the
        % bounds were out of order both collapsed to the same value.
        lo = min(x_1(k,i),x_2(k,i));
        hi = max(x_1(k,i),x_2(k,i));
        x_1(k,i) = lo;
        x_2(k,i) = hi;
        if subs(f,[x],[x1]) < subs(f,[x],[x2])
            % Minimum lies toward the left: shrink from the right
            x_2(k,i+1) = x2;
            x_1(k,i+1) = x_1(k,i);
        elseif subs(f,[x],[x2]) < subs(f,[x],[x1])
            % Minimum lies toward the right: shrink from the left
            x_1(k,i+1) = x1;
            x_2(k,i+1) = x_2(k,i);
        else
            break; % equal values: the interval cannot be shrunk further
        end
    end
    % Tabulate the golden-section history of this outer iteration
    for j = 1:i
        Iter(j) = j;
        lower_limit(j) = x_1(k,j);
        upper_limit(j) = x_2(k,j);
        Optimum(j) = (x_1(k,j)+x_2(k,j))/2;  % interval midpoint
        Optimum_value(j) = vpa((subs(func,[x],[Optimum(j)])),4);
    end
    Iterations = Iter';
    Lower_limit = lower_limit';
    Upper_limit = upper_limit';
    OPtimum = Optimum';
    OPtimum_value = Optimum_value';
    T = table(Iterations,Lower_limit,Upper_limit,OPtimum,OPtimum_value);
    % Convergence check: relative change of the raw objective between the
    % two latest interval endpoints.
    Phi1 = subs(func,[x],[x_1(end,end-1)]);
    Phi2 = subs(func,[x],[x_2(end,end)]);
    conv = abs(abs(Phi2)-abs(Phi1))/abs(Phi1);
    rk(k+1) = 10*rk(k); % penalty parameter update for the next outer iteration
    k = k+1;
    % Carry the final bracket over as the start of the next outer iteration
    x_1(k,i) = x_1(k-1,end);
    x_2(k,i) = x_2(k-1,end);
    disp(T)
end
% ---- Reporting and plotting ----
Lower_limit = vpa(lower_limit,4)
upper_limit = vpa(upper_limit,4)
Optima = vpa((lower_limit+upper_limit)/2,4)    % interval midpoints per inner iteration
Objective_value = vpa((subs(f,[x],[Optima])),4) % penalized objective at those midpoints
hold on
x = [-5:0.01:5];    % numeric plotting grid (shadows the previous sym x on purpose)
f = @(x) (x-1).^2;  % objective, now as a numeric function handle
y1 = @(x) (2-x);    % constraint g1: 2-x <= 0
y2 = @(x) (x-4);    % constraint g2: x-4 <= 0
plot(x,f(x),'color','b');
hold on
plot(x,y1(x),'color','m');
hold on
plot(x,y2(x),'color','g')
hold on
text('FontWeight','bold','FontSize',12,'String','y = (x-1)^2','Position',[2.85 57.14 1.42],...
'Color','b');
plot(Optima(end), f(Optima(end)),'r*','MarkerSize',10)
title("Exterior penalty with Golden section method f = (x-1)^2");
annotation('textarrow',[0.58 0.60],[0.35 0.28],'Color','r', 'String', "Optimum Point");
annotation('textarrow',[0.39 0.51],[0.23 0.21],'Color','g', 'String', "2-x <= 0");
annotation('textarrow',[0.37 0.24],[0.57 0.36],'Color','m', 'String', "x-4 <= 0");
annotation('textarrow',[0.35 0.22],[0.64 0.66],'Color','b', 'String', "f = (x-1)^2");
% xlim/ylim are FUNCTIONS. The original assigned vectors to variables of
% the same name, which shadowed the built-ins and never changed the axes.
xlim([-5 5]);
ylim([-5 5]);
xlabel('x')
ylabel('Objective Function(x)')
hold off
内点法:
% INTERIOR Penalty optimization Problem
% This method helps to convert the Constrained optimization problem into
% unconstrained then applied Conjugate gradient algorithm to solve
% unconstrained problem
% Coded based on algorithm is given in SS Rao Optimization book
% Algorithm is given in SS Rao book
% Coded by : Narayan Das Ahirwar
% Contact me: ndahirwar93@gmail.com
% Matlab functions used: syms (for defining the variables), fmincon,
% gradient (computes the gradient of the objective function with respect to the defined variables),
% subs (substitutes the new point [x,y] in place of the previous point and evaluates),
% solve (solves the equation with respect to the variables),
% VPA (Variable precision arithmetic for numerically evaluates each element
% of the double matrix and results you will get in sym type)
%%
clc
clear
format long
% Define the symbolic design variables
syms x1 x2;
% Objective function for the constrained optimization problem
func = (x1-1)^2 + (x2-2)^2;
% Constraints, in standard form g(x) <= 0:
g1(1) = 2*x1-x2;  % 2*x1 - x2 <= 0
g1(2) = x1-5;     % x1 - 5 <= 0
% Tolerances
eps1 = 0.001;     % convergence tolerance on the relative objective change
conv = 1;         % initialize the convergence criterion
% Initial Guess (must be strictly feasible for an interior method;
% at (0.5, 3): g1 = -2 < 0 and g2 = -4.5 < 0):
i = 1;
x_1(i) = 0.5;
x_2(i) = 3;
rk(1) = 120;      % initial barrier parameter; shrunk each outer iteration
% Interior (barrier) transformation: since g < 0 inside the feasible
% region, -rk*sum(1/g) is a positive penalty that grows without bound as
% the iterates approach the constraint boundary.
f = func - rk(1)*(1/g1(1)+1/g1(2));
k = 1;            % outer iteration counter (semicolon added: suppress echo)
while conv > eps1
    % Rebuild the barrier objective with the current parameter rk(k);
    % the original built f once with rk(1), so reducing rk below had no
    % effect on the subproblem being minimized.
    f = func - rk(k)*(1/g1(1)+1/g1(2));
    Grad_f = gradient(f);
    S = -subs(Grad_f,[x1,x2],[x_1(i),x_2(i)]); % steepest-descent start direction
    % Conjugate-gradient (Fletcher-Reeves) inner loop on the subproblem
    while norm(S) > eps1
        % Exact line search: minimize f along x + lambda*S
        syms lambda
        func_lambda = subs(f, [x1,x2], [x_1(i)+S(1)*lambda,x_2(i)+lambda*S(2)]);
        dfunc_lambda = (diff(func_lambda,lambda));
        lambda = vpa(solve(dfunc_lambda==0,lambda),6);
        lambda = lambda(imag(lambda)==0);  % keep only real stationary points
        % Evaluate f at every real candidate step and keep the best one.
        % NOTE: the loop index must NOT be k — the original reused k here
        % and clobbered the outer penalty-iteration counter used below.
        % The array is also rebuilt each pass so stale values from a
        % previous line search cannot survive.
        fun_lambda_value = sym([]);
        for m = 1:numel(lambda)
            fun_lambda_value(m) = subs(f,[x1,x2],[x_1(i)+lambda(m)*S(1),x_2(i)+lambda(m)*S(2)]);
        end
        [value, index] = min(fun_lambda_value);
        lambda = lambda(index);            % chosen step length
        % Step to the new point
        x_1(i+1) = x_1(i)+lambda*S(1);
        x_2(i+1) = x_2(i)+lambda*S(2);
        Grad_old = subs(Grad_f,[x1,x2],[x_1(i),x_2(i)]);
        i = i+1;
        Grad_new = subs(Grad_f,[x1,x2],[x_1(i),x_2(i)]);
        % Fletcher-Reeves update of the search direction
        S = -(Grad_new)+((norm(Grad_new))^2/(norm(Grad_old))^2)*S;
    end
    % Convergence check on the TRUE objective between successive points
    Phi1 = subs(func,[x1,x2],[x_1(i-1),x_2(i-1)]);
    Phi2 = subs(func,[x1,x2],[x_1(i),x_2(i)]);
    conv = abs(abs(Phi2)-abs(Phi1))/abs(Phi1);
    rk(k+1) = 0.01*rk(k); % shrink the barrier parameter toward the constrained optimum
    k = k+1;
end
% ---- Reporting ----
Iter = 1:i;   % one entry per CG point generated
K = 1:k       % outer-iteration indices (echoed deliberately, no semicolon)
X_coordinate = x_1';
Y_coordinate = x_2';
Iterations = Iter';
Rk = rk'      % barrier-parameter history (echoed deliberately)
for i=1:length(X_coordinate)
    % NOTE(review): this evaluates the PENALIZED objective f, not the raw
    % objective func — confirm that is the intended table column.
    Objective_value(i) = double(subs(f,[x1,x2], [x_1(i),x_2(i)]));
end
Objective_value = Objective_value';
T = table(Iterations,X_coordinate,Y_coordinate, Objective_value);
fprintf('Initial Objective Function Value: %d\n\n',double(subs(f,[x1,x2], [x_1(1),x_2(1)])));
fprintf('Number of Iterations for Convergence: %d\n\n', i);
fprintf('Point of Minima: [%d,%d]\n\n', x_1(i), x_2(i));
fprintf('Objective Function Minimum Value: %d\n\n', double(subs(f,[x1,x2], [x_1(i),x_2(i)])));
% ---- Contour and plotting ----
h = figure;
filename = 'Interior Penalty Method';
hold on;
fcontour(func,[-8,8,-8,8],'MeshDensity',100);
colormap 'jet'
[x1,x2] = meshgrid(-8:0.05:8); % shadows the sym variables: numeric from here on
Z1 = (2*x1-x2 == 0) ; % first constraint boundary (logical mask)
Z2= (x1 <= 5);        % second constraint region (logical mask)
contour(x1,x2,Z1,'-b','LineWidth',0.5)
contour(x1,x2,Z2,'-g','LineWidth',0.5)
% Trace the path from the initial guess to the optimum point.
for j = 1:i-1
    plot(x_1(j),x_2(j),'Og','LineWidth',2);
    plot(x_1(j+1),x_2(j+1),'Or','LineWidth',2);
    plot(x_1(j:j+1),x_2(j:j+1),'*-k')
    plot(x_1(j+1),x_2(j+1),'*r','LineWidth',2);
end
% Labels and annotations are loop-invariant: draw them ONCE. The original
% re-created every annotation object on each pass through the loop above.
xlabel('x1')
ylabel('x2')
title("Interior Penalty method f = (x1-1)^2 + (x2-2)^2");
annotation('textarrow',[0.683 0.753],[0.254 0.430],'Color','g','String', "x1 <= 5");
annotation('textarrow',[0.523 0.478],[0.228 0.418],'Color','b', 'String', "2*x1-x2 == 0");
annotation('textarrow',[0.394 0.395],[0.547 0.65],'Color','r','String', "Optimum Point (x1,x2) ");
annotation('textarrow',[0.530 0.533],[0.838 0.690],'Color','g','String', "Initial Point (x1,x2) ");
disp(T)