CoSApp tutorials on optimization
Sellar optimization problem¶
Multidisciplinary Design Optimization (MDO) problem proposed by Sellar et al. in
Sellar, R., Batill, S., & Renaud, J. (1996). Response surface based, concurrent subspace optimization for multidisciplinary system design. In 34th Aerospace Sciences Meeting and Exhibit (p. 714).
The MDO problem is written as follows:
Minimize \(x^2 + z_{2} + y_1 + e^{-y_2}\) with respect to \(x,\,z\), where:
\(y_1 = z_{1}^2 + z_{2} + x - 0.2\,y_2\)
\(y_2 = \sqrt{y_1} + z_{1} + z_{2}\)
subject to general constraints:
\(0 \leq z_{1} \leq 10\)
\(0 \leq z_{2} \leq 10\)
\(0 \leq x \leq 10\)
\(y_1 \geq 3.16\)
\(y_2 \leq 24\)
[1]:
from cosapp.base import System
import numpy as np
class SellarDiscipline1(System):
    """First Sellar discipline.

    Computes output y1 = z1**2 + z2 + x - 0.2 * y2
    from inputs x, z = (z1, z2) and coupling variable y2.
    """
    def setup(self):
        # Declare discipline inputs and its single output
        self.add_inward('z', np.zeros(2))
        self.add_inward('x')
        self.add_inward('y2')
        self.add_outward('y1')

    def compute(self):
        """Update y1 from current input values."""
        z1, z2 = self.z
        self.y1 = z1 * z1 + z2 + self.x - 0.2 * self.y2
class SellarDiscipline2(System):
    """Second Sellar discipline.

    Computes output y2 = sqrt(|y1|) + z1 + z2
    from inputs z = (z1, z2) and coupling variable y1.
    """
    def setup(self):
        # Declare discipline inputs and its single output
        self.add_inward('z', np.zeros(2))
        self.add_inward('y1')
        self.add_outward('y2')

    def compute(self):
        """Update y2 from current input values."""
        z1, z2 = self.z
        # abs() keeps sqrt well-defined if y1 transiently goes negative
        # during solver iterations
        self.y2 = np.sqrt(abs(self.y1)) + z1 + z2
[2]:
class Sellar(System):
    """Top-level system modeling the coupled Sellar problem.

    Aggregates the two disciplines and exposes x, z, y1 and y2
    at parent level through variable pulling.
    """
    def setup(self):
        disc1 = self.add_child(SellarDiscipline1('d1'), pulling=['x', 'z', 'y1'])
        disc2 = self.add_child(SellarDiscipline2('d2'), pulling=['z', 'y2'])
        # Couple the two disciplines through variables y1 and y2
        self.connect(disc1, disc2, ['y1', 'y2'])
Define optimization problem¶
[3]:
import time
from cosapp.drivers import Optimizer, NonLinearSolver
# Top-level Sellar system
s = Sellar('s')
# SLSQP (gradient-based) optimizer driver
optim = s.add_driver(Optimizer('optim', method='SLSQP', tol=1e-12, verbose=1))
optim.add_child(NonLinearSolver('solver', tol=1e-12)) # to solve cyclic dependencies
# Set optimization problem
# Objective expressed as a string evaluated on system variables
optim.set_minimum('x**2 + z[1] + y1 + exp(-y2)')
# Design variables
optim.add_unknown(['x', 'z'])
# Here, bounds on x and z are declared as general (nonlinear) constraints
optim.add_constraints([
    'y1 >= 3.16',
    'y2 <= 24',
    '0 <= x <= 10',
    '0 <= z <= 10',
])
optim  # last expression: displays the driver's problem summary in the notebook
Solve problem¶
In the cell below, we initialize the system and solve our optimization problem. Beforehand, we add a recorder and turn on option `monitor`, to keep a record of the system state at all iterations.
[4]:
from cosapp.recorders import DataFrameRecorder
# Add recorder to monitor iterations
# Record all top-level variables plus the optimizer objective;
# exclude internal variables of children d1 and d2 (pattern "d?.*")
optim.add_recorder(
    DataFrameRecorder(
        includes = ["*", "drivers['optim'].objective"],
        excludes = "d?.*",
    )
)
# Keep a record of the system state at every iteration
optim.options['monitor'] = True
# Initialization
s.x = 1.0
s.z = np.array([5.0, 2.0])
s.y1 = 1.0
s.y2 = 1.0
s.run_once()
# Run simulation
# s.exec_order = ('d2', 'd1') # should not change results
start_time = time.time()
s.run_drivers()
print(f"Time: {time.time() - start_time:.3f} s")
# Report optimized values, objective, and activity of constraint y1 >= 3.16
for varname in ('x', 'z', 'y1', 'y2'):
    print(f"s.{varname} = {s[varname]}")
print(f"objective = {optim.objective}")
print(f"s.y1 = 3.16 + {s.y1 - 3.16}")
Optimization terminated successfully (Exit mode 0)
Current function value: 3.1833939516401712
Iterations: 10
Function evaluations: 46
Gradient evaluations: 9
Time: 0.821 s
s.x = 1.1109542737025862e-17
s.z = [ 1.97763888e+00 -8.26695537e-14]
s.y1 = 3.1599999999996338
s.y2 = 3.7552777669259645
objective = 3.1833939516401712
s.y1 = 3.16 + -3.6637359812630166e-13
Analyze convergence¶
[5]:
# Export recorded iterations as a pandas DataFrame
df = optim.recorder.export_data()
# Shorten the objective column name for readability
df = df.rename(columns={"drivers['optim'].objective": "objective"})
# Drop bookkeeping columns; last expression displays the table in the notebook
df.drop(['Section', 'Status', 'Error code', 'Reference'], axis=1)
[5]:
objective | x | y1 | y2 | z | |
---|---|---|---|---|---|
0 | 28.588308 | 1.000000e+00 | 25.588302 | 12.058488 | [5.0, 2.0] |
1 | 8.561528 | -3.663736e-14 | 7.734337 | 6.470772 | [2.8640615850011732, 0.8256429731941652] |
2 | 3.728100 | 4.108893e-15 | 3.710724 | 4.052648 | [2.1263240018500835, 1.1235457009206584e-13] |
3 | 3.203561 | 7.436853e-14 | 3.180434 | 3.766754 | [1.9833770828672335, 2.7158227002120385e-12] |
4 | 3.183426 | 8.141018e-15 | 3.160033 | 3.755296 | [1.9776481150815908, 2.9991766538626304e-14] |
5 | 3.183394 | -5.445580e-15 | 3.160000 | 3.755278 | [1.9776388834872525, 3.657013439252168e-15] |
6 | 3.183394 | -1.932977e-14 | 3.160000 | 3.755278 | [1.9776388834628016, 8.260625423031724e-15] |
7 | 3.179496 | -5.734948e-13 | 3.156049 | 3.753055 | [1.976527285157952, 3.2653978632890356e-07] |
8 | 3.183394 | 1.110954e-17 | 3.160000 | 3.755278 | [1.9776388834630323, -8.26695536715569e-14] |
9 | 3.183394 | 1.110954e-17 | 3.160000 | 3.755278 | [1.9776388834630323, 1.4901078524293984e-08] |
10 | 3.183394 | 1.110954e-17 | 3.160000 | 3.755278 | [1.9776388834630323, -8.26695536715569e-14] |
[6]:
import plotly.express as px
# Plot objective vs. iteration index, with system variables shown on hover
fig = px.line(df, y="objective",
    hover_data = {
        'x': True,
        'z': True,
        'y1': True,
        'y2': True,
    },
    title = "Convergence",
    labels = {'index': 'iteration'}
)
fig.update_layout(
    height = 600,
    hovermode = 'x',
)
fig.update_traces(
    mode = 'lines+markers',  # mark individual iterations
)
fig.show()
Alternative formulation¶
Specify constraints on unknowns with arguments `lower_bound` and `upper_bound` in `Optimizer.add_unknown`. This alternative declaration may lead to a faster and more stable resolution, as bounds are not treated as nonlinear constraints.
[7]:
import time
from cosapp.drivers import Optimizer, NonLinearSolver
# Rebuild the system and optimizer from scratch for a fair comparison
s = Sellar('s')
optim = s.add_driver(Optimizer('optim', method='SLSQP', tol=1e-12, verbose=True))
optim.add_child(NonLinearSolver('solver', tol=1e-12)) # to solve cyclic dependencies
# Set optimization problem
optim.set_minimum('x**2 + z[1] + y1 + exp(-y2)')
# Bounds declared directly on unknowns, instead of nonlinear constraints
optim.add_unknown('x', lower_bound=0, upper_bound=10)
optim.add_unknown('z', lower_bound=0, upper_bound=10)
# Only the genuinely nonlinear constraints remain
optim.add_constraints([
    'y1 >= 3.16',
    'y2 <= 24',
])
# Initialization
s.x = 1.0
s.z = np.array([5.0, 2.0])
s.y1 = 1.0
s.y2 = 1.0
s.run_once()
# Run simulation
# s.exec_order = ('d2', 'd1') # should not change results
start_time = time.time()
s.run_drivers()
print(f"Time: {time.time() - start_time:.3f} s")
# Report optimized values, objective, and activity of constraint y1 >= 3.16
for varname in ('x', 'z', 'y1', 'y2'):
    print(f"s.{varname} = {s[varname]}")
print(f"objective = {optim.objective}")
print(f"y1 = 3.16 + {s.y1 - 3.16}")
Optimization terminated successfully (Exit mode 0)
Current function value: 3.1833939516406082
Iterations: 14
Function evaluations: 88
Gradient evaluations: 14
Time: 0.638 s
s.x = 0.0
s.z = [1.97763888e+00 2.49355451e-16]
s.y1 = 3.159999999999994
s.y2 = 3.7552777669262327
objective = 3.1833939516406082
y1 = 3.16 + -6.217248937900877e-15