Created January 24, 2015 16:18
Array Squared Error Component

from openmdao.main.api import Component
from openmdao.main.datatypes.api import Float, Array

import numpy as np

class ArraySquaredError(Component):
    """
    Computes the square of the norm of the distance (error) between two
    n-dimensional arrays "current" and "target", with analytic derivatives.

    Meant for use in models which seek to minimize the distance/error between
    two numerical arrays, perhaps as an objective or a constraint.

    'current' and 'target' must be ndarrays of the same shape and dimension.
    Initialization argument 'shape' can be either a positive integer (if the
    two arrays to be compared are 1D), or a tuple of positive integers (to
    specify a higher-dimensional shape).
    """

    norm = Float(0.0, iotype='out',
                 desc='square of the norm of the difference '
                      'between "current" and "target"')

    def __init__(self, shape):
        super(ArraySquaredError, self).__init__()

        self.add("current", Array(np.zeros(shape), iotype='in',
                                  desc='Current array'))
        self.add("target", Array(np.zeros(shape), iotype='in',
                                 desc='Target (reference) array'))

    def execute(self):
        """
        Computing the square of the norm as an output leads to simpler
        derivative calculations.
        """
        self.norm = np.linalg.norm(self.current - self.target) ** 2
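
    # Why the *squared* norm simplifies the derivatives: with e = current - target,
    #     norm = ||e||^2 = sum(e_i^2)
    # so the partials are simply
    #     d(norm)/d(current_i) =  2 * e_i
    #     d(norm)/d(target_i)  = -2 * e_i
    # whereas the un-squared norm would introduce a 1/||e|| factor (and a
    # singularity at e = 0).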

    def list_deriv_vars(self):
        return ('current', 'target'), ('norm',)

    def provideJ(self):
        """
        Computes twice the difference between "current" and "target", which is
        the principal calculation of the partial derivatives.
        """
        self.dnorm_dcurrent = 2 * (self.current - self.target)

    def apply_deriv(self, arg, result):
        # Flattening both factors gives the elementwise inner product for
        # arrays of any shape (a plain .dot on 2D inputs would be a matrix
        # product rather than the directional derivative).
        if 'norm' in result:
            if 'current' in arg:
                result['norm'] += np.dot(arg['current'].flatten(),
                                         self.dnorm_dcurrent.flatten())
            if 'target' in arg:
                result['norm'] -= np.dot(arg['target'].flatten(),
                                         self.dnorm_dcurrent.flatten())

    def apply_derivT(self, arg, result):
        if 'norm' in arg:
            if 'current' in result:
                result['current'] += arg['norm'] * self.dnorm_dcurrent
            if 'target' in result:
                result['target'] -= arg['norm'] * self.dnorm_dcurrent
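
    # Together, the three methods above supply the analytic Jacobian
    #     J = [d(norm)/d(current), d(norm)/d(target)]
    #       = [2*(current - target), -2*(current - target)]
    # apply_deriv applies J to input perturbations (forward mode), producing a
    # perturbation of 'norm'; apply_derivT applies J^T to a seed on 'norm'
    # (adjoint mode), producing contributions to 'current' and 'target'.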


if __name__ == '__main__':

    #------------ 1D
    n = np.random.randint(1, 15)

    ecomp = ArraySquaredError(n)
    ecomp.current = np.random.randn(n)
    ecomp.target = np.random.randn(n)

    ecomp.run()
    a = ecomp.check_gradient(mode="adjoint")

    #------------ 2D
    n = 3
    m = 4

    ecomp = ArraySquaredError((n, m))
    ecomp.current = np.random.randn(n, m)
    ecomp.target = np.random.randn(n, m)

    print(ecomp.current)
    print(ecomp.target)

    ecomp.run()
    a = ecomp.check_gradient(mode="adjoint")
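
As the class docstring notes, the component is meant to be dropped into a model as an objective (or constraint). Below is a minimal sketch of that usage, assuming the classic OpenMDAO 0.x Assembly/driver API (openmdao.main.api.Assembly, set_as_top, and openmdao.lib.drivers.api.SLSQPdriver). The assembly name MinimizeError, the shape of 3, and the fixed target values are hypothetical illustrations, not part of the gist; whether an array-valued parameter needs per-element bounds depends on the driver used.

import numpy as np

from openmdao.main.api import Assembly, set_as_top
from openmdao.lib.drivers.api import SLSQPdriver


class MinimizeError(Assembly):
    """Drive 'current' toward a fixed 'target' by minimizing the squared error."""

    def configure(self):
        self.add('error', ArraySquaredError(3))
        self.add('driver', SLSQPdriver())
        self.driver.workflow.add('error')

        # Let the optimizer vary 'current' and minimize the squared-error output.
        self.driver.add_parameter('error.current', low=-10.0, high=10.0)
        self.driver.add_objective('error.norm')


top = set_as_top(MinimizeError())
top.error.target = np.array([1.0, 2.0, 3.0])
top.run()
print(top.error.current)  # should converge toward the target array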