Diffstat (limited to 'unsupported/test/autodiff.cpp')
-rw-r--r--  unsupported/test/autodiff.cpp  206
1 file changed, 200 insertions(+), 6 deletions(-)
diff --git a/unsupported/test/autodiff.cpp b/unsupported/test/autodiff.cpp
index 087e7c542..85743137e 100644
--- a/unsupported/test/autodiff.cpp
+++ b/unsupported/test/autodiff.cpp
@@ -16,7 +16,8 @@ EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y)
using namespace std;
// return x+std::sin(y);
EIGEN_ASM_COMMENT("mybegin");
- return static_cast<Scalar>(x*2 - pow(x,2) + 2*sqrt(y*y) - 4 * sin(x) + 2 * cos(y) - exp(-0.5*x*x));
+ // pow(float, int) promotes to pow(double, double)
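+ // (the extra "0+"/"+0" terms presumably exercise operator overloads mixing AutoDiffScalar and plain literals)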
+ return x*2 - 1 + static_cast<Scalar>(pow(1+x,2)) + 2*sqrt(y*y+0) - 4 * sin(0+x) + 2 * cos(y+0) - exp(Scalar(-0.5)*x*x+0);
//return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2;
EIGEN_ASM_COMMENT("myend");
}
@@ -104,6 +105,89 @@ struct TestFunc1
}
};
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+/* Test functor exercising the C++11 variadic-template code path. */
+template <typename Scalar>
+struct integratorFunctor
+{
+ typedef Matrix<Scalar, 2, 1> InputType;
+ typedef Matrix<Scalar, 2, 1> ValueType;
+
+ /*
+ * Implementation starts here.
+ */
+ integratorFunctor(const Scalar gain) : _gain(gain) {}
+ integratorFunctor(const integratorFunctor& f) : _gain(f._gain) {}
+ const Scalar _gain;
+
+ template <typename T1, typename T2>
+ void operator() (const T1 &input, T2 *output, const Scalar dt) const
+ {
+ T2 &o = *output;
+
+ /* Integrator to test the AD. */
+ o[0] = input[0] + input[1] * dt * _gain;
+ o[1] = input[1] * _gain;
+ }
+
+ /* Only needed for the test */
+ template <typename T1, typename T2, typename T3>
+ void operator() (const T1 &input, T2 *output, T3 *jacobian, const Scalar dt) const
+ {
+ T2 &o = *output;
+
+ /* Integrator to test the AD. */
+ o[0] = input[0] + input[1] * dt * _gain;
+ o[1] = input[1] * _gain;
+
+ if (jacobian)
+ {
+ T3 &j = *jacobian;
+
+ j(0, 0) = 1;
+ j(0, 1) = dt * _gain;
+ j(1, 0) = 0;
+ j(1, 1) = _gain;
+ }
+ }
+
+};
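+/* Usage sketch (values are hypothetical): with gain = 10, dt = 0.1 and input = (1, 2),
+ *   integratorFunctor<double> f(10);
+ *   Vector2d u(1, 2), out;
+ *   f(u, &out, 0.1);   // out = (1 + 2*0.1*10, 2*10) = (3, 20)
+ * and the 2x2 Jacobian at any input is [[1, dt*gain], [0, gain]] = [[1, 1], [0, 10]].
+ */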
+
+template<typename Func> void forward_jacobian_cpp11(const Func& f)
+{
+ typedef typename Func::ValueType::Scalar Scalar;
+ typedef typename Func::ValueType ValueType;
+ typedef typename Func::InputType InputType;
+ typedef typename AutoDiffJacobian<Func>::JacobianType JacobianType;
+
+ InputType x = InputType::Random(InputType::RowsAtCompileTime);
+ ValueType y, yref;
+ JacobianType j, jref;
+
+ const Scalar dt = internal::random<Scalar>();
+
+ jref.setZero();
+ yref.setZero();
+ f(x, &yref, &jref, dt);
+
+ //std::cerr << "y, yref, jref: " << "\n";
+ //std::cerr << y.transpose() << "\n\n";
+ //std::cerr << yref << "\n\n";
+ //std::cerr << jref << "\n\n";
+
+ AutoDiffJacobian<Func> autoj(f);
+ autoj(x, &y, &j, dt);
+
+ //std::cerr << "y j (via autodiff): " << "\n";
+ //std::cerr << y.transpose() << "\n\n";
+ //std::cerr << j << "\n\n";
+
+ VERIFY_IS_APPROX(y, yref);
+ VERIFY_IS_APPROX(j, jref);
+}
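+// Note: AutoDiffJacobian<Func> re-invokes f with AutoDiffScalar-valued inputs and
+// extracts the derivatives into j; the extra runtime parameter (dt) is forwarded
+// through its variadic operator(), which is why this path requires C++11.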
+#endif
+
template<typename Func> void forward_jacobian(const Func& f)
{
typename Func::InputType x = Func::InputType::Random(f.inputs());
@@ -127,8 +211,8 @@ template<typename Func> void forward_jacobian(const Func& f)
VERIFY_IS_APPROX(j, jref);
}
-
// TODO also check actual derivatives!
+template <int>
void test_autodiff_scalar()
{
Vector2f p = Vector2f::Random();
@@ -139,7 +223,9 @@ void test_autodiff_scalar()
VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y()));
}
+
// TODO also check actual derivatives!
+template <int>
void test_autodiff_vector()
{
Vector2f p = Vector2f::Random();
@@ -148,11 +234,12 @@ void test_autodiff_vector()
VectorAD ap = p.cast<AD>();
ap.x().derivatives() = Vector2f::UnitX();
ap.y().derivatives() = Vector2f::UnitY();
-
+
AD res = foo<VectorAD>(ap);
VERIFY_IS_APPROX(res.value(), foo(p));
}
+template <int>
void test_autodiff_jacobian()
{
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) ));
@@ -160,14 +247,121 @@ void test_autodiff_jacobian()
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ CALL_SUBTEST(( forward_jacobian_cpp11(integratorFunctor<double>(10)) ));
+#endif
+}
+
+
+template <int>
+void test_autodiff_hessian()
+{
+ typedef AutoDiffScalar<VectorXd> AD;
+ typedef Matrix<AD,Eigen::Dynamic,1> VectorAD;
+ typedef AutoDiffScalar<VectorAD> ADD;
+ typedef Matrix<ADD,Eigen::Dynamic,1> VectorADD;
+ VectorADD x(2);
+ double s1 = internal::random<double>(), s2 = internal::random<double>(), s3 = internal::random<double>(), s4 = internal::random<double>();
+ x(0).value()=s1;
+ x(1).value()=s2;
+
+ // set unit vectors for the derivative directions (partial derivatives of the input vector)
+ x(0).derivatives().resize(2);
+ x(0).derivatives().setZero();
+ x(0).derivatives()(0)= 1;
+ x(1).derivatives().resize(2);
+ x(1).derivatives().setZero();
+ x(1).derivatives()(1)=1;
+
+ // repeat the partial derivatives for the inner AutoDiffScalar
+ x(0).value().derivatives() = VectorXd::Unit(2,0);
+ x(1).value().derivatives() = VectorXd::Unit(2,1);
+
+ // zero the second-derivative (Hessian) seeds
+ for(int idx=0; idx<2; idx++) {
+ x(0).derivatives()(idx).derivatives() = VectorXd::Zero(2);
+ x(1).derivatives()(idx).derivatives() = VectorXd::Zero(2);
+ }
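+ // at this point each x(i) carries its value s_i, a unit first-derivative seed,
+ // and zeroed second-derivative seeds, so any expression in x yields its value,
+ // gradient and Hessian simultaneously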
+
+ ADD y = sin(AD(s3)*x(0) + AD(s4)*x(1));
+
+ VERIFY_IS_APPROX(y.value().derivatives()(0), y.derivatives()(0).value());
+ VERIFY_IS_APPROX(y.value().derivatives()(1), y.derivatives()(1).value());
+ VERIFY_IS_APPROX(y.value().derivatives()(0), s3*std::cos(s1*s3+s2*s4));
+ VERIFY_IS_APPROX(y.value().derivatives()(1), s4*std::cos(s1*s3+s2*s4));
+ VERIFY_IS_APPROX(y.derivatives()(0).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s3,s4*s3));
+ VERIFY_IS_APPROX(y.derivatives()(1).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s4,s4*s4));
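+ // for reference: with f = sin(s3*x0 + s4*x1),
+ //   grad f = cos(s3*x0 + s4*x1) * (s3, s4)
+ //   Hess f = -sin(s3*x0 + s4*x1) * [s3*s3, s3*s4; s4*s3, s4*s4]
+ // which is what the VERIFY calls above check, column by column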
+
+ ADD z = x(0)*x(1);
+ VERIFY_IS_APPROX(z.derivatives()(0).derivatives(), Vector2d(0,1));
+ VERIFY_IS_APPROX(z.derivatives()(1).derivatives(), Vector2d(1,0));
+}
+
+double bug_1222() {
+ typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+ const double _cv1_3 = 1.0;
+ const AD chi_3 = 1.0;
+ // the next line did not compile: operator+ returns ADS<DerType&>, which could not be converted to ADS<DerType>
+ const AD denom = chi_3 + _cv1_3;
+ return denom.value();
+}
+
+double bug_1223() {
+ using std::min;
+ typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+
+ const double _cv1_3 = 1.0;
+ const AD chi_3 = 1.0;
+ const AD denom = 1.0;
+
+ // used to fail because the implementation of min attempted to construct ADS<DerType&> via the
+ // constructor AutoDiffScalar(const Real& value), leaving m_derivatives (a reference here) uninitialized
+ #define EIGEN_TEST_SPACE
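+ // the empty macro leaves a space before '(', so a function-like min macro (e.g. from windows.h) would not be expanded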
+ const AD t = min EIGEN_TEST_SPACE (denom / chi_3, 1.0);
+
+ const AD t2 = min EIGEN_TEST_SPACE (denom / (chi_3 * _cv1_3), 1.0);
+
+ return t.value() + t2.value();
+}
+
+// regression test for compilation issues with specializations of ScalarBinaryOpTraits (bug 1260)
+void bug_1260() {
+ Matrix4d A;
+ Vector4d v;
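+ // the product need only compile; its result is intentionally discarded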
+ A*v;
+}
+
+// check a compilation issue with numext::max
+double bug_1261() {
+ typedef AutoDiffScalar<Matrix2d> AD;
+ typedef Matrix<AD,2,1> VectorAD;
+
+ VectorAD v;
+ const AD maxVal = v.maxCoeff();
+ const AD minVal = v.minCoeff();
+ return maxVal.value() + minVal.value();
+}
+
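+// regression test for bug 1264: scaling a matrix of AutoDiffScalars by a scalar expression such as (s + 3.0) used to fail to compile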
+double bug_1264() {
+ typedef AutoDiffScalar<Vector2d> AD;
+ const AD s;
+ const Matrix<AD, 3, 1> v1;
+ const Matrix<AD, 3, 1> v2 = (s + 3.0) * v1;
+ return v2(0).value();
}
void test_autodiff()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( test_autodiff_scalar() );
- CALL_SUBTEST_2( test_autodiff_vector() );
- CALL_SUBTEST_3( test_autodiff_jacobian() );
+ CALL_SUBTEST_1( test_autodiff_scalar<1>() );
+ CALL_SUBTEST_2( test_autodiff_vector<1>() );
+ CALL_SUBTEST_3( test_autodiff_jacobian<1>() );
+ CALL_SUBTEST_4( test_autodiff_hessian<1>() );
}
+
+ bug_1222();
+ bug_1223();
+ bug_1260();
+ bug_1261();
+ bug_1264();
}