aboutsummaryrefslogtreecommitdiff
path: root/docs
diff options
context:
space:
mode:
Diffstat (limited to 'docs')
-rw-r--r--docs/source/_templates/layout.html13
-rw-r--r--docs/source/_themes/armstrong/LICENSE26
-rw-r--r--docs/source/_themes/armstrong/globaltoc.html11
-rw-r--r--docs/source/_themes/armstrong/layout.html80
-rw-r--r--docs/source/_themes/armstrong/rtd-themes.conf65
-rw-r--r--docs/source/_themes/armstrong/static/rtd.css_t781
-rw-r--r--docs/source/_themes/armstrong/theme.conf65
-rw-r--r--docs/source/acknowledgements.rst25
-rw-r--r--docs/source/bibliography.rst10
-rw-r--r--docs/source/building.rst603
-rw-r--r--docs/source/conf.py12
-rw-r--r--docs/source/contributing.rst22
-rw-r--r--docs/source/faqs.rst282
-rw-r--r--docs/source/features.rst92
-rw-r--r--docs/source/history.rst27
-rw-r--r--docs/source/index.rst84
-rw-r--r--docs/source/introduction.rst81
-rw-r--r--docs/source/license.rst2
-rw-r--r--docs/source/modeling.rst323
-rw-r--r--docs/source/reading.rst10
-rw-r--r--docs/source/solving.rst908
-rw-r--r--docs/source/tutorial.rst179
-rw-r--r--docs/source/version_history.rst221
23 files changed, 2062 insertions, 1860 deletions
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 0000000..61c8eb5
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,13 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+ ga('create', 'UA-49769510-1', 'ceres-solver.org');
+ ga('send', 'pageview');
+</script>
+{% endblock %}
diff --git a/docs/source/_themes/armstrong/LICENSE b/docs/source/_themes/armstrong/LICENSE
deleted file mode 100644
index 894aa01..0000000
--- a/docs/source/_themes/armstrong/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2011 Bay Citizen & Texas Tribune
-
-Original ReadTheDocs.org code
-Copyright (c) 2010 Charles Leifer, Eric Holscher, Bobby Grace
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/docs/source/_themes/armstrong/globaltoc.html b/docs/source/_themes/armstrong/globaltoc.html
deleted file mode 100644
index 20d8641..0000000
--- a/docs/source/_themes/armstrong/globaltoc.html
+++ /dev/null
@@ -1,11 +0,0 @@
-{#
- basic/globaltoc.html
- ~~~~~~~~~~~~~~~~~~~~
-
- Sphinx sidebar template: global table of contents.
-
- :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-#}
-<h3><a href="{{ pathto(master_doc) }}">{{ _('Ceres Solver') }}</a></h3>
-{{ toctree() }}
diff --git a/docs/source/_themes/armstrong/layout.html b/docs/source/_themes/armstrong/layout.html
deleted file mode 100644
index 3faa34c..0000000
--- a/docs/source/_themes/armstrong/layout.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% extends "basic/layout.html" %}
-
-{% set script_files = script_files + [pathto("_static/searchtools.js", 1)] %}
-
-{% block htmltitle %}
-{{ super() }}
-
-<meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0; user-scalable=0;"/>
-
-{% endblock %}
-
-
-{%- macro sidebar() %}
- {%- if render_sidebar %}
- <div class="sphinxsidebar">
- <div class="sphinxsidebarwrapper">
- {%- block sidebarlogo %}
- {%- if logo %}
- <p class="logo"><a href="{{ pathto(master_doc) }}">
- <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
- </a></p>
- {%- endif %}
- {%- endblock %}
- {%- if sidebars != None %}
- {#- new style sidebar: explicitly include/exclude templates #}
- {%- for sidebartemplate in sidebars %}
- {%- include sidebartemplate %}
- {%- endfor %}
- {%- else %}
- {#- old style sidebars: using blocks -- should be deprecated #}
- {%- block sidebartoc %}
- {%- include "globaltoc.html" %}
- {%- endblock %}
- {%- block sidebarsourcelink %}
- {%- include "sourcelink.html" %}
- {%- endblock %}
- {%- if customsidebar %}
- {%- include customsidebar %}
- {%- endif %}
- {%- block sidebarsearch %}
- {%- include "searchbox.html" %}
- {%- endblock %}
- {%- endif %}
- </div>
- </div>
- {%- endif %}
-{%- endmacro %}
-
-
-{% block footer %}
-<div class="footer">
-{%- if show_copyright %}
- {%- if hasdoc('copyright') %}
- {% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
- {%- else %}
- {% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
- {%- endif %}
-{%- endif %}
-{%- if last_updated %}
- {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
-{%- endif %}
-</div>
-
-
-{% if theme_analytics_code %}
-<!-- Google Analytics Code -->
-<script type="text/javascript">
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', '{{ theme_analytics_code }}']);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-</script>
-{% endif %}
-
-{% endblock %}
diff --git a/docs/source/_themes/armstrong/rtd-themes.conf b/docs/source/_themes/armstrong/rtd-themes.conf
deleted file mode 100644
index 5930488..0000000
--- a/docs/source/_themes/armstrong/rtd-themes.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[theme]
-inherit = default
-stylesheet = rtd.css
-pygment_style = default
-show_sphinx = False
-
-[options]
-show_rtd = True
-
-white = #ffffff
-almost_white = #f8f8f8
-barely_white = #f2f2f2
-dirty_white = #eeeeee
-almost_dirty_white = #e6e6e6
-dirtier_white = #dddddd
-lighter_gray = #cccccc
-gray_a = #aaaaaa
-gray_9 = #999999
-light_gray = #888888
-gray_7 = #777777
-gray = #666666
-dark_gray = #444444
-gray_2 = #222222
-black = #111111
-light_color = #e8ecef
-light_medium_color = #DDEAF0
-medium_color = #8ca1af
-medium_color_link = #86989b
-medium_color_link_hover = #a6b8bb
-dark_color = #465158
-
-h1 = #000000
-h2 = #465158
-h3 = #6c818f
-
-link_color = #444444
-link_color_decoration = #CCCCCC
-
-medium_color_hover = #697983
-green_highlight = #8ecc4c
-
-
-positive_dark = #609060
-positive_medium = #70a070
-positive_light = #e9ffe9
-
-negative_dark = #900000
-negative_medium = #b04040
-negative_light = #ffe9e9
-negative_text = #c60f0f
-
-ruler = #abc
-
-viewcode_bg = #f4debf
-viewcode_border = #ac9
-
-highlight = #ffe080
-
-code_background = #eeeeee
-
-background = #465158
-background_link = #ffffff
-background_link_half = #ffffff
-background_text = #eeeeee
-background_text_link = #86989b
diff --git a/docs/source/_themes/armstrong/static/rtd.css_t b/docs/source/_themes/armstrong/static/rtd.css_t
deleted file mode 100644
index 90354c3..0000000
--- a/docs/source/_themes/armstrong/static/rtd.css_t
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * rtd.css
- * ~~~~~~~~~~~~~~~
- *
- * Sphinx stylesheet -- sphinxdoc theme. Originally created by
- * Armin Ronacher for Werkzeug.
- *
- * Customized for ReadTheDocs by Eric Pierce & Eric Holscher
- *
- * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-/* RTD colors
- * light blue: {{ theme_light_color }}
- * medium blue: {{ theme_medium_color }}
- * dark blue: {{ theme_dark_color }}
- * dark grey: {{ theme_grey_color }}
- *
- * medium blue hover: {{ theme_medium_color_hover }};
- * green highlight: {{ theme_green_highlight }}
- * light blue (project bar): {{ theme_light_color }}
- */
-
-@import url("basic.css");
-
-/* PAGE LAYOUT -------------------------------------------------------------- */
-
-body {
- font: 100%/1.5 "ff-meta-web-pro-1","ff-meta-web-pro-2",Arial,"Helvetica Neue",sans-serif;
- text-align: center;
- color: black;
- background-color: {{ theme_background }};
- padding: 0;
- margin: 0;
-}
-
-div.document {
- text-align: left;
- background-color: {{ theme_light_color }};
-}
-
-div.bodywrapper {
- background-color: {{ theme_white }};
- border-left: 1px solid {{ theme_lighter_gray }};
- border-bottom: 1px solid {{ theme_lighter_gray }};
- margin: 0 0 0 16em;
-}
-
-div.body {
- margin: 0;
- padding: 0.5em 1.3em;
- max-width: 55em;
- min-width: 20em;
-}
-
-div.related {
- font-size: 1em;
- background-color: {{ theme_background }};
-}
-
-div.documentwrapper {
- float: left;
- width: 100%;
- background-color: {{ theme_light_color }};
-}
-
-
-/* HEADINGS --------------------------------------------------------------- */
-
-h1 {
- margin: 0;
- padding: 0.7em 0 0.3em 0;
- font-size: 1.5em;
- line-height: 1.15;
- color: {{ theme_h1 }};
- clear: both;
-}
-
-h2 {
- margin: 2em 0 0.2em 0;
- font-size: 1.35em;
- padding: 0;
- color: {{ theme_h2 }};
-}
-
-h3 {
- margin: 1em 0 -0.3em 0;
- font-size: 1.2em;
- color: {{ theme_h3 }};
-}
-
-div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
- color: black;
-}
-
-h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
- display: none;
- margin: 0 0 0 0.3em;
- padding: 0 0.2em 0 0.2em;
- color: {{ theme_gray_a }} !important;
-}
-
-h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
-h5:hover a.anchor, h6:hover a.anchor {
- display: inline;
-}
-
-h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
-h5 a.anchor:hover, h6 a.anchor:hover {
- color: {{ theme_gray_7 }};
- background-color: {{ theme_dirty_white }};
-}
-
-
-/* LINKS ------------------------------------------------------------------ */
-
-/* Normal links get a pseudo-underline */
-a {
- color: {{ theme_link_color }};
- text-decoration: none;
- border-bottom: 1px solid {{ theme_link_color_decoration }};
-}
-
-/* Links in sidebar, TOC, index trees and tables have no underline */
-.sphinxsidebar a,
-.toctree-wrapper a,
-.indextable a,
-#indices-and-tables a {
- color: {{ theme_dark_gray }};
- text-decoration: none;
- border-bottom: none;
-}
-
-/* Most links get an underline-effect when hovered */
-a:hover,
-div.toctree-wrapper a:hover,
-.indextable a:hover,
-#indices-and-tables a:hover {
- color: {{ theme_black }};
- text-decoration: none;
- border-bottom: 1px solid {{ theme_black }};
-}
-
-/* Footer links */
-div.footer a {
- color: {{ theme_background_text_link }};
- text-decoration: none;
- border: none;
-}
-div.footer a:hover {
- color: {{ theme_medium_color_link_hover }};
- text-decoration: underline;
- border: none;
-}
-
-/* Permalink anchor (subtle grey with a red hover) */
-div.body a.headerlink {
- color: {{ theme_lighter_gray }};
- font-size: 1em;
- margin-left: 6px;
- padding: 0 4px 0 4px;
- text-decoration: none;
- border: none;
-}
-div.body a.headerlink:hover {
- color: {{ theme_negative_text }};
- border: none;
-}
-
-
-/* NAVIGATION BAR --------------------------------------------------------- */
-
-div.related ul {
- height: 2.5em;
-}
-
-div.related ul li {
- margin: 0;
- padding: 0.65em 0;
- float: left;
- display: block;
- color: {{ theme_background_link_half }}; /* For the >> separators */
- font-size: 0.8em;
-}
-
-div.related ul li.right {
- float: right;
- margin-right: 5px;
- color: transparent; /* Hide the | separators */
-}
-
-/* "Breadcrumb" links in nav bar */
-div.related ul li a {
- order: none;
- background-color: inherit;
- font-weight: bold;
- margin: 6px 0 6px 4px;
- line-height: 1.75em;
- color: {{ theme_background_link }};
- text-shadow: 0 1px rgba(0, 0, 0, 0.5);
- padding: 0.4em 0.8em;
- border: none;
- border-radius: 3px;
-}
-/* previous / next / modules / index links look more like buttons */
-div.related ul li.right a {
- margin: 0.375em 0;
- background-color: {{ theme_medium_color_hover }};
- text-shadow: 0 1px rgba(0, 0, 0, 0.5);
- border-radius: 3px;
- -webkit-border-radius: 3px;
- -moz-border-radius: 3px;
-}
-/* All navbar links light up as buttons when hovered */
-div.related ul li a:hover {
- background-color: {{ theme_medium_color }};
- color: {{ theme_white }};
- text-decoration: none;
- border-radius: 3px;
- -webkit-border-radius: 3px;
- -moz-border-radius: 3px;
-}
-/* Take extra precautions for tt within links */
-a tt,
-div.related ul li a tt {
- background: inherit !important;
- color: inherit !important;
-}
-
-
-/* SIDEBAR ---------------------------------------------------------------- */
-
-div.sphinxsidebarwrapper {
- padding: 0;
-}
-
-div.sphinxsidebar {
- margin: 0;
- margin-left: -100%;
- float: left;
- top: 3em;
- left: 0;
- padding: 0 1em;
- width: 14em;
- font-size: 1em;
- text-align: left;
- background-color: {{ theme_light_color }};
-}
-
-div.sphinxsidebar img {
- max-width: 12em;
-}
-
-div.sphinxsidebar h3, div.sphinxsidebar h4 {
- margin: 1.2em 0 0.3em 0;
- font-size: 1em;
- padding: 0;
- color: {{ theme_gray_2 }};
- font-family: "ff-meta-web-pro-1", "ff-meta-web-pro-2", "Arial", "Helvetica Neue", sans-serif;
-}
-
-div.sphinxsidebar h3 a {
- color: {{ theme_grey_color }};
-}
-
-div.sphinxsidebar ul,
-div.sphinxsidebar p {
- margin-top: 0;
- padding-left: 0;
- line-height: 130%;
- background-color: {{ theme_light_color }};
-}
-
-/* No bullets for nested lists, but a little extra indentation */
-div.sphinxsidebar ul ul {
- list-style-type: none;
- margin-left: 1.5em;
- padding: 0;
-}
-
-/* A little top/bottom padding to prevent adjacent links' borders
- * from overlapping each other */
-div.sphinxsidebar ul li {
- padding: 1px 0;
-}
-
-/* A little left-padding to make these align with the ULs */
-div.sphinxsidebar p.topless {
- padding-left: 0 0 0 1em;
-}
-
-/* Make these into hidden one-liners */
-div.sphinxsidebar ul li,
-div.sphinxsidebar p.topless {
- white-space: nowrap;
- overflow: hidden;
-}
-/* ...which become visible when hovered */
-div.sphinxsidebar ul li:hover,
-div.sphinxsidebar p.topless:hover {
- overflow: visible;
-}
-
-/* Search text box and "Go" button */
-#searchbox {
- margin-top: 2em;
- margin-bottom: 1em;
- background: {{ theme_dirtier_white }};
- padding: 0.5em;
- border-radius: 6px;
- -moz-border-radius: 6px;
- -webkit-border-radius: 6px;
-}
-#searchbox h3 {
- margin-top: 0;
-}
-
-/* Make search box and button abut and have a border */
-input,
-div.sphinxsidebar input {
- border: 1px solid {{ theme_gray_9 }};
- float: left;
-}
-
-/* Search textbox */
-input[type="text"] {
- margin: 0;
- padding: 0 3px;
- height: 20px;
- width: 144px;
- border-top-left-radius: 3px;
- border-bottom-left-radius: 3px;
- -moz-border-radius-topleft: 3px;
- -moz-border-radius-bottomleft: 3px;
- -webkit-border-top-left-radius: 3px;
- -webkit-border-bottom-left-radius: 3px;
-}
-/* Search button */
-input[type="submit"] {
- margin: 0 0 0 -1px; /* -1px prevents a double-border with textbox */
- height: 22px;
- color: {{ theme_dark_gray }};
- background-color: {{ theme_light_color }};
- padding: 1px 4px;
- font-weight: bold;
- border-top-right-radius: 3px;
- border-bottom-right-radius: 3px;
- -moz-border-radius-topright: 3px;
- -moz-border-radius-bottomright: 3px;
- -webkit-border-top-right-radius: 3px;
- -webkit-border-bottom-right-radius: 3px;
-}
-input[type="submit"]:hover {
- color: {{ theme_white }};
- background-color: {{ theme_green_highlight }};
-}
-
-div.sphinxsidebar p.searchtip {
- clear: both;
- padding: 0.5em 0 0 0;
- background: {{ theme_dirtier_white }};
- color: {{ theme_gray }};
- font-size: 0.9em;
-}
-
-/* Sidebar links are unusual */
-div.sphinxsidebar li a,
-div.sphinxsidebar p a {
- background: {{ theme_light_color }}; /* In case links overlap main content */
- border-radius: 3px;
- -moz-border-radius: 3px;
- -webkit-border-radius: 3px;
- border: 1px solid transparent; /* To prevent things jumping around on hover */
- padding: 0 5px 0 5px;
-}
-div.sphinxsidebar li a:hover,
-div.sphinxsidebar p a:hover {
- color: {{ theme_black }};
- text-decoration: none;
- border: 1px solid {{ theme_light_gray }};
-}
-
-/* Tweak any link appearing in a heading */
-div.sphinxsidebar h3 a {
-}
-
-
-
-
-/* OTHER STUFF ------------------------------------------------------------ */
-
-cite, code, tt {
- font-family: 'Consolas', 'Deja Vu Sans Mono',
- 'Bitstream Vera Sans Mono', monospace;
- font-size: 0.95em;
- letter-spacing: 0.01em;
-}
-
-tt {
- background-color: {{ theme_code_background }};
- color: {{ theme_dark_gray }};
-}
-
-tt.descname, tt.descclassname, tt.xref {
- border: 0;
-}
-
-hr {
- border: 1px solid {{ theme_ruler }};
- margin: 2em;
-}
-
-pre, #_fontwidthtest {
- font-family: 'Consolas', 'Deja Vu Sans Mono',
- 'Bitstream Vera Sans Mono', monospace;
- margin: 1em 2em;
- font-size: 0.95em;
- letter-spacing: 0.015em;
- line-height: 120%;
- padding: 0.5em;
- border: 1px solid {{ theme_lighter_gray }};
- background-color: {{ theme_code_background }};
- border-radius: 6px;
- -moz-border-radius: 6px;
- -webkit-border-radius: 6px;
-}
-
-pre a {
- color: inherit;
- text-decoration: underline;
-}
-
-td.linenos pre {
- padding: 0.5em 0;
-}
-
-div.quotebar {
- background-color: {{ theme_almost_white }};
- max-width: 250px;
- float: right;
- padding: 2px 7px;
- border: 1px solid {{ theme_lighter_gray }};
-}
-
-div.topic {
- background-color: {{ theme_almost_white }};
-}
-
-table {
- border-collapse: collapse;
- margin: 0 -0.5em 0 -0.5em;
-}
-
-table td, table th {
- padding: 0.2em 0.5em 0.2em 0.5em;
-}
-
-
-/* ADMONITIONS AND WARNINGS ------------------------------------------------- */
-
-/* Shared by admonitions, warnings and sidebars */
-div.admonition,
-div.warning,
-div.sidebar {
- font-size: 0.9em;
- margin: 2em;
- padding: 0;
- /*
- border-radius: 6px;
- -moz-border-radius: 6px;
- -webkit-border-radius: 6px;
- */
-}
-div.admonition p,
-div.warning p,
-div.sidebar p {
- margin: 0.5em 1em 0.5em 1em;
- padding: 0;
-}
-div.admonition pre,
-div.warning pre,
-div.sidebar pre {
- margin: 0.4em 1em 0.4em 1em;
-}
-div.admonition p.admonition-title,
-div.warning p.admonition-title,
-div.sidebar p.sidebar-title {
- margin: 0;
- padding: 0.1em 0 0.1em 0.5em;
- color: white;
- font-weight: bold;
- font-size: 1.1em;
- text-shadow: 0 1px rgba(0, 0, 0, 0.5);
-}
-div.admonition ul, div.admonition ol,
-div.warning ul, div.warning ol,
-div.sidebar ul, div.sidebar ol {
- margin: 0.1em 0.5em 0.5em 3em;
- padding: 0;
-}
-
-
-/* Admonitions and sidebars only */
-div.admonition, div.sidebar {
- border: 1px solid {{ theme_positive_dark }};
- background-color: {{ theme_positive_light }};
-}
-div.admonition p.admonition-title,
-div.sidebar p.sidebar-title {
- background-color: {{ theme_positive_medium }};
- border-bottom: 1px solid {{ theme_positive_dark }};
-}
-
-
-/* Warnings only */
-div.warning {
- border: 1px solid {{ theme_negative_dark }};
- background-color: {{ theme_negative_light }};
-}
-div.warning p.admonition-title {
- background-color: {{ theme_negative_medium }};
- border-bottom: 1px solid {{ theme_negative_dark }};
-}
-
-
-/* Sidebars only */
-div.sidebar {
- max-width: 200px;
-}
-
-
-
-div.versioninfo {
- margin: 1em 0 0 0;
- border: 1px solid {{ theme_lighter_gray }};
- background-color: {{ theme_light_medium_color }};
- padding: 8px;
- line-height: 1.3em;
- font-size: 0.9em;
-}
-
-.viewcode-back {
- font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
- 'Verdana', sans-serif;
-}
-
-div.viewcode-block:target {
- background-color: {{ theme_viewcode_bg }};
- border-top: 1px solid {{ theme_viewcode_border }};
- border-bottom: 1px solid {{ theme_viewcode_border }};
-}
-
-dl {
- margin: 1em 0 2.5em 0;
-}
-
-/* Highlight target when you click an internal link */
-dt:target {
- background: {{ theme_highlight }};
-}
-/* Don't highlight whole divs */
-div.highlight {
- background: transparent;
-}
-/* But do highlight spans (so search results can be highlighted) */
-span.highlight {
- background: {{ theme_highlight }};
-}
-
-div.footer {
- background-color: {{ theme_background }};
- color: {{ theme_background_text }};
- padding: 0 2em 2em 2em;
- clear: both;
- font-size: 0.8em;
- text-align: center;
-}
-
-p {
- margin: 0.8em 0 0.5em 0;
-}
-
-.section p img {
- margin: 1em 2em;
-}
-
-
-/* MOBILE LAYOUT -------------------------------------------------------------- */
-
-@media screen and (max-width: 600px) {
-
- h1, h2, h3, h4, h5 {
- position: relative;
- }
-
- ul {
- padding-left: 1.75em;
- }
-
- div.bodywrapper a.headerlink, #indices-and-tables h1 a {
- color: {{ theme_almost_dirty_white }};
- font-size: 80%;
- float: right;
- line-height: 1.8;
- position: absolute;
- right: -0.7em;
- visibility: inherit;
- }
-
- div.bodywrapper h1 a.headerlink, #indices-and-tables h1 a {
- line-height: 1.5;
- }
-
- pre {
- font-size: 0.7em;
- overflow: auto;
- word-wrap: break-word;
- white-space: pre-wrap;
- }
-
- div.related ul {
- height: 2.5em;
- padding: 0;
- text-align: left;
- }
-
- div.related ul li {
- clear: both;
- color: {{ theme_dark_color }};
- padding: 0.2em 0;
- }
-
- div.related ul li:last-child {
- border-bottom: 1px dotted {{ theme_medium_color }};
- padding-bottom: 0.4em;
- margin-bottom: 1em;
- width: 100%;
- }
-
- div.related ul li a {
- color: {{ theme_dark_color }};
- padding-right: 0;
- }
-
- div.related ul li a:hover {
- background: inherit;
- color: inherit;
- }
-
- div.related ul li.right {
- clear: none;
- padding: 0.65em 0;
- margin-bottom: 0.5em;
- }
-
- div.related ul li.right a {
- color: {{ theme_white }};
- padding-right: 0.8em;
- }
-
- div.related ul li.right a:hover {
- background-color: {{ theme_medium_color }};
- }
-
- div.body {
- clear: both;
- min-width: 0;
- word-wrap: break-word;
- }
-
- div.bodywrapper {
- margin: 0 0 0 0;
- }
-
- div.sphinxsidebar {
- float: none;
- margin: 0;
- width: auto;
- }
-
- div.sphinxsidebar input[type="text"] {
- height: 2em;
- line-height: 2em;
- width: 70%;
- }
-
- div.sphinxsidebar input[type="submit"] {
- height: 2em;
- margin-left: 0.5em;
- width: 20%;
- }
-
- div.sphinxsidebar p.searchtip {
- background: inherit;
- margin-bottom: 1em;
- }
-
- div.sphinxsidebar ul li, div.sphinxsidebar p.topless {
- white-space: normal;
- }
-
- .bodywrapper img {
- display: block;
- margin-left: auto;
- margin-right: auto;
- max-width: 100%;
- }
-
- div.documentwrapper {
- float: none;
- }
-
- div.admonition, div.warning, pre, blockquote {
- margin-left: 0em;
- margin-right: 0em;
- }
-
- .body p img {
- margin: 0;
- }
-
- #searchbox {
- background: transparent;
- }
-
- .related:not(:first-child) li {
- display: none;
- }
-
- .related:not(:first-child) li.right {
- display: block;
- }
-
- div.footer {
- padding: 1em;
- }
-
- .rtd_doc_footer .badge {
- float: none;
- margin: 1em auto;
- position: static;
- }
-
- .rtd_doc_footer .badge.revsys-inline {
- margin-right: auto;
- margin-bottom: 2em;
- }
-
- table.indextable {
- display: block;
- width: auto;
- }
-
- .indextable tr {
- display: block;
- }
-
- .indextable td {
- display: block;
- padding: 0;
- width: auto !important;
- }
-
- .indextable td dt {
- margin: 1em 0;
- }
-
- ul.search {
- margin-left: 0.25em;
- }
-
- ul.search li div.context {
- font-size: 90%;
- line-height: 1.1;
- margin-bottom: 1;
- margin-left: 0;
- }
-
-}
diff --git a/docs/source/_themes/armstrong/theme.conf b/docs/source/_themes/armstrong/theme.conf
deleted file mode 100644
index 5930488..0000000
--- a/docs/source/_themes/armstrong/theme.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[theme]
-inherit = default
-stylesheet = rtd.css
-pygment_style = default
-show_sphinx = False
-
-[options]
-show_rtd = True
-
-white = #ffffff
-almost_white = #f8f8f8
-barely_white = #f2f2f2
-dirty_white = #eeeeee
-almost_dirty_white = #e6e6e6
-dirtier_white = #dddddd
-lighter_gray = #cccccc
-gray_a = #aaaaaa
-gray_9 = #999999
-light_gray = #888888
-gray_7 = #777777
-gray = #666666
-dark_gray = #444444
-gray_2 = #222222
-black = #111111
-light_color = #e8ecef
-light_medium_color = #DDEAF0
-medium_color = #8ca1af
-medium_color_link = #86989b
-medium_color_link_hover = #a6b8bb
-dark_color = #465158
-
-h1 = #000000
-h2 = #465158
-h3 = #6c818f
-
-link_color = #444444
-link_color_decoration = #CCCCCC
-
-medium_color_hover = #697983
-green_highlight = #8ecc4c
-
-
-positive_dark = #609060
-positive_medium = #70a070
-positive_light = #e9ffe9
-
-negative_dark = #900000
-negative_medium = #b04040
-negative_light = #ffe9e9
-negative_text = #c60f0f
-
-ruler = #abc
-
-viewcode_bg = #f4debf
-viewcode_border = #ac9
-
-highlight = #ffe080
-
-code_background = #eeeeee
-
-background = #465158
-background_link = #ffffff
-background_link_half = #ffffff
-background_text = #eeeeee
-background_text_link = #86989b
diff --git a/docs/source/acknowledgements.rst b/docs/source/acknowledgements.rst
deleted file mode 100644
index 36c1562..0000000
--- a/docs/source/acknowledgements.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-.. _chapter-acknowledgements:
-
-================
-Acknowledgements
-================
-
-A number of people have helped with the development and open sourcing
-of Ceres.
-
-Fredrik Schaffalitzky when he was at Google started the development of
-Ceres, and even though much has changed since then, many of the ideas
-from his original design are still present in the current code.
-
-Amongst Ceres' users at Google two deserve special mention: William
-Rucklidge and James Roseborough. William was the first user of
-Ceres. He bravely took on the task of porting production code to an
-as-yet unproven optimization library, reporting bugs and helping fix
-them along the way. James is perhaps the most sophisticated user of
-Ceres at Google. He has reported and fixed bugs and helped evolve the
-API for the better.
-
-Since the initial release of Ceres, a number of people have
-contributed to Ceres by porting it to new platforms, reporting bugs,
-fixing bugs and adding new functionality. We acknowledge all of these
-contributions in the :ref:`chapter-version-history`.
diff --git a/docs/source/bibliography.rst b/docs/source/bibliography.rst
index 7ba435a..bd99758 100644
--- a/docs/source/bibliography.rst
+++ b/docs/source/bibliography.rst
@@ -48,6 +48,12 @@ Bibliography
preconditioning for bundle adjustment**, *In Proceedings of the
IEEE Conference on Computer Vision and Pattern Recognition*, 2012.
+.. [Kanzow] C. Kanzow, N. Yamashita and M. Fukushima,
+ **Levenberg–Marquardt methods with strong local convergence
+ properties for solving nonlinear equations with convex
+ constraints**, *Journal of Computational and Applied Mathematics*,
+ 177(2):375–397, 2005.
+
.. [Levenberg] K. Levenberg, **A method for the solution of certain
nonlinear problems in least squares**, *Quart. Appl. Math*,
2(2):164–168, 1944.
@@ -113,7 +119,3 @@ Bibliography
Levenberg Marquardt Method for Large Sparse Nonlinear Least
Squares**, *Journal of the Australian Mathematical Society Series
B*, 26(4):387–403, 1985.
-
-
-
-
diff --git a/docs/source/building.rst b/docs/source/building.rst
index c326fd1..4860b0d 100644
--- a/docs/source/building.rst
+++ b/docs/source/building.rst
@@ -1,13 +1,20 @@
.. _chapter-building:
-=====================
-Building Ceres Solver
-=====================
+=======================
+Building & Installation
+=======================
+
+Getting the source code
+=======================
+.. _section-source:
+
+You can start with the `latest stable release
+<http://ceres-solver.org/ceres-solver-1.9.0.tar.gz>`_ . Or if you want
+the latest version, you can clone the git repository
+
+.. code-block:: bash
-Stable Ceres Solver releases are available for download at
-`code.google.com <http://code.google.com/p/ceres-solver/>`_. For the
-more adventurous, the git repository is hosted on `Gerrit
-<https://ceres-solver-review.googlesource.com/>`_.
+ git clone https://ceres-solver.googlesource.com/ceres-solver
.. _section-dependencies:
@@ -18,53 +25,88 @@ Ceres relies on a number of open source libraries, some of which are
optional. For details on customizing the build process, see
:ref:`section-customizing` .
-1. `CMake <http://www.cmake.org>`_ is a cross platform build
-system. Ceres needs a relatively recent version of CMake (version
-2.8.0 or better).
-
-2. `eigen3 <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_ is
-used for doing all the low level matrix and linear algebra operations.
-
-3. `google-glog <http://code.google.com/p/google-glog>`_ is
-used for error checking and logging. Ceres needs glog version 0.3.1 or
-later. Version 0.3 (which ships with Fedora 16) has a namespace bug
-which prevents Ceres from building.
-
-4. `gflags <http://code.google.com/p/gflags>`_ is a library for
-processing command line flags. It is used by some of the examples and
-tests. While it is not strictly necessary to build the library, we
-strongly recommend building the library with gflags.
-
-
-5. `SuiteSparse
-<http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_ is used for
-sparse matrix analysis, ordering and factorization. In particular
-Ceres uses the AMD, CAMD, COLAMD and CHOLMOD libraries. This is an optional
-dependency.
-
-6. `CXSparse <http://www.cise.ufl.edu/research/sparse/CXSparse/>`_ is
-a sparse matrix library similar in scope to ``SuiteSparse`` but with
-no dependencies on ``LAPACK`` and ``BLAS``. This makes for a simpler
-build process and a smaller binary. The simplicity comes at a cost --
-for all but the most trivial matrices, ``SuiteSparse`` is
-significantly faster than ``CXSparse``.
-
-7. `BLAS <http://www.netlib.org/blas/>`_ and `LAPACK
-<http://www.netlib.org/lapack/>`_ routines are needed by
-SuiteSparse. We recommend `ATLAS
-<http://math-atlas.sourceforge.net/>`_, which includes BLAS and LAPACK
-routines. It is also possible to use `OpenBLAS
-<https://github.com/xianyi/OpenBLAS>`_ . However, one needs to be
-careful to `turn off the threading
-<https://github.com/xianyi/OpenBLAS/wiki/faq#wiki-multi-threaded>`_
-inside ``OpenBLAS`` as it conflicts with use of threads in Ceres.
+- `Eigen <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_
+ 3.2.1 or later. **Required**
+
+ .. NOTE ::
+
+ Ceres can also use Eigen as a sparse linear algebra
+ library. Please see the documentation for ``-DEIGENSPARSE`` for
+ more details.
+
+- `CMake <http://www.cmake.org>`_ 2.8.0 or later.
+ **Required on all platforms except for Android.**
+
+- `Google Log <http://code.google.com/p/google-glog>`_ 0.3.1 or
+ later. **Recommended**
+
+ .. NOTE::
+
+ Ceres has a minimal replacement of ``glog`` called ``miniglog``
+ that can be enabled with the ``MINIGLOG`` build
+ option. ``miniglog`` is needed on Android as ``glog`` currently
+ does not build using the NDK. It can however be used on other
+ platforms too.
+
+ **We do not advise using** ``miniglog`` **on platforms other than
+ Android due to the various performance and functionality
+ compromises in** ``miniglog``.
+
+- `Google Flags <http://code.google.com/p/gflags>`_. Needed to build
+ examples and tests.
+
+- `SuiteSparse
+ <http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_. Needed for
+ solving large sparse linear systems. **Optional; strongly recommended
+ for large scale bundle adjustment**
+
+- `CXSparse <http://www.cise.ufl.edu/research/sparse/CXSparse/>`_.
+ Similar to ``SuiteSparse`` but simpler and slower. CXSparse has
+ no dependencies on ``LAPACK`` and ``BLAS``. This makes for a simpler
+ build process and a smaller binary. **Optional**
+
+- `BLAS <http://www.netlib.org/blas/>`_ and `LAPACK
+ <http://www.netlib.org/lapack/>`_ routines are needed by
+ ``SuiteSparse``, and optionally used by Ceres directly for some
+ operations.
+
+ On ``UNIX`` OSes other than Mac OS X we recommend `ATLAS
+ <http://math-atlas.sourceforge.net/>`_, which includes ``BLAS`` and
+ ``LAPACK`` routines. It is also possible to use `OpenBLAS
+ <https://github.com/xianyi/OpenBLAS>`_ . However, one needs to be
+ careful to `turn off the threading
+ <https://github.com/xianyi/OpenBLAS/wiki/faq#wiki-multi-threaded>`_
+ inside ``OpenBLAS`` as it conflicts with use of threads in Ceres.
+
+ MAC OS X ships with an optimized ``LAPACK`` and ``BLAS``
+ implementation as part of the ``Accelerate`` framework. The Ceres
+ build system will automatically detect and use it.
+
+ For Windows things are much more complicated. `LAPACK For
+ Windows <http://icl.cs.utk.edu/lapack-for-windows/lapack/>`_
+ has detailed instructions.
+
+ **Optional but required for** ``SuiteSparse``.
.. _section-linux:
-Building on Linux
-=================
-We will use `Ubuntu <http://www.ubuntu.com>`_ as our example
-platform. Start by installing all the dependencies.
+Linux
+=====
+
+We will use `Ubuntu <http://www.ubuntu.com>`_ as our example Linux
+distribution.
+
+.. NOTE::
+
+ Up to at least Ubuntu 13.10, the SuiteSparse package in the official
+ package repository (built from SuiteSparse v3.4.0) **cannot** be used
+ to build Ceres as a *shared* library. Thus if you want to build
+ Ceres as a shared library using SuiteSparse, you must perform a
+ source install of SuiteSparse. It is recommended that you use the
+ current version of SuiteSparse (4.2.1 at the time of writing).
+
+
+Start by installing all the dependencies.
.. code-block:: bash
@@ -86,19 +128,26 @@ platform. Start by installing all the dependencies.
sudo apt-get install libatlas-base-dev
# Eigen3
sudo apt-get install libeigen3-dev
- # SuiteSparse and CXSparse
+ # SuiteSparse and CXSparse (optional)
+ # - If you want to build Ceres as a *static* library (the default)
+ # you can use the SuiteSparse package in the main Ubuntu package
+ # repository:
sudo apt-get install libsuitesparse-dev
+ # - However, if you want to build Ceres as a *shared* library, you must
+ # perform a source install of SuiteSparse (and uninstall the Ubuntu
+      #   package if it is currently installed).
-We are now ready to build and test Ceres.
+We are now ready to build, test, and install Ceres.
.. code-block:: bash
- tar zxf ceres-solver-1.7.0.tar.gz
+ tar zxf ceres-solver-1.9.0.tar.gz
mkdir ceres-bin
cd ceres-bin
- cmake ../ceres-solver-1.7.0
+ cmake ../ceres-solver-1.9.0
make -j3
make test
+ make install
You can also try running the command line bundling application with one of the
included problems, which comes from the University of Washington's BAL
@@ -106,7 +155,7 @@ dataset [Agarwal]_.
.. code-block:: bash
- bin/simple_bundle_adjuster ../ceres-solver-1.7.0/data/problem-16-22106-pre.txt
+ bin/simple_bundle_adjuster ../ceres-solver-1.9.0/data/problem-16-22106-pre.txt
This runs Ceres for a maximum of 10 iterations using the
``DENSE_SCHUR`` linear solver. The output should look something like
@@ -114,63 +163,87 @@ this.
.. code-block:: bash
- 0: f: 4.185660e+06 d: 0.00e+00 g: 1.09e+08 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 1.16e-01 tt: 3.39e-01
- 1: f: 1.062590e+05 d: 4.08e+06 g: 8.99e+06 h: 5.36e+02 rho: 9.82e-01 mu: 3.00e+04 li: 1 it: 3.90e-01 tt: 7.29e-01
- 2: f: 4.992817e+04 d: 5.63e+04 g: 8.32e+06 h: 3.19e+02 rho: 6.52e-01 mu: 3.09e+04 li: 1 it: 3.52e-01 tt: 1.08e+00
- 3: f: 1.899774e+04 d: 3.09e+04 g: 1.60e+06 h: 1.24e+02 rho: 9.77e-01 mu: 9.26e+04 li: 1 it: 3.60e-01 tt: 1.44e+00
- 4: f: 1.808729e+04 d: 9.10e+02 g: 3.97e+05 h: 6.39e+01 rho: 9.51e-01 mu: 2.78e+05 li: 1 it: 3.62e-01 tt: 1.80e+00
- 5: f: 1.803399e+04 d: 5.33e+01 g: 1.48e+04 h: 1.23e+01 rho: 9.99e-01 mu: 8.33e+05 li: 1 it: 3.54e-01 tt: 2.16e+00
- 6: f: 1.803390e+04 d: 9.02e-02 g: 6.35e+01 h: 8.00e-01 rho: 1.00e+00 mu: 2.50e+06 li: 1 it: 3.59e-01 tt: 2.52e+00
-
- Ceres Solver Report
- -------------------
- Original Reduced
- Parameter blocks 22122 22122
- Parameters 66462 66462
- Residual blocks 83718 83718
- Residual 167436 167436
- Trust Region Strategy LEVENBERG_MARQUARDT
-
- Given Used
- Linear solver DENSE_SCHUR DENSE_SCHUR
- Preconditioner N/A N/A
- Threads: 1 1
- Linear solver threads 1 1
- Linear solver ordering AUTOMATIC 22106,16
-
- Cost:
- Initial 4.185660e+06
- Final 1.803390e+04
- Change 4.167626e+06
-
- Number of iterations:
- Successful 6
- Unsuccessful 0
- Total 6
-
- Time (in seconds):
- Preprocessor 2.229e-01
-
- Evaluator::Residuals 7.438e-02
- Evaluator::Jacobians 6.790e-01
- Linear Solver 1.681e+00
- Minimizer 2.547e+00
-
- Postprocessor 1.920e-02
- Total 2.823e+00
-
- Termination: FUNCTION_TOLERANCE
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 4.185660e+06 0.00e+00 1.09e+08 0.00e+00 0.00e+00 1.00e+04 0 7.59e-02 3.37e-01
+ 1 1.062590e+05 4.08e+06 8.99e+06 5.36e+02 9.82e-01 3.00e+04 1 1.65e-01 5.03e-01
+ 2 4.992817e+04 5.63e+04 8.32e+06 3.19e+02 6.52e-01 3.09e+04 1 1.45e-01 6.48e-01
+ 3 1.899774e+04 3.09e+04 1.60e+06 1.24e+02 9.77e-01 9.26e+04 1 1.43e-01 7.92e-01
+ 4 1.808729e+04 9.10e+02 3.97e+05 6.39e+01 9.51e-01 2.78e+05 1 1.45e-01 9.36e-01
+ 5 1.803399e+04 5.33e+01 1.48e+04 1.23e+01 9.99e-01 8.33e+05 1 1.45e-01 1.08e+00
+ 6 1.803390e+04 9.02e-02 6.35e+01 8.00e-01 1.00e+00 2.50e+06 1 1.50e-01 1.23e+00
+
+ Ceres Solver v1.10.0 Solve Report
+ ----------------------------------
+ Original Reduced
+ Parameter blocks 22122 22122
+ Parameters 66462 66462
+ Residual blocks 83718 83718
+ Residual 167436 167436
+
+ Minimizer TRUST_REGION
+
+ Dense linear algebra library EIGEN
+ Trust region strategy LEVENBERG_MARQUARDT
+
+ Given Used
+ Linear solver DENSE_SCHUR DENSE_SCHUR
+ Threads 1 1
+ Linear solver threads 1 1
+ Linear solver ordering AUTOMATIC 22106, 16
+
+ Cost:
+ Initial 4.185660e+06
+ Final 1.803390e+04
+ Change 4.167626e+06
+
+ Minimizer iterations 6
+ Successful steps 6
+ Unsuccessful steps 0
+
+ Time (in seconds):
+ Preprocessor 0.261
+
+ Residual evaluation 0.082
+ Jacobian evaluation 0.412
+ Linear solver 0.442
+ Minimizer 1.051
+
+ Postprocessor 0.002
+ Total 1.357
+
+ Termination: CONVERGENCE (Function tolerance reached. |cost_change|/cost: 1.769766e-09 <= 1.000000e-06)
.. section-osx:
-Building on Mac OS X
-====================
+Mac OS X
+========
+.. NOTE::
+
+ Ceres will not compile using Xcode 4.5.x (Clang version 4.1) due to a bug in that version of
+ Clang. If you are running Xcode 4.5.x, please update to Xcode >= 4.6.x before attempting to
+ build Ceres.
+
On OS X, we recommend using the `homebrew
-<http://mxcl.github.com/homebrew/>`_ package manager to install the
-dependencies. There is no need to install ``BLAS`` or ``LAPACK``
-separately as OS X ships with optimized ``BLAS`` and ``LAPACK``
-routines as part of the `vecLib
+<http://mxcl.github.com/homebrew/>`_ package manager to install Ceres.
+
+.. code-block:: bash
+
+ brew install ceres-solver
+
+will install the latest stable version along with all the required
+dependencies and
+
+.. code-block:: bash
+
+ brew install ceres-solver --HEAD
+
+will install the latest version in the git repo.
+
+You can also install each of the dependencies by hand using `homebrew
+<http://mxcl.github.com/homebrew/>`_. There is no need to install
+``BLAS`` or ``LAPACK`` separately as OS X ships with optimized
+``BLAS`` and ``LAPACK`` routines as part of the `vecLib
<https://developer.apple.com/library/mac/#documentation/Performance/Conceptual/vecLib/Reference/reference.html>`_
framework.
@@ -185,32 +258,51 @@ framework.
# SuiteSparse and CXSparse
brew install suite-sparse
-
-We are now ready to build and test Ceres.
+We are now ready to build, test, and install Ceres.
.. code-block:: bash
- tar zxf ceres-solver-1.7.0.tar.gz
+ tar zxf ceres-solver-1.9.0.tar.gz
mkdir ceres-bin
cd ceres-bin
- cmake ../ceres-solver-1.7.0
+ cmake ../ceres-solver-1.9.0
make -j3
make test
-
+ make install
Like the Linux build, you should now be able to run
``bin/simple_bundle_adjuster``.
.. _section-windows:
-Building on Windows with Visual Studio
-======================================
+Windows
+=======
On Windows, we support building with Visual Studio 2010 or newer. Note
that the Windows port is less featureful and less tested than the
-Linux or Mac OS X versions due to the unavailability of SuiteSparse
-and ``CXSparse``. Building is also more involved since there is no
-automated way to install the dependencies.
+Linux or Mac OS X versions due to the lack of an officially supported
+way of building SuiteSparse and CXSparse. There are however a number
+of unofficial ways of building these libraries. Building on Windows
+is also a bit more involved since there is no automated way to install
+dependencies.
+
+.. NOTE:: Using ``google-glog`` & ``miniglog`` with windows.h.
+
+ The windows.h header if used with GDI (Graphics Device Interface)
+ defines ``ERROR``, which conflicts with the definition of ``ERROR``
+ as a LogSeverity level in ``google-glog`` and ``miniglog``. There
+ are at least two possible fixes to this problem:
+
+ #. Use ``google-glog`` and define ``GLOG_NO_ABBREVIATED_SEVERITIES``
+ when building Ceres and your own project, as documented
+ `here <http://google-glog.googlecode.com/svn/trunk/doc/glog.html>`__.
+ Note that this fix will not work for ``miniglog``,
+ but use of ``miniglog`` is strongly discouraged on any platform for which
+ ``google-glog`` is available (which includes Windows).
+ #. If you do not require GDI, then define ``NOGDI`` **before** including
+ windows.h. This solution should work for both ``google-glog`` and
+ ``miniglog`` and is documented for ``google-glog``
+ `here <https://code.google.com/p/google-glog/issues/detail?id=33>`__.
#. Make a toplevel directory for deps & build & src somewhere: ``ceres/``
#. Get dependencies; unpack them as subdirectories in ``ceres/``
@@ -222,6 +314,18 @@ automated way to install the dependencies.
#. ``google-glog`` Open up the Visual Studio solution and build it.
#. ``gflags`` Open up the Visual Studio solution and build it.
+ #. (Experimental) ``SuiteSparse`` Previously SuiteSparse was not available
+ on Windows, recently it has become possible to build it on Windows using
+ the `suitesparse-metis-for-windows <https://github.com/jlblancoc/suitesparse-metis-for-windows>`_
+ project. If you wish to use ``SuiteSparse``, follow their instructions
+ for obtaining and building it.
+
+ #. (Experimental) ``CXSparse`` Previously CXSparse was not available on
+ Windows, there are now several ports that enable it to be, including:
+ `[1] <https://github.com/PetterS/CXSparse>`_ and
+ `[2] <https://github.com/TheFrenchLeaf/CXSparse>`_. If you wish to use
+ ``CXSparse``, follow their instructions for obtaining and building it.
+
#. Unpack the Ceres tarball into ``ceres``. For the tarball, you
should get a directory inside ``ceres`` similar to
``ceres-solver-1.3.0``. Alternately, checkout Ceres via ``git`` to
@@ -238,12 +342,22 @@ automated way to install the dependencies.
#. Try running ``Configure``. It won't work. It'll show a bunch of options.
You'll need to set:
- #. ``GLOG_INCLUDE``
- #. ``GLOG_LIB``
- #. ``GFLAGS_LIB``
- #. ``GFLAGS_INCLUDE``
-
- to the appropriate place where you unpacked/built them.
+ #. ``EIGEN_INCLUDE_DIR_HINTS``
+ #. ``GLOG_INCLUDE_DIR_HINTS``
+ #. ``GLOG_LIBRARY_DIR_HINTS``
+ #. ``GFLAGS_INCLUDE_DIR_HINTS``
+ #. ``GFLAGS_LIBRARY_DIR_HINTS``
+ #. (Optional) ``SUITESPARSE_INCLUDE_DIR_HINTS``
+ #. (Optional) ``SUITESPARSE_LIBRARY_DIR_HINTS``
+ #. (Optional) ``CXSPARSE_INCLUDE_DIR_HINTS``
+ #. (Optional) ``CXSPARSE_LIBRARY_DIR_HINTS``
+
+ to the appropriate directories where you unpacked/built them. If any of
+ the variables are not visible in the ``CMake`` GUI, create a new entry
+ for them. We recommend using the ``<NAME>_(INCLUDE/LIBRARY)_DIR_HINTS``
+ variables rather than setting the ``<NAME>_INCLUDE_DIR`` &
+ ``<NAME>_LIBRARY`` variables directly to keep all of the validity
+ checking, and to avoid having to specify the library files manually.
#. You may have to tweak some more settings to generate a MSVC
project. After each adjustment, try pressing Configure & Generate
@@ -255,30 +369,66 @@ automated way to install the dependencies.
To run the tests, select the ``RUN_TESTS`` target and hit **Build
RUN_TESTS** from the build menu.
-Like the Linux build, you should now be able to run ``bin/simple_bundle_adjuster``.
+Like the Linux build, you should now be able to run
+``bin/simple_bundle_adjuster``.
Notes:
#. The default build is Debug; consider switching it to release mode.
#. Currently ``system_test`` is not working properly.
-#. Building Ceres as a DLL is not supported; patches welcome.
#. CMake puts the resulting test binaries in ``ceres-bin/examples/Debug``
by default.
#. The solvers supported on Windows are ``DENSE_QR``, ``DENSE_SCHUR``,
``CGNR``, and ``ITERATIVE_SCHUR``.
#. We're looking for someone to work with upstream ``SuiteSparse`` to
port their build system to something sane like ``CMake``, and get a
- supported Windows port.
+ fully supported Windows port.
.. _section-android:
-Building on Android
-===================
+Android
+=======
+Download the ``Android NDK`` version ``r9d`` or later. Run
+``ndk-build`` from inside the ``jni`` directory. Use the
+``libceres.a`` that gets created.
-Download the ``Android NDK``. Run ``ndk-build`` from inside the
-``jni`` directory. Use the ``libceres.a`` that gets created.
+.. _section-ios:
+
+iOS
+===
+
+.. NOTE::
+
+ You need iOS version 6.0 or higher to build Ceres Solver.
+
+To build Ceres for iOS, we need to force ``CMake`` to find the toolchains from
+the iOS SDK instead of using the standard ones. For example:
+
+.. code-block:: bash
+
+ cmake ../ceres-solver \
+ -DCMAKE_TOOLCHAIN_FILE=../ceres-solver/cmake/iOS.cmake \
+ -DEIGEN_INCLUDE_DIR=/path/to/eigen/header \
+ -DIOS_PLATFORM=<PLATFORM>
+
+``PLATFORM`` can be one of ``OS``, ``SIMULATOR`` and ``SIMULATOR64``. You can
+build for ``OS`` (``armv7``, ``armv7s``, ``arm64``), ``SIMULATOR`` (``i386``) or
+``SIMULATOR64`` (``x86_64``) separately and use ``LIPO`` to merge them into
+one static library. See ``cmake/iOS.cmake`` for more options.
+
+After building, you will get a ``libceres.a`` library, which you will need to
+add to your Xcode project.
+
+The default CMake configuration builds a bare bones version of Ceres
+Solver that only depends on Eigen (``MINIGLOG`` is compiled into Ceres if it is
+used), this should be sufficient for solving small to moderate sized problems
+(No ``SPARSE_SCHUR``, ``SPARSE_NORMAL_CHOLESKY`` linear solvers and no
+``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL`` preconditioners).
+
+If you decide to use ``LAPACK`` and ``BLAS``, then you also need to add
+``Accelerate.framework`` to your XCode project's linking dependency.
.. _section-customizing:
@@ -286,42 +436,147 @@ Customizing the build
=====================
It is possible to reduce the libraries needed to build Ceres and
-customize the build process by passing appropriate flags to
-``CMake``. Use these flags only if you really know what you are doing.
-
-#. ``-DSUITESPARSE=OFF``: By default, Ceres will link to
- ``SuiteSparse`` if all its dependencies are present. Use this flag
- to build Ceres without ``SuiteSparse``. This will also disable
- dependency checking for ``LAPACK`` and ``BLAS``. This will reduce
- Ceres' dependencies down to ``Eigen``, ``gflags`` and
- ``google-glog``.
-
-#. ``-DCXSPARSE=OFF``: By default, Ceres will link to ``CXSparse`` if
- all its dependencies are present. Use this flag to builds Ceres
- without ``CXSparse``. This will reduce Ceres' dependencies down to
- ``Eigen``, ``gflags`` and ``google-glog``.
-
-#. ``-DGFLAGS=OFF``: Use this flag to build Ceres without
+customize the build process by setting the appropriate options in
+``CMake``. These options can either be set in the ``CMake`` GUI,
+or via ``-D<OPTION>=<ON/OFF>`` when running ``CMake`` from the
+command line. In general, you should only modify these options from
+their defaults if you know what you are doing.
+
+.. NOTE::
+
+ If you are setting variables via ``-D<VARIABLE>=<VALUE>`` when calling
+ ``CMake``, it is important to understand that this forcibly **overwrites** the
+ variable ``<VARIABLE>`` in the ``CMake`` cache at the start of *every configure*.
+
+ This can lead to confusion if you are invoking the ``CMake``
+ `curses <http://www.gnu.org/software/ncurses/ncurses.html>`_ terminal GUI
+  (via ``ccmake``, e.g. ``ccmake -D<VARIABLE>=<VALUE> <PATH_TO_SRC>``).
+ In this case, even if you change the value of ``<VARIABLE>`` in the ``CMake``
+ GUI, your changes will be **overwritten** with the value passed via
+ ``-D<VARIABLE>=<VALUE>`` (if one exists) at the start of each configure.
+
+ As such, it is generally easier not to pass values to ``CMake`` via ``-D``
+ and instead interactively experiment with their values in the ``CMake`` GUI.
+ If they are not present in the *Standard View*, toggle to the *Advanced View*
+ with ``<t>``.
+
+Options controlling Ceres configuration
+---------------------------------------
+
+#. ``LAPACK [Default: ON]``: By default Ceres will use ``LAPACK`` (&
+ ``BLAS``) if they are found. Turn this ``OFF`` to build Ceres
+ without ``LAPACK``. Turning this ``OFF`` also disables
+ ``SUITESPARSE`` as it depends on ``LAPACK``.
+
+#. ``SUITESPARSE [Default: ON]``: By default, Ceres will link to
+ ``SuiteSparse`` if it and all of its dependencies are present. Turn
+ this ``OFF`` to build Ceres without ``SuiteSparse``. Note that
+ ``LAPACK`` must be ``ON`` in order to build with ``SuiteSparse``.
+
+#. ``CXSPARSE [Default: ON]``: By default, Ceres will link to
+ ``CXSparse`` if all its dependencies are present. Turn this ``OFF``
+ to build Ceres without ``CXSparse``.
+
+#. ``EIGENSPARSE [Default: OFF]``: By default, Ceres will not use
+  Eigen's sparse Cholesky factorization. This is because this part of
+ the code is licensed under the ``LGPL`` and since ``Eigen`` is a
+ header only library, including this code will result in an ``LGPL``
+ licensed version of Ceres.
+
+#. ``GFLAGS [Default: ON]``: Turn this ``OFF`` to build Ceres without
``gflags``. This will also prevent some of the example code from
building.
-#. ``-DSCHUR_SPECIALIZATIONS=OFF``: If you are concerned about binary
- size/compilation time over some small (10-20%) performance gains in
- the ``SPARSE_SCHUR`` solver, you can disable some of the template
- specializations by using this flag.
-
-#. ``-DLINE_SEARCH_MINIMIZER=OFF``: The line search based minimizer is
- mostly suitable for large scale optimization problems, or when sparse
- linear algebra libraries are not available. You can further save on
- some compile time and binary size by using this flag.
-
-#. ``-DOPENMP=OFF``: On certain platforms like Android,
- multi-threading with ``OpenMP`` is not supported. Use this flag to
- disable multithreading.
-
-#. ``-DBUILD_DOCUMENTATION=ON``: Use this flag to enable building the
- documentation. In addition, ``make ceres_docs`` can be used to
- build only the documentation.
+#. ``MINIGLOG [Default: OFF]``: Ceres includes a stripped-down,
+ minimal implementation of ``glog`` which can optionally be used as
+ a substitute for ``glog``, thus removing ``glog`` as a required
+ dependency. Turn this ``ON`` to use this minimal ``glog``
+ implementation.
+
+#. ``SCHUR_SPECIALIZATIONS [Default: ON]``: If you are concerned about
+ binary size/compilation time over some small (10-20%) performance
+ gains in the ``SPARSE_SCHUR`` solver, you can disable some of the
+ template specializations by turning this ``OFF``.
+
+#. ``OPENMP [Default: ON]``: On certain platforms like Android,
+ multi-threading with ``OpenMP`` is not supported. Turn this ``OFF``
+ to disable multithreading.
+
+#. ``BUILD_SHARED_LIBS [Default: OFF]``: By default Ceres is built as
+ a static library, turn this ``ON`` to instead build Ceres as a
+ shared library.
+
+#. ``BUILD_DOCUMENTATION [Default: OFF]``: Use this to enable building
+ the documentation, requires `Sphinx <http://sphinx-doc.org/>`_ and the
+ `sphinx_rtd_theme <https://pypi.python.org/pypi/sphinx_rtd_theme>`_
+ package available from the Python package index. In addition,
+ ``make ceres_docs`` can be used to build only the documentation.
+
+#. ``MSVC_USE_STATIC_CRT [Default: OFF]`` *Windows Only*: By default
+  Ceres will use the Visual Studio default, *shared* C Run-Time (CRT) library.
+  Turn this ``ON`` to use the *static* C Run-Time library instead.
+
+
+Options controlling Ceres dependency locations
+----------------------------------------------
+
+Ceres uses the ``CMake``
+`find_package <http://www.cmake.org/cmake/help/v2.8.12/cmake.html#command:find_package>`_
+function to find all of its dependencies using
+``Find<DEPENDENCY_NAME>.cmake`` scripts which are either included in Ceres
+(for most dependencies) or are shipped as standard with ``CMake``
+(for ``LAPACK`` & ``BLAS``). These scripts will search all of the "standard"
+install locations for various OSs for each dependency. However, particularly
+for Windows, they may fail to find the library, in this case you will have to
+manually specify its installed location. The ``Find<DEPENDENCY_NAME>.cmake``
+scripts shipped with Ceres support two ways for you to do this:
+
+#. Set the *hints* variables specifying the *directories* to search in
+ preference, but in addition, to the search directories in the
+ ``Find<DEPENDENCY_NAME>.cmake`` script:
+
+ - ``<DEPENDENCY_NAME (CAPS)>_INCLUDE_DIR_HINTS``
+ - ``<DEPENDENCY_NAME (CAPS)>_LIBRARY_DIR_HINTS``
+
+ These variables should be set via ``-D<VAR>=<VALUE>``
+ ``CMake`` arguments as they are not visible in the GUI.
+
+#. Set the variables specifying the *explicit* include directory
+ and library file to use:
+
+ - ``<DEPENDENCY_NAME (CAPS)>_INCLUDE_DIR``
+ - ``<DEPENDENCY_NAME (CAPS)>_LIBRARY``
+
+ This bypasses *all* searching in the
+ ``Find<DEPENDENCY_NAME>.cmake`` script, but validation is still
+ performed.
+
+ These variables are available to set in the ``CMake`` GUI. They
+ are visible in the *Standard View* if the library has not been
+ found (but the current Ceres configuration requires it), but
+ are always visible in the *Advanced View*. They can also be
+ set directly via ``-D<VAR>=<VALUE>`` arguments to ``CMake``.
+
+Building using custom BLAS & LAPACK installs
+----------------------------------------------
+
+If the standard find package scripts for ``BLAS`` & ``LAPACK`` which ship with
+``CMake`` fail to find the desired libraries on your system, try setting
+``CMAKE_LIBRARY_PATH`` to the path(s) to the directories containing the
+``BLAS`` & ``LAPACK`` libraries when invoking ``CMake`` to build Ceres via
+``-D<VAR>=<VALUE>``. This should result in the libraries being found for any
+common variant of each.
+
+If you are building on an exotic system, or setting ``CMAKE_LIBRARY_PATH``
+does not work, or is not appropriate for some other reason, one option would be
+to write your own custom versions of ``FindBLAS.cmake`` &
+``FindLAPACK.cmake`` specific to your environment. In this case you must set
+``CMAKE_MODULE_PATH`` to the directory containing these custom scripts when
+invoking ``CMake`` to build Ceres and they will be used in preference to the
+default versions. However, in order for this to work, your scripts must provide
+the full set of variables provided by the default scripts. Also, if you are
+building Ceres with ``SuiteSparse``, the versions of ``BLAS`` & ``LAPACK``
+used by ``SuiteSparse`` and Ceres should be the same.
.. _section-using-ceres:
@@ -343,7 +598,7 @@ the following CMakeList.txt can be used:
PROJECT(helloworld)
FIND_PACKAGE(Ceres REQUIRED)
- INCLUDE_DIRECTORIES(${CERES_INCLUDES})
+ INCLUDE_DIRECTORIES(${CERES_INCLUDE_DIRS})
# helloworld
ADD_EXECUTABLE(helloworld helloworld.cc)
@@ -374,19 +629,5 @@ the **PATHS** option to the ``FIND_PACKAGE()`` command. e.g.,
FIND_PACKAGE(Ceres REQUIRED PATHS "/some/where/local/")
-Note that this can be used to have multiple versions of Ceres installed.
-
-Compiling against static or shared library
-------------------------------------------
-
-.. code-block:: cmake
-
- TARGET_LINK_LIBRARIES(helloworld ${CERES_LIBRARIES})
-
-will result in a statically linked binary. Changing this line to
-
-.. code-block:: cmake
-
- TARGET_LINK_LIBRARIES(helloworld ${CERES_LIBRARIES_SHARED})
-
-will result in a dynamically linked binary.
+Note that this can be used to have multiple versions of Ceres
+installed.
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f5ffb6d..478682f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -41,16 +41,16 @@ master_doc = 'index'
# General information about the project.
project = u'Ceres Solver'
-copyright = u'2013, Google Inc.'
+copyright = u'2014 Google Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = '1.7'
+version = '1.9'
# The full version, including alpha/beta/rc tags.
-release = '1.7.0'
+release = '1.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -91,7 +91,7 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'armstrong'
+html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -100,6 +100,8 @@ html_theme = 'armstrong'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes",]
+import sphinx_rtd_theme
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
@@ -120,7 +122,7 @@ html_title = "Ceres Solver"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
index 20fe34d..b169dbf 100644
--- a/docs/source/contributing.rst
+++ b/docs/source/contributing.rst
@@ -1,9 +1,8 @@
.. _chapter-contributing:
-=============
-Contributions
-=============
-
+============
+Contributing
+============
We welcome contributions to Ceres, whether they are new features, bug
fixes or tests. The Ceres `mailing
@@ -27,8 +26,8 @@ no merges.
We now describe how to set up your development environment and submit
a change list for review via Gerrit.
-Setting up your Development Environment
-=======================================
+Setting up your Environment
+===========================
1. Download and configure ``git``.
@@ -98,13 +97,16 @@ Setting up your Development Environment
name.
-Submitting a change to Ceres Solver
-===================================
+Submitting a change
+===================
1. Make your changes against master or whatever branch you
like. Commit your changes as one patch. When you commit, the Gerrit
hook will add a `Change-Id:` line as the last line of the commit.
+ Make sure that your commit message is formatted in the `50/72 style
+ <http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html>`_.
+
2. Push your changes to the Ceres Gerrit instance:
.. code-block:: bash
@@ -112,8 +114,8 @@ Submitting a change to Ceres Solver
git push origin HEAD:refs/for/master
When the push succeeds, the console will display a URL showing the
- address of the review. Go to the URL and add reviewers; typically
- this is Sameer or Keir at this point.
+   address of the review. Go to the URL and add at least one of the
+ maintainers (Sameer Agarwal, Keir Mierle, or Alex Stewart) as reviewers.
3. Wait for a review.
diff --git a/docs/source/faqs.rst b/docs/source/faqs.rst
new file mode 100644
index 0000000..73ad41d
--- /dev/null
+++ b/docs/source/faqs.rst
@@ -0,0 +1,282 @@
+.. _chapter-tricks:
+
+===================
+FAQS, Tips & Tricks
+===================
+
+Answers to frequently asked questions, tricks of the trade and general
+wisdom.
+
+Building
+========
+
+#. Use `google-glog <http://code.google.com/p/google-glog>`_.
+
+ Ceres has extensive support for logging detailed information about
+ memory allocations and time consumed in various parts of the solve,
+   internal error conditions etc. This is done using the
+ `google-glog <http://code.google.com/p/google-glog>`_ library. We
+ use it extensively to observe and analyze Ceres's
+ performance. `google-glog <http://code.google.com/p/google-glog>`_
+ allows you to control its behaviour from the command line `flags
+ <http://google-glog.googlecode.com/svn/trunk/doc/glog.html>`_. Starting
+   with ``-logtostderr`` you can add ``-v=N`` for increasing values
+ of ``N`` to get more and more verbose and detailed information
+ about Ceres internals.
+
+ In an attempt to reduce dependencies, it is tempting to use
+ `miniglog` - a minimal implementation of the ``glog`` interface
+ that ships with Ceres. This is a bad idea. ``miniglog`` was written
+ primarily for building and using Ceres on Android because the
+ current version of `google-glog
+ <http://code.google.com/p/google-glog>`_ does not build using the
+ NDK. It has worse performance than the full fledged glog library
+ and is much harder to control and use.
+
+
+Modeling
+========
+
+#. Use analytical/automatic derivatives.
+
+ This is the single most important piece of advice we can give to
+ you. It is tempting to take the easy way out and use numeric
+ differentiation. This is a bad idea. Numeric differentiation is
+ slow, ill-behaved, hard to get right, and results in poor
+ convergence behaviour.
+
+ Ceres allows the user to define templated functors which will
+ be automatically differentiated. For most situations this is enough
+ and we recommend using this facility. In some cases the derivatives
+ are simple enough or the performance considerations are such that
+ the overhead of automatic differentiation is too much. In such
+ cases, analytic derivatives are recommended.
+
+ The use of numerical derivatives should be a measure of last
+ resort, where it is simply not possible to write a templated
+ implementation of the cost function.
+
+ In many cases it is not possible to do analytic or automatic
+ differentiation of the entire cost function, but it is generally
+ the case that it is possible to decompose the cost function into
+ parts that need to be numerically differentiated and parts that can
+ be automatically or analytically differentiated.
+
+ To this end, Ceres has extensive support for mixing analytic,
+ automatic and numeric differentiation. See
+ :class:`NumericDiffFunctor` and :class:`CostFunctionToFunctor`.
+
+#. Putting `Inverse Function Theorem
+ <http://en.wikipedia.org/wiki/Inverse_function_theorem>`_ to use.
+
+ Every now and then we have to deal with functions which cannot be
+ evaluated analytically. Computing the Jacobian in such cases is
+ tricky. A particularly interesting case is where the inverse of the
+ function is easy to compute analytically. An example of such a
+ function is the Coordinate transformation between the `ECEF
+ <http://en.wikipedia.org/wiki/ECEF>`_ and the `WGS84
+ <http://en.wikipedia.org/wiki/World_Geodetic_System>`_ where the
+   conversion from WGS84 to ECEF is analytic, but the conversion
+   back to WGS84 uses an iterative algorithm. So how do you compute the
+ derivative of the ECEF to WGS84 transformation?
+
+ One obvious approach would be to numerically
+ differentiate the conversion function. This is not a good idea. For
+ one, it will be slow, but it will also be numerically quite
+ bad.
+
+ Turns out you can use the `Inverse Function Theorem
+ <http://en.wikipedia.org/wiki/Inverse_function_theorem>`_ in this
+ case to compute the derivatives more or less analytically.
+
+   The key result here is: if :math:`x = f^{-1}(y)` and :math:`Df(x)`
+   is the invertible Jacobian of :math:`f` at :math:`x`, then the
+   Jacobian :math:`Df^{-1}(y) = [Df(x)]^{-1}`, i.e., the Jacobian of
+   :math:`f^{-1}` is the inverse of the Jacobian of :math:`f`.
+
+ Algorithmically this means that given :math:`y`, compute :math:`x =
+ f^{-1}(y)` by whatever means you can. Evaluate the Jacobian of
+ :math:`f` at :math:`x`. If the Jacobian matrix is invertible, then
+ the inverse is the Jacobian of the inverse at :math:`y`.
+
+ One can put this into practice with the following code fragment.
+
+ .. code-block:: c++
+
+ Eigen::Vector3d ecef; // Fill some values
+ // Iterative computation.
+ Eigen::Vector3d lla = ECEFToLLA(ecef);
+ // Analytic derivatives
+ Eigen::Matrix3d lla_to_ecef_jacobian = LLAToECEFJacobian(lla);
+ bool invertible;
+ Eigen::Matrix3d ecef_to_lla_jacobian;
+ lla_to_ecef_jacobian.computeInverseWithCheck(ecef_to_lla_jacobian, invertible);
+
+#. When using Quaternions, use :class:`QuaternionParameterization`.
+
+ TBD
+
+#. How to choose a parameter block size?
+
+ TBD
+
+Solving
+=======
+
+#. Choosing a linear solver.
+
+ When using the ``TRUST_REGION`` minimizer, the choice of linear
+ solver is an important decision. It affects solution quality and
+ runtime. Here is a simple way to reason about it.
+
+ 1. For small (a few hundred parameters) or dense problems use
+ ``DENSE_QR``.
+
+ 2. For general sparse problems (i.e., the Jacobian matrix has a
+ substantial number of zeros) use
+ ``SPARSE_NORMAL_CHOLESKY``. This requires that you have
+ ``SuiteSparse`` or ``CXSparse`` installed.
+
+ 3. For bundle adjustment problems with up to a hundred or so
+ cameras, use ``DENSE_SCHUR``.
+
+ 4. For larger bundle adjustment problems with sparse Schur
+ Complement/Reduced camera matrices use ``SPARSE_SCHUR``. This
+ requires that you have ``SuiteSparse`` or ``CXSparse``
+ installed.
+
+ 5. For large bundle adjustment problems (a few thousand cameras or
+ more) use the ``ITERATIVE_SCHUR`` solver. There are a number of
+ preconditioner choices here. ``SCHUR_JACOBI`` offers an
+ excellent balance of speed and accuracy. This is also the
+ recommended option if you are solving medium sized problems for
+ which ``DENSE_SCHUR`` is too slow but ``SuiteSparse`` is not
+ available.
+
+ If you are not satisfied with ``SCHUR_JACOBI``'s performance try
+ ``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL`` in that
+ order. They require that you have ``SuiteSparse``
+ installed. Both of these preconditioners use a clustering
+ algorithm. Use ``SINGLE_LINKAGE`` before ``CANONICAL_VIEWS``.
+
+#. Use `Solver::Summary::FullReport` to diagnose performance problems.
+
+ When diagnosing Ceres performance issues - runtime and convergence,
+ the first place to start is by looking at the output of
+ ``Solver::Summary::FullReport``. Here is an example
+
+ .. code-block:: bash
+
+ ./bin/bundle_adjuster --input ../data/problem-16-22106-pre.txt
+
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 4.185660e+06 0.00e+00 2.16e+07 0.00e+00 0.00e+00 1.00e+04 0 7.50e-02 3.58e-01
+ 1 1.980525e+05 3.99e+06 5.34e+06 2.40e+03 9.60e-01 3.00e+04 1 1.84e-01 5.42e-01
+ 2 5.086543e+04 1.47e+05 2.11e+06 1.01e+03 8.22e-01 4.09e+04 1 1.53e-01 6.95e-01
+ 3 1.859667e+04 3.23e+04 2.87e+05 2.64e+02 9.85e-01 1.23e+05 1 1.71e-01 8.66e-01
+ 4 1.803857e+04 5.58e+02 2.69e+04 8.66e+01 9.93e-01 3.69e+05 1 1.61e-01 1.03e+00
+ 5 1.803391e+04 4.66e+00 3.11e+02 1.02e+01 1.00e+00 1.11e+06 1 1.49e-01 1.18e+00
+
+ Ceres Solver v1.10.0 Solve Report
+ ----------------------------------
+ Original Reduced
+ Parameter blocks 22122 22122
+ Parameters 66462 66462
+ Residual blocks 83718 83718
+ Residual 167436 167436
+
+ Minimizer TRUST_REGION
+
+ Sparse linear algebra library SUITE_SPARSE
+ Trust region strategy LEVENBERG_MARQUARDT
+
+ Given Used
+ Linear solver SPARSE_SCHUR SPARSE_SCHUR
+ Threads 1 1
+ Linear solver threads 1 1
+ Linear solver ordering AUTOMATIC 22106, 16
+
+ Cost:
+ Initial 4.185660e+06
+ Final 1.803391e+04
+ Change 4.167626e+06
+
+ Minimizer iterations 5
+ Successful steps 5
+ Unsuccessful steps 0
+
+ Time (in seconds):
+ Preprocessor 0.283
+
+ Residual evaluation 0.061
+ Jacobian evaluation 0.361
+ Linear solver 0.382
+ Minimizer 0.895
+
+ Postprocessor 0.002
+ Total 1.220
+
+ Termination: NO_CONVERGENCE (Maximum number of iterations reached.)
+
+ Let us focus on run-time performance. The relevant lines to look at
+ are
+
+
+ .. code-block:: bash
+
+ Time (in seconds):
+ Preprocessor 0.283
+
+ Residual evaluation 0.061
+ Jacobian evaluation 0.361
+ Linear solver 0.382
+ Minimizer 0.895
+
+ Postprocessor 0.002
+ Total 1.220
+
+
+   This tells us that of the total 1.2 seconds, about 0.4 seconds was
+   spent in the linear solver and the rest was mostly spent in
+   preprocessing and Jacobian evaluation.
+
+ The preprocessing seems particularly expensive. Looking back at the
+ report, we observe
+
+ .. code-block:: bash
+
+ Linear solver ordering AUTOMATIC 22106, 16
+
+   This indicates that we are using automatic ordering for the
+ ``SPARSE_SCHUR`` solver. This can be expensive at times. A straight
+ forward way to deal with this is to give the ordering manually. For
+ ``bundle_adjuster`` this can be done by passing the flag
+ ``-ordering=user``. Doing so and looking at the timing block of the
+ full report gives us
+
+ .. code-block:: bash
+
+ Time (in seconds):
+ Preprocessor 0.051
+
+ Residual evaluation 0.053
+ Jacobian evaluation 0.344
+ Linear solver 0.372
+ Minimizer 0.854
+
+ Postprocessor 0.002
+ Total 0.935
+
+
+
+   The preprocessor time has gone down by more than 5.5x!
+
+Further Reading
+===============
+
+For a short but informative introduction to the subject we recommend
+the booklet by [Madsen]_ . For a general introduction to non-linear
+optimization we recommend [NocedalWright]_. [Bjorck]_ remains the
+seminal reference on least squares problems. [TrefethenBau]_ book is
+our favorite text on introductory numerical linear algebra. [Triggs]_
+provides a thorough coverage of the bundle adjustment problem.
diff --git a/docs/source/features.rst b/docs/source/features.rst
new file mode 100644
index 0000000..50f22e7
--- /dev/null
+++ b/docs/source/features.rst
@@ -0,0 +1,92 @@
+========
+Features
+========
+.. _chapter-features:
+
+* **Code Quality** - Ceres Solver has been used in production at
+ Google for more than three years now. It is used to solve a wide
+ variety of problems, both in size and complexity. The code runs on
+ Google's data centers, desktops and on cellphones. It is clean,
+ extensively tested and well documented code that is actively
+ developed and supported.
+
+* **Modeling API** - It is rarely the case that one starts with the
+ exact and complete formulation of the problem that one is trying to
+ solve. Ceres's modeling API has been designed so that the user can
+ easily build and modify the objective function, one term at a
+ time. And to do so without worrying about how the solver is going to
+ deal with the resulting changes in the sparsity/structure of the
+ underlying problem. Indeed we take great care to separate the
+ modeling of the optimization problem from solving it. The two can be
+ done more or less completely independently of each other.
+
+ - **Derivatives** Supplying derivatives is perhaps the most tedious
+ and error prone part of using an optimization library. Ceres
+ ships with `automatic`_ and `numeric`_ differentiation. So you
+ never have to compute derivatives by hand (unless you really want
+ to). Not only this, Ceres allows you to mix automatic, numeric and
+ analytical derivatives in any combination that you want.
+
+ - **Robust Loss Functions** Most non-linear least squares problems
+ involve data. If there is data, there will be outliers. Ceres
+ allows the user to *shape* their residuals using robust loss
+ functions to reduce the influence of outliers.
+
+ - **Local Parameterization** In many cases, some parameters lie on a
+ manifold other than Euclidean space, e.g., rotation matrices. In
+ such cases, the user can specify the geometry of the local tangent
+ space by specifying a LocalParameterization object.
+
+* **Solver Choice** Depending on the size, sparsity structure, time &
+  memory budgets, and solution quality requirements, different
+ optimization algorithms will suit different needs. To this end,
+ Ceres Solver comes with a variety of optimization algorithms, some
+ of them the result of the author's own research.
+
+ - **Trust Region Solvers** - Ceres supports Levenberg-Marquardt,
+ Powell's Dogleg, and Subspace dogleg methods. The key
+ computational cost in all of these methods is the solution of a
+ linear system. To this end Ceres ships with a variety of linear
+ solvers - dense QR and dense Cholesky factorization (using
+ `Eigen`_ or `LAPACK`_) for dense problems, sparse Cholesky
+ factorization (`SuiteSparse`_ or `CXSparse`_) for large sparse
+    problems, and custom Schur complement based dense, sparse, and
+ iterative linear solvers for `bundle adjustment`_ problems.
+
+ - **Line Search Solvers** - When the problem size is so large that
+ storing and factoring the Jacobian is not feasible or a low
+ accuracy solution is required cheaply, Ceres offers a number of
+ line search based algorithms. This includes a number of variants
+ of Non-linear Conjugate Gradients, BFGS and LBFGS.
+
+* **Speed** - Ceres code has been extensively optimized, with C++
+ templating, hand written linear algebra routines and OpenMP based
+ multithreading of the Jacobian evaluation and the linear solvers.
+
+* **Solution Quality** Ceres is the best performing solver on the NIST
+ problem set used by Mondragon and Borchers for benchmarking
+ non-linear least squares solvers.
+
+* **Covariance estimation** - Evaluate the sensitivity/uncertainty of
+ the solution by evaluating all or part of the covariance
+  matrix. Ceres is one of the few solvers that allows you to do
+ this analysis at scale.
+
+* **Community** Since its release as an open source software, Ceres
+ has developed an active developer community that contributes new
+ features, bug fixes and support.
+
+* **Portability** - Runs on *Linux*, *Windows*, *Mac OS X*, *Android*
+  and *iOS*.
+
+* **BSD Licensed** The BSD license offers the flexibility to ship your
+ application
+
+.. _solution quality: https://groups.google.com/forum/#!topic/ceres-solver/UcicgMPgbXw
+.. _bundle adjustment: http://en.wikipedia.org/wiki/Bundle_adjustment
+.. _SuiteSparse: http://www.cise.ufl.edu/research/sparse/SuiteSparse/
+.. _Eigen: http://eigen.tuxfamily.org/
+.. _LAPACK: http://www.netlib.org/lapack/
+.. _CXSparse: https://www.cise.ufl.edu/research/sparse/CXSparse/
+.. _automatic: http://en.wikipedia.org/wiki/Automatic_differentiation
+.. _numeric: http://en.wikipedia.org/wiki/Numerical_differentiation
diff --git a/docs/source/history.rst b/docs/source/history.rst
new file mode 100644
index 0000000..b159284
--- /dev/null
+++ b/docs/source/history.rst
@@ -0,0 +1,27 @@
+.. _chapter-history:
+
+=======
+History
+=======
+
+Ceres Solver grew out of the need for general least squares solving at
+Google. In early 2010, Sameer Agarwal and Fredrik Schaffalitzky
+started the development of Ceres Solver. Fredrik left Google shortly
+thereafter and Keir Mierle stepped in to take his place. After two
+years of on-and-off development, Ceres Solver was released as open
+source in May of 2012.
+
+Origin of the name
+------------------
+
+While there is some debate as to who invented the method of Least
+Squares [Stigler]_, there is no debate that it was `Carl Friedrich
+Gauss
+<http://www-groups.dcs.st-and.ac.uk/~history/Biographies/Gauss.html>`_
+who brought it to the attention of the world. Using just 22
+observations of the newly discovered asteroid `Ceres
+<http://en.wikipedia.org/wiki/Ceres_(dwarf_planet)>`_, Gauss used the
+method of least squares to correctly predict when and where the
+asteroid will emerge from behind the Sun [TenenbaumDirector]_. We
+named our solver after Ceres to celebrate this seminal event in the
+history of astronomy, statistics and optimization.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index f20dad4..26b318a 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -7,45 +7,75 @@
Ceres Solver
============
-Ceres Solver is a portable C++ library for solving non-linear least
-squares problems.
+.. toctree::
+ :maxdepth: 3
+ :hidden:
+
+ features
+ building
+ tutorial
+ modeling
+ solving
+ faqs
+ contributing
+ version_history
+ history
+ bibliography
+ license
+
+Ceres Solver is an open source C++ library for modeling and solving
+large complicated `nonlinear least squares`_ problems. It is a feature
+rich, mature and performant library which has been used in production
+since 2010. At Google, Ceres Solver is used to:
+
+* Estimate the pose of `Street View`_ cars, aircraft, and satellites.
+* Build 3D models for `PhotoTours`_.
+* Estimate satellite image sensor characteristics.
+* Stitch `panoramas`_ or apply `Lens Blur`_ on Android.
+* Solve `bundle adjustment`_ and SLAM problems in `Project Tango`_.
+
+Outside Google, Ceres is used for solving problems in computer vision,
+computer graphics, astronomy and physics. e.g., `Willow Garage`_ uses
+it to solve SLAM problems and `Blender`_ uses it for planar
+tracking and bundle adjustment.
+
+.. _nonlinear least squares: http://en.wikipedia.org/wiki/Non-linear_least_squares
+.. _fitting curves: http://en.wikipedia.org/wiki/Nonlinear_regression
+.. _bundle adjustment: http://en.wikipedia.org/wiki/Structure_from_motion
+.. _Street View: http://youtu.be/z00ORu4bU-A
+.. _PhotoTours: http://google-latlong.blogspot.com/2012/04/visit-global-landmarks-with-photo-tours.html
+.. _panoramas: http://www.google.com/maps/about/contribute/photosphere/
+.. _Project Tango: https://www.google.com/atap/projecttango/
+.. _Blender: http://mango.blender.org/development/planar-tracking-preview/
+.. _Willow Garage: https://www.willowgarage.com/blog/2013/08/09/enabling-robots-see-better-through-improved-camera-calibration
+.. _Lens Blur: http://googleresearch.blogspot.com/2014/04/lens-blur-in-new-google-camera-app.html
-* Download the latest stable `release
- <https://ceres-solver.googlecode.com/files/ceres-solver-1.6.0.tar.gz>`_
- or clone the `repository
- <https://ceres-solver.googlesource.com/ceres-solver>`_
+Getting started
+---------------
-* Read the :ref:`chapter-tutorial`
+* Download the `latest stable release
+ <http://ceres-solver.org/ceres-solver-1.9.0.tar.gz>`_ or clone the
+ Git repository for the latest development version.
-* Browse the :ref:`chapter-modeling` API and :ref:`chapter-solving` API.
+ .. code-block:: bash
+ git clone https://ceres-solver.googlesource.com/ceres-solver
+
+* Read the :ref:`chapter-tutorial`, browse the chapters on the
+ :ref:`chapter-modeling` API and the :ref:`chapter-solving` API.
* Join the `mailing list
<https://groups.google.com/forum/?fromgroups#!forum/ceres-solver>`_
and ask questions.
-
* File bugs, feature requests in the `issue tracker
<https://code.google.com/p/ceres-solver/issues/list>`_.
-* If you use Ceres Solver for a publication, you must cite it as::
+
+Cite Us
+-------
+If you use Ceres Solver for a publication, please cite it as::
@misc{ceres-solver,
author = "Sameer Agarwal and Keir Mierle and Others",
title = "Ceres Solver",
- howpublished = "\url{https://code.google.com/p/ceres-solver/}",
+ howpublished = "\url{http://ceres-solver.org}",
}
-
-.. toctree::
- :maxdepth: 1
- :hidden:
-
- introduction
- building
- tutorial
- modeling
- solving
- reading
- contributing
- acknowledgements
- version_history
- bibliography
- license
diff --git a/docs/source/introduction.rst b/docs/source/introduction.rst
deleted file mode 100644
index 19a6f2e..0000000
--- a/docs/source/introduction.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. _chapter-introduction:
-
-============
-Introduction
-============
-
-Solving nonlinear least squares problems [#f1]_ comes up in a broad
-range of areas across science and engineering - from fitting curves in
-statistics, to constructing 3D models from photographs in computer
-vision. Ceres Solver [#f2]_ [#f3]_ is a portable C++ library for
-solving non-linear least squares problems accurately and efficiently.
-
-**Features**
-
-#. A friendly :ref:`chapter-modeling` API.
-
-#. Automatic and numeric differentiation.
-
-#. Robust loss functions and local parameterizations.
-
-#. Multithreading.
-
-#. Trust-Region (Levenberg-Marquardt and Dogleg) and Line Search
- (Nonlinear CG and L-BFGS) solvers.
-
-#. Variety of linear solvers.
-
- a. Dense QR and Cholesky factorization (using `Eigen
- <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_) for
- small problems.
-
- b. Sparse Cholesky factorization (using `SuiteSparse
- <http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_ and
- `CXSparse <http://www.cise.ufl.edu/research/sparse/CSparse/>`_) for
- large sparse problems.
-
- c. Specialized solvers for bundle adjustment problems in computer
- vision.
-
- d. Iterative linear solvers with preconditioners for general sparse
- and bundle adjustment problems.
-
-#. Portable: Runs on Linux, Windows, Mac OS X and Android.
-
-
-At Google, Ceres Solver has been used for solving a variety of
-problems in computer vision and machine learning. e.g., it is used to
-to estimate the pose of Street View cars, aircrafts, and satellites;
-to build 3D models for PhotoTours; to estimate satellite image sensor
-characteristics, and more.
-
-`Blender <http://www.blender.org>`_ uses Ceres for `motion tracking
-<http://mango.blender.org/development/planar-tracking-preview/>`_ and
-`bundle adjustment
-<http://wiki.blender.org/index.php/Dev:Ref/Release_Notes/2.67/Motion_Tracker>`_.
-
-
-.. rubric:: Footnotes
-
-.. [#f1] For a gentle but brief introduction to non-linear least
- squares problems, please start by reading the
- :ref:`chapter-tutorial`.
-
-.. [#f2] While there is some debate as to who invented the method of
- Least Squares [Stigler]_, there is no debate that it was
- `Carl Friedrich Gauss
- <http://en.wikipedia.org/wiki/Carl_Friedrich_Gauss>`_ who
- brought it to the attention of the world. Using just 22
- observations of the newly discovered asteroid `Ceres
- <http://en.wikipedia.org/wiki/Ceres_(dwarf_planet)>`_, Gauss
- used the method of least squares to correctly predict when
- and where the asteroid will emerge from behind the Sun
- [TenenbaumDirector]_. We named our solver after Ceres to
- celebrate this seminal event in the history of astronomy,
- statistics and optimization.
-
-.. [#f3] For brevity, in the rest of this document we will just use
- the term Ceres.
-
-
-
diff --git a/docs/source/license.rst b/docs/source/license.rst
index 58d70df..cfa1d79 100644
--- a/docs/source/license.rst
+++ b/docs/source/license.rst
@@ -4,7 +4,7 @@ License
Ceres Solver is licensed under the New BSD license, whose terms are as follows.
-Copyright (c) 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+Copyright (c) 2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/docs/source/modeling.rst b/docs/source/modeling.rst
index 8e6de12..5bbd441 100644
--- a/docs/source/modeling.rst
+++ b/docs/source/modeling.rst
@@ -8,41 +8,64 @@
Modeling
========
-Recall that Ceres solves robustified non-linear least squares problems
-of the form
+Ceres solver consists of two distinct parts. A modeling API which
+provides a rich set of tools to construct an optimization problem one
+term at a time and a solver API that controls the minimization
+algorithm. This chapter is devoted to the task of modeling
+optimization problems using Ceres. :ref:`chapter-solving` discusses
+the various ways in which an optimization problem can be solved using
+Ceres.
-.. math:: \frac{1}{2}\sum_{i=1} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right).
- :label: ceresproblem
+Ceres solves robustified bounds constrained non-linear least squares
+problems of the form:
-The expression
+.. math:: :label: ceresproblem
+
+ \min_{\mathbf{x}} &\quad \frac{1}{2}\sum_{i}
+ \rho_i\left(\left\|f_i\left(x_{i_1},
+ ... ,x_{i_k}\right)\right\|^2\right) \\
+ \text{s.t.} &\quad l_j \le x_j \le u_j
+
+In Ceres parlance, the expression
:math:`\rho_i\left(\left\|f_i\left(x_{i_1},...,x_{i_k}\right)\right\|^2\right)`
-is known as a ``ResidualBlock``, where :math:`f_i(\cdot)` is a
-:class:`CostFunction` that depends on the parameter blocks
-:math:`\left[x_{i_1},... , x_{i_k}\right]`. In most optimization
-problems small groups of scalars occur together. For example the three
-components of a translation vector and the four components of the
-quaternion that define the pose of a camera. We refer to such a group
-of small scalars as a ``ParameterBlock``. Of course a
-``ParameterBlock`` can just be a single parameter. :math:`\rho_i` is a
-:class:`LossFunction`. A :class:`LossFunction` is a scalar function
-that is used to reduce the influence of outliers on the solution of
-non-linear least squares problems.
-
-In this chapter we will describe the various classes that are part of
-Ceres Solver's modeling API, and how they can be used to construct an
-optimization problem. Once a problem has been constructed, various
-methods for solving them will be discussed in
-:ref:`chapter-solving`. It is by design that the modeling and the
-solving APIs are orthogonal to each other. This enables
-switching/tweaking of various solver parameters without having to
-touch the problem once it has been successfully modeled.
+is known as a **residual block**, where :math:`f_i(\cdot)` is a
+:class:`CostFunction` that depends on the **parameter blocks**
+:math:`\left\{x_{i_1},... , x_{i_k}\right\}`.
+
+In most optimization problems small groups of scalars occur
+together. For example the three components of a translation vector and
+the four components of the quaternion that define the pose of a
+camera. We refer to such a group of scalars as a **parameter block**. Of
+course a parameter block can be just a single scalar too.
+
+:math:`\rho_i` is a :class:`LossFunction`. A :class:`LossFunction` is
+a scalar valued function that is used to reduce the influence of
+outliers on the solution of non-linear least squares problems.
+
+:math:`l_j` and :math:`u_j` are lower and upper bounds on the
+parameter block :math:`x_j`.
+
+As a special case, when :math:`\rho_i(x) = x`, i.e., the identity
+function, and :math:`l_j = -\infty` and :math:`u_j = \infty` we get
+the more familiar unconstrained `non-linear least squares problem
+<http://en.wikipedia.org/wiki/Non-linear_least_squares>`_.
+
+.. math:: :label: ceresproblemunconstrained
+
+ \frac{1}{2}\sum_{i} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
:class:`CostFunction`
---------------------
-The single biggest task when modeling a problem is specifying the
-residuals and their derivatives. This is done using
-:class:`CostFunction` objects.
+For each term in the objective function, a :class:`CostFunction` is
+responsible for computing a vector of residuals and if asked a vector
+of Jacobian matrices, i.e., given :math:`\left[x_{i_1}, ... ,
+x_{i_k}\right]`, compute the vector
+:math:`f_i\left(x_{i_1},...,x_{i_k}\right)` and the matrices
+
+ .. math:: J_{ij} = \frac{\partial}{\partial
+ x_{i_j}}f_i\left(x_{i_1},...,x_{i_k}\right),\quad \forall j
+ \in \{1, \ldots, k\}
.. class:: CostFunction
@@ -53,30 +76,22 @@ residuals and their derivatives. This is done using
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) = 0;
- const vector<int16>& parameter_block_sizes();
+ const vector<int32>& parameter_block_sizes();
int num_residuals() const;
protected:
- vector<int16>* mutable_parameter_block_sizes();
+ vector<int32>* mutable_parameter_block_sizes();
void set_num_residuals(int num_residuals);
};
- Given parameter blocks :math:`\left[x_{i_1}, ... , x_{i_k}\right]`,
- a :class:`CostFunction` is responsible for computing a vector of
- residuals and if asked a vector of Jacobian matrices, i.e., given
- :math:`\left[x_{i_1}, ... , x_{i_k}\right]`, compute the vector
- :math:`f_i\left(x_{i_1},...,x_{i_k}\right)` and the matrices
- .. math:: J_{ij} = \frac{\partial}{\partial x_{i_j}}f_i\left(x_{i_1},...,x_{i_k}\right),\quad \forall j \in \{i_1,..., i_k\}
-
- The signature of the :class:`CostFunction` (number and sizes of
- input parameter blocks and number of outputs) is stored in
- :member:`CostFunction::parameter_block_sizes_` and
- :member:`CostFunction::num_residuals_` respectively. User code
- inheriting from this class is expected to set these two members
- with the corresponding accessors. This information will be verified
- by the :class:`Problem` when added with
- :func:`Problem::AddResidualBlock`.
+The signature of the :class:`CostFunction` (number and sizes of input
+parameter blocks and number of outputs) is stored in
+:member:`CostFunction::parameter_block_sizes_` and
+:member:`CostFunction::num_residuals_` respectively. User code
+inheriting from this class is expected to set these two members with
+the corresponding accessors. This information will be verified by the
+:class:`Problem` when added with :func:`Problem::AddResidualBlock`.
.. function:: bool CostFunction::Evaluate(double const* const* parameters, double* residuals, double** jacobians)
@@ -114,19 +129,6 @@ residuals and their derivatives. This is done using
This can be used to communicate numerical failures in Jacobian
computations for instance.
- A more interesting and common use is to impose constraints on the
- parameters. If the initial values of the parameter blocks satisfy
- the constraints, then returning false whenever the constraints are
- not satisfied will prevent the solver from moving into the
- infeasible region. This is not a very sophisticated mechanism for
- enforcing constraints, but is often good enough for things like
- non-negativity constraints.
-
- Note that it is important that the initial values of the parameter
- block must be feasible, otherwise the solver will declare a
- numerical problem at iteration 0.
-
-
:class:`SizedCostFunction`
--------------------------
@@ -164,7 +166,7 @@ residuals and their derivatives. This is done using
.. code-block:: c++
template <typename CostFunctor,
- int M, // Number of residuals, or ceres::DYNAMIC.
+ int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
int N0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
int N2 = 0, // Number of parameters in block 2.
@@ -176,8 +178,13 @@ residuals and their derivatives. This is done using
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
class AutoDiffCostFunction : public
- SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
- };
+ SizedCostFunction<kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
+ public:
+ explicit AutoDiffCostFunction(CostFunctor* functor);
+ // Ignore the template parameter kNumResiduals and use
+ // num_residuals instead.
+ AutoDiffCostFunction(CostFunctor* functor, int num_residuals);
+ }
To get an auto differentiated cost function, you must define a
class with a templated ``operator()`` (a functor) that computes the
@@ -189,9 +196,6 @@ residuals and their derivatives. This is done using
The function must write the computed value in the last argument
(the only non-``const`` one) and return true to indicate success.
- Please see :class:`CostFunction` for details on how the return
- value may be used to impose simple constraints on the parameter
- block.
For example, consider a scalar error :math:`e = k - x^\top y`,
where both :math:`x` and :math:`y` are two-dimensional vector
@@ -254,6 +258,22 @@ residuals and their derivatives. This is done using
computing a 1-dimensional output from two arguments, both
2-dimensional.
+ :class:`AutoDiffCostFunction` also supports cost functions with a
+ runtime-determined number of residuals. For example:
+
+ .. code-block:: c++
+
+ CostFunction* cost_function
+ = new AutoDiffCostFunction<MyScalarCostFunctor, DYNAMIC, 2, 2>(
+ new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^
+ runtime_number_of_residuals); <----+ | | |
+ | | | |
+ | | | |
+ Actual number of residuals ------+ | | |
+ Indicate dynamic number of residuals --------+ | |
+ Dimension of x ------------------------------------+ |
+ Dimension of y ---------------------------------------+
+
The framework can currently accommodate cost functions of up to 10
independent variables, and there is no limit on the dimensionality
of each of them.
@@ -279,10 +299,10 @@ residuals and their derivatives. This is done using
.. class:: DynamicAutoDiffCostFunction
:class:`AutoDiffCostFunction` requires that the number of parameter
- blocks and their sizes be known at compile time, e.g., Bezier curve
- fitting, Neural Network training etc. It also has an upper limit of
- 10 parameter blocks. In a number of applications, this is not
- enough.
+ blocks and their sizes be known at compile time. It also has an
+ upper limit of 10 parameter blocks. In a number of applications,
+ this is not enough e.g., Bezier curve fitting, Neural Network
+ training etc.
.. code-block:: c++
@@ -342,12 +362,21 @@ residuals and their derivatives. This is done using
.. code-block:: c++
- template <typename CostFunctionNoJacobian,
- NumericDiffMethod method = CENTRAL, int M = 0,
- int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
- int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
- class NumericDiffCostFunction
- : public SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
+ template <typename CostFunctor,
+ NumericDiffMethod method = CENTRAL,
+ int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
+ int N0, // Number of parameters in block 0.
+ int N1 = 0, // Number of parameters in block 1.
+ int N2 = 0, // Number of parameters in block 2.
+ int N3 = 0, // Number of parameters in block 3.
+ int N4 = 0, // Number of parameters in block 4.
+ int N5 = 0, // Number of parameters in block 5.
+ int N6 = 0, // Number of parameters in block 6.
+ int N7 = 0, // Number of parameters in block 7.
+ int N8 = 0, // Number of parameters in block 8.
+ int N9 = 0> // Number of parameters in block 9.
+ class NumericDiffCostFunction : public
+ SizedCostFunction<kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
};
To get a numerically differentiated :class:`CostFunction`, you must
@@ -426,6 +455,24 @@ residuals and their derivatives. This is done using
computing a 1-dimensional output from two arguments, both
2-dimensional.
+ NumericDiffCostFunction also supports cost functions with a
+ runtime-determined number of residuals. For example:
+
+ .. code-block:: c++
+
+ CostFunction* cost_function
+ = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
+ new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^
+ TAKE_OWNERSHIP, | | |
+ runtime_number_of_residuals); <----+ | | |
+ | | | |
+ | | | |
+ Actual number of residuals ------+ | | |
+ Indicate dynamic number of residuals --------------------+ | |
+ Dimension of x ------------------------------------------------+ |
+ Dimension of y ---------------------------------------------------+
+
+
The framework can currently accommodate cost functions of up to 10
independent variables, and there is no limit on the dimensionality
of each of them.
@@ -475,6 +522,52 @@ residuals and their derivatives. This is done using
sizes 4 and 8 respectively. Look at the tests for a more detailed
example.
+:class:`DynamicNumericDiffCostFunction`
+---------------------------------------
+
+.. class:: DynamicNumericDiffCostFunction
+
+  Like :class:`AutoDiffCostFunction`, :class:`NumericDiffCostFunction`
+ requires that the number of parameter blocks and their sizes be
+ known at compile time. It also has an upper limit of 10 parameter
+ blocks. In a number of applications, this is not enough.
+
+ .. code-block:: c++
+
+ template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
+ class DynamicNumericDiffCostFunction : public CostFunction {
+ };
+
+ In such cases when numeric differentiation is desired,
+ :class:`DynamicNumericDiffCostFunction` can be used.
+
+ Like :class:`NumericDiffCostFunction` the user must define a
+ functor, but the signature of the functor differs slightly. The
+ expected interface for the cost functors is:
+
+ .. code-block:: c++
+
+ struct MyCostFunctor {
+ bool operator()(double const* const* parameters, double* residuals) const {
+ }
+  };
+
+ Since the sizing of the parameters is done at runtime, you must
+ also specify the sizes after creating the dynamic numeric diff cost
+ function. For example:
+
+ .. code-block:: c++
+
+ DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+ new MyCostFunctor());
+ cost_function.AddParameterBlock(5);
+ cost_function.AddParameterBlock(10);
+ cost_function.SetNumResiduals(21);
+
+ As a rule of thumb, try using :class:`NumericDiffCostFunction` before
+ you use :class:`DynamicNumericDiffCostFunction`.
+
+
:class:`NumericDiffFunctor`
---------------------------
@@ -713,6 +806,8 @@ residuals and their derivatives. This is done using
+.. _`section-loss_function`:
+
:class:`LossFunction`
---------------------
@@ -1080,8 +1175,8 @@ Instances
For example, Quaternions have a three dimensional local
parameterization. It's plus operation can be implemented as (taken
- from `internal/ceres/auto_diff_local_parameterization_test.cc
- <https://ceres-solver.googlesource.com/ceres-solver/+/master/include/ceres/local_parameterization.h>`_
+ from `internal/ceres/autodiff_local_parameterization_test.cc
+ <https://ceres-solver.googlesource.com/ceres-solver/+/master/internal/ceres/autodiff_local_parameterization_test.cc>`_
)
.. code-block:: c++
@@ -1139,10 +1234,10 @@ Instances
.. class:: Problem
- :class:`Problem` holds the robustified non-linear least squares
- problem :eq:`ceresproblem`. To create a least squares problem, use
- the :func:`Problem::AddResidualBlock` and
- :func:`Problem::AddParameterBlock` methods.
+ :class:`Problem` holds the robustified bounds constrained
+ non-linear least squares problem :eq:`ceresproblem`. To create a
+ least squares problem, use the :func:`Problem::AddResidualBlock`
+ and :func:`Problem::AddParameterBlock` methods.
For example a problem containing 3 parameter blocks of sizes 3, 4
and 5 respectively and two residual blocks of size 2 and 6:
@@ -1274,7 +1369,10 @@ Instances
Remove a residual block from the problem. Any parameters that the residual
block depends on are not removed. The cost and loss functions for the
residual block will not get deleted immediately; won't happen until the
- problem itself is deleted.
+ problem itself is deleted. If Problem::Options::enable_fast_removal is
+ true, then the removal is fast (almost constant time). Otherwise, removing a
+ residual block will incur a scan of the entire Problem object to verify that
+ the residual_block represents a valid residual in the problem.
**WARNING:** Removing a residual or parameter block will destroy
the implicit ordering, rendering the jacobian or residuals returned
@@ -1289,7 +1387,7 @@ Instances
of the problem (similar to cost/loss functions in residual block
removal). Any residual blocks that depend on the parameter are also
removed, as described above in RemoveResidualBlock(). If
- Problem::Options::enable_fast_parameter_block_removal is true, then
+ Problem::Options::enable_fast_removal is true, then
the removal is fast (almost constant time). Otherwise, removing a
parameter block will incur a scan of the entire Problem object.
@@ -1315,6 +1413,24 @@ Instances
parameterizations only once. The local parameterization can only be
set once per parameter, and cannot be changed once set.
+.. function:: LocalParameterization* Problem::GetParameterization(double* values) const
+
+ Get the local parameterization object associated with this
+  parameter block. If there is no parameterization object associated
+  with it, then `NULL` is returned.
+
+.. function:: void Problem::SetParameterLowerBound(double* values, int index, double lower_bound)
+
+ Set the lower bound for the parameter at position `index` in the
+ parameter block corresponding to `values`. By default the lower
+ bound is :math:`-\infty`.
+
+.. function:: void Problem::SetParameterUpperBound(double* values, int index, double upper_bound)
+
+ Set the upper bound for the parameter at position `index` in the
+ parameter block corresponding to `values`. By default the value is
+ :math:`\infty`.
+
.. function:: int Problem::NumParameterBlocks() const
Number of parameter blocks in the problem. Always equals
@@ -1335,22 +1451,47 @@ Instances
The size of the residual vector obtained by summing over the sizes
of all of the residual blocks.
-.. function int Problem::ParameterBlockSize(const double* values) const;
+.. function:: int Problem::ParameterBlockSize(const double* values) const
The size of the parameter block.
-.. function int Problem::ParameterBlockLocalSize(const double* values) const;
+.. function:: int Problem::ParameterBlockLocalSize(const double* values) const
+
+ The size of local parameterization for the parameter block. If
+ there is no local parameterization associated with this parameter
+ block, then ``ParameterBlockLocalSize`` = ``ParameterBlockSize``.
- The size of local parameterization for the parameter block. If
- there is no local parameterization associated with this parameter
- block, then ``ParameterBlockLocalSize`` = ``ParameterBlockSize``.
+.. function:: bool Problem::HasParameterBlock(const double* values) const
+ Is the given parameter block present in the problem or not?
-.. function void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const;
+.. function:: void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const
+
+ Fills the passed ``parameter_blocks`` vector with pointers to the
+ parameter blocks currently in the problem. After this call,
+ ``parameter_block.size() == NumParameterBlocks``.
+
+.. function:: void Problem::GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const
+
+ Fills the passed `residual_blocks` vector with pointers to the
+ residual blocks currently in the problem. After this call,
+ `residual_blocks.size() == NumResidualBlocks`.
+
+.. function:: void Problem::GetParameterBlocksForResidualBlock(const ResidualBlockId residual_block, vector<double*>* parameter_blocks) const
+
+ Get all the parameter blocks that depend on the given residual
+ block.
+
+.. function:: void Problem::GetResidualBlocksForParameterBlock(const double* values, vector<ResidualBlockId>* residual_blocks) const
+
+ Get all the residual blocks that depend on the given parameter
+ block.
- Fills the passed ``parameter_blocks`` vector with pointers to the
- parameter blocks currently in the problem. After this call,
- ``parameter_block.size() == NumParameterBlocks``.
+ If `Problem::Options::enable_fast_removal` is
+ `true`, then getting the residual blocks is fast and depends only
+ on the number of residual blocks. Otherwise, getting the residual
+ blocks for a parameter block will incur a scan of the entire
+ :class:`Problem` object.
.. function:: bool Problem::Evaluate(const Problem::EvaluateOptions& options, double* cost, vector<double>* residuals, vector<double>* gradient, CRSMatrix* jacobian)
diff --git a/docs/source/reading.rst b/docs/source/reading.rst
deleted file mode 100644
index 4e27567..0000000
--- a/docs/source/reading.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-===============
-Further Reading
-===============
-
-For a short but informative introduction to the subject we recommend
-the booklet by [Madsen]_ . For a general introduction to non-linear
-optimization we recommend [NocedalWright]_. [Bjorck]_ remains the
-seminal reference on least squares problems. [TrefethenBau]_ book is
-our favorite text on introductory numerical linear algebra. [Triggs]_
-provides a thorough coverage of the bundle adjustment problem.
diff --git a/docs/source/solving.rst b/docs/source/solving.rst
index f17c695..5f3711a 100644
--- a/docs/source/solving.rst
+++ b/docs/source/solving.rst
@@ -9,7 +9,6 @@
Solving
=======
-
Introduction
============
@@ -24,16 +23,22 @@ variables, and
:math:`m`-dimensional function of :math:`x`. We are interested in
solving the following optimization problem [#f1]_ .
-.. math:: \arg \min_x \frac{1}{2}\|F(x)\|^2\ .
+.. math:: \arg \min_x \frac{1}{2}\|F(x)\|^2\ . \\
+ L \le x \le U
:label: nonlinsq
-Here, the Jacobian :math:`J(x)` of :math:`F(x)` is an :math:`m\times
-n` matrix, where :math:`J_{ij}(x) = \partial_j f_i(x)` and the
-gradient vector :math:`g(x) = \nabla \frac{1}{2}\|F(x)\|^2 = J(x)^\top
-F(x)`. Since the efficient global minimization of :eq:`nonlinsq` for
+Where, :math:`L` and :math:`U` are lower and upper bounds on the
+parameter vector :math:`x`.
+
+Since the efficient global minimization of :eq:`nonlinsq` for
general :math:`F(x)` is an intractable problem, we will have to settle
for finding a local minimum.
+In the following, the Jacobian :math:`J(x)` of :math:`F(x)` is an
+:math:`m\times n` matrix, where :math:`J_{ij}(x) = \partial_j f_i(x)`
+and the gradient vector is :math:`g(x) = \nabla \frac{1}{2}\|F(x)\|^2
+= J(x)^\top F(x)`.
+
The general strategy when solving non-linear optimization problems is
to solve a sequence of approximations to the original problem
[NocedalWright]_. At each iteration, the approximation is solved to
@@ -81,15 +86,20 @@ Trust Region Methods
The basic trust region algorithm looks something like this.
1. Given an initial point :math:`x` and a trust region radius :math:`\mu`.
- 2. :math:`\arg \min_{\Delta x} \frac{1}{2}\|J(x)\Delta
- x + F(x)\|^2` s.t. :math:`\|D(x)\Delta x\|^2 \le \mu`
+ 2. Solve
+
+ .. math::
+ \arg \min_{\Delta x}& \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
+ \text{such that} &\|D(x)\Delta x\|^2 \le \mu\\
+ &L \le x + \Delta x \le U.
+
3. :math:`\rho = \frac{\displaystyle \|F(x + \Delta x)\|^2 -
\|F(x)\|^2}{\displaystyle \|J(x)\Delta x + F(x)\|^2 -
\|F(x)\|^2}`
4. if :math:`\rho > \epsilon` then :math:`x = x + \Delta x`.
5. if :math:`\rho > \eta_1` then :math:`\rho = 2 \rho`
6. else if :math:`\rho < \eta_2` then :math:`\rho = 0.5 * \rho`
- 7. Goto 2.
+ 7. Go to 2.
Here, :math:`\mu` is the trust region radius, :math:`D(x)` is some
matrix used to define a metric on the domain of :math:`F(x)` and
@@ -103,21 +113,27 @@ in the value of :math:`\rho`.
The key computational step in a trust-region algorithm is the solution
of the constrained optimization problem
-.. math:: \arg\min_{\Delta x} \frac{1}{2}\|J(x)\Delta x + F(x)\|^2\quad \text{such that}\quad \|D(x)\Delta x\|^2 \le \mu
+.. math::
+ \arg \min_{\Delta x}& \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
+ \text{such that} &\|D(x)\Delta x\|^2 \le \mu\\
+ &L \le x + \Delta x \le U.
:label: trp
There are a number of different ways of solving this problem, each
giving rise to a different concrete trust-region algorithm. Currently
Ceres, implements two trust-region algorithms - Levenberg-Marquardt
-and Dogleg. The user can choose between them by setting
-:member:`Solver::Options::trust_region_strategy_type`.
+and Dogleg, each of which is augmented with a line search if bounds
+constraints are present [Kanzow]_. The user can choose between them by
+setting :member:`Solver::Options::trust_region_strategy_type`.
.. rubric:: Footnotes
-.. [#f1] At the level of the non-linear solver, the block
- structure is not relevant, therefore our discussion here is
- in terms of an optimization problem defined over a state
- vector of size :math:`n`.
+.. [#f1] At the level of the non-linear solver, the block structure is
+ not relevant, therefore our discussion here is in terms of an
+ optimization problem defined over a state vector of size
+ :math:`n`. Similarly the presence of loss functions is also
+ ignored as the problem is internally converted into a pure
+ non-linear least squares problem.
.. _section-levenberg-marquardt:
@@ -291,9 +307,10 @@ In this case, we solve for the trust region step for the full problem,
and then use it as the starting point to further optimize just `a_1`
and `a_2`. For the linear case, this amounts to doing a single linear
least squares solve. For non-linear problems, any method for solving
-the `a_1` and `a_2` optimization problems will do. The only constraint
-on `a_1` and `a_2` (if they are two different parameter block) is that
-they do not co-occur in a residual block.
+the :math:`a_1` and :math:`a_2` optimization problems will do. The
+only constraint on :math:`a_1` and :math:`a_2` (if they are two
+different parameter blocks) is that they do not co-occur in a residual
+block.
This idea can be further generalized, by not just optimizing
:math:`(a_1, a_2)`, but decomposing the graph corresponding to the
@@ -315,7 +332,7 @@ Non-monotonic Steps
-------------------
Note that the basic trust-region algorithm described in
-Algorithm~\ref{alg:trust-region} is a descent algorithm in that they
+:ref:`section-trust-region-methods` is a descent algorithm in that it
only accepts a point if it strictly reduces the value of the objective
function.
@@ -346,10 +363,9 @@ region strategies.
Line Search Methods
===================
-**The implementation of line search algorithms in Ceres Solver is
-fairly new and not very well tested, so for now this part of the
-solver should be considered beta quality. We welcome reports of your
-experiences both good and bad on the mailinglist.**
+The line search method in Ceres Solver cannot handle bounds
+constraints right now, so it can only be used for solving
+unconstrained problems.
Line search algorithms
@@ -362,7 +378,7 @@ Line search algorithms
Here :math:`H(x)` is some approximation to the Hessian of the
objective function, and :math:`g(x)` is the gradient at
:math:`x`. Depending on the choice of :math:`H(x)` we get a variety of
-different search directions -`\Delta x`.
+different search directions :math:`\Delta x`.
Step 4, which is a one dimensional optimization or `Line Search` along
:math:`\Delta x` is what gives this class of methods its name.
@@ -383,7 +399,7 @@ directions, all aimed at large scale problems.
Gradient method to non-linear functions. The generalization can be
performed in a number of different ways, resulting in a variety of
search directions. Ceres Solver currently supports
- ``FLETCHER_REEVES``, ``POLAK_RIBIRERE`` and ``HESTENES_STIEFEL``
+ ``FLETCHER_REEVES``, ``POLAK_RIBIERE`` and ``HESTENES_STIEFEL``
directions.
3. ``BFGS`` A generalization of the Secant method to multiple
@@ -474,7 +490,9 @@ Cholesky factorization of the normal equations. Ceres uses
Cholesky factorization of the normal equations. This leads to
substantial savings in time and memory for large sparse
problems. Ceres uses the sparse Cholesky factorization routines in
-Professor Tim Davis' ``SuiteSparse`` or ``CXSparse`` packages [Chen]_.
+Professor Tim Davis' ``SuiteSparse`` or ``CXSparse`` packages [Chen]_
+or the sparse Cholesky factorization algorithm in ``Eigen`` (which
+incidentally is a port of the algorithm implemented inside ``CXSparse``).
.. _section-schur:
@@ -775,9 +793,14 @@ elimination group [LiSaad]_.
.. class:: Solver::Options
- :class:`Solver::Options` controls the overall behavior of the
- solver. We list the various settings and their default values below.
+ :class:`Solver::Options` controls the overall behavior of the
+ solver. We list the various settings and their default values below.
+.. function:: bool Solver::Options::IsValid(string* error) const
+
+  Validates the values in the options struct and returns true on
+ success. If there is a problem, the method returns false with
+ ``error`` containing a textual description of the cause.
.. member:: MinimizerType Solver::Options::minimizer_type
@@ -807,7 +830,7 @@ elimination group [LiSaad]_.
Default: ``FLETCHER_REEVES``
- Choices are ``FLETCHER_REEVES``, ``POLAK_RIBIRERE`` and
+ Choices are ``FLETCHER_REEVES``, ``POLAK_RIBIERE`` and
``HESTENES_STIEFEL``.
.. member:: int Solver::Options::max_lbfs_rank
@@ -1099,7 +1122,7 @@ elimination group [LiSaad]_.
Solver terminates if
- .. math:: \frac{|\Delta \text{cost}|}{\text{cost} < \text{function_tolerance}}
+ .. math:: \frac{|\Delta \text{cost}|}{\text{cost}} < \text{function_tolerance}
where, :math:`\Delta \text{cost}` is the change in objective
function value (up or down) in the current iteration of
@@ -1111,10 +1134,12 @@ elimination group [LiSaad]_.
Solver terminates if
- .. math:: \frac{\|g(x)\|_\infty}{\|g(x_0)\|_\infty} < \text{gradient_tolerance}
+ .. math:: \|x - \Pi \boxplus(x, -g(x))\|_\infty < \text{gradient_tolerance}
- where :math:`\|\cdot\|_\infty` refers to the max norm, and :math:`x_0` is
- the vector of initial parameter values.
+ where :math:`\|\cdot\|_\infty` refers to the max norm, :math:`\Pi`
+ is projection onto the bounds constraints and :math:`\boxplus` is
+ Plus operation for the overall local parameterization associated
+ with the parameter vector.
.. member:: double Solver::Options::parameter_tolerance
@@ -1133,8 +1158,9 @@ elimination group [LiSaad]_.
Type of linear solver used to compute the solution to the linear
least squares problem in each iteration of the Levenberg-Marquardt
- algorithm. If Ceres is build with ``SuiteSparse`` linked in then
- the default is ``SPARSE_NORMAL_CHOLESKY``, it is ``DENSE_QR``
+ algorithm. If Ceres is built with support for ``SuiteSparse`` or
+ ``CXSparse`` or ``Eigen``'s sparse Cholesky factorization, the
+ default is ``SPARSE_NORMAL_CHOLESKY``, it is ``DENSE_QR``
otherwise.
.. member:: PreconditionerType Solver::Options::preconditioner_type
@@ -1147,6 +1173,28 @@ elimination group [LiSaad]_.
``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL``. See
:ref:`section-preconditioner` for more details.
+.. member:: VisibilityClusteringType Solver::Options::visibility_clustering_type
+
+ Default: ``CANONICAL_VIEWS``
+
+ Type of clustering algorithm to use when constructing a visibility
+ based preconditioner. The original visibility based preconditioning
+ paper and implementation only used the canonical views algorithm.
+
+  This algorithm gives high quality results but can be particularly
+  expensive for large dense graphs, as its worst case complexity is
+  cubic in the size of the graph.
+
+ Another option is to use ``SINGLE_LINKAGE`` which is a simple
+ thresholded single linkage clustering algorithm that only pays
+ attention to tightly coupled blocks in the Schur complement. This
+ is a fast algorithm that works well.
+
+ The optimal choice of the clustering algorithm depends on the
+ sparsity structure of the problem, but generally speaking we
+ recommend that you try ``CANONICAL_VIEWS`` first and if it is too
+ expensive try ``SINGLE_LINKAGE``.
+
.. member:: DenseLinearAlgebraLibrary Solver::Options::dense_linear_algebra_library_type
Default:``EIGEN``
@@ -1167,16 +1215,33 @@ elimination group [LiSaad]_.
Default:``SUITE_SPARSE``
- Ceres supports the use of two sparse linear algebra libraries,
+ Ceres supports the use of three sparse linear algebra libraries,
``SuiteSparse``, which is enabled by setting this parameter to
- ``SUITE_SPARSE`` and ``CXSparse``, which can be selected by setting
- this parameter to ```CX_SPARSE``. ``SuiteSparse`` is a
- sophisticated and complex sparse linear algebra library and should
- be used in general. If your needs/platforms prevent you from using
- ``SuiteSparse``, consider using ``CXSparse``, which is a much
- smaller, easier to build library. As can be expected, its
- performance on large problems is not comparable to that of
- ``SuiteSparse``.
+ ``SUITE_SPARSE``, ``CXSparse``, which can be selected by setting
+  this parameter to ``CX_SPARSE`` and ``Eigen``, which is enabled by
+ setting this parameter to ``EIGEN_SPARSE``.
+
+ ``SuiteSparse`` is a sophisticated and complex sparse linear
+ algebra library and should be used in general.
+
+ If your needs/platforms prevent you from using ``SuiteSparse``,
+ consider using ``CXSparse``, which is a much smaller, easier to
+ build library. As can be expected, its performance on large
+ problems is not comparable to that of ``SuiteSparse``.
+
+  Last but not least, you can use the sparse linear algebra
+ routines in ``Eigen``. Currently the performance of this library is
+ the poorest of the three. But this should change in the near
+ future.
+
+ Another thing to consider here is that the sparse Cholesky
+ factorization libraries in Eigen are licensed under ``LGPL`` and
+ building Ceres with support for ``EIGEN_SPARSE`` will result in an
+ LGPL licensed library (since the corresponding code from Eigen is
+ compiled into the library).
+
+ The upside is that you do not need to build and link to an external
+ library to use ``EIGEN_SPARSE``.
.. member:: int Solver::Options::num_linear_solver_threads
@@ -1184,7 +1249,7 @@ elimination group [LiSaad]_.
Number of threads used by the linear solver.
-.. member:: ParameterBlockOrdering* Solver::Options::linear_solver_ordering
+.. member:: shared_ptr<ParameterBlockOrdering> Solver::Options::linear_solver_ordering
Default: ``NULL``
@@ -1221,6 +1286,22 @@ elimination group [LiSaad]_.
expense of an extra copy of the Jacobian matrix. Setting
``use_postordering`` to ``true`` enables this tradeoff.
+.. member:: bool Solver::Options::dynamic_sparsity
+
+ Some non-linear least squares problems are symbolically dense but
+  numerically sparse, i.e., at any given state only a small number of
+ Jacobian entries are non-zero, but the position and number of
+ non-zeros is different depending on the state. For these problems
+ it can be useful to factorize the sparse jacobian at each solver
+ iteration instead of including all of the zero entries in a single
+ general factorization.
+
+ If your problem does not have this property (or you do not know),
+ then it is probably best to keep this false, otherwise it will
+ likely lead to worse performance.
+
+  This setting affects the `SPARSE_NORMAL_CHOLESKY` solver.
+
.. member:: int Solver::Options::min_linear_solver_iterations
Default: ``1``
@@ -1280,7 +1361,7 @@ elimination group [LiSaad]_.
inner iterations in subsequent trust region minimizer iterations is
disabled.
-.. member:: ParameterBlockOrdering* Solver::Options::inner_iteration_ordering
+.. member:: shared_ptr<ParameterBlockOrdering> Solver::Options::inner_iteration_ordering
Default: ``NULL``
@@ -1316,28 +1397,29 @@ elimination group [LiSaad]_.
.. code-block:: bash
- 0: f: 1.250000e+01 d: 0.00e+00 g: 5.00e+00 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 6.91e-06 tt: 1.91e-03
- 1: f: 1.249750e-07 d: 1.25e+01 g: 5.00e-04 h: 5.00e+00 rho: 1.00e+00 mu: 3.00e+04 li: 1 it: 2.81e-05 tt: 1.99e-03
- 2: f: 1.388518e-16 d: 1.25e-07 g: 1.67e-08 h: 5.00e-04 rho: 1.00e+00 mu: 9.00e+04 li: 1 it: 1.00e-05 tt: 2.01e-03
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 4.185660e+06 0.00e+00 1.09e+08 0.00e+00 0.00e+00 1.00e+04 0 7.59e-02 3.37e-01
+ 1 1.062590e+05 4.08e+06 8.99e+06 5.36e+02 9.82e-01 3.00e+04 1 1.65e-01 5.03e-01
+ 2 4.992817e+04 5.63e+04 8.32e+06 3.19e+02 6.52e-01 3.09e+04 1 1.45e-01 6.48e-01
Here
- #. ``f`` is the value of the objective function.
- #. ``d`` is the change in the value of the objective function if
- the step computed in this iteration is accepted.
- #. ``g`` is the max norm of the gradient.
- #. ``h`` is the change in the parameter vector.
- #. ``rho`` is the ratio of the actual change in the objective
+ #. ``cost`` is the value of the objective function.
+ #. ``cost_change`` is the change in the value of the objective
+ function if the step computed in this iteration is accepted.
+ #. ``|gradient|`` is the max norm of the gradient.
+ #. ``|step|`` is the change in the parameter vector.
+ #. ``tr_ratio`` is the ratio of the actual change in the objective
function value to the change in the the value of the trust
region model.
- #. ``mu`` is the size of the trust region radius.
- #. ``li`` is the number of linear solver iterations used to compute
- the trust region step. For direct/factorization based solvers it
- is always 1, for iterative solvers like ``ITERATIVE_SCHUR`` it
- is the number of iterations of the Conjugate Gradients
- algorithm.
- #. ``it`` is the time take by the current iteration.
- #. ``tt`` is the the total time taken by the minimizer.
+ #. ``tr_radius`` is the size of the trust region radius.
+ #. ``ls_iter`` is the number of linear solver iterations used to
+ compute the trust region step. For direct/factorization based
+ solvers it is always 1, for iterative solvers like
+ ``ITERATIVE_SCHUR`` it is the number of iterations of the
+ Conjugate Gradients algorithm.
+  #. ``iter_time`` is the time taken by the current iteration.
+  #. ``total_time`` is the total time taken by the minimizer.
For ``LINE_SEARCH_MINIMIZER`` the progress display looks like
@@ -1466,17 +1548,6 @@ elimination group [LiSaad]_.
iteration. This setting is useful when building an interactive
application using Ceres and using an :class:`IterationCallback`.
-.. member:: string Solver::Options::solver_log
-
- Default: ``empty``
-
- If non-empty, a summary of the execution of the solver is recorded
- to this file. This file is used for recording and Ceres'
- performance. Currently, only the iteration number, total time and
- the objective function value are logged. The format of this file is
- expected to change over time as the performance evaluation
- framework is fleshed out.
-
:class:`ParameterBlockOrdering`
-------------------------------
@@ -1542,95 +1613,127 @@ elimination group [LiSaad]_.
.. class:: IterationSummary
- :class:`IterationSummary` describes the state of the optimizer
- after each iteration of the minimization. Note that all times are
- wall times.
+ :class:`IterationSummary` describes the state of the minimizer at
+ the end of each iteration.
- .. code-block:: c++
+.. member:: int32 IterationSummary::iteration
+
+ Current iteration number.
+
+.. member:: bool IterationSummary::step_is_valid
+
+ Step was numerically valid, i.e., all values are finite and the
+ step reduces the value of the linearized model.
+
+ **Note**: :member:`IterationSummary::step_is_valid` is `false`
+ when :member:`IterationSummary::iteration` = 0.
+
+.. member:: bool IterationSummary::step_is_nonmonotonic
+
+ Step did not reduce the value of the objective function
+ sufficiently, but it was accepted because of the relaxed
+ acceptance criterion used by the non-monotonic trust region
+ algorithm.
+
+ **Note**: :member:`IterationSummary::step_is_nonmonotonic` is
+   `false` when :member:`IterationSummary::iteration` = 0.
+
+.. member:: bool IterationSummary::step_is_successful
+
+   Whether or not the minimizer accepted this step.
+
+ If the ordinary trust region algorithm is used, this means that the
+ relative reduction in the objective function value was greater than
+ :member:`Solver::Options::min_relative_decrease`. However, if the
+ non-monotonic trust region algorithm is used
+ (:member:`Solver::Options::use_nonmonotonic_steps` = `true`), then
+ even if the relative decrease is not sufficient, the algorithm may
+ accept the step and the step is declared successful.
+
+ **Note**: :member:`IterationSummary::step_is_successful` is `false`
+   when :member:`IterationSummary::iteration` = 0.
+
+.. member:: double IterationSummary::cost
+
+ Value of the objective function.
+
+.. member:: double IterationSummary::cost_change
+
+ Change in the value of the objective function in this
+ iteration. This can be positive or negative.
+
+.. member:: double IterationSummary::gradient_max_norm
+
+ Infinity norm of the gradient vector.
+
+.. member:: double IterationSummary::gradient_norm
+
+ 2-norm of the gradient vector.
+
+.. member:: double IterationSummary::step_norm
+
+ 2-norm of the size of the step computed in this iteration.
+
+.. member:: double IterationSummary::relative_decrease
+
+ For trust region algorithms, the ratio of the actual change in cost
+ and the change in the cost of the linearized approximation.
+
+   This field is not used when a line search minimizer is used.
+
+.. member:: double IterationSummary::trust_region_radius
+
+ Size of the trust region at the end of the current iteration. For
+ the Levenberg-Marquardt algorithm, the regularization parameter is
+   1.0 / :member:`IterationSummary::trust_region_radius`.
+
+.. member:: double IterationSummary::eta
+
+ For the inexact step Levenberg-Marquardt algorithm, this is the
+ relative accuracy with which the step is solved. This number is
+ only applicable to the iterative solvers capable of solving linear
+ systems inexactly. Factorization-based exact solvers always have an
+ eta of 0.0.
+
+.. member:: double IterationSummary::step_size
+
+   Step size computed by the line search algorithm.
+
+ This field is not used when a trust region minimizer is used.
+
+.. member:: int IterationSummary::line_search_function_evaluations
+
+ Number of function evaluations used by the line search algorithm.
+
+ This field is not used when a trust region minimizer is used.
+
+.. member:: int IterationSummary::linear_solver_iterations
+
+ Number of iterations taken by the linear solver to solve for the
+ trust region step.
+
+ Currently this field is not used when a line search minimizer is
+ used.
+
+.. member:: double IterationSummary::iteration_time_in_seconds
+
+ Time (in seconds) spent inside the minimizer loop in the current
+ iteration.
+
+.. member:: double IterationSummary::step_solver_time_in_seconds
+
+ Time (in seconds) spent inside the trust region step solver.
+
+.. member:: double IterationSummary::cumulative_time_in_seconds
+
+ Time (in seconds) since the user called Solve().
- struct IterationSummary {
- // Current iteration number.
- int32 iteration;
-
- // Step was numerically valid, i.e., all values are finite and the
- // step reduces the value of the linearized model.
- //
- // Note: step_is_valid is false when iteration = 0.
- bool step_is_valid;
-
- // Step did not reduce the value of the objective function
- // sufficiently, but it was accepted because of the relaxed
- // acceptance criterion used by the non-monotonic trust region
- // algorithm.
- //
- // Note: step_is_nonmonotonic is false when iteration = 0;
- bool step_is_nonmonotonic;
-
- // Whether or not the minimizer accepted this step or not. If the
- // ordinary trust region algorithm is used, this means that the
- // relative reduction in the objective function value was greater
- // than Solver::Options::min_relative_decrease. However, if the
- // non-monotonic trust region algorithm is used
- // (Solver::Options:use_nonmonotonic_steps = true), then even if the
- // relative decrease is not sufficient, the algorithm may accept the
- // step and the step is declared successful.
- //
- // Note: step_is_successful is false when iteration = 0.
- bool step_is_successful;
-
- // Value of the objective function.
- double cost;
-
- // Change in the value of the objective function in this
- // iteration. This can be positive or negative.
- double cost_change;
-
- // Infinity norm of the gradient vector.
- double gradient_max_norm;
-
- // 2-norm of the size of the step computed by the optimization
- // algorithm.
- double step_norm;
-
- // For trust region algorithms, the ratio of the actual change in
- // cost and the change in the cost of the linearized approximation.
- double relative_decrease;
-
- // Size of the trust region at the end of the current iteration. For
- // the Levenberg-Marquardt algorithm, the regularization parameter
- // mu = 1.0 / trust_region_radius.
- double trust_region_radius;
-
- // For the inexact step Levenberg-Marquardt algorithm, this is the
- // relative accuracy with which the Newton(LM) step is solved. This
- // number affects only the iterative solvers capable of solving
- // linear systems inexactly. Factorization-based exact solvers
- // ignore it.
- double eta;
-
- // Step sized computed by the line search algorithm.
- double step_size;
-
- // Number of function evaluations used by the line search algorithm.
- int line_search_function_evaluations;
-
- // Number of iterations taken by the linear solver to solve for the
- // Newton step.
- int linear_solver_iterations;
-
- // Time (in seconds) spent inside the minimizer loop in the current
- // iteration.
- double iteration_time_in_seconds;
-
- // Time (in seconds) spent inside the trust region step solver.
- double step_solver_time_in_seconds;
-
- // Time (in seconds) since the user called Solve().
- double cumulative_time_in_seconds;
- };
.. class:: IterationCallback
+ Interface for specifying callbacks that are executed at the end of
+ each iteration of the minimizer.
+
.. code-block:: c++
class IterationCallback {
@@ -1639,16 +1742,16 @@ elimination group [LiSaad]_.
virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
};
- Interface for specifying callbacks that are executed at the end of
- each iteration of the Minimizer. The solver uses the return value of
- ``operator()`` to decide whether to continue solving or to
- terminate. The user can return three values.
+
+ The solver uses the return value of ``operator()`` to decide whether
+ to continue solving or to terminate. The user can return three
+ values.
#. ``SOLVER_ABORT`` indicates that the callback detected an abnormal
situation. The solver returns without updating the parameter
blocks (unless ``Solver::Options::update_state_every_iteration`` is
set true). Solver returns with ``Solver::Summary::termination_type``
- set to ``USER_ABORT``.
+ set to ``USER_FAILURE``.
#. ``SOLVER_TERMINATE_SUCCESSFULLY`` indicates that there is no need
to optimize anymore (some user specified termination criterion
@@ -1658,8 +1761,8 @@ elimination group [LiSaad]_.
#. ``SOLVER_CONTINUE`` indicates that the solver should continue
optimizing.
- For example, the following ``IterationCallback`` is used internally
- by Ceres to log the progress of the optimization.
+ For example, the following :class:`IterationCallback` is used
+ internally by Ceres to log the progress of the optimization.
.. code-block:: c++
@@ -1703,50 +1806,56 @@ elimination group [LiSaad]_.
.. class:: CRSMatrix
- .. code-block:: c++
-
- struct CRSMatrix {
- int num_rows;
- int num_cols;
- vector<int> cols;
- vector<int> rows;
- vector<double> values;
- };
-
A compressed row sparse matrix used primarily for communicating the
Jacobian matrix to the user.
- A compressed row matrix stores its contents in three arrays,
- ``rows``, ``cols`` and ``values``.
+.. member:: int CRSMatrix::num_rows
- ``rows`` is a ``num_rows + 1`` sized array that points into the ``cols`` and
- ``values`` array. For each row ``i``:
+ Number of rows.
- ``cols[rows[i]]`` ... ``cols[rows[i + 1] - 1]`` are the indices of the
- non-zero columns of row ``i``.
+.. member:: int CRSMatrix::num_cols
- ``values[rows[i]]`` ... ``values[rows[i + 1] - 1]`` are the values of the
- corresponding entries.
+ Number of columns.
- ``cols`` and ``values`` contain as many entries as there are
+.. member:: vector<int> CRSMatrix::rows
+
+ :member:`CRSMatrix::rows` is a :member:`CRSMatrix::num_rows` + 1
+ sized array that points into the :member:`CRSMatrix::cols` and
+ :member:`CRSMatrix::values` array.
+
+.. member:: vector<int> CRSMatrix::cols
+
+ :member:`CRSMatrix::cols` contain as many entries as there are
non-zeros in the matrix.
- e.g, consider the 3x4 sparse matrix
+ For each row ``i``, ``cols[rows[i]]`` ... ``cols[rows[i + 1] - 1]``
+ are the indices of the non-zero columns of row ``i``.
- .. code-block:: c++
+.. member:: vector<double> CRSMatrix::values
- 0 10 0 4
- 0 2 -3 2
- 1 2 0 0
+ :member:`CRSMatrix::values` contain as many entries as there are
+ non-zeros in the matrix.
- The three arrays will be:
+ For each row ``i``,
+ ``values[rows[i]]`` ... ``values[rows[i + 1] - 1]`` are the values
+ of the non-zero columns of row ``i``.
- .. code-block:: c++
+e.g, consider the 3x4 sparse matrix
+
+.. code-block:: c++
+
+ 0 10 0 4
+ 0 2 -3 2
+ 1 2 0 0
+
+The three arrays will be:
+
+.. code-block:: c++
- -row0- ---row1--- -row2-
- rows = [ 0, 2, 5, 7]
- cols = [ 1, 3, 1, 2, 3, 0, 1]
- values = [10, 4, 2, -3, 2, 1, 2]
+ -row0- ---row1--- -row2-
+ rows = [ 0, 2, 5, 7]
+ cols = [ 1, 3, 1, 2, 3, 0, 1]
+ values = [10, 4, 2, -3, 2, 1, 2]
:class:`Solver::Summary`
@@ -1754,113 +1863,289 @@ elimination group [LiSaad]_.
.. class:: Solver::Summary
- Note that all times reported in this struct are wall times.
+ Summary of the various stages of the solver after termination.
- .. code-block:: c++
- struct Summary {
- // A brief one line description of the state of the solver after
- // termination.
- string BriefReport() const;
+.. function:: string Solver::Summary::BriefReport() const
- // A full multiline description of the state of the solver after
- // termination.
- string FullReport() const;
+ A brief one line description of the state of the solver after
+ termination.
- // Minimizer summary -------------------------------------------------
- MinimizerType minimizer_type;
+.. function:: string Solver::Summary::FullReport() const
- SolverTerminationType termination_type;
+ A full multiline description of the state of the solver after
+ termination.
- // If the solver did not run, or there was a failure, a
- // description of the error.
- string error;
+.. function:: bool Solver::Summary::IsSolutionUsable() const
- // Cost of the problem before and after the optimization. See
- // problem.h for definition of the cost of a problem.
- double initial_cost;
- double final_cost;
+ Whether the solution returned by the optimization algorithm can be
+ relied on to be numerically sane. This will be the case if
+   `Solver::Summary::termination_type` is set to `CONVERGENCE`,
+ `USER_SUCCESS` or `NO_CONVERGENCE`, i.e., either the solver
+ converged by meeting one of the convergence tolerances or because
+ the user indicated that it had converged or it ran to the maximum
+ number of iterations or time.
- // The part of the total cost that comes from residual blocks that
- // were held fixed by the preprocessor because all the parameter
- // blocks that they depend on were fixed.
- double fixed_cost;
+.. member:: MinimizerType Solver::Summary::minimizer_type
- vector<IterationSummary> iterations;
+ Type of minimization algorithm used.
- int num_successful_steps;
- int num_unsuccessful_steps;
- int num_inner_iteration_steps;
+.. member:: TerminationType Solver::Summary::termination_type
- // When the user calls Solve, before the actual optimization
- // occurs, Ceres performs a number of preprocessing steps. These
- // include error checks, memory allocations, and reorderings. This
- // time is accounted for as preprocessing time.
- double preprocessor_time_in_seconds;
+ The cause of the minimizer terminating.
- // Time spent in the TrustRegionMinimizer.
- double minimizer_time_in_seconds;
+.. member:: string Solver::Summary::message
- // After the Minimizer is finished, some time is spent in
- // re-evaluating residuals etc. This time is accounted for in the
- // postprocessor time.
- double postprocessor_time_in_seconds;
+ Reason why the solver terminated.
- // Some total of all time spent inside Ceres when Solve is called.
- double total_time_in_seconds;
+.. member:: double Solver::Summary::initial_cost
- double linear_solver_time_in_seconds;
- double residual_evaluation_time_in_seconds;
- double jacobian_evaluation_time_in_seconds;
- double inner_iteration_time_in_seconds;
+ Cost of the problem (value of the objective function) before the
+ optimization.
- // Preprocessor summary.
- int num_parameter_blocks;
- int num_parameters;
- int num_effective_parameters;
- int num_residual_blocks;
- int num_residuals;
+.. member:: double Solver::Summary::final_cost
- int num_parameter_blocks_reduced;
- int num_parameters_reduced;
- int num_effective_parameters_reduced;
- int num_residual_blocks_reduced;
- int num_residuals_reduced;
+ Cost of the problem (value of the objective function) after the
+ optimization.
- int num_eliminate_blocks_given;
- int num_eliminate_blocks_used;
+.. member:: double Solver::Summary::fixed_cost
- int num_threads_given;
- int num_threads_used;
+ The part of the total cost that comes from residual blocks that
+ were held fixed by the preprocessor because all the parameter
+ blocks that they depend on were fixed.
- int num_linear_solver_threads_given;
- int num_linear_solver_threads_used;
+.. member:: vector<IterationSummary> Solver::Summary::iterations
- LinearSolverType linear_solver_type_given;
- LinearSolverType linear_solver_type_used;
+ :class:`IterationSummary` for each minimizer iteration in order.
- vector<int> linear_solver_ordering_given;
- vector<int> linear_solver_ordering_used;
+.. member:: int Solver::Summary::num_successful_steps
- bool inner_iterations_given;
- bool inner_iterations_used;
+ Number of minimizer iterations in which the step was
+   accepted. Unless :member:`Solver::Options::use_nonmonotonic_steps`
+ is `true` this is also the number of steps in which the objective
+ function value/cost went down.
- vector<int> inner_iteration_ordering_given;
- vector<int> inner_iteration_ordering_used;
+.. member:: int Solver::Summary::num_unsuccessful_steps
- PreconditionerType preconditioner_type;
+ Number of minimizer iterations in which the step was rejected
+ either because it did not reduce the cost enough or the step was
+ not numerically valid.
- TrustRegionStrategyType trust_region_strategy_type;
- DoglegType dogleg_type;
+.. member:: int Solver::Summary::num_inner_iteration_steps
- DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+ Number of times inner iterations were performed.
- LineSearchDirectionType line_search_direction_type;
- LineSearchType line_search_type;
- int max_lbfgs_rank;
- };
+.. member:: double Solver::Summary::preprocessor_time_in_seconds
+
+ Time (in seconds) spent in the preprocessor.
+
+.. member:: double Solver::Summary::minimizer_time_in_seconds
+
+ Time (in seconds) spent in the Minimizer.
+
+.. member:: double Solver::Summary::postprocessor_time_in_seconds
+
+ Time (in seconds) spent in the post processor.
+
+.. member:: double Solver::Summary::total_time_in_seconds
+
+ Time (in seconds) spent in the solver.
+
+.. member:: double Solver::Summary::linear_solver_time_in_seconds
+
+ Time (in seconds) spent in the linear solver computing the trust
+ region step.
+
+.. member:: double Solver::Summary::residual_evaluation_time_in_seconds
+
+ Time (in seconds) spent evaluating the residual vector.
+
+.. member:: double Solver::Summary::jacobian_evaluation_time_in_seconds
+
+ Time (in seconds) spent evaluating the Jacobian matrix.
+
+.. member:: double Solver::Summary::inner_iteration_time_in_seconds
+
+ Time (in seconds) spent doing inner iterations.
+
+.. member:: int Solver::Summary::num_parameter_blocks
+
+ Number of parameter blocks in the problem.
+
+.. member:: int Solver::Summary::num_parameters
+
+ Number of parameters in the problem.
+
+.. member:: int Solver::Summary::num_effective_parameters
+
+ Dimension of the tangent space of the problem (or the number of
+ columns in the Jacobian for the problem). This is different from
+ :member:`Solver::Summary::num_parameters` if a parameter block is
+ associated with a :class:`LocalParameterization`.
+
+.. member:: int Solver::Summary::num_residual_blocks
+
+ Number of residual blocks in the problem.
+
+.. member:: int Solver::Summary::num_residuals
+
+ Number of residuals in the problem.
+
+.. member:: int Solver::Summary::num_parameter_blocks_reduced
+
+ Number of parameter blocks in the problem after the inactive and
+ constant parameter blocks have been removed. A parameter block is
+ inactive if no residual block refers to it.
+
+.. member:: int Solver::Summary::num_parameters_reduced
+
+ Number of parameters in the reduced problem.
+
+.. member:: int Solver::Summary::num_effective_parameters_reduced
+
+ Dimension of the tangent space of the reduced problem (or the
+ number of columns in the Jacobian for the reduced problem). This is
+ different from :member:`Solver::Summary::num_parameters_reduced` if
+ a parameter block in the reduced problem is associated with a
+ :class:`LocalParameterization`.
+
+.. member:: int Solver::Summary::num_residual_blocks_reduced
+
+ Number of residual blocks in the reduced problem.
+
+.. member:: int Solver::Summary::num_residuals_reduced
+
+ Number of residuals in the reduced problem.
+
+.. member:: int Solver::Summary::num_threads_given
+
+ Number of threads specified by the user for Jacobian and residual
+ evaluation.
+
+.. member:: int Solver::Summary::num_threads_used
+
+ Number of threads actually used by the solver for Jacobian and
+ residual evaluation. This number is not equal to
+ :member:`Solver::Summary::num_threads_given` if `OpenMP` is not
+ available.
+
+.. member:: int Solver::Summary::num_linear_solver_threads_given
+
+ Number of threads specified by the user for solving the trust
+ region problem.
+
+.. member:: int Solver::Summary::num_linear_solver_threads_used
+
+ Number of threads actually used by the solver for solving the trust
+ region problem. This number is not equal to
+ :member:`Solver::Summary::num_linear_solver_threads_given` if
+ `OpenMP` is not available.
+
+.. member:: LinearSolverType Solver::Summary::linear_solver_type_given
+
+   Type of the linear solver requested by the user.
+
+.. member:: LinearSolverType Solver::Summary::linear_solver_type_used
+
+ Type of the linear solver actually used. This may be different from
+ :member:`Solver::Summary::linear_solver_type_given` if Ceres
+ determines that the problem structure is not compatible with the
+ linear solver requested or if the linear solver requested by the
+ user is not available, e.g. The user requested
+ `SPARSE_NORMAL_CHOLESKY` but no sparse linear algebra library was
+ available.
+
+.. member:: vector<int> Solver::Summary::linear_solver_ordering_given
+
+ Size of the elimination groups given by the user as hints to the
+ linear solver.
+
+.. member:: vector<int> Solver::Summary::linear_solver_ordering_used
+
+ Size of the parameter groups used by the solver when ordering the
+   columns of the Jacobian. This may be different from
+ :member:`Solver::Summary::linear_solver_ordering_given` if the user
+ left :member:`Solver::Summary::linear_solver_ordering_given` blank
+ and asked for an automatic ordering, or if the problem contains
+ some constant or inactive parameter blocks.
+
+.. member:: bool Solver::Summary::inner_iterations_given
+
+ `True` if the user asked for inner iterations to be used as part of
+ the optimization.
+
+.. member:: bool Solver::Summary::inner_iterations_used
+
+ `True` if the user asked for inner iterations to be used as part of
+ the optimization and the problem structure was such that they were
+ actually performed. e.g., in a problem with just one parameter
+ block, inner iterations are not performed.
+
+.. member:: vector<int> Solver::Summary::inner_iteration_ordering_given
+
+ Size of the parameter groups given by the user for performing inner
+ iterations.
+
+.. member:: vector<int> Solver::Summary::inner_iteration_ordering_used
+
+   Size of the parameter groups used by the solver for
+   performing inner iterations. This may be different from
+ :member:`Solver::Summary::inner_iteration_ordering_given` if the
+ user left :member:`Solver::Summary::inner_iteration_ordering_given`
+ blank and asked for an automatic ordering, or if the problem
+ contains some constant or inactive parameter blocks.
+
+.. member:: PreconditionerType Solver::Summary::preconditioner_type
+
+ Type of preconditioner used for solving the trust region step. Only
+ meaningful when an iterative linear solver is used.
+
+.. member:: VisibilityClusteringType Solver::Summary::visibility_clustering_type
+
+ Type of clustering algorithm used for visibility based
+ preconditioning. Only meaningful when the
+ :member:`Solver::Summary::preconditioner_type` is
+ ``CLUSTER_JACOBI`` or ``CLUSTER_TRIDIAGONAL``.
+
+.. member:: TrustRegionStrategyType Solver::Summary::trust_region_strategy_type
+
+ Type of trust region strategy.
+
+.. member:: DoglegType Solver::Summary::dogleg_type
+
+ Type of dogleg strategy used for solving the trust region problem.
+
+.. member:: DenseLinearAlgebraLibraryType Solver::Summary::dense_linear_algebra_library_type
+
+ Type of the dense linear algebra library used.
+
+.. member:: SparseLinearAlgebraLibraryType Solver::Summary::sparse_linear_algebra_library_type
+
+ Type of the sparse linear algebra library used.
+
+.. member:: LineSearchDirectionType Solver::Summary::line_search_direction_type
+
+ Type of line search direction used.
+
+.. member:: LineSearchType Solver::Summary::line_search_type
+
+ Type of the line search algorithm used.
+
+.. member:: LineSearchInterpolationType Solver::Summary::line_search_interpolation_type
+
+ When performing line search, the degree of the polynomial used to
+ approximate the objective function.
+
+.. member:: NonlinearConjugateGradientType Solver::Summary::nonlinear_conjugate_gradient_type
+
+ If the line search direction is `NONLINEAR_CONJUGATE_GRADIENT`,
+ then this indicates the particular variant of non-linear conjugate
+ gradient used.
+
+.. member:: int Solver::Summary::max_lbfgs_rank
+
+ If the type of the line search direction is `LBFGS`, then this
+ indicates the rank of the Hessian approximation.
Covariance Estimation
=====================
@@ -1997,7 +2282,8 @@ cases.
.. member:: CovarianceAlgorithmType Covariance::Options::algorithm_type
- Default: ``SPARSE_QR`` or ``DENSE_SVD``
+ Default: ``SUITE_SPARSE_QR`` if ``SuiteSparseQR`` is installed and
+ ``EIGEN_SPARSE_QR`` otherwise.
Ceres supports three different algorithms for covariance
estimation, which represent different tradeoffs in speed, accuracy
@@ -2016,47 +2302,23 @@ cases.
small to moderate sized problems. It can handle full-rank as
well as rank deficient Jacobians.
- 2. ``SPARSE_CHOLESKY`` uses the ``CHOLMOD`` sparse Cholesky
- factorization library to compute the decomposition :
-
- .. math:: R^\top R = J^\top J
-
- and then
-
- .. math:: \left(J^\top J\right)^{-1} = \left(R^\top R\right)^{-1}
-
- It a fast algorithm for sparse matrices that should be used when
- the Jacobian matrix J is well conditioned. For ill-conditioned
- matrices, this algorithm can fail unpredictabily. This is
- because Cholesky factorization is not a rank-revealing
- factorization, i.e., it cannot reliably detect when the matrix
- being factorized is not of full
- rank. ``SuiteSparse``/``CHOLMOD`` supplies a heuristic for
- checking if the matrix is rank deficient (cholmod_rcond), but it
- is only a heuristic and can have both false positive and false
- negatives.
-
- Recent versions of ``SuiteSparse`` (>= 4.2.0) provide a much more
- efficient method for solving for rows of the covariance
- matrix. Therefore, if you are doing ``SPARSE_CHOLESKY``, we strongly
- recommend using a recent version of ``SuiteSparse``.
-
- 3. ``SPARSE_QR`` uses the ``SuiteSparseQR`` sparse QR factorization
- library to compute the decomposition
+ 2. ``EIGEN_SPARSE_QR`` uses the sparse QR factorization algorithm
+ in ``Eigen`` to compute the decomposition
.. math::
QR &= J\\
\left(J^\top J\right)^{-1} &= \left(R^\top R\right)^{-1}
- It is a moderately fast algorithm for sparse matrices, which at
- the price of more time and memory than the ``SPARSE_CHOLESKY``
- algorithm is numerically better behaved and is rank revealing,
- i.e., it can reliably detect when the Jacobian matrix is rank
- deficient.
+ It is a moderately fast algorithm for sparse matrices.
- Neither ``SPARSE_CHOLESKY`` or ``SPARSE_QR`` are capable of computing
- the covariance if the Jacobian is rank deficient.
+ 3. ``SUITE_SPARSE_QR`` uses the sparse QR factorization algorithm
+ in ``SuiteSparse``. It uses dense linear algebra and is multi
+   threaded, so for large sparse matrices it is
+ significantly faster than ``EIGEN_SPARSE_QR``.
+
+ Neither ``EIGEN_SPARSE_QR`` nor ``SUITE_SPARSE_QR`` are capable of
+ computing the covariance if the Jacobian is rank deficient.
.. member:: int Covariance::Options::min_reciprocal_condition_number
@@ -2095,29 +2357,14 @@ cases.
   :math:`\sigma_{\text{max}}` are the minimum and maximum
singular values of :math:`J` respectively.
- 2. ``SPARSE_CHOLESKY``
-
- .. math:: \text{cholmod_rcond} < \text{min_reciprocal_conditioner_number}
-
- Here cholmod_rcond is a crude estimate of the reciprocal
- condition number of :math:`J^\top J` by using the maximum and
- minimum diagonal entries of the Cholesky factor :math:`R`. There
- are no theoretical guarantees associated with this test. It can
- give false positives and negatives. Use at your own risk. The
- default value of ``min_reciprocal_condition_number`` has been
- set to a conservative value, and sometimes the
- :func:`Covariance::Compute` may return false even if it is
- possible to estimate the covariance reliably. In such cases, the
- user should exercise their judgement before lowering the value
- of ``min_reciprocal_condition_number``.
-
- 3. ``SPARSE_QR``
+ 2. ``EIGEN_SPARSE_QR`` and ``SUITE_SPARSE_QR``
.. math:: \operatorname{rank}(J) < \operatorname{num\_col}(J)
   Here :math:`\operatorname{rank}(J)` is the estimate of the
- rank of `J` returned by the ``SuiteSparseQR`` algorithm. It is
- a fairly reliable indication of rank deficiency.
+ rank of `J` returned by the sparse QR factorization
+ algorithm. It is a fairly reliable indication of rank
+ deficiency.
.. member:: int Covariance::Options::null_space_rank
@@ -2152,8 +2399,8 @@ cases.
.. math:: \frac{\lambda_i}{\lambda_{\textrm{max}}} < \textrm{min_reciprocal_condition_number}
- This option has no effect on ``SPARSE_QR`` and ``SPARSE_CHOLESKY``
- algorithms.
+ This option has no effect on ``EIGEN_SPARSE_QR`` and
+ ``SUITE_SPARSE_QR``.
.. member:: bool Covariance::Options::apply_loss_function
@@ -2243,4 +2490,3 @@ Example Usage
covariance.GetCovarianceBlock(x, x, covariance_xx)
covariance.GetCovarianceBlock(y, y, covariance_yy)
covariance.GetCovarianceBlock(x, y, covariance_xy)
-
diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst
index 1e5756a..79714f6 100644
--- a/docs/source/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -7,10 +7,27 @@
========
Tutorial
========
-Ceres solves robustified non-linear least squares problems of the form
-.. math:: \frac{1}{2}\sum_{i=1} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right).
- :label: ceresproblem
+Ceres solves robustified non-linear bounds constrained least squares
+problems of the form
+
+.. math:: :label: ceresproblem
+
+ \min_{\mathbf{x}} &\quad \frac{1}{2}\sum_{i} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right) \\
+ \text{s.t.} &\quad l_j \le x_j \le u_j
+
+Problems of this form come up in a broad range of areas across
+science and engineering - from `fitting curves`_ in statistics, to
+constructing `3D models from photographs`_ in computer vision.
+
+.. _fitting curves: http://en.wikipedia.org/wiki/Nonlinear_regression
+.. _3D models from photographs: http://en.wikipedia.org/wiki/Bundle_adjustment
+
+In this chapter we will learn how to solve :eq:`ceresproblem` using
+Ceres Solver. Full working code for all the examples described in this
+chapter and more can be found in the `examples
+<https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/>`_
+directory.
The expression
:math:`\rho_i\left(\left\|f_i\left(x_{i_1},...,x_{i_k}\right)\right\|^2\right)`
@@ -21,24 +38,21 @@ problems small groups of scalars occur together. For example the three
components of a translation vector and the four components of the
quaternion that define the pose of a camera. We refer to such a group
of small scalars as a ``ParameterBlock``. Of course a
-``ParameterBlock`` can just be a single parameter.
+``ParameterBlock`` can just be a single parameter. :math:`l_j` and
+:math:`u_j` are bounds on the parameter block :math:`x_j`.
:math:`\rho_i` is a :class:`LossFunction`. A :class:`LossFunction` is
a scalar function that is used to reduce the influence of outliers on
-the solution of non-linear least squares problems. As a special case,
-when :math:`\rho_i(x) = x`, i.e., the identity function, we get the
-more familiar `non-linear least squares problem
+the solution of non-linear least squares problems.
+
+As a special case, when :math:`\rho_i(x) = x`, i.e., the identity
+function, and :math:`l_j = -\infty` and :math:`u_j = \infty` we get
+the more familiar `non-linear least squares problem
<http://en.wikipedia.org/wiki/Non-linear_least_squares>`_.
-.. math:: \frac{1}{2}\sum_{i=1} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
+.. math:: \frac{1}{2}\sum_{i} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
:label: ceresproblem2
-In this chapter we will learn how to solve :eq:`ceresproblem` using
-Ceres Solver. Full working code for all the examples described in this
-chapter and more can be found in the `examples
-<https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/>`_
-directory.
-
.. _section-hello-world:
Hello World!
@@ -68,10 +82,10 @@ function :math:`f(x) = 10 - x`:
The important thing to note here is that ``operator()`` is a templated
method, which assumes that all its inputs and outputs are of some type
-``T``. The reason for using templates here is because Ceres will call
-``CostFunctor::operator<T>()``, with ``T=double`` when just the
-residual is needed, and with a special type ``T=Jet`` when the
-Jacobians are needed. In :ref:`section-derivatives` we discuss the
+``T``. The use of templating here allows Ceres to call
+``CostFunctor::operator<T>()``, with ``T=double`` when just the value
+of the residual is needed, and with a special type ``T=Jet`` when the
+Jacobians are needed. In :ref:`section-derivatives` we will discuss the
various ways of supplying derivatives to Ceres in more detail.
Once we have a way of computing the residual function, it is now time
@@ -119,11 +133,12 @@ gives us
.. code-block:: bash
- 0: f: 1.250000e+01 d: 0.00e+00 g: 5.00e+00 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 6.91e-06 tt: 1.91e-03
- 1: f: 1.249750e-07 d: 1.25e+01 g: 5.00e-04 h: 5.00e+00 rho: 1.00e+00 mu: 3.00e+04 li: 1 it: 2.81e-05 tt: 1.99e-03
- 2: f: 1.388518e-16 d: 1.25e-07 g: 1.67e-08 h: 5.00e-04 rho: 1.00e+00 mu: 9.00e+04 li: 1 it: 1.00e-05 tt: 2.01e-03
- Ceres Solver Report: Iterations: 2, Initial cost: 1.250000e+01, Final cost: 1.388518e-16, Termination: PARAMETER_TOLERANCE.
- x : 5 -> 10
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 4.512500e+01 0.00e+00 9.50e+00 0.00e+00 0.00e+00 1.00e+04 0 5.33e-04 3.46e-03
+ 1 4.511598e-07 4.51e+01 9.50e-04 9.50e+00 1.00e+00 3.00e+04 1 5.00e-04 4.05e-03
+ 2 5.012552e-16 4.51e-07 3.17e-08 9.50e-04 1.00e+00 9.00e+04 1 1.60e-05 4.09e-03
+ Ceres Solver Report: Iterations: 2, Initial cost: 4.512500e+01, Final cost: 5.012552e-16, Termination: CONVERGENCE
+ x : 0.5 -> 10
Starting from a :math:`x=5`, the solver in two iterations goes to 10
[#f2]_. The careful reader will note that this is a linear problem and
@@ -359,21 +374,64 @@ gives us:
.. code-block:: bash
- Initial x1 = 3, x2 = -1, x3 = 0, x4 = 1
- 0: f: 1.075000e+02 d: 0.00e+00 g: 1.55e+02 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 0.00e+00 tt: 0.00e+00
- 1: f: 5.036190e+00 d: 1.02e+02 g: 2.00e+01 h: 2.16e+00 rho: 9.53e-01 mu: 3.00e+04 li: 1 it: 0.00e+00 tt: 0.00e+00
- 2: f: 3.148168e-01 d: 4.72e+00 g: 2.50e+00 h: 6.23e-01 rho: 9.37e-01 mu: 9.00e+04 li: 1 it: 0.00e+00 tt: 0.00e+00
- 3: f: 1.967760e-02 d: 2.95e-01 g: 3.13e-01 h: 3.08e-01 rho: 9.37e-01 mu: 2.70e+05 li: 1 it: 0.00e+00 tt: 0.00e+00
- 4: f: 1.229900e-03 d: 1.84e-02 g: 3.91e-02 h: 1.54e-01 rho: 9.37e-01 mu: 8.10e+05 li: 1 it: 0.00e+00 tt: 0.00e+00
- 5: f: 7.687123e-05 d: 1.15e-03 g: 4.89e-03 h: 7.69e-02 rho: 9.37e-01 mu: 2.43e+06 li: 1 it: 0.00e+00 tt: 0.00e+00
- 6: f: 4.804625e-06 d: 7.21e-05 g: 6.11e-04 h: 3.85e-02 rho: 9.37e-01 mu: 7.29e+06 li: 1 it: 0.00e+00 tt: 0.00e+00
- 7: f: 3.003028e-07 d: 4.50e-06 g: 7.64e-05 h: 1.92e-02 rho: 9.37e-01 mu: 2.19e+07 li: 1 it: 0.00e+00 tt: 0.00e+00
- 8: f: 1.877006e-08 d: 2.82e-07 g: 9.54e-06 h: 9.62e-03 rho: 9.37e-01 mu: 6.56e+07 li: 1 it: 0.00e+00 tt: 0.00e+00
- 9: f: 1.173223e-09 d: 1.76e-08 g: 1.19e-06 h: 4.81e-03 rho: 9.37e-01 mu: 1.97e+08 li: 1 it: 0.00e+00 tt: 0.00e+00
- 10: f: 7.333425e-11 d: 1.10e-09 g: 1.49e-07 h: 2.40e-03 rho: 9.37e-01 mu: 5.90e+08 li: 1 it: 0.00e+00 tt: 0.00e+00
- 11: f: 4.584044e-12 d: 6.88e-11 g: 1.86e-08 h: 1.20e-03 rho: 9.37e-01 mu: 1.77e+09 li: 1 it: 0.00e+00 tt: 0.00e+00
- Ceres Solver Report: Iterations: 12, Initial cost: 1.075000e+02, Final cost: 4.584044e-12, Termination: GRADIENT_TOLERANCE.
- Final x1 = 0.00116741, x2 = -0.000116741, x3 = 0.000190535, x4 = 0.000190535
+ Initial x1 = 3, x2 = -1, x3 = 0, x4 = 1
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 1.075000e+02 0.00e+00 1.55e+02 0.00e+00 0.00e+00 1.00e+04 0 4.95e-04 2.30e-03
+ 1 5.036190e+00 1.02e+02 2.00e+01 2.16e+00 9.53e-01 3.00e+04 1 4.39e-05 2.40e-03
+ 2 3.148168e-01 4.72e+00 2.50e+00 6.23e-01 9.37e-01 9.00e+04 1 9.06e-06 2.43e-03
+ 3 1.967760e-02 2.95e-01 3.13e-01 3.08e-01 9.37e-01 2.70e+05 1 8.11e-06 2.45e-03
+ 4 1.229900e-03 1.84e-02 3.91e-02 1.54e-01 9.37e-01 8.10e+05 1 6.91e-06 2.48e-03
+ 5 7.687123e-05 1.15e-03 4.89e-03 7.69e-02 9.37e-01 2.43e+06 1 7.87e-06 2.50e-03
+ 6 4.804625e-06 7.21e-05 6.11e-04 3.85e-02 9.37e-01 7.29e+06 1 5.96e-06 2.52e-03
+ 7 3.003028e-07 4.50e-06 7.64e-05 1.92e-02 9.37e-01 2.19e+07 1 5.96e-06 2.55e-03
+ 8 1.877006e-08 2.82e-07 9.54e-06 9.62e-03 9.37e-01 6.56e+07 1 5.96e-06 2.57e-03
+ 9 1.173223e-09 1.76e-08 1.19e-06 4.81e-03 9.37e-01 1.97e+08 1 7.87e-06 2.60e-03
+ 10 7.333425e-11 1.10e-09 1.49e-07 2.40e-03 9.37e-01 5.90e+08 1 6.20e-06 2.63e-03
+ 11 4.584044e-12 6.88e-11 1.86e-08 1.20e-03 9.37e-01 1.77e+09 1 6.91e-06 2.65e-03
+ 12 2.865573e-13 4.30e-12 2.33e-09 6.02e-04 9.37e-01 5.31e+09 1 5.96e-06 2.67e-03
+ 13 1.791438e-14 2.69e-13 2.91e-10 3.01e-04 9.37e-01 1.59e+10 1 7.15e-06 2.69e-03
+
+ Ceres Solver v1.10.0 Solve Report
+ ----------------------------------
+ Original Reduced
+ Parameter blocks 4 4
+ Parameters 4 4
+ Residual blocks 4 4
+ Residual 4 4
+
+ Minimizer TRUST_REGION
+
+ Dense linear algebra library EIGEN
+ Trust region strategy LEVENBERG_MARQUARDT
+
+ Given Used
+ Linear solver DENSE_QR DENSE_QR
+ Threads 1 1
+ Linear solver threads 1 1
+
+ Cost:
+ Initial 1.075000e+02
+ Final 1.791438e-14
+ Change 1.075000e+02
+
+ Minimizer iterations 14
+ Successful steps 14
+ Unsuccessful steps 0
+
+ Time (in seconds):
+ Preprocessor 0.002
+
+ Residual evaluation 0.000
+ Jacobian evaluation 0.000
+ Linear solver 0.000
+ Minimizer 0.001
+
+ Postprocessor 0.000
+ Total 0.005
+
+ Termination: CONVERGENCE (Gradient tolerance reached. Gradient max norm: 3.642190e-11 <= 1.000000e-10)
+
+ Final x1 = 0.000292189, x2 = -2.92189e-05, x3 = 4.79511e-05, x4 = 4.79511e-05
It is easy to see that the optimal solution to this problem is at
:math:`x_1=0, x_2=0, x_3=0, x_4=0` with an objective function value of
@@ -447,24 +505,24 @@ gives us:
.. code-block:: bash
- 0: f: 1.211734e+02 d: 0.00e+00 g: 3.61e+02 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 0.00e+00 tt: 0.00e+00
- 1: f: 1.211734e+02 d:-2.21e+03 g: 3.61e+02 h: 7.52e-01 rho:-1.87e+01 mu: 5.00e+03 li: 1 it: 0.00e+00 tt: 0.00e+00
- 2: f: 1.211734e+02 d:-2.21e+03 g: 3.61e+02 h: 7.51e-01 rho:-1.86e+01 mu: 1.25e+03 li: 1 it: 0.00e+00 tt: 0.00e+00
- 3: f: 1.211734e+02 d:-2.19e+03 g: 3.61e+02 h: 7.48e-01 rho:-1.85e+01 mu: 1.56e+02 li: 1 it: 0.00e+00 tt: 0.00e+00
- 4: f: 1.211734e+02 d:-2.02e+03 g: 3.61e+02 h: 7.22e-01 rho:-1.70e+01 mu: 9.77e+00 li: 1 it: 0.00e+00 tt: 0.00e+00
- 5: f: 1.211734e+02 d:-7.34e+02 g: 3.61e+02 h: 5.78e-01 rho:-6.32e+00 mu: 3.05e-01 li: 1 it: 0.00e+00 tt: 0.00e+00
- 6: f: 3.306595e+01 d: 8.81e+01 g: 4.10e+02 h: 3.18e-01 rho: 1.37e+00 mu: 9.16e-01 li: 1 it: 0.00e+00 tt: 0.00e+00
- 7: f: 6.426770e+00 d: 2.66e+01 g: 1.81e+02 h: 1.29e-01 rho: 1.10e+00 mu: 2.75e+00 li: 1 it: 0.00e+00 tt: 0.00e+00
- 8: f: 3.344546e+00 d: 3.08e+00 g: 5.51e+01 h: 3.05e-02 rho: 1.03e+00 mu: 8.24e+00 li: 1 it: 0.00e+00 tt: 0.00e+00
- 9: f: 1.987485e+00 d: 1.36e+00 g: 2.33e+01 h: 8.87e-02 rho: 9.94e-01 mu: 2.47e+01 li: 1 it: 0.00e+00 tt: 0.00e+00
- 10: f: 1.211585e+00 d: 7.76e-01 g: 8.22e+00 h: 1.05e-01 rho: 9.89e-01 mu: 7.42e+01 li: 1 it: 0.00e+00 tt: 0.00e+00
- 11: f: 1.063265e+00 d: 1.48e-01 g: 1.44e+00 h: 6.06e-02 rho: 9.97e-01 mu: 2.22e+02 li: 1 it: 0.00e+00 tt: 0.00e+00
- 12: f: 1.056795e+00 d: 6.47e-03 g: 1.18e-01 h: 1.47e-02 rho: 1.00e+00 mu: 6.67e+02 li: 1 it: 0.00e+00 tt: 0.00e+00
- 13: f: 1.056751e+00 d: 4.39e-05 g: 3.79e-03 h: 1.28e-03 rho: 1.00e+00 mu: 2.00e+03 li: 1 it: 0.00e+00 tt: 0.00e+00
- Ceres Solver Report: Iterations: 13, Initial cost: 1.211734e+02, Final cost: 1.056751e+00, Termination: FUNCTION_TOLERANCE.
- Initial m: 0 c: 0
- Final m: 0.291861 c: 0.131439
-
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 1.211734e+02 0.00e+00 3.61e+02 0.00e+00 0.00e+00 1.00e+04 0 5.34e-04 2.56e-03
+ 1 1.211734e+02 -2.21e+03 0.00e+00 7.52e-01 -1.87e+01 5.00e+03 1 4.29e-05 3.25e-03
+ 2 1.211734e+02 -2.21e+03 0.00e+00 7.51e-01 -1.86e+01 1.25e+03 1 1.10e-05 3.28e-03
+ 3 1.211734e+02 -2.19e+03 0.00e+00 7.48e-01 -1.85e+01 1.56e+02 1 1.41e-05 3.31e-03
+ 4 1.211734e+02 -2.02e+03 0.00e+00 7.22e-01 -1.70e+01 9.77e+00 1 1.00e-05 3.34e-03
+ 5 1.211734e+02 -7.34e+02 0.00e+00 5.78e-01 -6.32e+00 3.05e-01 1 1.00e-05 3.36e-03
+ 6 3.306595e+01 8.81e+01 4.10e+02 3.18e-01 1.37e+00 9.16e-01 1 2.79e-05 3.41e-03
+ 7 6.426770e+00 2.66e+01 1.81e+02 1.29e-01 1.10e+00 2.75e+00 1 2.10e-05 3.45e-03
+ 8 3.344546e+00 3.08e+00 5.51e+01 3.05e-02 1.03e+00 8.24e+00 1 2.10e-05 3.48e-03
+ 9 1.987485e+00 1.36e+00 2.33e+01 8.87e-02 9.94e-01 2.47e+01 1 2.10e-05 3.52e-03
+ 10 1.211585e+00 7.76e-01 8.22e+00 1.05e-01 9.89e-01 7.42e+01 1 2.10e-05 3.56e-03
+ 11 1.063265e+00 1.48e-01 1.44e+00 6.06e-02 9.97e-01 2.22e+02 1 2.60e-05 3.61e-03
+ 12 1.056795e+00 6.47e-03 1.18e-01 1.47e-02 1.00e+00 6.67e+02 1 2.10e-05 3.64e-03
+ 13 1.056751e+00 4.39e-05 3.79e-03 1.28e-03 1.00e+00 2.00e+03 1 2.10e-05 3.68e-03
+ Ceres Solver Report: Iterations: 13, Initial cost: 1.211734e+02, Final cost: 1.056751e+00, Termination: CONVERGENCE
+ Initial m: 0 c: 0
+ Final m: 0.291861 c: 0.131439
Starting from parameter values :math:`m = 0, c=0` with an initial
objective function value of :math:`121.173` Ceres finds a solution
@@ -635,10 +693,9 @@ as follows:
ceres::Problem problem;
for (int i = 0; i < bal_problem.num_observations(); ++i) {
ceres::CostFunction* cost_function =
- new ceres::AutoDiffCostFunction<SnavelyReprojectionError, 2, 9, 3>(
- new SnavelyReprojectionError(
- bal_problem.observations()[2 * i + 0],
- bal_problem.observations()[2 * i + 1]));
+ SnavelyReprojectionError::Create(
+ bal_problem.observations()[2 * i + 0],
+ bal_problem.observations()[2 * i + 1]);
problem.AddResidualBlock(cost_function,
NULL /* squared loss */,
bal_problem.mutable_camera_for_observation(i),
@@ -713,5 +770,3 @@ directory contains a number of other examples:
#. `libmv_bundle_adjuster.cc
<https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/libmv_bundle_adjuster.cc>`_
is the bundle adjustment algorithm used by `Blender <www.blender.org>`_/libmv.
-
-
diff --git a/docs/source/version_history.rst b/docs/source/version_history.rst
index f9bc273..a52ab30 100644
--- a/docs/source/version_history.rst
+++ b/docs/source/version_history.rst
@@ -1,8 +1,217 @@
.. _chapter-version-history:
-===============
-Version History
-===============
+========
+Releases
+========
+
+HEAD
+====
+
+#. Added ``Solver::Options::IsValid`` which allows users to validate
+ their solver configuration before calling ``Solve``.
+
+#. Added ``EIGEN_SPARSE_QR`` algorithm for covariance estimation using
+ ``Eigen``'s sparse QR factorization. (Michael Vitus)
+
+Backward Incompatible API Changes
+---------------------------------
+
+#. ``Solver::Options::solver_log`` has been removed. If needed this
+ iteration callback can easily be implemented in user code.
+
+#. The ``SPARSE_CHOLESKY`` algorithm for covariance estimation has
+ been removed. It is not rank revealing and numerically poorly
+ behaved. Sparse QR factorization is a much better way to do this.
+
+#. The ``SPARSE_QR`` algorithm for covariance estimation has been
+ renamed to ``SUITE_SPARSE_QR`` to be consistent with
+ ``EIGEN_SPARSE_QR``.
+
+
+1.9.0
+=====
+
+New Features
+------------
+
+#. Bounds constraints: Support for upper and/or lower bounds on
+ parameters when using the trust region minimizer.
+#. Dynamic Sparsity: Problems in which the sparsity structure of the
+ Jacobian changes over the course of the optimization can now be
+ solved much more efficiently. (Richard Stebbing)
+#. Improved support for Microsoft Visual C++ including the ability to
+ build and ship DLLs. (Björn Piltz, Alex Stewart and Sergey
+ Sharybin)
+#. Support for building on iOS 6.0 or higher (Jack Feng).
+#. Autogeneration of config.h that captures all the defines used to
+ build and use Ceres Solver.
+#. Simpler and more informative solver termination type
+ reporting. (See below for more details)
+#. New `website <http://www.ceres-solver.org>`_ based entirely on
+ Sphinx.
+#. ``AutoDiffLocalParameterization`` allows the use of automatic
+ differentiation for defining ``LocalParameterization`` objects
+ (Alex Stewart)
+#. LBFGS is faster due to fewer memory copies.
+#. Parameter blocks are not restricted to be less than 32k in size,
+ they can be up to 2G in size.
+#. Faster ``SPARSE_NORMAL_CHOLESKY`` solver when using ``CX_SPARSE``
+ as the sparse linear algebra library.
+#. Added ``Problem::IsParameterBlockPresent`` and
+ ``Problem::GetParameterization``.
+#. Added the (2,4,9) and (2,4,8) template specializations.
+#. An example demonstrating the use of
+ DynamicAutoDiffCostFunction. (Joydeep Biswas)
+#. Homography estimation example from Blender demonstrating the use of
+ a custom ``IterationCallback``. (Sergey Sharybin)
+#. Support user passing a custom CMAKE_MODULE_PATH (for BLAS /
+ LAPACK).
+
+Backward Incompatible API Changes
+---------------------------------
+
+#. ``Solver::Options::linear_solver_ordering`` used to be a naked
+ pointer that Ceres took ownership of. This is error prone behaviour
+ which leads to problems when copying the ``Solver::Options`` struct
+ around. This has been replaced with a ``shared_ptr`` to handle
+ ownership correctly across copies.
+
+#. The enum used for reporting the termination/convergence status of
+ the solver has been renamed from ``SolverTerminationType`` to
+ ``TerminationType``.
+
+ The enum values have also changed. ``FUNCTION_TOLERANCE``,
+ ``GRADIENT_TOLERANCE`` and ``PARAMETER_TOLERANCE`` have all been
+ replaced by ``CONVERGENCE``.
+
+   ``NUMERICAL_FAILURE`` has been replaced by ``FAILURE``.
+
+ ``USER_ABORT`` has been renamed to ``USER_FAILURE``.
+
+ Further ``Solver::Summary::error`` has been renamed to
+ ``Solver::Summary::message``. It contains a more detailed
+ explanation for why the solver terminated.
+
+#. ``Solver::Options::gradient_tolerance`` used to be a relative
+ gradient tolerance. i.e., The solver converged when
+
+ .. math::
+ \|g(x)\|_\infty < \text{gradient_tolerance} * \|g(x_0)\|_\infty
+
+ where :math:`g(x)` is the gradient of the objective function at
+   :math:`x` and :math:`x_0` is the parameter vector at the start of
+ the optimization.
+
+ This has changed to an absolute tolerance, i.e. the solver
+ converges when
+
+ .. math::
+ \|g(x)\|_\infty < \text{gradient_tolerance}
+
+#. Ceres cannot be built without the line search minimizer
+ anymore. Thus the preprocessor define
+ ``CERES_NO_LINE_SEARCH_MINIMIZER`` has been removed.
+
+Bug Fixes
+---------
+
+#. Disabled warning C4251. (Björn Piltz)
+#. Do not propagate 3rd party libs through
+   `IMPORTED_LINK_INTERFACE_LIBRARIES_[DEBUG/RELEASE]` mechanism when
+ building shared libraries. (Björn Piltz)
+#. Fixed errant verbose levels (Björn Piltz)
+#. Variety of code cleanups, optimizations and bug fixes to the line
+ search minimizer code (Alex Stewart)
+#. Fixed ``BlockSparseMatrix::Transpose`` when the matrix has row and
+ column blocks. (Richard Bowen)
+#. Better error checking when ``Problem::RemoveResidualBlock`` is
+ called. (Alex Stewart)
+#. Fixed a memory leak in ``SchurComplementSolver``.
+#. Added ``epsilon()`` method to ``NumTraits<ceres::Jet<T, N> >``. (Filippo
+ Basso)
+#. Fixed a bug in ``CompressedRowSparseMatrix::AppendRows`` and
+   ``DeleteRows``.
+#. Handle empty problems consistently.
+#. Restore the state of the ``Problem`` after a call to
+ ``Problem::Evaluate``. (Stefan Leutenegger)
+#. Better error checking and reporting for linear solvers.
+#. Use explicit formula to solve quadratic polynomials instead of the
+ eigenvalue solver.
+#. Fix constant parameter handling in inner iterations (Mikael
+ Persson).
+#. SuiteSparse errors do not cause a fatal crash anymore.
+#. Fix ``corrector_test.cc``.
+#. Relax the requirements on loss function derivatives.
+#. Minor bugfix to logging.h (Scott Ettinger)
+#. Updated ``gmock`` and ``gtest`` to the latest upstream version.
+#. Fix build breakage on old versions of SuiteSparse.
+#. Fixed build issues related to Clang / LLVM 3.4 (Johannes
+ Schönberger)
+#. METIS_FOUND is never set. Changed the commit to fit the setting of
+ the other #._FOUND definitions. (Andreas Franek)
+#. Variety of bug fixes and cleanups to the ``CMake`` build system
+ (Alex Stewart)
+#. Removed fictitious shared library target from the NDK build.
+#. Solver::Options now uses ``shared_ptr`` to handle ownership of
+ ``Solver::Options::linear_solver_ordering`` and
+ ``Solver::Options::inner_iteration_ordering``. As a consequence the
+ ``NDK`` build now depends on ``libc++`` from the ``LLVM`` project.
+#. Variety of lint cleanups (William Rucklidge & Jim Roseborough)
+#. Various internal cleanups including dead code removal.
+
+
+1.8.0
+=====
+
+New Features
+------------
+#. Significantly improved ``CMake`` files with better robustness,
+ dependency checking and GUI support. (Alex Stewart)
+#. Added ``DynamicNumericDiffCostFunction`` for numerically
+ differentiated cost functions whose sizing is determined at run
+ time.
+#. ``NumericDiffCostFunction`` now supports a dynamic number of
+ residuals just like ``AutoDiffCostFunction``.
+#. ``Problem`` exposes more of its structure in its API.
+#. Faster automatic differentiation (Tim Langlois)
+#. Added the commonly occurring ``2_d_d`` template specialization for
+ the Schur Eliminator.
+#. Faster ``ITERATIVE_SCHUR`` solver using template specializations.
+#. Faster ``SCHUR_JACOBI`` preconditioner construction.
+#. Faster ``AngleAxisRotatePoint``.
+#. Faster Jacobian evaluation when a loss function is used.
+#. Added support for multiple clustering algorithms in visibility
+ based preconditioning, including a new fast single linkage
+ clustering algorithm.
+
+Bug Fixes
+---------
+#. Fix ordering of ParseCommandLineFlags() & InitGoogleTest() for
+ Windows. (Alex Stewart)
+#. Remove DCHECK_GE checks from fixed_array.h.
+#. Fix build on MSVC 2013 (Petter Strandmark)
+#. Fixed ``AngleAxisToRotationMatrix`` near zero.
+#. Move ``CERES_HASH_NAMESPACE`` macros to ``collections_port.h``.
+#. Fix handling of unordered_map/unordered_set on OSX 10.9.0.
+#. Explicitly link to libm for ``curve_fitting_c.c``. (Alex Stewart)
+#. Minor type conversion fix to autodiff.h
+#. Remove RuntimeNumericDiffCostFunction.
+#. Fix operator= ambiguity on some versions of Clang. (Alex Stewart)
+#. Various Lint cleanups (William Rucklidge & Jim Roseborough)
+#. Modified installation folders for Windows. (Pablo Speciale)
+#. Added librt to link libraries for SuiteSparse_config on Linux. (Alex Stewart)
+#. Check for presence of return-type-c-linkage option with
+ Clang. (Alex Stewart)
+#. Fix Problem::RemoveParameterBlock after calling solve. (Simon Lynen)
+#. Fix a free/delete bug in covariance_impl.cc
+#. Fix two build errors. (Dustin Lang)
+#. Add RequireInitialization = 1 to NumTraits::Jet.
+#. Update gmock/gtest to 1.7.0
+#. Added IterationSummary::gradient_norm.
+#. Reduced verbosity of the inner iteration minimizer.
+#. Fixed a bug in TrustRegionMinimizer. (Michael Vitus)
+#. Removed android/build_android.sh.
+
1.7.0
=====
@@ -35,7 +244,10 @@ New Features
#. Add BlockRandomAccessCRSMatrix.
#. Speeded up automatic differentiation by 7\%.
#. Bundle adjustment example from libmv/Blender (Sergey Sharybin)
-#. Add the ability to turn shared library compilation on and off
+#. Shared library building is now controlled by CMake, rather than a custom
+ solution. Previously, Ceres had a custom option, but this is now deprecated
+ in favor of CMake's built in support for switching between static and
+ shared. Turn on BUILD_SHARED_LIBS to get shared Ceres libraries.
#. No more dependence on Protocol Buffers.
#. Incomplete LQ factorization.
#. Ability to write trust region problems to disk.
@@ -96,6 +308,7 @@ Bug Fixes
#. Fix a reallocation bug in
``CreateJacobianBlockSparsityTranspose``. (Yuliy Schwartzburg)
#. Add a define for O_BINARY.
+#. Fix miniglog-based Android NDK build; now works with NDK r9. (Scott Ettinger)
1.6.0