pax_global_header00006660000000000000000000000064150015206330014505gustar00rootroot0000000000000052 comment=814e068d534a0fc4700c08984a7a0f7d67da688f uncertainties-3.2.3/000077500000000000000000000000001500152063300143675ustar00rootroot00000000000000uncertainties-3.2.3/.codecov.yml000066400000000000000000000002021500152063300166040ustar00rootroot00000000000000coverage: status: project: default: informational: true patch: default: informational: true uncertainties-3.2.3/.github/000077500000000000000000000000001500152063300157275ustar00rootroot00000000000000uncertainties-3.2.3/.github/FUNDING.yml000066400000000000000000000000441500152063300175420ustar00rootroot00000000000000custom: "https://paypal.me/lebigot" uncertainties-3.2.3/.github/pull_request_template.md000066400000000000000000000003541500152063300226720ustar00rootroot00000000000000- [ ] Closes # (insert issue number) - [ ] Executed `pre-commit run --all-files` with no errors - [ ] The change is fully covered by automated unit tests - [ ] Documented in docs/ as appropriate - [ ] Added an entry to the CHANGES file uncertainties-3.2.3/.github/workflows/000077500000000000000000000000001500152063300177645ustar00rootroot00000000000000uncertainties-3.2.3/.github/workflows/lint.yml000066400000000000000000000005201500152063300214520ustar00rootroot00000000000000name: Lint on: [push, pull_request] jobs: lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v3 with: python-version: 3.x - name: Lint uses: pre-commit/action@v3.0.1 with: extra_args: --all-files --show-diff-on-failure uncertainties-3.2.3/.github/workflows/python-package.yml000066400000000000000000000057411500152063300234300ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python name: Python package on: push: branches: [ "master" 
] pull_request: branches: [ "master" ] jobs: build: name: Tests strategy: fail-fast: false matrix: python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] os: ["ubuntu-latest", "windows-latest", "macos-latest"] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install .[all] - name: Test source code and docs run: | python -m pytest tests/ doc/ uncertainties/ --cov=uncertainties --cov-report=xml --cov-report=term --doctest-modules --doctest-glob="*.rst" - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4.6.0 with: flags: ${{ matrix.os }}-${{ matrix.python-version }} env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - name: Benchmarking upload to Codspeed if: matrix.python-version == '3.12' && matrix.os == 'ubuntu-latest' uses: CodSpeedHQ/action@v3 with: run: | cd tests python -m pytest --codspeed token: ${{ secrets.CODSPEED_TOKEN }} test_without_numpy: name: Test without numpy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: 3.12 - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install .[test] - name: Test source code and docs run: cd tests; python -m pytest --ignore=test_unumpy.py --ignore=test_ulinalg.py -k "not test_monte_carlo_comparison" --cov=uncertainties --cov=. --cov-report=xml --cov-report=term - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4.6.0 with: flags: no-numpy env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} results: # This step aggregates the results from all the tests and allows us to # require only this single job to pass for a PR to be merged rather than # requiring each job in the matrix separately. 
# See https://github.com/orgs/community/discussions/26822?sort=old#discussioncomment-8285141 if: ${{ always() }} runs-on: ubuntu-latest name: Final Results needs: [build,test_without_numpy] steps: - run: exit 1 if: >- ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} uncertainties-3.2.3/.gitignore000066400000000000000000000004541500152063300163620ustar00rootroot00000000000000uncertainties/version.py *.pyc build doc/_build dist MANIFEST # Created by setuptools: uncertainties.egg-info/ # For PyCharm (contains project files): .idea/ # py.test cache files (normally we are using nose though) .cache # vim temporary files .*.swp # For Visual Studio Code .vscode/ coverage.xml uncertainties-3.2.3/.pre-commit-config.yaml000066400000000000000000000007121500152063300206500ustar00rootroot00000000000000repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. rev: v0.3.4 hooks: # Run the linter. - id: ruff types_or: [ python, pyi, jupyter ] args: [ --fix ] # Run the formatter. 
- id: ruff-format types_or: [ python, pyi, jupyter ] uncertainties-3.2.3/.readthedocs.yml000066400000000000000000000016501500152063300174570ustar00rootroot00000000000000# Read the Docs configuration file for Sphinx projects # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-22.04 tools: python: "3.12" # Build documentation in the "docs/" directory with Sphinx sphinx: configuration: doc/conf.py builder: "html" # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs # builder: "dirhtml" # Fail on all warnings to avoid broken references # fail_on_warning: true # Optionally build your docs in additional formats such as PDF and ePub # formats: # - pdf # - epub # Optional but recommended, declare the Python requirements required # to build your documentation # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - requirements: doc/requirements.txt uncertainties-3.2.3/00_prepare_for_PyPI.sh000077500000000000000000000025531500152063300204370ustar00rootroot00000000000000#!/bin/sh # This script prepares the package for PyPI. It must be run # before uploading it on PyPI. # This script must be run from its directory. # Fail the script at the first failed command (HOWEVER, maybe when there are # no commits to be done during the merges, the commands fail?): #set -e echo "****************************************************************" echo "WARNING: if any commit fails, RESOLVE IT before running this" echo "script again. Otherwise conflict marks will be committed by the" echo "second run!" 
echo "****************************************************************" ## Only committed versions are packaged, to help with debugging published code: git commit -a # We make sure that the release and master branches are merged (changes # may have been made on both sides): git checkout master git merge release git checkout release git merge master # Default branch for working on the code: git checkout release # Packaging. We include wheels because it makes it easier to install, # in some cases (https://github.com/lebigot/uncertainties/pull/108, # https://discourse.slicer.org/t/problems-installing-lmfit-python-package/9210/6): python setup.py sdist bdist_wheel echo "Package created. The package can be uploaded with twine upload dist/...*" echo "where ...* is the new versions." echo "WARNING: current git branch is:" git branch | grep '^\*' uncertainties-3.2.3/CHANGES.rst000066400000000000000000000304411500152063300161730ustar00rootroot00000000000000Change Log =================== Unreleased ---------- 3.2.3 2025-April-18 ----------------------- Changes - Changes how `numpy` is handled as an optional dependency. Previously, importing a `numpy`-dependent function, like `correlated_values`, without `numpy` installed would result in an `ImportError` at import time. Now such a function can be imported but if the user attempts to execute it, a `NotImplementedError` is raised indicating that the function can't be used because `numpy` couldn't be imported. - Refactors the implementation for the calculation of the derivatives of the power function and improves the corresponding testing. Adds: - Adds a small benchmarking suite to CI to guard against absolute performance regressions and accidental breakage of the lazy expansion algorithm whichs ensures O(N), rather than O(N^2), scaling complexity for operations involving many numbers with uncertainty. Established connectivity with `codspeed.io`_ to track benchmarking results. 
(#274) Fixes: - Adds documentation for `ufloat_fromstr` behavior for strings which do not contain uncertainty. (#287) - Fixes typos in `ufloat_fromstr` docstring examples (#285) - Fixes `readthedocs` configuration so that the build passes (#254) - Fixes all doctests (#281) - Adjusts `codecov.io` configuration so that minor code coverage changes will not result in indications that tests are failing. Rather code coverage reports will be purely informational for code reviewers. Also fix other minor configuration issues. (#270) - Fixes a bug that resulted in a `ZeroDivisionError` while formatting very small numbers with uncertainty (#135) Deprecates: - Certain `umath` functions and `AffineScalarFunc`/`UFloat` methods will be removed in a future release. A deprecation warning has been added to these functions and methods. The following `umath` functions are marked as deprecated: `ceil`, `copysign`, `fabs`, `factorial`, `floor`, `fmod`, `frexp`, `ldexp`, `modf`, `trunc`. The following `AffineScalarFunc`/`UFloat` methods are marked as deprecated: `__floordiv__`, `__mod__`, `__abs__`, `__trunc__`, `__lt__`, `__le__`, `__gt__`, `__ge__`. 3.2.2 2024-July-08 ----------------------- Fixes: - fix support for Numpy 2.0 (#245). Note: `uncertainties.unumpy` still provides `umatrix` based on `numpy.matrix`. With `numpy.matrix` discouraged, `umatrix` is too, and will be dropped in a future release. - fix automated running and reporting of code coverage with tests (#246) - use `setuptools-scm` for setting version number from git tag (#247) 3.2.1 2024-June-08 ----------------------- Fixes for build, deployment, and docs - Use explicit package list to make sure unumpy is included (#232) - Use setuptools-scm to make sure all files are in the source distribution (#235) - updates to configuration for and links to readthedocs documentation. (#239) - use double backticks more uniformly in docs. 
(#240) - fixes to README.rst to allow it to render (needed for PyPI upload) (#243) 3.2.0 2024-June-02 ----------------------- Version 3.2.0 is the first release of Uncertainties in nearly two years and the first minor release in over five years. It marks the beginning of an effort to refresh and update the project with a new and expanded team of maintainers. * Main Changes - Moved code development to lmfit organization, with 4 maintainers. - Update documentation. - Drop future dependency. Uncertainties now has no external dependencies when not using Numpy integration (Drop official support for Python versions before 3.8 #200). - Drop support for Python versions before 3.8, including Python 2 (Drop official support for Python versions before 3.8 #200) - remove 1to2 and deprecations (remove 1to2 and depreciations #214) * Developer related changes - Moved from setup.py to pyproject.toml (Transition from setup.py to pyproject.toml #199) - Move tests to tests folder (Move tests to tests folder #216) - Update unumpy test to be compatible with numpy 2 - Mark docstrings with backslashes as raw strings in tests (Mark docstrings with backslashes as raw strings #226) Older Version history ------------------------ Main changes: - 3.1.6: The pretty-print and LaTeX format can now be customized. - 3.1.5: Added a "p" formatting option, that makes sure that there are always parentheses around the … ± … part of printed numbers. - 3.1.4: Python 2.7+ is now required. - 3.1.2: Fix for NumPy 1.17 and ``unumpy.ulinalg.pinv()``. - 3.1: Variables built through a correlation or covariance matrix, and that have uncertainties that span many orders of magnitude are now calculated more accurately (improved ``correlated_values()`` and ``correlated_values_norm()`` functions). - 3.0: Massive speedup for some operations involving large numbers of numbers with uncertainty, like ``sum(ufloat(1, 1) for _ in xrange(100000))`` (this is about 5,000 times faster than before). 
- 2.4.8: Friendlier completions in Python shells, etc.: internal functions should not appear anymore (for the user modules: ``uncertainties``, ``uncertainties.umath`` and ``uncertainties.unumpy``). Parsing the shorthand notation (e.g. ``3.1(2)``) now works with infinite values (e.g. ``-inf(inf)``); this mirrors the ability to print such numbers with uncertainty. The Particle Data Group rounding rule is applied in more cases (e.g. printing 724.2±26.2 now gives ``724±26``). The shorthand+LaTeX formatting of numbers with an infinite nominal value is fixed. ``uncertainties.unumpy.matrix`` now uses ``.std_devs`` instead of ``.std_devs()``, for consistency with floats with uncertainty (automatic conversion of code added to ``uncertainties.1to2``). - 2.4.7: String formatting now works for ``(-)inf+/-...`` numbers. - 2.4.5: String formatting now works for ``NaN+/-...`` numbers. - 2.4.4: The documentation license now allows its commercial use. - 2.4.2: `NumPy 1.8 compatibility `_. - 2.4.1: In ``uncertainties.umath``, functions ``ceil()``, ``floor()``, ``isinf()``, ``isnan()`` and ``trunc()`` now return values of the same type as the corresponding ``math`` module function (instead of generally returning a value with a zero uncertainty ``...+/-0``). - 2.4: Extensive support for the formatting_ of numbers with uncertainties. A zero uncertainty is now explicitly displayed as the integer 0. The new formats are generally understood by ``ufloat_fromstr()``. Abbreviations for the nominal value (``n``) and the standard deviation (``s``) are now available. - 2.3.6: Full support for limit cases of the power operator ``umath.pow()``. - 2.3.5: Uncertainties and derivatives can now be NaN (not-a-number). Full support for numbers with a zero uncertainty (``sqrt(ufloat(0, 0))`` now works). Full support for limit cases of the power operator (``x**y``). 
- 2.3: Functions wrapped so that they accept numbers with uncertainties instead of floats now have full keyword arguments support (improved ``wrap()`` function). Incompatible change: ``wrap(..., None)`` should be replaced by ``wrap(...)`` or ``wrap(..., [])``. - 2.2: Creating arrays and matrices of numbers with uncertainties with ``uarray()`` and ``umatrix()`` now requires two simple arguments (nominal values and standard deviations) instead of a tuple argument. This is consistent with the new, simpler ``ufloat()`` interface. The previous usage will be supported for some time. Users are encouraged to update their code, for instance through the newly provided `code updater`_, which in addition now automatically converts ``.set_std_dev(v)`` to ``.std_dev = v``. - 2.1: Numbers with uncertainties are now created more directly like ``ufloat(3, 0.1)``, ``ufloat(3, 0.1, "pi")``, ``ufloat_fromstr("3.0(1)")``, or ``ufloat_fromstr("3.0(1)", "pi")``. The previous ``ufloat((3, 0.1))`` and ``ufloat("3.0(1)")`` forms will be supported for some time. Users are encouraged to update their code, for instance through the newly provided `code updater`_. - 2.0: The standard deviation is now obtained more directly without an explicit call (``x.std_dev`` instead of ``x.std_dev()``). ``x.std_dev()`` will be supported for some time. Users are encouraged to update their code. The standard deviation of a variable can now be directly updated with ``x.std_dev = 0.1``. As a consequence, ``x.set_std_dev()`` is deprecated. - 1.9.1: Support added for pickling subclasses of ``UFloat`` (= ``Variable``). - 1.9: Added functions for handling correlation matrices: ``correlation_matrix()`` and ``correlated_values_norm()``. (These new functions mirror the covariance-matrix based ``covariance_matrix()`` and ``correlated_values()``.) ``UFloat.position_in_sigmas()`` is now named ``UFloat.std_score()``, so as to follow the common naming convention (`standard score `_). 
Obsolete functions were removed (from the main module: ``NumberWithUncert``, ``num_with_uncert``, ``array_u``, ``nominal_values``, ``std_devs``). - 1.8: Compatibility with Python 3.2 added. - 1.7.2: Compatibility with Python 2.3, Python 2.4, Jython 2.5.1 and Jython 2.5.2 added. - 1.7.1: New semantics: ``ufloat("12.3(78)")`` now represents 12.3+/-7.8 instead of 12.3+/-78. - 1.7: ``ufloat()`` now raises ValueError instead of a generic Exception, when given an incorrect string representation, like ``float()`` does. - 1.6: Testing whether an object is a number with uncertainty should now be done with ``isinstance(..., UFloat)``. ``AffineScalarFunc`` is not imported by ``from uncertainties import *`` anymore, but its new alias ``UFloat`` is. - 1.5.5: The first possible license is now the Revised BSD License instead of GPLv2, which makes it easier to include this package in other projects. - 1.5.4.2: Added ``umath.modf()`` and ``umath.frexp()``. - 1.5.4: ``ufloat`` does not accept a single number (nominal value) anymore. This removes some potential confusion about ``ufloat(1.1)`` (zero uncertainty) being different from ``ufloat("1.1")`` (uncertainty of 1 on the last digit). - 1.5.2: ``float_u``, ``array_u`` and ``matrix_u`` renamed ``ufloat``, ``uarray`` and ``umatrix``, for ease of typing. - 1.5: Added functions ``nominal_value`` and ``std_dev``, and modules ``unumpy`` (additional support for NumPy arrays and matrices) and ``unumpy.ulinalg`` (generalization of some functions from ``numpy.linalg``). Memory footprint of arrays of numbers with uncertainties divided by 3. Function ``array_u`` is 5 times faster. Main function ``num_with_uncert`` renamed ``float_u``, for consistency with ``unumpy.array_u`` and ``unumpy.matrix_u``, with the added benefit of a shorter name. - 1.4.5: Added support for the standard ``pickle`` module. - 1.4.2: Added support for the standard ``copy`` module. 
- 1.4: Added utilities for manipulating NumPy arrays of numbers with uncertainties (``array_u``, ``nominal_values`` and ``std_devs``). - 1.3: Numbers with uncertainties are now constructed with ``num_with_uncert()``, which replaces ``NumberWithUncert()``. This simplifies the class hierarchy by removing the ``NumberWithUncert`` class. - 1.2.5: Numbers with uncertainties can now be entered as ``NumberWithUncert("1.23+/-0.45")`` too. - 1.2.3: ``log(x, base)`` is now supported by ``umath.log()``, in addition to ``log(x)``. - 1.2.2: Values with uncertainties are now output like 3+/-1, in order to avoid confusing 3+-1 with 3+(-1). - 1.2: A new function, ``wrap()``, is exposed, which allows non-Python functions (e.g. Fortran or C used through a module such as SciPy) to handle numbers with uncertainties. - 1.1: Mathematical functions (such as cosine, etc.) are in a new uncertainties.umath module; they do not override functions from the ``math`` module anymore. - 1.0.12: Main class (``Number_with_uncert``) renamed ``NumberWithUncert`` so as to follow `PEP 8`_. - 1.0.11: ``origin_value`` renamed more appropriately as ``nominal_value``. - 1.0.9: ``correlations()`` renamed more appropriately as ``covariance_matrix()``. .. _math: http://docs.python.org/library/math.html .. _PEP 8: http://www.python.org/dev/peps/pep-0008/ .. _code updater: http://uncertainties-python-package.readthedocs.io/en/latest/index.html#migration-from-version-1-to-version-2 .. _formatting: http://uncertainties-python-package.readthedocs.io/en/latest/user_guide.html#printing uncertainties-3.2.3/LICENSE.txt000066400000000000000000000026701500152063300162170ustar00rootroot00000000000000Copyright (c) 2010-2020, Eric O. LEBIGOT (EOL). All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. uncertainties-3.2.3/README.rst000066400000000000000000000106241500152063300160610ustar00rootroot00000000000000uncertainties ============= .. image:: https://readthedocs.org/projects/uncertainties/badge/?version=latest :target: https://uncertainties.readthedocs.io/en/latest/?badge=latest .. image:: https://img.shields.io/pypi/v/uncertainties.svg :target: https://pypi.org/project/uncertainties/ .. image:: https://pepy.tech/badge/uncertainties/week :target: https://pepy.tech/project/uncertainties .. 
image:: https://codecov.io/gh/lmfit/uncertainties/branch/master/graph/badge.svg :target: https://codecov.io/gh/lmfit/uncertainties/ .. image:: https://img.shields.io/github/actions/workflow/status/lmfit/uncertainties/python-package.yml?logo=github%20actions :target: https://github.com/lmfit/uncertainties/actions/workflows/python-package.yml The ``uncertainties`` package allows calculations with values that have uncertaintes, such as (2 +/- 0.1)*2 = 4 +/- 0.2. ``uncertainties`` takes the pain and complexity out of error propagation and calculations of values with uncertainties. For more information, see https://uncertainties.readthedocs.io/ Basic examples -------------- .. code-block:: python >>> from uncertainties import ufloat >>> x = ufloat(2, 0.25) >>> x 2.0+/-0.25 >>> square = x**2 >>> square 4.0+/-1.0 >>> square.nominal_value 4.0 >>> square.std_dev # Standard deviation 1.0 >>> square - x*x 0.0 # Exactly 0: correlations taken into account >>> from uncertainties.umath import sin, cos # and many more. >>> sin(1+x**2) -0.95892427466313845+/-0.2836621854632263 >>> print (2*x+1000).derivatives[x] # Automatic calculation of derivatives 2.0 >>> from uncertainties import unumpy # Array manipulation >>> varr = unumpy.uarray([1, 2], [0.1, 0.2]) >>> print(varr) [1.0+/-0.1 2.0+/-0.2] >>> print(varr.mean()) 1.50+/-0.11 >>> print(unumpy.cos(varr)) [0.540302305868+/-0.0841470984808 -0.416146836547+/-0.181859485365] Main features ------------- - **Transparent calculations with uncertainties**: Little or no modification of existing code is needed to convert calculations of floats to calculations of values with uncertainties. - **Correlations** between expressions are correctly taken into account. Thus, ``x-x`` is exactly zero. - **Most mathematical operations** are supported, including most functions from the standard math_ module (sin,...). Comparison operators (``>``, ``==``, etc.) are supported too. 
- Many **fast operations on arrays and matrices** of numbers with uncertainties are supported. - **Extensive support for printing** numbers with uncertainties (including LaTeX support and pretty-printing). - Most uncertainty calculations are performed **analytically**. - This module also gives access to the **derivatives** of any mathematical expression (they are used by `error propagation theory`_, and are thus automatically calculated by this module). Installation or upgrade ----------------------- To install `uncertainties`, use:: pip install uncertainties To upgrade from an older version, use:: pip install --upgrade uncertainties Further details are in the `on-line documentation `_. Git branches ------------ The GitHub ``master`` branch is the latest development version, and is intended to be a stable pre-release version. It will be experimental, but should pass all tests.. Tagged releases will be available on GitHub, and correspond to the releases to PyPI. The GitHub ``gh-pages`` branch will contain a stable test version of the documentation that can be viewed at ``_. Other Github branches should be treated as unstable and in-progress development branches. License ------- This package and its documentation are released under the `Revised BSD License `_. History ------- .. Note from Eric Lebigot: I would like the origin of the package to remain documented for its whole life. Thanks! This package was created back around 2009 by `Eric O. LEBIGOT `_. Ownership of the package was taken over by the `lmfit GitHub organization `_ in 2024. .. _IPython: https://ipython.readthedocs.io/en/stable/ .. _math: https://docs.python.org/library/math.html .. _error propagation theory: https://en.wikipedia.org/wiki/Propagation_of_uncertainty .. 
_main website: https://uncertainties.readthedocs.io/ uncertainties-3.2.3/doc/000077500000000000000000000000001500152063300151345ustar00rootroot00000000000000uncertainties-3.2.3/doc/Makefile000066400000000000000000000027701500152063300166020ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . PDFFILE = uncertainties.pdf .PHONY: all html help clean latex pdf html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. The HTML pages are in _build/html." all: html pdf $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html -cp _build/latex/$(PDFFILE) _build/html/. @echo -cd _build && ln -s html uncertainties_doc && zip -pur uncertainties_doc.zip uncertainties_doc/* && mv uncertainties_doc.zip html && rm -f uncertainties_doc @echo "Build finished. The HTML pages are in _build/html." help: @echo "Please use \`make ' where is one of" @echo " all to make standalone HTML files with PDF and zipped HTML" @echo " html to make standalone HTML files" @echo " pdf to make PDF" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " clean to clean folders" clean: -rm -rf _build/* latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex @echo @echo "Build finished; the LaTeX files are in _build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." 
pdf: latex (cd _build/latex; $(MAKE) all-pdf) uncertainties-3.2.3/doc/_static/000077500000000000000000000000001500152063300165625ustar00rootroot00000000000000uncertainties-3.2.3/doc/_static/default.css000066400000000000000000000200021500152063300207120ustar00rootroot00000000000000/** * Alternate Sphinx design * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; /*background-color: #AFC1C4; */ background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } td.linenos pre { padding: 0.5em 0; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } hr { border: 1px solid #abc; margin: 2em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; border: 0; } tt.descclassname { background-color: transparent; border: 0; } tt.xref { background-color: transparent; font-weight: bold; border: 0; } a tt { background-color: transparent; font-weight: bold; border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } 
dt:target, .highlight { background-color: #fbe54e; } dl.class, dl.function { border-top: 2px solid #888; } dl.method, dl.attribute { border-top: 1px solid #aaa; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } pre { line-height: 120%; } pre a { color: inherit; text-decoration: underline; } .first { margin-top: 0 !important; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } /* div.documentwrapper { width: 100%; } */ div.clearer { clear: both; } div.related h3 { display: none; } div.related ul { background-image: url(navigation.png); height: 2em; list-style: none; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 0; padding-left: 10px; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body a { text-decoration: underline; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; text-align: left; /* margin-left: -100%; */ } div.sphinxsidebar h4, div.sphinxsidebar h3 { margin: 1em 0 0.5em 0; font-size: 0.9em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; margin-bottom: 7px; list-style: none; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { list-style: square; margin-left: 20px; } p { margin: 0.8em 0 0.5em 0; } p.rubric { font-weight: bold; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { 
color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } div.pagination { margin-top: 2em; padding-top: 0.5em; border-top: 1px solid black; text-align: center; } div.sphinxsidebar ul.toc { margin: 1em 0 1em 0; padding: 0 0 0 0.5em; list-style: none; } div.sphinxsidebar ul.toc li { margin: 0.5em 0 0.5em 0; font-size: 0.9em; line-height: 130%; } div.sphinxsidebar ul.toc li p { margin: 0; padding: 0; } div.sphinxsidebar ul.toc ul { margin: 0.2em 0 0.2em 0; padding: 0 0 0 1.8em; } div.sphinxsidebar ul.toc ul li { padding: 0; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 0 0; border: 1px solid #86989B; background-color: #f7f7f7; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } 
div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #ccc; color: white!important; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } img.inheritance { border: 0px } form.pfform { margin: 10px 0 20px 0; } table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } uncertainties-3.2.3/doc/_static/eol.jpg000066400000000000000000000104571500152063300200520ustar00rootroot00000000000000JFIFHHExifMM*Created with GIMP XICC_PROFILE HLinomntrRGB XYZ  1acspMSFTIEC sRGB-HP cprtP3desclwtptbkptrXYZgXYZ,bXYZ@dmndTpdmddvuedLview$lumimeas $tech0 rTRC< gTRC< bTRC< textCopyright (c) 1998 Hewlett-Packard CompanydescsRGB IEC61966-2.1sRGB IEC61966-2.1XYZ QXYZ XYZ o8XYZ bXYZ $descIEC http://www.iec.chIEC http://www.iec.chdesc.IEC 61966-2.1 Default RGB colour 
space - sRGB.IEC 61966-2.1 Default RGB colour space - sRGBdesc,Reference Viewing Condition in IEC61966-2.1,Reference Viewing Condition in IEC61966-2.1view_. \XYZ L VPWmeassig CRT curv #(-27;@EJOTY^chmrw| %+28>ELRY`gnu| &/8AKT]gqz !-8COZfr~ -;HUcq~ +:IXgw'7HYj{+=Oat 2FZn  % : O d y  ' = T j " 9 Q i  * C \ u & @ Z t .Id %A^z &Ca~1Om&Ed#Cc'Ij4Vx&IlAe@e Ek*Qw;c*R{Gp@j>i  A l !!H!u!!!"'"U"""# #8#f###$$M$|$$% %8%h%%%&'&W&&&''I'z''( (?(q(())8)k))**5*h**++6+i++,,9,n,,- -A-v--..L.../$/Z///050l0011J1112*2c223 3F3334+4e4455M555676r667$7`7788P8899B999:6:t::;-;k;;<' >`>>?!?a??@#@d@@A)AjAAB0BrBBC:C}CDDGDDEEUEEF"FgFFG5G{GHHKHHIIcIIJ7J}JK KSKKL*LrLMMJMMN%NnNOOIOOP'PqPQQPQQR1R|RSS_SSTBTTU(UuUVV\VVWDWWX/X}XYYiYZZVZZ[E[[\5\\]']x]^^l^__a_``W``aOaabIbbcCccd@dde=eef=ffg=ggh?hhiCiijHjjkOkklWlmm`mnnknooxop+ppq:qqrKrss]sttptu(uuv>vvwVwxxnxy*yyzFz{{c{|!||}A}~~b~#G k͂0WGrׇ;iΉ3dʋ0cʍ1fΏ6n֑?zM _ɖ4 uL$h՛BdҞ@iءG&vVǥ8nRĩ7u\ЭD-u`ֲK³8%yhYѹJº;.! zpg_XQKFAǿ=ȼ:ɹ8ʷ6˶5̵5͵6ζ7ϸ9к<Ѿ?DINU\dlvۀ܊ݖޢ)߯6DScs 2F[p(@Xr4Pm8Ww)KmC  !"$"$C@@"/!1QaAq"2BRS !1"2q ?•4D"mˎ,! 
ܨG_W N֡Aj+etV0{v6;raÖ2e-NTsa\?Ut"MP;#YGr)(ǥ v8>qY, Ms:p_`vvtC$+ڸ$}Y(+uE&lO)Xϥg֝Y|#|̸Aԫڊ]ڂx$Y=}3]E#=*eN}k$\*$$)Iw;t*֜š<@ eR(G1W {8u+?,w'-“T2]Im3^!CfkeN8qۧz|] OB1VLj0BbXJVY IRs#izLCe[He q֨ˢ,7.JV( /7QÈ -.$GljZPckCktu$t[DvFBq[$%/p)ZPvgr+b9qJ qԎ%#Niy g"R~qzpp=)G尗#%JyRMf5s;C5{6-s*$Z~j7C 5|Aqvd}Jqө*_Z[_q6ܶW&;R9 *yskW/Vn/U5wyodN},w>y b )iUqC?ث2e͗νui>.=(ZjY FzۗM8jO.3~Nv8jيe(葟8IZ +ò5w>jjsiv"HdޞX&uncertainties-3.2.3/doc/_static/favicon.ico000066400000000000000000000260361500152063300207120ustar00rootroot0000000000000044 ,(4h %;^uY@* 'B_~jM5 *DeyT; *FfX;.Gf~S3HfrIit`YY_kfulfbdhlqwMwymlntGdkodfs^ZikY_ysuxWicSVmn^VSOMHN]k}fXn]N\bRJB:4201;HT[_efMPsdYYc_|f#.=XuZKWmdlz*?nYMeko**LXSy}r#5h}VZ|})NiW`(@UWex$9uCWg3e>Vi0[>Vj$/V=Vj+0T=Vi01R=Ui42S=Ui63S=Ui63S=Ui63S=Ui63T=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui63S=Ui52S;Qf40Q6I[~2*G~-2wzLz% B 5HSR~ *} 4EEы%RB%Lܹ圳qrdRy>CBfs^eB"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"D!B"$" ;'y؇GׄF(y؇b}vg!T P%1@#U6E~iHc# ib I"Dn OHb0bCfJrHY ڊ{uCݍ}f|8_4!) "Q.b(T.7^DzW҆ xd'Of}[ y(H@?ۀUX dKoR:OPkCPڅra'E(tBãst3>ۜ"y؇!F@0 CkK Aʼ{?9&-69b-DR131 CL^v7AP6dmV9cpr?=skEHow:{Cg#:Pڀ#d`hMq7[Q[z/d D9>w,4yRĺ²5h2i#1Df/ЙrRA*tL4H;J)i00u9[/ͷeXyd}N $ܖXwtzX9%{ow_\" Q=woxw}w;\=>4YDi7\QbI{7Vn vל;z )[æ;cݢJ /xed֦q?-ٵ@I)!ZHf|Nk@g+rQ Z4Ih+tu Phʈ#CM#U1ˍ9sH;(LKqml͢u?q/.<9_/MBO``bVA c EK|[}tg逩uK'mWlҰҦ%N&2=a/F;:HrurduK>nѻG|~ۜߤD 6 vٱbMiL@P͚_s]Nֿ0E+E ^tDwyyԷ:ʝJM93p.0ܻx{OotSÃg<#cVG 8u# rt]tbfK3/F qzʰ;{tB:#XJG=_=4/(pƥwr>?*Q"/KD;2mLj eD(MDي+B-dR4FA~R ;X}. d[0yC|J" XNymm=+6dBpf mg}ZaФ!rj[Ϯpq=QI(#!Vg>}Ga_ؕ0d8f\/+ 6@d 9󿦐pau_Xz^O\3nٹ;z1(H#Uߨ6\ D(wBO锬}։H,["VyNZxzUS Z.pYXm]Urt,%S,Bue  }PeT4߷:C) W*X ;9o85F="jt!s13qKXp}Ş{gI!!oZN}h\lS9*ҶDȌ]xt, Jc8{ƞZ3>(h}%`5  ͥ<#JCa[g~?ѯN[:]R*iXNP[v*~0pk M,rŮf|oq<[RO+v}My}qa*m`?y6㪕 @. ZK !d4Ŭ)6[SmێV^fDkM4JLXƐBkɡ*ؔr :5Hu/*R%8q;p=5>X"a~gaG'5NPjv;q$RϿ8ƈ$3ʒC*{W{g}r޹O]4]v;}F;v%qt3O W3kX"-LOl;+1{OeץT/& ݌}.-ܰvaU 2b+ְH&- Zk Ș}%1z$&-↯nhzҫ&f1Lz-ط~7"uَ Ԗ_Gd1؂6Ȉ( C{6k3;&Yzl HfE1R$" l$QF3k1. 
"}ɮ{"E)-v`\|0py !fuPT#+TD4{X]x)X<]9*(`M5){PV,q,ऊu%:6Ԏ"mV4 t,\҄w~+.;}3u{o9`ʣ'>ԧvɞ|CΝ/F$P>}f 'RVULsݤ--ŞQ ٗϼX倊/=PUJ&DѺxd,B D4it$;6Xm<%OSQ>w^c?|"ݍO30()N>'ώKUMeTW1c 469Rd5)1e3$ײ(Ţ,K0$-h|CF@ Rr*@*c)8쑥`YV} qc+41&}#^ڱ_ s\"R3oxCGuȇԷy(GϮnQr #Wi @ u \}dpV g3VWo!u-hc=Hc~?yJ<盒&BZrͮӊhi">ZYv]C#ZL&Ҟ^#7l5ACE}6?γ)K< 6b=Q'gBP,(Q0YSTv* d=|xM_N_?N7in0uyaHm!UCK/$/4S>[6{G ,khʄ^~é= SK!0iMe{k(+e(µ{Waθ܋VԀС㶔<3(*4N|/?װ`8oΚh5f\YY[Cy e,~Cz*Ű%ֶ޻rMAXs+5~غفw)W1WX2YyVvӬ{8R:Fwp̲tD=W9M Q[Z3Er?07-lW~ m4b%+K7" FD1 gqdl W@I1~:v E~ciRej]+taĎALoYV%ՙRUUSn YamZm @dS{w<{QP}MDE!8-(:䄒Ҋ(>(> m,my]sZL`a٨C{[.[7"0bY6Q4cuUYƬxߞlRSt˓(M ں*YYkT6H%K R ڧW]_V30Rض]Q^&/3.w?V) />\5+{NV yAS$g胔o,VNt?KǎqՏ]zW*_R#~ᑉ.{׆N{=_rȢKF5UL䦬-6}gy!'$*G;Gy_^!r/&q~O5޵saJxe.,o}47zw|n# VdrX ecvΊv%r*vA&Xٕoaaǟ?8Oe&bbJ|S>@)W춥crq'8O*ɾQꏾ5وyp$w-G /eJLpP2_̼뾅^\u[Ŕs*/:i#uQ~TC*uKYi^z!V:m([?{CWv"~g1^n+J`ҲJRC f>jꇧ֔sh:l1 Ey4|KO]6Vb열p}byʋqf#%?_j7g_]kې;NSf0>:[ o)x:`pAPb=]>sgU7jߒ|Gӫn<o_|t [vtu9s]5WC X%׍LN9|/dz 8_BvUԥ."3̙;>tv `֓8Ak01+ҰiȆXXt_3e߬=K.GS ?hDt^ d׀Yz@SQ}i} 嘶ޝ|!~ >N"4?v9f>Gm70T>U8z 簑$?xO?o}xS@:=-fCxae^NIl{b)%CTNUVt$FodVd !pͭ a 0Z(oS a<@J Y[Qzҳ;.[2@yB vsFbxM$e*mɏy_~ҷO7c9ȉz3V28=g\2YjJvR̕pG]Ə W}E_|OOiF5 _=3"5#>"? ߜ!0# 9Վ##Ӈ_W~7عn'w+LsSg> suA%2~6qݛF;_3Ya(60`\8*6H:Nmw%t, ܢ_ໍ拢Nwhch~̔g vo&A &zӬO0YHۇu׶+"}>,{blجƾ7y^=v/IAV-\8?>߫|Y`x;ژn FB,+-"< wkAUjQ@ JT2K+s/,Ȋ1]z)p1bbu}P`2(94мV=V!ð^SOa.,9f>n֗)r@w"XU,Lii,/9J'\=gQ~u4)_YJzJxY SQXRlz"~KSc[ν:@WWyz L4-Za0} Hn`$^tSb"^kc#߳b0`i]"M-B%;O@яRDvL?6F6Mu1ZWEo?9{1@?ջo: ʶ#RG_F:+Z8Dz\XY\se1}'a@DEQ80&B$ xq}Nf='[.jf[\] K纍P A0./v.hsJj!ڗ!\.(F=OVr/Nn>§gcmTT}T\}KV#Zf! 31x䋣tP/B~>u Mg^wicF+k*NQb1宀嫽n#@InPpذ͕LS`Jѳ긘'ڿ̺a0z~g@cf|}f H foch׌Oؗ齠-,"ٗK>?m[XGŚ-;>htMOWK>[ܲpۿ^Y/8eEj),U~-&I`V="C;78ƹYmv׊E~ʎa@)`eG ewOc*VG`Tq >d1mP9=${Jq7DY{BWoر/S? 
SD^u7.'<{<}Q۽ptsS8g'D -.2"]Lf8 "2}Jr= D-'>S4"C`R h]CN7 Jٗ3sf&Z;wdwjmϬ^ 3m;$vXza/:8?Mv6U\1?}էn~۴Ic?6g-Pqyṏzβ>q%1Azr2ΜpV4HdU0.5_p[K)b8R<9g(/PXu`FU7#AG.P>x)E[8n61'y`@vEޮ!}o{ (q=i "fybJX o֓ - ^,ʤࠡIޓ3= MNf;5~ΚpqP``jmxO=K\d幇3{kRs)umˮBв_"XDKx;GEζP(FuĶI ' x;v <{X3xWwˣw}ۊsaeñ/Tpr'J"_k*{|ǩ"`MYtN|"J&s.\on:bp`tCuFU"!oZ j/zE*bDn\gcy*w] DWhhS6+7nK-2B*fc4AJ`$' Xf&fUO>t_оN=ζh0<jkP}:fֿG=y%O\3v5 vX;bܤ3 np 3<-|>44|cHyk~>][ǫeEq96Tf^ϤTWch^͖e?24(e7~E/gCG ?_Tk \._< 1 `Ք[uqqX[DKѸDJw^yc;o+7M4[Ɓluf;\1#l`{d`݇^E={G*8tB2(C^.Ϛ3m8V_ےi8l[VW~9pf96&DgkkM`peg/[M|2\xhDOc>uX6Q^Ȃ1оx#xk۱9)K̐Ѫ2kxLϴiL O|~~pw/o7dޠ) f=|/BjF5hFAQfh_[IXU Qz47 0ʲIJlQ$hҝ 0)6>9K4 ۋ)&Mh8&@ >S߅6 pl}PzaDdq#t N9w`:m8스gK2~tT6GuÆ rʆP(Z2n3$--cڄoޜ|A{Z3t)Xm=#?d*Eս`n3E~S7 ;e?){|ٌ_ZUѸV]h|_ Vw2kړ5Ɏt/ hcZK6*"CjSl|!d;Wz7~{2pqLrM=U]oEpcR1Z{njߜ|YCvط,[nsk"Q-UՄ`nVk/v|Ҽ.9w궏 Ip oec[ F.rj'wu'R^{|TlvO8d.rGwbB@E]+5kʛk-E Ba|c)^VrI;>&^'(֧(dyfH@ {sXh.U&uVHLЁ,QLy+ٗW Xe={6kœYDL :GMATHF!5gX%eOsܧsZtRX  B}mܡ m*͗+]]g K@]7]7I4|+ǨOHL8njR[:,*̲FDD–G#"KW%;BM6crӏ z9LM2Zo,fYu m<$k :'9m+0BP7#{EoQӆd2 /t]Q+g<h W.ƺA #=k7\#neg Uƅ^u uߕυc>7䠴^\c.%8h*"W3>^ dE"ѝ(1|ιj1[UsD5ơ-h]71yy 49bs>Y}B/xye/_ޗ^`v)δ?|aGQ-ߣ4}Q%GFIOskeh]8S [?iA;vkK!a<_f rD@Ac_:$ gwq xʏ;e?n f~+Voi{]s9ˎ_ rPSAG} 5;kL(yOTl2+ۭ/W:oyhp7bt^@BkR?Fј990.QF4Z: j["?|  /jV~]fl|blE>Nߴ>Gt^=aƋtMg(^<$ ăXx"-F N|yfR )lK9ׂp=̲]1 s۸j29'Vg楥D k_o 2`NGE=%eUU}<6liLgoOfe]CwՇ$IKV:KZ8h*˧m2^w,%[y^RlM5EW~;r.x}+RDlC$P4kvfiս?w/{3oϾr$.\m*7ۼVL튆4tF(7LLݦ|e]=p `җ]\:d+Qn?K7N:ugA ,ei\X}!!moQފ|Eȼcݏw1]xQa ~ѾKgn7'Q NۈKsa_3 ]xloagMσ&G~R՗B©==U~3V7Y@+U[X`?c[F"\Sڧ5M^z寎>}xaIj|׿G|w)y#@*VS9Tu}7dT:L:Fv=mYwvOtn3nܷ/<K! 
['lg:m:FöyLoM^I/M_i߯,rQ'Z2e s0+盾2~U:gԜau{\Sm۶gfoDZe)ݣ [/hջXdZ4o>?*8 Tu}O>|`<✧2PcP%H: dE ToPJv̿?IЅ+4ZZbˡe!]_?^lE 3O:}kl'f MfX _?im7?F.>1$u{^v kֶ]UWʌ}[s|5ISgI_?%^RVe'?l}]{{XDnlZGɧzf_}uvz_Wc?x{}.~W~((2L13}؄΍橏ƽwwL`|';.-q/ۡ.S^줷5/M{vF@%,R&/3~˧?תC*ZMF%d%x3Nguji}Nvyri(O {Vщ49=b}w6 ^n`滗cK347g[vogڒoiϼ`koQKCW\ _Rs|[sA%DD8H&WuӫA-4&(Fz @CYšzJNچJWM¼S_ZҜXQoأU_UXgk EߐxO n~w ^>~w/yH$}:heռe!m\6j͎m:Ⱙ h>w׼۔*mtUԖRuŴ(%c@z 2+7:?8ÆYKb!'1`]M}mtLo;]/XuKG\_eˣX"Q1j鸄u["WTD а5euH>e EVÛ{؝>=UD]A91y7DRߵ5s+c/]  L6wٴ9;|e`ZrWYAn}`D:飾w`O_tkkud2y6[Gwv$ݓZ>J|Qǿ'ז`XCud۵K7b^T:vtKU_43hN~q~K͂L|]Mg"IXG&Um0*dt WhFXJIY^T2hlށPϳ/3?{سud㾥19h 6ړ`J2)fNh7TPjieeg=_Lwfmu6(uygu|GUsHߚ>qr;/Fܯkuoc9#vSO14!ئ$? *Q"n[|oKI ֑>hk#$JG5@4@ +KNjp=Ǯ~1ϬT>6mPpl%՞w)O\)66gfYg疭O"L\Z5U5]h,IϘIDATz-[O.mƒ`a,YGP__7r)> zd$Aoj=㵙:}-fVќk3 9?,16"E%3GU]`msCV~f#-Bں2C5Osrz`|Z+74y:X,E+ڟ@WX֬^\ٱ4Fc#60P6jxjKvLT{}xw=wՈ^rQ4jX"yk&6yl ڡ5so]i ZJb`)VD"HO-w}d@ K ,Kl`Oi}2e%%)$ +f19y(B}"Q6djuIdOjS<{瑯z}t̒TN~O:GfusnE摘*˛!͕h!?"}fMD|I~WݑR,.޶)Z|m,s@`x&g0> ;Y]OԻ<_BBy<"HX P2bUkJic!EP DYey}}T2/o}y&r^.~7{^i5f8ya!bCqD*S{1 ӽSnO}Nf'g$",刲 y Mk t׼vg)iJ%2& lBT $2!6"1)c*ƯA")e[Hx!X!!+={ow̬6޹;WULo9}z)?8&!`D/^Uw; 4бRyLD+w{wm<|Ztkf _u Bx3UawJn~¿]c{;࢈R]wL0,@ !h bG4Ϣ>qygخܒMoߔ/}Iw3Eށ<q8L'BYl9ȷ:+5K;$?5mpx3bMc@pm\N 6R8qΉF8Ͻ8wA$ P`캆&u:zGn{Lx5\/_x-6>wyOǙ`N%2D.4bDj-M_7]'VSWhh-d`!3,hIRP{HT|!HJcwH4( A0DJOԮO7Οsٿ~w}|_dl/vtػܳ؀GpG )ЀjuttDl-)__ nm-^L&/ۇFstp)cHa@5ҭ%TTJH- F's{hFžy?Ėj:2tfq!r.rqp X[ H@@f~N~vnꗿk,6  c|RMp@x,\ӽ.z &jN۾ӯ5m/wupJ A  +|\18Λ̬4:m /x}Rny75e;ceDn`_=WYX5t\j~ލ;(^lBd] h)@!hup۲?ncm1e[uǂ W_~bpAYQZVUOz?|d3G_уwȿ?gݝ7 ߨhSopŊ8uIbE:6X0bI$OryRgoyͭcRąym^!0wK7}xΆOf;Py>D>³Ҡ%JA2Lk4fg@/pݝ7 [~ch"Yn^r3Ol%@Qge6.'3f7`|1]1#{Cor %H9 pxGi{ _:V[kz{ڗ$[2nqbn|toz{mv6&S z='⣴zK.7 ~;^W"L]WO^,c>yūtәyl4Bp)b~tppwo OO=)J)IWI>g~I '%RvY1Bu:Ŝ]gva|xt[صkǶxy|CqPYeA}zͯџN/mMkMy\=xK?x7O%^pf'Zt=a:n1;62|;qAk#j 8bB@jH FzEbr/}y8m6qIaFlhxNSjM}W>֞gX-8ߕ\ڡbT3 u S̹v!;<2:W{!|Y> }0+mN{?#Grr+x 0d>=?ҦLSݠ2ShrI\2ÀÐpv'EzB GA%lA3C ?8g4_aN2RD2} kA<ߘ!L#BiQ||#Ǒ}r}"M¯={iDV(b$~zK~"d kׯA8LL 5+F%'2:HK#4b .= APu/?C|y-" q(1brx腐z&>dKL3|KQ5>`{eFD 
Q|^Xɟ*V&4p\Z$1G+@«-UK //:xBhCzQ#:!޾yߩKW> y,y[joW+lh,0̐^9g443C%c3|OYZ4}z~a}* JD ܧ=,([X_a( Q\M7򀅟D[ :?rsd3|\!}dUfb71| n^Ȳ1UD>WL#I -s}]hBC0խ3Ek0"Pztϳ& i$]WBҊoխ؂D yχE8 AUdkz q_;hZ+lJ68`>G26ϱ/`58~Ѷ9ƟB%,&LV~p  W} 3dljc 6ΠKXAHXyH^;]/aYXQ}TүFm.qzr@41g;Hge5  <- LzO#s33=jzQ#%0;s #F؎Qwj7 |TjLk]j!W {~y3W/So4ը^{w6QZ+[xc hzF$`iia& Xyn7R/8X|>ic&hиEΧhB oo8(q7f`{c֢gf #vqŝIENDB`uncertainties-3.2.3/doc/_templates/000077500000000000000000000000001500152063300172715ustar00rootroot00000000000000uncertainties-3.2.3/doc/_templates/indexsidebar.html000066400000000000000000000007361500152063300226260ustar00rootroot00000000000000

Get uncertainties

Current version: {{ release }}

Installation:
pip install uncertainties

Development Version: Github

Offline Documentation

uncertainties.pdf
Zipped HTML and PDF

uncertainties-3.2.3/doc/_templates/layout.html000066400000000000000000000014341500152063300214760ustar00rootroot00000000000000{% extends "!layout.html" %} {% block rootrellink %}
  • [Home
  • [Installation & Credits
  • |User Guide
  • |Formatting
  • | Numpy Arrays
  • |Advanced Topics]
  •         {% endblock %} {% block relbar1 %} {{ super() }} {% endblock %} uncertainties-3.2.3/doc/conf.py000066400000000000000000000157701500152063300164450ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # uncertainties Python package documentation build configuration file, created by # sphinx-quickstart on Tue Jun 8 18:32:22 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from datetime import date import sys import os sys.path.insert(0, os.path.abspath("..")) import uncertainties # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ["sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx_copybutton"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8' # The master toctree document. master_doc = "index" # General information about the project. project = "uncertainties" copyright = f"2010–{date.today().year}, Eric O. LEBIGOT (EOL)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "1" # The full version, including alpha/beta/rc tags. 
release = uncertainties.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme = 'sphinxdoc' html_theme = "bizstyle" # html_theme = 'cloud' html_theme = "python_docs_theme" # html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
html_title = "uncertainties" # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "_static/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = {"index": ["indexsidebar.html", "searchbox.html", "globaltoc.html"]} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = "uncertainties" # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ( "index", "uncertainties.tex", "uncertainties Python package Documentation", "Eric O. LEBIGOT (EOL)", "manual", ), ] # latex_engine = "xelatex" # Not recognized by readthedocs.io as of 2018-04-08 # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = r'\DeclareUnicodeCharacter{207B}{$^-$}' latex_elements = { # Superscript -, etc. for pdflatex (unnecessary, with xelatex): "preamble": r""" \DeclareUnicodeCharacter{207B}{$^-$} \DeclareUnicodeCharacter{22C5}{$\cdot$} """ } # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True uncertainties-3.2.3/doc/formatting.rst000066400000000000000000000160041500152063300200410ustar00rootroot00000000000000.. index:: formatting Variables .. _formatting guide: ======================================== Formatting Variables with uncertainties ======================================== .. index:: printing formatting Printing ======== .. Overview: Numbers with uncertainties can be printed conveniently: >>> from uncertainties import ufloat >>> x = ufloat(0.2, 0.01) >>> print(x) 0.200+/-0.010 The resulting form can generally be parsed back with :func:`ufloat_fromstr` (except for the LaTeX form). .. 
Precision matching: The nominal value and the uncertainty always have the **same precision**: this makes it easier to compare them. Standard formats ---------------- .. Formatting method: More **control over the format** can be obtained (in Python 2.6+) through the usual :func:`format` method of strings: >>> print('Result = {:10.2f}'.format(x)) Result = 0.20+/- 0.01 .. Legacy formats and base syntax of the format specification: **All the float format specifications** are accepted, except those with the ``n`` format type. In particular, a fill character, an alignment option, a sign or zero option, a width, or the ``%`` format type are all supported. The usual **float formats with a precision** retain their original meaning (e.g. ``.2e`` uses two digits after the decimal point): code that works with floats produces similar results when running with numbers with uncertainties. Precision control ----------------- .. Precision control: It is possible to **control the number of significant digits of the uncertainty** by adding the precision modifier ``u`` after the precision (and before any valid float format type like ``f``, ``e``, the empty format type, etc.): >>> print('1 significant digit on the uncertainty: {:.1u}'.format(x)) 1 significant digit on the uncertainty: 0.20+/-0.01 >>> print('3 significant digits on the uncertainty: {:.3u}'.format(x)) 3 significant digits on the uncertainty: 0.2000+/-0.0100 >>> print('1 significant digit, exponent notation: {:.1ue}'.format(x)) 1 significant digit, exponent notation: (2.0+/-0.1)e-01 >>> print('1 significant digit, percentage: {:.1u%}'.format(x)) 1 significant digit, percentage: (20+/-1)% When :mod:`uncertainties` must **choose the number of significant digits on the uncertainty**, it uses the `Particle Data Group `_ rounding rules (these rules keep the number of digits small, which is convenient for reading numbers with uncertainties, and at the same time prevent the uncertainty from being displayed with too few digits): 
>>> print('Automatic number of digits on the uncertainty: {}'.format(x)) Automatic number of digits on the uncertainty: 0.200+/-0.010 >>> print(x) 0.200+/-0.010 Custom options -------------- .. Options: :mod:`uncertainties` provides even more flexibility through custom formatting options. They can be added at the end of the format string: - ``P`` for **pretty-printing**: >>> print('{:.2e}'.format(x)) (2.00+/-0.10)e-01 >>> print(u'{:.2eP}'.format(x)) (2.00±0.10)×10⁻¹ The pretty-printing mode thus uses "±", "×" and superscript exponents. - ``S`` for the **shorthand notation**: >>> print('{:+.1uS}'.format(x)) # Sign, 1 digit for the uncertainty, shorthand +0.20(1) In this notation, the digits in parentheses represent the uncertainty on the last digits of the nominal value. - ``L`` for a **LaTeX** output: >>> print(x*1e7) (2.00+/-0.10)e+06 >>> print('{:L}'.format(x*1e7)) # Automatic exponent form, LaTeX \left(2.00 \pm 0.10\right) \times 10^{6} - ``p`` is for requiring that parentheses be always printed around the …±… part (without enclosing any exponent or trailing "%", etc.). This can for instance be useful so as to explicitly factor physical units: >>> print('{:p} kg'.format(x)) # Adds parentheses (0.200+/-0.010) kg >>> print("{:p} kg".format(x*1e7)) # No parentheses added (exponent) (2.00+/-0.10)e+06 kg These custom formatting options **can be combined** (when meaningful). Details ------- .. Common exponent: A **common exponent** is automatically calculated if an exponent is needed for the larger of the nominal value (in absolute value) and the uncertainty (the rule is the same as for floats). The exponent is generally **factored**, for increased legibility: >>> print(x*1e7) (2.00+/-0.10)e+06 When a *format width* is used, the common exponent is not factored: >>> print('Result = {:10.1e}'.format(x*1e-10)) Result = 2.0e-11+/- 0.1e-11 (Using a (minimal) width of 1 is thus a way of forcing exponents to not be factored.) 
Thanks to this feature, each part (nominal value and standard deviation) is correctly aligned across multiple lines, while the relative magnitude of the error can still be readily estimated thanks to the common exponent. .. Special cases: An uncertainty which is *exactly* **zero** is always formatted as an integer: >>> print(ufloat(3.1415, 0)) 3.1415+/-0 >>> print(ufloat(3.1415e10, 0)) 31415000000.0+/-0 >>> print(ufloat(3.1415, 0.0005)) 3.1415+/-0.0005 >>> print('{:.2f}'.format(ufloat(3.14, 0.001))) 3.14+/-0.00 >>> print('{:.2f}'.format(ufloat(3.14, 0.00))) 3.14+/-0 **All the digits** of a number with uncertainty are given in its representation: >>> y = ufloat(1.23456789012345, 0.123456789) >>> print(y) 1.23+/-0.12 >>> print(repr(y)) 1.23456789012345+/-0.123456789 >>> y 1.23456789012345+/-0.123456789 Global formatting ----------------- It is sometimes useful to have a **consistent formatting** across multiple parts of a program. Python's `string.Formatter class `_ allows one to do just that. Here is how it can be used to consistently use the shorthand notation for numbers with uncertainties: .. code-block:: python class ShorthandFormatter(string.Formatter): def format_field(self, value, format_spec): if isinstance(value, uncertainties.UFloat): return value.format(format_spec+'S') # Shorthand option added # Special formatting for other types can be added here (floats, etc.) else: # Usual formatting: return super(ShorthandFormatter, self).format_field( value, format_spec) frmtr = ShorthandFormatter() print(frmtr.format("Result = {0:.1u}", x)) # 1-digit uncertainty prints with the shorthand notation: ``Result = 0.20(1)``. Customizing the pretty-print and LaTeX outputs ---------------------------------------------- The pretty print and LaTeX outputs themselves can be customized. 
For example, the pretty-print representation of numbers with uncertainty can display multiplication with a centered dot (⋅) instead of the default symbol (×), like in ``(2.00±0.10)⋅10⁻¹``; this is easily done through the global setting ``uncertainties.core.MULT_SYMBOLS["pretty-print"] = "⋅"``. Beyond this multiplication symbol, the "±" symbol, the parentheses and the exponent representations can also be customized globally. The details can be found in the documentation of :func:`uncertainties.core.format_num`. uncertainties-3.2.3/doc/index.rst000066400000000000000000000043661500152063300170060ustar00rootroot00000000000000.. meta:: :description: The uncertainties Python package :keywords: error propagation, uncertainties, error calculations, Python, calculator, library, package Uncertainties ================= The `uncertainties package`_ is an open source Python library for doing calculations on numbers that have uncertainties (like 3.14±0.01) that are common in many scientific fields. The calculations done with this package will propagate the uncertainties to the result of mathematical calculations. The :mod:`uncertainties` package takes the pain and complexity out of uncertainty calculations and error propagation. Here is a quick taste of how to use :mod:`uncertainties`: >>> from uncertainties import ufloat >>> x = ufloat(2, 0.1) # x = 2+/-0.1 >>> y = ufloat(3, 0.2) # y = 3+/-0.2 >>> print(2*x) 4.00+/-0.20 >>> print(x+y) 5.00+/-0.22 >>> print(x*y) 6.0+/-0.5 The :mod:`uncertainties` library calculates uncertainties using linear `error propagation theory`_ by automatically :ref:`calculating derivatives ` and analytically propagating these to the results. Correlations between variables are automatically handled. This library can also yield the derivatives of any expression with respect to the variables that have uncertain values. For other approaches, see soerp_ (using higher-order terms) and mcerp_ (using a Monte-Carlo approach). 
The `source code`_ for the uncertainties package is licensed under the `Revised BSD License`_. This documentation is licensed under the `CC-SA-3 License`_. .. _uncertainties package: https://pypi.python.org/pypi/uncertainties/ .. _error propagation theory: https://en.wikipedia.org/wiki/Propagation_of_uncertainty .. _soerp: https://pypi.python.org/pypi/soerp .. _mcerp: https://pypi.python.org/pypi/mcer .. _Revised BSD License: https://opensource.org/licenses/BSD-3-Clause .. _CC-SA-3 License: https://creativecommons.org/licenses/by-sa/3.0 .. _source code: https://github.com/lmfit/uncertainties/ .. _version history: https://pypi.python.org/pypi/uncertainties#version-history .. _Pint: https://pypi.python.org/pypi/Pint/ .. _future: https://pypi.org/project/future/ Table of Contents ================= .. toctree:: :maxdepth: 2 install user_guide numpy_guide formatting tech_guide uncertainties-3.2.3/doc/install.rst000066400000000000000000000126061500152063300173410ustar00rootroot00000000000000.. index:: installation .. index:: credits .. _installation: ==================================== Installation and Credits ==================================== Download and Installation ========================= The :mod:`uncertainties` package supports Python versions 3.8 and higher. Earlier versions of Python are not tested, but may still work. Development version of Python (currently, 3.13) are likely to work, but are not regularly tested. To install :mod:`uncertainties`, use: .. code-block:: sh pip install uncertainties You can upgrade from an older version of :mod:`uncertainties` with: .. code-block:: sh pip install --upgrade uncertainties Other packaging systems such as `Anaconda `_, `MacPorts `_, or Linux package manager may also maintain packages for :mod:`uncertainties`, so that you may also be able to install using something like .. code-block:: sh conda install -c conda-forge uncertainties .. code-block:: sh sudo port install py**-uncertainties or .. 
code-block:: sh sudo apt get python-uncertainties depending on your platform and installation of Python. For all installations of Python, using `pip` should work and is therefore recommended. Source code and Development Version ===================================== .. _download: https://pypi.python.org/pypi/uncertainties/#files .. _GitHub releases: https://github.com/lmfit/uncertainties/releases .. _NumPy: http://numpy.scipy.org/ You can `download`_ the latest source package archive from the Python Package Index (PyPI) and unpack it, or from the `GitHub releases`_ page. This package can be unpacked using `unzip`, `tar xf` , or other similar utilities, and then installed with .. code-block:: sh python -m pip install . To work with the development version, use `git` to fork or clone the code: .. code-block:: sh git clone git@github.com:lmfit/uncertainties.git The :mod:`uncertainties` package is written in pure Python and has no external dependencies. If available (and recommended), the `NumPy`_ package can be used. Running the test suite requires `pytest` and `pytest_cov`, and building these docs requires `sphinx`. To install these optional packages, use one of: .. code-block:: sh pip install ".[arrays]" # to install numpy pip install ".[test]" # to enable running the tests pip install ".[doc]" # to enable building the docs pip install ".[all]" # to enable all of these options Getting Help ================= .. _GitHub Discussions: https://github.com/lmfit/uncertainties/discussions .. _GitHub Issues: https://github.com/lmfit/uncertainties/issues .. _lmfit GitHub organization: https://github.com/lmfit/ If you have questions about :mod:`uncertainties` or run into trouble, use the `GitHub Discussions`_ page. For bug reports, use the `GitHub Issues`_ pages. Credits ================ .. _Eric O. LEBIGOT (EOL): http://linkedin.com/pub/eric-lebigot/22/293/277 The :mod:`uncertainties` package was written and developed by `Eric O. LEBIGOT (EOL)`_. 
EOL also maintained the package until 2024, when the GitHub project was moved to the `lmfit GitHub organization`_ to allow more sustainable development and maintenance. Current members of the devlopment and maintenance team include `Andrew G Savage `_, `Justin Gerber `_, `Eric O Legibot `_, `Matt Newville `_, and `Will Shanks `_. Contributions and suggestions for development are welcome. How to cite this package ======================== If you use this package for a publication, please cite it as *Uncertainties: a Python package for calculations with uncertainties*, Eric O. LEBIGOT. A version number can be added, but is optional. Acknowledgments =============== .. _Python(x,y): https://python-xy.github.io/ .. _scientific Python packages: http://www.lfd.uci.edu/~gohlke/pythonlibs/ Eric O. LEBIGOT (EOL) thanks all the people who made generous donations: that help to keep this project alive by providing positive feedback. EOL greatly appreciates having gotten key technical input from Arnaud Delobelle, Pierre Cladé, and Sebastian Walter. Patches by Pierre Cladé, Tim Head, José Sabater Montes, Martijn Pieters, Ram Rachum, Christoph Deil, Gabi Davar, Roman Yurchak and Paul Romano are gratefully acknowledged. EOL also thanks users who contributed with feedback and suggestions, which greatly helped improve this program: Joaquin Abian, Jason Moore, Martin Lutz, Víctor Terrón, Matt Newville, Matthew Peel, Don Peterson, Mika Pflueger, Albert Puig, Abraham Lee, Arian Sanusi, Martin Laloux, Jonathan Whitmore, Federico Vaggi, Marco A. Ferra, Hernan Grecco, David Zwicker, James Hester, Andrew Nelson, and many others. 
EOL is grateful to the Anaconda, macOS and Linux distribution maintainers of this package (Jonathan Stickel, David Paleino, Federico Ceratto, Roberto Colistete Jr, Filipe Pires Alvarenga Fernandes, and Felix Yan) and also to Gabi Davar and Pierre Raybaut for including it in `Python(x,y)`_ and to Christoph Gohlke for including it in his Base distribution of `scientific Python packages`_ for Windows. .. index:: license License ======= .. _Revised BSD License: http://opensource.org/licenses/BSD-3-Clause This software is released under the `Revised BSD License`_ (© 2010–2024, Eric O. LEBIGOT [EOL]). uncertainties-3.2.3/doc/make.bat000066400000000000000000000013741500152063300165460ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd uncertainties-3.2.3/doc/numpy_guide.rst000066400000000000000000000202531500152063300202150ustar00rootroot00000000000000.. index: NumPy support =============================== Uncertainties and numpy arrays =============================== .. index:: unumpy .. index:: arrays; simple use, matrices; simple use .. 
_simple_array_use: Arrays of uncertainties Variables ==================================== It is possible to put uncertainties Variable in NumPy_ arrays and matrices: >>> import numpy as np >>> from uncertainties import ufloat >>> arr = np.array([ufloat(1, 0.01), ufloat(2, 0.1)]) >>> print(2*arr) [2.0+/-0.02 4.0+/-0.2] >>> print(str(arr.sum())) 3.00+/-0.10 Many common operations on NumPy arrays can be performed transparently even when these arrays contain numbers with uncertainties. The unumpy package ================== While :ref:`basic operations on arrays ` that contain numbers with uncertainties can be performed without it, the :mod:`unumpy` package is useful for more advanced uses. This package contains: 1. utilities that help with the **creation and manipulation** of NumPy_ arrays and matrices of numbers with uncertainties; 2. **generalizations** of multiple NumPy functions so that they also work with arrays that contain numbers with uncertainties. Operations on arrays (including their cosine, etc.) can thus be performed transparently. These features can be made available with >>> from uncertainties import unumpy .. Here, there is no need to mention unumpy.unlinalg, because it is indeed made available through "import unumpy". Creation and manipulation of arrays and matrices ------------------------------------------------ .. index:: single: arrays; creation and manipulation single: creation; arrays Arrays ^^^^^^ Arrays of numbers with uncertainties can be built from values and uncertainties: >>> arr = unumpy.uarray([1, 2], [0.01, 0.002]) >>> print(arr) [1.0+/-0.01 2.0+/-0.002] NumPy arrays of numbers with uncertainties can also be built directly through NumPy, thanks to NumPy's support of arrays of arbitrary objects: >>> arr = np.array([ufloat(1, 0.1), ufloat(2, 0.002)]) .. index:: single: matrices; creation and manipulation single: creation; matrices Matrices ^^^^^^^^ Matrices of numbers with uncertainties are best created in one of two ways. 
The first way is similar to using :func:`uarray`: >>> mat = unumpy.umatrix([1, 2], [0.01, 0.002]) Matrices can also be built by converting arrays of numbers with uncertainties into matrices through the :class:`unumpy.matrix` class: >>> mat = unumpy.matrix(arr) :class:`unumpy.matrix` objects behave like :class:`numpy.matrix` objects of numbers with uncertainties, but with better support for some operations (such as matrix inversion). For instance, regular NumPy matrices cannot be inverted, if they contain numbers with uncertainties (i.e., ``numpy.matrix([[ufloat(…), …]]).I`` does not work). This is why the :class:`unumpy.matrix` class is provided: both the inverse and the pseudo-inverse of a matrix can be calculated in the usual way: if :data:`mat` is a :class:`unumpy.matrix`, >>> print(mat.I) [[0.19999999999999996+/-0.012004265908417718] [0.3999999999999999+/-0.01600179989876138]] does calculate the inverse or pseudo-inverse of :data:`mat` with uncertainties. .. index:: pair: nominal value; uniform access (array) pair: uncertainty; uniform access (array) pair: standard deviation; uniform access (array) Uncertainties and nominal values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Nominal values and uncertainties in arrays (and matrices) can be directly accessed (through functions that work on pure float arrays too): >>> unumpy.nominal_values(arr) array([1., 2.]) >>> unumpy.std_devs(mat) matrix([[0.1 , 0.002]]) .. 
index:: mathematical operation; on an array of numbers Mathematical functions ---------------------- This module defines uncertainty-aware mathematical functions that generalize those from :mod:`uncertainties.umath` so that they work on NumPy arrays of numbers with uncertainties instead of just scalars: >>> print(unumpy.cos(arr)) # Cosine of each array element [0.5403023058681398+/-0.08414709848078966 -0.4161468365471424+/-0.0018185948536513636] NumPy's function names are used, and not those from the :mod:`math` module (for instance, :func:`unumpy.arccos` is defined, like in NumPy, and is not named :func:`acos` like in the :mod:`math` module). The definition of the mathematical quantities calculated by these functions is available in the documentation for :mod:`uncertainties.umath`. .. index:: pair: testing and operations (in arrays); NaN NaN testing and NaN-aware operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ One particular function pertains to NaN testing: ``unumpy.isnan()``. It returns true for each NaN *nominal value* (and false otherwise). Since NaN±1 is *not* (the scalar) NaN, functions like ``numpy.nanmean()`` do not skip such values. This is where ``unumpy.isnan()`` is useful, as it can be used for masking out numbers with a NaN nominal value: >>> nan = float("nan") >>> arr = np.array([nan, ufloat(nan, 1), ufloat(1, nan), 2]) >>> print(arr) [nan nan+/-1.0 1.0+/-nan 2] >>> print(arr[~unumpy.isnan(arr)].mean()) 1.5+/-nan or equivalently, by using masked arrays: >>> masked_arr = np.ma.array(arr, mask=unumpy.isnan(arr)) >>> masked_arr.mean() 1.5+/-nan In this case the uncertainty is NaN as it should be, because one of the numbers does have an undefined uncertainty, which makes the final uncertainty undefined (but the average is well defined). In general, uncertainties are not NaN and one obtains the mean of the non-NaN values. .. index:: saving to file; array .. 
index:: reading from file; array Storing arrays in text format ============================= Number with uncertainties can easy be cast to strings and back. This means that arrays of numbers with uncertainties can also be cast to string representations and back. There are many ways to convert an array of numbers with uncertainties to a string representation for storage and then convert it back to a python array of numbers with uncertainties. Here is one example set of functions to perform this operation. >>> import json >>> from uncertainties import ufloat_fromstr >>> def serialize_unumpy_array(u_arr): ... string_u_arr = np.vectorize(repr)(u_arr) ... return json.dumps(string_u_arr.tolist(), indent=4) >>> >>> def deserialize_unumpy_arr(serialized_u_arr): ... string_u_arr = np.array(json.loads(serialized_u_arr)) ... return np.vectorize(ufloat_fromstr)(string_u_arr) We can use the first function to serialize an array >>> u_arr = np.array([ ... [ufloat(1, 0.1), ufloat(2, 0.2)], ... [ufloat(3, 0.3), ufloat(4, 0.4)], ... ]) >>> print(u_arr) [[1.0+/-0.1 2.0+/-0.2] [3.0+/-0.3 4.0+/-0.4]] >>> serialized_u_arr = serialize_unumpy_array(u_arr) >>> print(serialized_u_arr) [ [ "1.0+/-0.1", "2.0+/-0.2" ], [ "3.0+/-0.3", "4.0+/-0.4" ] ] This can then of course be stored in a ``.json`` file using ``json.dump``. We can then deserialize >>> u_arr_2 = deserialize_unumpy_arr(serialized_u_arr) >>> print(u_arr_2) [[1.0+/-0.1 2.0+/-0.2] [3.0+/-0.3 4.0+/-0.4]] Note that the process of serializing and deserializing the array of numbers with uncertainties has result in all correlations between numbers within one array, and also between numbers from the original array and its deserialized copy >>> print(u_arr[0, 0] - u_arr_2[0, 0]) 0.00+/-0.14 >>> print(u_arr[0, 0] == u_arr_2[0, 0]) False A future release of :mod:`uncertainties` may provide functionality for serializing/deserializing number with uncertainties in such a way that correlations can be preserved. .. 
index:: linear algebra; additional functions, ulinalg Additional array functions: unumpy.ulinalg ========================================== The :mod:`unumpy.ulinalg` module contains more uncertainty-aware functions for arrays that contain numbers with uncertainties. It currently offers generalizations of two functions from :mod:`numpy.linalg` that work on arrays (or matrices) that contain numbers with uncertainties, the **matrix inverse and pseudo-inverse**: >>> print(unumpy.ulinalg.inv([[ufloat(2, 0.1)]])) [[0.5+/-0.025]] >>> print(unumpy.ulinalg.pinv(mat)) [[0.19999999999999996+/-0.012004265908417718] [0.3999999999999999+/-0.01600179989876138]] .. _NumPy: http://numpy.scipy.org/ uncertainties-3.2.3/doc/requirements.txt000066400000000000000000000000071500152063300204150ustar00rootroot00000000000000.[all] uncertainties-3.2.3/doc/tech_guide.rst000066400000000000000000000347471500152063300200050ustar00rootroot00000000000000.. index:: technical details ========================= Advanced Topics ========================= This page gives more in-depth technical description of the :mod:`uncertainties` package. .. index:: api .. _api_funcs: API: Application Programming Interface ============================================== .. module:: uncertainties The most common and important functions for creating uncertain :class:`Variables` are :func:`ufloat` and :func:`ufloat_fromstr`. In addition, the :func:`wrap` can be used to support the propagation of uncertainties with a user-supplied function. .. autofunction:: ufloat .. autofunction:: ufloat_fromstr .. autoclass:: Variable .. autofunction:: wrap Testing whether an object is a number with uncertainty ------------------------------------------------------ The recommended way of testing whether :data:`value` carries an uncertainty handled by this module is by checking whether :data:`value` is an instance of :class:`UFloat`, through ``isinstance(value, uncertainties.UFloat)``. 
Special Technical Topics ============================================================ .. index:: pickling .. index:: saving to file; number with uncertainty .. index:: reading from file; number with uncertainty .. _pickling: Pickling -------- The quantities with uncertainties created by the :mod:`uncertainties` package can be `pickled `_ (they can be stored in a file, for instance). If multiple variables are pickled together (including when pickling :doc:`NumPy arrays `), their correlations are preserved: >>> import pickle >>> from uncertainties import ufloat >>> x = ufloat(2, 0.1) >>> y = 2*x >>> p = pickle.dumps([x, y]) # Pickling to a string >>> (x2, y2) = pickle.loads(p) # Unpickling into new variables >>> y2 - 2*x2 0.0+/-0 The final result is exactly zero because the unpickled variables :data:`x2` and :data:`y2` are completely correlated. However, **unpickling necessarily creates new variables that bear no relationship with the original variables** (in fact, the pickled representation can be stored in a file and read from another program after the program that did the pickling is finished: the unpickled variables cannot be correlated to variables that can disappear). Thus >>> x - x2 0.0+/-0.14142135623730953 which shows that the original variable :data:`x` and the new variable :data:`x2` are completely uncorrelated. .. index:: comparison operators; technical details .. _comparison_operators: Comparison operators -------------------- Comparison operations (>, ==, etc.) on numbers with uncertainties have a **pragmatic semantics**, in this package: numbers with uncertainties can be used wherever Python numbers are used, most of the time with a result identical to the one that would be obtained with their nominal value only. This allows code that runs with pure numbers to also work with numbers with uncertainties. .. 
index:: boolean value The **boolean value** (``bool(x)``, ``if x …``) of a number with uncertainty :data:`x` is defined as the result of ``x != 0``, as usual. However, since the objects defined in this module represent probability distributions and not pure numbers, comparison operators are interpreted in a specific way. The result of a comparison operation is defined so as to be essentially consistent with the requirement that uncertainties be small: the **value of a comparison operation** is True only if the operation yields True for all *infinitesimal* variations of its random variables around their nominal values, *except*, possibly, for an *infinitely small number* of cases. Example: >>> x = ufloat(3.14, 0.01) >>> x == x True because a sample from the probability distribution of :data:`x` is always equal to itself. However: >>> y = ufloat(3.14, 0.01) >>> x == y False since :data:`x` and :data:`y` are independent random variables that *almost* always give a different value (put differently, :data:`x`-:data:`y` is not equal to 0, as it can take many different values). Note that this is different from the result of ``z = 3.14; t = 3.14; print(z == t)``, because :data:`x` and :data:`y` are *random variables*, not pure numbers. Similarly, >>> x = ufloat(3.14, 0.01) >>> y = ufloat(3.00, 0.01) >>> x > y True because :data:`x` is supposed to have a probability distribution largely contained in the 3.14±~0.01 interval, while :data:`y` is supposed to be well in the 3.00±~0.01 one: random samples of :data:`x` and :data:`y` will most of the time be such that the sample from :data:`x` is larger than the sample from :data:`y`. Therefore, it is natural to consider that for all practical purposes, ``x > y``. 
Since comparison operations are subject to the same constraints as other operations, as required by the :ref:`linear approximation ` method, their result should be essentially *constant* over the regions of highest probability of their variables (this is the equivalent of the linearity of a real function, for boolean values). Thus, it is not meaningful to compare the following two independent variables, whose probability distributions overlap: >>> x = ufloat(3, 0.01) >>> y = ufloat(3.0001, 0.01) In fact the function (x, y) → (x > y) is not even continuous over the region where x and y are concentrated, which violates the assumption of approximate linearity made in this package on operations involving numbers with uncertainties. Comparing such numbers therefore returns a boolean result whose meaning is undefined. However, values with largely overlapping probability distributions can sometimes be compared unambiguously: >>> x = ufloat(3, 1) >>> x 3.0+/-1.0 >>> y = x + 0.0002 >>> y 3.0002+/-1.0 >>> y > x True In fact, correlations guarantee that :data:`y` is always larger than :data:`x`: ``y-x`` correctly satisfies the assumption of linearity, since it is a constant "random" function (with value 0.0002, even though :data:`y` and :data:`x` are random). Thus, it is indeed true that :data:`y` > :data:`x`. .. index:: linear propagation of uncertainties .. _linear_method: Linear propagation of uncertainties ----------------------------------- This package calculates the standard deviation of mathematical expressions through the linear approximation of `error propagation theory`_. The standard deviations and nominal values calculated by this package are thus meaningful approximations as long as **uncertainties are "small"**. A more precise version of this constraint is that the final calculated functions must have **precise linear expansions in the region where the probability distribution of their variables is the largest**. 
Mathematically, this means that the linear terms of the final calculated functions around the nominal values of their variables should be much larger than the remaining higher-order terms over the region of significant probability (because such higher-order contributions are neglected). For example, calculating ``x*10`` with :data:`x` = 5±3 gives a *perfect result* since the calculated function is linear. So does ``umath.atan(umath.tan(x))`` for :data:`x` = 0±1, since only the *final* function counts (not an intermediate function like :func:`tan`). Another example is ``sin(0+/-0.01)``, for which :mod:`uncertainties` yields a meaningful standard deviation since the sine is quite linear over 0±0.01. However, ``cos(0+/-0.01)``, yields an approximate standard deviation of 0 because it is parabolic around 0 instead of linear; this might not be precise enough for all applications. **More precise uncertainty estimates** can be obtained, if necessary, with the soerp_ and mcerp_ packages. The soerp_ package performs *second-order* error propagation: this is still quite fast, but the standard deviation of higher-order functions like f(x) = x\ :sup:`3` for x = 0±0.1 is calculated as being exactly zero (as with :mod:`uncertainties`). The mcerp_ package performs Monte-Carlo calculations, and can in principle yield very precise results, but calculations are much slower than with approximation schemes. .. index:: pair: uncertainty; NaN NaN uncertainty ---------------------- If linear `error propagation theory`_ cannot be applied, the functions defined by :mod:`uncertainties` internally use a `not-a-number value `_ (``nan``) for the derivative. As a consequence, it is possible for uncertainties to be ``nan``: >>> from uncertainties import umath >>> umath.sqrt(ufloat(0, 1)) 0.0+/-nan This indicates that **the derivative required by linear error propagation theory is not defined** (a Monte-Carlo calculation of the resulting random variable is more adapted to this specific case). 
However, even in this case where the derivative at the nominal value is infinite, the :mod:`uncertainties` package **correctly handles perfectly precise numbers**: >>> umath.sqrt(ufloat(0, 0)) 0.0+/-0 is thus the correct result, despite the fact that the derivative of the square root is not defined in zero. .. _math_def_num_uncert: Mathematical definition of numbers with uncertainties ----------------------------------------------------- .. index:: number with uncertainty; definition .. index:: probability distribution Mathematically, **numbers with uncertainties** are, in this package, **probability distributions**. They are *not restricted* to normal (Gaussian) distributions and can be **any distribution**. These probability distributions are reduced to two numbers: a nominal value and an uncertainty. Thus, both independent variables (:class:`Variable` objects) and the result of mathematical operations (:class:`AffineScalarFunc` objects) contain these two values (respectively in their :attr:`nominal_value` and :attr:`std_dev` attributes). .. index:: uncertainty; definition The **uncertainty** of a number with uncertainty is simply defined in this package as the **standard deviation** of the underlying probability distribution. The numbers with uncertainties manipulated by this package are assumed to have a probability distribution mostly contained around their nominal value, in an interval of about the size of their standard deviation. This should cover most practical cases. .. index:: nominal value; definition A good choice of **nominal value** for a number with uncertainty is thus the median of its probability distribution, the location of highest probability, or the average value. 
Probability distributions (random variables and calculation results) are printed as:: nominal value +/- standard deviation but this does not imply any property on the nominal value (beyond the fact that the nominal value is normally inside the region of high probability density), or that the probability distribution of the result is symmetrical (this is rarely strictly the case). .. _differentiation method: Differentiation method ---------------------- The :mod:`uncertainties` package automatically calculates the derivatives required by linear error propagation theory. Almost all the derivatives of the fundamental functions provided by :mod:`uncertainties` are obtained through analytical formulas (the few mathematical functions that are instead differentiated through numerical approximation are listed in ``umath_core.num_deriv_funcs``). The derivatives of mathematical *expressions* are evaluated through a fast and precise method: :mod:`uncertainties` transparently implements `automatic differentiation`_ with reverse accumulation. This method essentially consists in keeping track of the value of derivatives, and in automatically applying the `chain rule `_. Automatic differentiation is faster than symbolic differentiation and more precise than numerical differentiation. The derivatives of any expression can be obtained with :mod:`uncertainties` in a simple way, as demonstrated in the :ref:`User Guide `. .. _variable_tracking: Tracking of random variables ---------------------------- This package keeps track of all the random variables a quantity depends on, which allows one to perform transparent calculations that yield correct uncertainties. For example: >>> x = ufloat(2, 0.1) >>> a = 42 >>> poly = x**2 + a >>> poly 46.0+/-0.4 >>> poly - x*x 42.0+/-0 Even though ``x*x`` has a non-zero uncertainty, the result has a zero uncertainty, because it is equal to :data:`a`. 
If the variable :data:`a` above is modified, the value of :data:`poly` is not modified, as is usual in Python: >>> a = 123 >>> print(poly) # Still equal to x**2 + 42, not x**2 + 123 46.0+/-0.4 Random variables can, on the other hand, have their uncertainty updated on the fly, because quantities with uncertainties (like :data:`poly`) keep track of them: >>> x.std_dev = 0 >>> print(poly) # Zero uncertainty, now 46.0+/-0 As usual, Python keeps track of objects as long as they are used. Thus, redefining the value of :data:`x` does not change the fact that :data:`poly` depends on the quantity with uncertainty previously stored in :data:`x`: >>> x = 10000 >>> print(poly) # Unchanged 46.0+/-0 These mechanisms make quantities with uncertainties behave mostly like regular numbers, while providing a fully transparent way of handling correlations between quantities. .. index:: number with uncertainty; classes, Variable class .. index:: AffineScalarFunc class .. _classes: Python classes for variables and functions with uncertainty ----------------------------------------------------------- Numbers with uncertainties are represented through two different classes: 1. a class for independent random variables (:class:`Variable`, which inherits from :class:`UFloat`), 2. a class for functions that depend on independent variables (:class:`AffineScalarFunc`, aliased as :class:`UFloat`). Additional documentation for these classes is available in their Python docstring. The factory function :func:`ufloat` creates variables and thus returns a :class:`Variable` object: >>> x = ufloat(1, 0.1) >>> type(x) :class:`Variable` objects can be used as if they were regular Python numbers (the summation, etc. of these objects is defined). 
Mathematical expressions involving numbers with uncertainties generally return :class:`AffineScalarFunc` objects, because they represent mathematical functions and not simple variables; these objects store all the variables they depend on: >>> type(umath.sin(x)) .. _automatic differentiation: http://en.wikipedia.org/wiki/Automatic_differentiation .. _error propagation theory: http://en.wikipedia.org/wiki/Error_propagation .. _soerp: https://pypi.python.org/pypi/soerp .. _mcerp: https://pypi.python.org/pypi/mcerp uncertainties-3.2.3/doc/user_guide.rst000066400000000000000000000505041500152063300200250ustar00rootroot00000000000000.. index:: user guide .. _user guide: ========== User Guide ========== Basic usage =========== Basic mathematical operations involving numbers with uncertainties requires importing the :func:`ufloat` function which creates a :class:`Variable`: number with both a nominal value and an uncertainty. >>> from uncertainties import ufloat >>> x = ufloat(2.7, 0.01) # a Variable with a value 2.7+/-0.01 The :mod:`uncertainties` module contains sub-modules for :ref:`advanced mathematical functions `, and :doc:`arrays and matrices `, which can be accessed as well. .. index:: pair: number with uncertainty; creation Creating Variables: numbers with uncertainties ================================================ To create a number with uncertainties or *Variable*, use the :func:`ufloat` function, which takes a *nominal value* (which can be interpreted as the most likely value, or the mean or central value of the distribution of values), a *standard error* (the standard deviation or :math:`1-\sigma` uncertainty), and an optional *tag*: >>> x = ufloat(2.7, 0.01) # x = 2.7+/-0.01 >>> y = ufloat(4.5, 1.2, tag='y_variable') # x = 4..5+/-1.2 .. 
index:: pair: nominal value; scalar pair: uncertainty; scalar You can access the nominal value and standard deviation for any Variable with the `nominal_value` and `std_dev` attributes: >>> print(x.nominal_value, x.std_dev) 2.7 0.01 Because these are fairly long to type, for convenience, `nominal_value` can be abbreviated as `n` and `std_dev` as `s`: >>> print(x.n, x.s) 2.7 0.01 uncertainties Variables can also be created from one of many string representations. The following forms will all create Variables with the same value: >>> from uncertainties import ufloat_fromstr >>> x = ufloat(0.2, 0.01) >>> x = ufloat_fromstr("0.20+/-0.01") >>> x = ufloat_fromstr("(2+/-0.1)e-01") # Factored exponent >>> x = ufloat_fromstr("0.20(1)") # Short-hand notation >>> x = ufloat_fromstr("20(1)e-2") # Exponent notation >>> x = ufloat_fromstr(u"0.20±0.01") # Pretty-print form >>> x = ufloat_fromstr("0.20") # Automatic uncertainty of +/-1 on last digit More details on the :func:`ufloat` and :func:`ufloat_from_str` can be found in :ref:`api_funcs`. Basic math with uncertain Variables ========================================= Uncertainties variables created in :func:`ufloat` or :func:`ufloat_fromstr` can be used in basic mathematical calculations (``+``, ``-``, ``*``, ``/``, ``**``) as with other Python numbers and variables. 
>>> t = ufloat(0.2, 0.01) >>> double = 2.0*t >>> print(double) 0.400+/-0.020 >>> square = t**2 >>> print(square) 0.040+/-0.004 When adding two Variables, the uncertainty in the result is the quadrature sum (square-root of the sum of squares) of the uncertainties of the two Variables: >>> x = ufloat(20, 4) >>> y = ufloat(12, 3) >>> print(x+y) 32+/-5 We can check that error propagation when adding two independent variables (using the abbreviation `.s` for the standard error): >>> from math import sqrt >>> (x+y).s == sqrt(x.s**2 + y.s**2) True Multiplying two Variables will properly propagate those uncertainties too: >>> print(x*y) (2.4+/-0.8)e+02 >>> (x*y).s == (x*y).n * sqrt((x.s/x.n)**2 + (y.s/y.n)**2 ) True But note that adding a Variable to itself does not add its uncertainties in quadrature, but are simply scaled: >>> print(x+x) 40+/-8 >>> print(3*x + 10) 70+/-12 It is important to understand that calculations done with Variable know about the correlation between the Variables. Variables created with :func:`ufloat` (and :func:`ufloat_fromstr`) are completely uncorrelated with each other, but are known to be completely correlated with themselves. This means that >>> x = ufloat(5, 0.5) >>> y = ufloat(5, 0.5) >>> x - y 0.0+/-0.7071067811865476 >>> x - x 0.0+/-0 For two *different* Variables, uncorrelated uncertainties will be propagated. But when doing a calculation with a single Variable, the uncertainties are correlated, and calculations will reflect that. .. index:: mathematical operation; on a scalar, umath .. _advanced math operations: Mathematical operations with uncertain Variables ===================================================== Besides being able to apply basic mathematical operations to uncertainties Variables, this package provides generalized versions of 40 of the the functions from the standard :mod:`math` *module*. 
These mathematical functions are found in the :mod:`uncertainties.umath` module: >>> from uncertainties.umath import sin, exp, sqrt >>> x = ufloat(0.2, 0.01) >>> sin(x) 0.19866933079506122+/-0.009800665778412416 >>> sin(x*x) 0.03998933418663417+/-0.003996800426643912 >>> exp(-x/3.0) 0.9355069850316178+/-0.003118356616772059 >>> sqrt(230*x + 3) 7.0+/-0.16428571428571428 The functions in the :mod:`uncertainties.umath` module include: ``acos``, ``acosh``, ``asin``, ``asinh``, ``atan``, ``atan2``, ``atanh``, ``ceil``, ``copysign``, ``cos``, ``cosh``, ``degrees``, ``erf``, ``erfc``, ``exp``, ``expm1``, ``fabs``, ``factorial``, ``floor``, ``fmod``, ``frexp``, ``fsum``, ``gamma``, ``hypot``, ``isinf``, ``isnan``, ``ldexp``, ``lgamma``, ``log``, ``log10``, ``log1p``, ``modf``, ``pow``, ``radians``, ``sin``, ``sinh``, ``sqrt``, ``tan``, ``tanh``, ``trunc`` Comparison operators ==================== Comparison operators (``==``, ``!=``, ``>``, ``<``, ``>=``, and ``<=``) for Variables with uncertainties are somewhat complicated, and need special attention. As we hinted at above, and will explore in more detail below and in the :ref:`Technical Guide `, this relates to the correlation between Variables. Equality and inequality comparisons ------------------------------------ If we compare the equality of two Variables with the same nominal value and uncertainty, we see >>> x = ufloat(5, 0.5) >>> y = ufloat(5, 0.5) >>> x == x True >>> x == y False The difference here is that although the two Python objects have the same nominal value and uncertainty, these are independent, uncorrelated values. It is not exactly true that the difference is based on identity, note that >>> x == (1.0*x) True >>> x is (1.0*x) False In order for the result of two calculations with uncertainties to be considered equal, the :mod:`uncertainties` package does not test whether the nominal value and the uncertainty have the same value. 
Instead it checks whether the difference of the two calculations has a nominal value of 0 *and* an uncertainty of 0. >>> (x -x) 0.0+/-0 >>> (x -y) 0.0+/-0.7071067811865476 Comparisons of magnitude ------------------------------------ The concept of comparing the magnitude of values with uncertainties is a bit complicated. That is, a Variable with a value of 25 +/- 10 might be greater than a Variable with a value of 24 +/- 8 most of the time, but *sometimes* it might be less than it. The :mod:`uncertainties` package takes the simple approach of comparing nominal values. That is >>> a = ufloat(25, 10) >>> b = ufloat(24, 8) >>> a > b True Note that combining this comparison and the above discussion of `==` and `!=` can lead to a result that maybe somewhat surprising: >>> a = ufloat(25, 10) >>> b = ufloat(25, 8) >>> a >= b False >>> a > b False >>> a == b False >>> a.nominal_value >= b.nominal_value True That is, since `a` is neither greater than `b` (nominal value only) nor equal to `b`, it cannot be greater than or equal to `b`. .. index:: pair: testing (scalar); NaN Handling NaNs and infinities =============================== NaN values can appear in either the nominal value or uncertainty of a Variable. As is always the case, care must be exercised when handling NaN values. While :func:`math.isnan` and :func:`numpy.isnan` will raise `TypeError` exceptions for uncertainties Variables (because an uncertainties Variable is not a float), the function :func:`umath.isnan` will return whether the nominal value of a Variable is NaN. Similarly, :func:`umath.isinf` will return whether the nominal value of a Variable is infinite. To check whether the uncertainty is NaN or Inf, use one of :func:`math.isnan`, :func:`math.isinf`, :func:`nupmy.isnan`, or , :func:`nupmy.isinf` on the ``std_dev`` attribute. .. 
index:: correlations; detailed example Power Function Behavior ======================= The value of one :class:`UFloat` raised to the power of another can be calculated in two ways: >>> from uncertainties import umath >>> >>> x = ufloat(4.5, 0.2) >>> y = ufloat(3.4, 0.4) >>> print(x**y) (1.7+/-1.0)e+02 >>> print(umath.pow(x, y)) (1.7+/-1.0)e+02 The function ``x**y`` is defined for all ``x != 0`` and for ``x == 0`` as long as ``y > 0``. There is not a unique definition for ``0**0``, however python takes the convention for :class:`float` that ``0**0 == 1``. If the power operation is performed on an ``(x, y)`` pair for which ``x**y`` is undefined then an exception will be raised: >>> x = ufloat(0, 0.2) >>> y = ufloat(-3.4, 0.4) >>> print(x**y) Traceback (most recent call last): ... ZeroDivisionError: 0.0 cannot be raised to a negative power On the domain where it is defined, ``x**y`` is always real for ``x >= 0``. For ``x < 0`` it is real for all integer values of ``y``. If ``x<0`` and ``y`` is not an integer then ``x**y`` has a non-zero imaginary component. The :mod:`uncertainties` module does not handle complex values: >>> x = ufloat(-4.5, 0.2) >>> y = ufloat(-3.4, 0.4) >>> print(x**y) Traceback (most recent call last): ... ValueError: The uncertainties module does not handle complex results The ``x`` derivative is real anywhere ``x**y`` is real except along ``x==0`` for non-integer ``y``. At these points the ``x`` derivative would be complex so a NaN value is used: >>> x = ufloat(0, 0.2) >>> y=1.5 >>> print((x**y).error_components()) {0.0+/-0.2: nan} The ``y`` derivative is real anywhere ``x**y`` is real as long as ``x>=0``. 
For ``x < 0`` the ``y`` derivative is always complex valued so a NaN value is used: >>> x = -2 >>> y = ufloat(1, 0.2) >>> print((x**y).error_components()) {1.0+/-0.2: nan} Automatic correlations ====================== Correlations between variables are **automatically handled** whatever the number of variables involved, and whatever the complexity of the calculation. For example, when :data:`x` is the number with uncertainty defined above, >>> x = ufloat(0.2, 0.01) >>> square = x**2 >>> print(square) 0.040+/-0.004 >>> square - x*x 0.0+/-0 >>> y = x*x + 1 >>> y - square 1.0+/-0 The last two printed results above have a zero uncertainty despite the fact that :data:`x`, :data:`y` and :data:`square` have a non-zero uncertainty: the calculated functions give the same value for all samples of the random variable :data:`x`. Thanks to the automatic correlation handling, calculations can be performed in as many steps as necessary, exactly as with simple floats. When various quantities are combined through mathematical operations, the result is calculated by taking into account all the correlations between the quantities involved. All of this is done transparently. Access to the individual sources of uncertainty =============================================== The various contributions to an uncertainty can be obtained through the :func:`error_components` method, which maps the **independent variables a quantity depends on** to their **contribution to the total uncertainty**. According to :ref:`linear error propagation theory ` (which is the method followed by :mod:`uncertainties`), the sum of the squares of these contributions is the squared uncertainty. The individual contributions to the uncertainty are more easily usable when the variables are **tagged**: >>> u = ufloat(1, 0.1, "u variable") # Tag >>> v = ufloat(10, 0.1, "v variable") >>> sum_value = u+2*v >>> sum_value 21.0+/-0.223606797749979 >>> for (var, error) in sum_value.error_components().items(): ... 
print("{}: {}".format(var.tag, error)) ... v variable: 0.2 u variable: 0.1 The variance (i.e. squared uncertainty) of the result (:data:`sum_value`) is the quadratic sum of these independent uncertainties, as it should be (``0.1**2 + 0.2**2``). The tags *do not have to be distinct*. For instance, *multiple* random variables can be tagged as ``"systematic"``, and their contribution to the total uncertainty of :data:`result` can simply be obtained as: >>> import math >>> x = ufloat(132, 0.02, "statistical") >>> y = ufloat(2.1, 0.05, "systematic") >>> z = ufloat(12, 0.1, "systematic") >>> result = x**y / z >>> syst_error = math.sqrt(sum( # Error from *all* systematic errors ... error**2 ... for (var, error) in result.error_components().items() ... if var.tag == "systematic")) >>> print(format(syst_error, ".3f")) 577.984 The remaining contribution to the uncertainty is: >>> other_error = math.sqrt(result.std_dev**2 - syst_error**2) The variance of :data:`result` is in fact simply the quadratic sum of these two errors, since the variables from :func:`result.error_components` are independent. .. index:: comparison operators .. index:: covariance matrix Covariance and correlation matrices =================================== Covariance matrix ----------------- The covariance matrix between various variables or calculated quantities can be simply obtained: >>> from uncertainties import covariance_matrix >>> sum_value = u+2*v >>> cov_matrix = covariance_matrix([u, v, sum_value]) has value :: [[0.01, 0.0, 0.01], [0.0, 0.01, 0.02], [0.01, 0.02, 0.05]] In this matrix, the zero covariances indicate that :data:`u` and :data:`v` are independent from each other; the last column shows that :data:`sum_value` does depend on these variables. 
The :mod:`uncertainties` package keeps track at all times of all correlations between quantities (variables and functions): >>> sum_value - (u+2*v) 0.0+/-0 Correlation matrix ------------------ If the NumPy_ package is available, the correlation matrix can be obtained as well: >>> from uncertainties import correlation_matrix >>> corr_matrix = correlation_matrix([u, v, sum_value]) >>> print(corr_matrix) [[1. 0. 0.4472136 ] [0. 1. 0.89442719] [0.4472136 0.89442719 1. ]] .. index:: correlations; correlated variables Correlated variables ==================== Reciprocally, **correlated variables can be created** transparently, provided that the NumPy_ package is available. Use of a covariance matrix -------------------------- Correlated variables can be obtained through the *covariance* matrix: >>> from uncertainties import correlated_values >>> (u2, v2, sum2) = correlated_values([1, 10, 21], cov_matrix) creates three new variables with the listed nominal values, and the given covariance matrix: >>> print(sum_value) 21.00+/-0.22 >>> print(sum2) 21.00+/-0.22 >>> print(format(sum2 - (u2+2*v2), ".6f")) 0.000000+/-0.000000 The theoretical value of the last expression is exactly zero, like for ``sum - (u+2*v)``, but numerical errors yield a small uncertainty (3e-9 is indeed very small compared to the uncertainty on :data:`sum2`: correlations should in fact cancel the uncertainty on :data:`sum2`). The covariance matrix is the desired one: >>> import numpy as np >>> print(np.array_str(np.array(covariance_matrix([u2, v2, sum2])), suppress_small=True)) [[0.01 0. 0.01] [0. 0.01 0.02] [0.01 0.02 0.05]] reproduces the original covariance matrix :data:`cov_matrix` (up to rounding errors). 
Use of a correlation matrix --------------------------- Alternatively, correlated values can be defined through: - a sequence of nominal values and standard deviations, and - a *correlation* matrix between each variable of this sequence (the correlation matrix is the covariance matrix normalized with individual standard deviations; it has ones on its diagonal)—in the form of a NumPy array-like object, e.g. a list of lists, or a NumPy array. Example: >>> from uncertainties import correlated_values_norm >>> (u3, v3, sum3) = correlated_values_norm( ... [(1, 0.1), (10, 0.1), (21, 0.22360679774997899)], ... corr_matrix, ... ) >>> print(u3) 1.00+/-0.10 The three returned numbers with uncertainties have the correct uncertainties and correlations (:data:`corr_matrix` can be recovered through :func:`correlation_matrix`). .. index:: single: C code; wrapping single: Fortran code; wrapping single: wrapping (C, Fortran,…) functions Making custom functions accept numbers with uncertainties ========================================================= This package allows **code which is not meant to be used with numbers with uncertainties to handle them anyway**. This is for instance useful when calling external functions (which are out of the user's control), including functions written in C or Fortran. Similarly, **functions that do not have a simple analytical form** can be automatically wrapped so as to also work with arguments that contain uncertainties. It is thus possible to take a function :func:`f` *that returns a single float*, and to automatically generalize it so that it also works with numbers with uncertainties: >>> from scipy.special import jv >>> from uncertainties import wrap as u_wrap >>> x = ufloat(2, 0.01) >>> jv(0, x) Traceback (most recent call last): ... 
TypeError: ufunc 'jv' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe'' >>> print(u_wrap(jv)(0, x)) 0.224+/-0.006 The new function :func:`wrapped_f` (optionally) *accepts a number with uncertainty* in place of any float *argument* of :func:`f` (note that floats contained instead *inside* arguments of :func:`f`, like in a list or a NumPy array, *cannot* be replaced by numbers with uncertainties). :func:`wrapped_f` returns the same values as :func:`f`, but with uncertainties. With a simple wrapping call like above, uncertainties in the function result are automatically calculated numerically. **Analytical uncertainty calculations can be performed** if derivatives are provided to :func:`wrap`. Miscellaneous utilities ======================= .. index:: standard deviation; on the fly modification It is sometimes useful to modify the error on certain parameters so as to study its impact on a final result. With this package, the **uncertainty of a variable can be changed** on the fly: >>> sum_value = u+2*v >>> sum_value 21.0+/-0.223606797749979 >>> prev_uncert = u.std_dev >>> u.std_dev = 10 >>> sum_value 21.0+/-10.00199980003999 >>> u.std_dev = prev_uncert The relevant concept is that :data:`sum_value` does depend on the variables :data:`u` and :data:`v`: the :mod:`uncertainties` package keeps track of this fact, as detailed in the :ref:`Technical Guide `, and uncertainties can thus be updated at any time. .. index:: pair: nominal value; uniform access (scalar) pair: uncertainty; uniform access (scalar) pair: standard deviation; uniform access (scalar) When manipulating ensembles of numbers, *some* of which contain uncertainties while others are simple floats, it can be useful to access the **nominal value and uncertainty of all numbers in a uniform manner**. 
This is what the :func:`nominal_value` and :func:`std_dev` functions do: >>> from uncertainties import nominal_value, std_dev >>> x = ufloat(0.2, 0.01) >>> print(nominal_value(x)) 0.2 >>> print(std_dev(x)) 0.01 >>> print(nominal_value(3)) 3 >>> print(std_dev(3)) 0.0 Finally, a utility method is provided that directly yields the `standard score `_ (number of standard deviations) between a number and a result with uncertainty: >>> x = ufloat(0.20, 0.01) >>> print(x.std_score(0.17)) -3.0 .. index:: derivatives .. _derivatives: Derivatives =========== Since the application of :ref:`linear error propagation theory ` involves the calculation of **derivatives**, this package automatically performs such calculations; users can thus easily get the derivative of an expression with respect to any of its variables: >>> u = ufloat(1, 0.1) >>> v = ufloat(10, 0.1) >>> sum_value = u+2*v >>> sum_value.derivatives[u] 1.0 >>> sum_value.derivatives[v] 2.0 These values are obtained with a :ref:`fast differentiation algorithm `. Additional information ====================== The capabilities of the :mod:`uncertainties` package in terms of array handling are detailed in :doc:`numpy_guide`. Details about the theory behind this package and implementation information are given in the :doc:`tech_guide`. .. _NumPy: http://numpy.scipy.org/ .. |minus2html| raw:: html -2 uncertainties-3.2.3/pyproject.toml000066400000000000000000000045501500152063300173070ustar00rootroot00000000000000[build-system] requires = ["setuptools>=64", "setuptools-scm>=8"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] write_to = "uncertainties/version.py" version_scheme = "post-release" [project] name = "uncertainties" dynamic = ["version"] authors = [ {name = "Eric O. 
LEBIGOT (EOL)", email = "eric.lebigot@normalesup.org"}, ] description = "calculations with values with uncertainties, error propagation" readme = "README.rst" requires-python = ">=3.8" keywords = [ "error propagation", "uncertainties", "uncertainty calculations", "standard deviation", "derivatives", "partial derivatives", "differentiation" ] license = {text = "Revised BSD License"} classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Other Audience", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Education", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Physics", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities" ] dependencies = [] [tool.setuptools] packages = ["uncertainties", "uncertainties.unumpy"] [project.urls] Documentation = "https://uncertainties.readthedocs.io/" Repository = "https://github.com/lmfit/uncertainties" Issues = "https://github.com/lmfit/uncertainties/issues" Changelog = "https://github.com/lmfit/uncertainties/blob/master/CHANGES.rst" [project.optional-dependencies] arrays = ["numpy"] test = ["pytest", "pytest_codspeed", "pytest_cov", "scipy"] doc = ["sphinx", "sphinx-copybutton", "python-docs-theme"] all = ["uncertainties[doc,test,arrays]"] [tool.pytest.ini_options] testpaths = 
["tests"] uncertainties-3.2.3/tests/000077500000000000000000000000001500152063300155315ustar00rootroot00000000000000uncertainties-3.2.3/tests/cases/000077500000000000000000000000001500152063300166275ustar00rootroot00000000000000uncertainties-3.2.3/tests/cases/generate_ufloat_method_cases.py000066400000000000000000000033171500152063300250670ustar00rootroot00000000000000import json from pathlib import Path import random unary_functions = ["__neg__", "__pos__"] binary_functions = [ "__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__truediv__", # Will fail if second value is 0 but that is unlikely "__rtruediv__", # Will fail if first value is 0 but that is unlikely "__pow__", # We will coerce the base to be positive "__rpow__", # We will coerce the base to be positive ] ufloat_method_cases_dict = {} for func_name in unary_functions: cases_list = [] for _ in range(10): nominal_value = random.uniform(-100, 100) std_dev = random.uniform(0, 100) ufloat_tuple = (nominal_value, std_dev) case = (ufloat_tuple,) cases_list.append(case) ufloat_method_cases_dict[func_name] = cases_list for func_name in binary_functions: cases_list = [] for _ in range(10): nominal_value_0 = random.uniform(-100, 100) if func_name == "__pow__": nominal_value_0 = abs(nominal_value_0) std_dev_0 = random.uniform(0, 100) ufloat_tuple_0 = (nominal_value_0, std_dev_0) nominal_value_1 = random.uniform(-100, 100) if func_name == "__rpow__": nominal_value_1 = abs(nominal_value_1) std_dev_1 = random.uniform(0, 100) ufloat_tuple_1 = (nominal_value_1, std_dev_1) case = (ufloat_tuple_0, ufloat_tuple_1) cases_list.append(case) ufloat_method_cases_dict[func_name] = cases_list ufloat_method_cases_json_path = Path(Path.cwd(), "ufloat_method_cases.json") if __name__ == "__main__": with open(ufloat_method_cases_json_path, "w") as f: json.dump(ufloat_method_cases_dict, f, indent=4) 
uncertainties-3.2.3/tests/cases/generate_umath_function_cases.py000066400000000000000000000054051500152063300252600ustar00rootroot00000000000000import json from pathlib import Path import random real_single_input_funcs = [ "asinh", "atan", "cos", "cosh", "degrees", "erf", "erfc", "exp", "expm1", "gamma", "lgamma", "radians", "sin", "sinh", "tan", "tanh", ] positive_single_input_funcs = [ "log", "log1p", "log10", "sqrt", ] minus_one_to_plus_one_single_input_funcs = [ "acos", "asin", "atanh", ] greater_than_one_single_input_funcs = ["acosh"] single_input_funcs = ( real_single_input_funcs + positive_single_input_funcs + minus_one_to_plus_one_single_input_funcs + greater_than_one_single_input_funcs ) real_double_input_funcs = ( "atan2", "pow", # We will coerce the base to be positive ) positive_double_input_funcs = ( "hypot", "log", ) double_input_funcs = real_double_input_funcs + positive_double_input_funcs umath_function_cases_dict = {} for func_name in single_input_funcs: cases_list = [] for _ in range(10): if func_name in real_single_input_funcs: min_val = -100 max_val = +100 elif func_name in positive_single_input_funcs: min_val = 0 max_val = +100 elif func_name in minus_one_to_plus_one_single_input_funcs: min_val = -1 max_val = +1 elif func_name in greater_than_one_single_input_funcs: min_val = +1 max_val = +100 else: raise ValueError nominal_value = random.uniform(min_val, max_val) std_dev = random.uniform(0, 100) ufloat_tuple = (nominal_value, std_dev) case = (ufloat_tuple,) cases_list.append(case) umath_function_cases_dict[func_name] = cases_list for func_name in double_input_funcs: cases_list = [] for _ in range(10): if func_name in real_double_input_funcs: min_val = -100 elif func_name in positive_double_input_funcs: min_val = 0 else: raise ValueError max_val = +100 nominal_value_0 = random.uniform(min_val, max_val) if func_name == "pow": nominal_value_0 = abs(nominal_value_0) std_dev_0 = random.uniform(0, 100) ufloat_tuple_0 = (nominal_value_0, std_dev_0) 
nominal_value_1 = random.uniform(min_val, max_val) std_dev_1 = random.uniform(0, 100) ufloat_tuple_1 = (nominal_value_1, std_dev_1) case = (ufloat_tuple_0, ufloat_tuple_1) cases_list.append(case) umath_function_cases_dict[func_name] = cases_list umath_function_cases_json_path = Path(Path.cwd(), "umath_function_cases.json") if __name__ == "__main__": with open(umath_function_cases_json_path, "w") as f: json.dump(umath_function_cases_dict, f, indent=4) uncertainties-3.2.3/tests/cases/ufloat_method_cases.json000066400000000000000000000575741500152063300235540ustar00rootroot00000000000000{ "__neg__": [ [ [ 44.526397550740086, 62.56564223004092 ] ], [ [ -3.8543430709897564, 16.432566107440614 ] ], [ [ -51.97779697191089, 66.97351746633987 ] ], [ [ 2.9483391837429593, 93.33179341932865 ] ], [ [ -11.133300066266983, 83.13148995114268 ] ], [ [ -4.114453143716219, 70.05633851136446 ] ], [ [ -62.443341798070826, 25.126388035122872 ] ], [ [ 56.83828585942939, 63.32926267886724 ] ], [ [ 15.44820477243951, 74.88785494089683 ] ], [ [ -46.542699067190554, 76.07962824216686 ] ] ], "__pos__": [ [ [ 8.81928634663167, 20.326915114837075 ] ], [ [ 40.99483480877609, 53.49744261669285 ] ], [ [ 76.89955644047708, 39.86445120845894 ] ], [ [ 12.799461122045287, 83.94344441489156 ] ], [ [ 9.6011888871314, 12.412920841562158 ] ], [ [ 18.525687282557584, 74.03893964186497 ] ], [ [ 68.04540983305964, 14.895435076310138 ] ], [ [ 36.297450396949614, 91.49432718790649 ] ], [ [ -69.16200363945926, 54.754154042333525 ] ], [ [ -70.80168621326868, 54.900418880500844 ] ] ], "__add__": [ [ [ 63.24615770020972, 30.2937534823033 ], [ 41.74602753345761, 75.4331305471685 ] ], [ [ -97.80732163899255, 76.52300672902139 ], [ 35.786996331698816, 10.149726185979768 ] ], [ [ -73.63318205539622, 43.261021493905986 ], [ -61.28704381958037, 26.27701900273437 ] ], [ [ -21.388629583649106, 32.79724434870251 ], [ -74.04295317264477, 36.39914060175931 ] ], [ [ -24.872171554822515, 60.89805258508713 ], [ 
46.90018532361435, 19.037605036491378 ] ], [ [ -11.67608372715003, 0.05584394850707275 ], [ 8.629320952127472, 12.56971226839575 ] ], [ [ 31.588772817165335, 35.93387982516412 ], [ 21.44241417731496, 85.55953080786163 ] ], [ [ 10.694802135914244, 99.01523916687796 ], [ 44.746204936673166, 7.760830559785292 ] ], [ [ 65.0972567792713, 81.72412814103761 ], [ 23.685439283414283, 1.9230109096926462 ] ], [ [ 16.73499754534238, 30.634082469375855 ], [ -0.13327856055238385, 20.810864611131187 ] ] ], "__radd__": [ [ [ 16.49893004008429, 6.968473540848619 ], [ 20.47940503375743, 28.49826397041253 ] ], [ [ 92.22985095842034, 17.298023164325492 ], [ -69.51121920197957, 33.87633927730599 ] ], [ [ 81.84491417965808, 21.808237630956363 ], [ -49.19020839158566, 26.466610577060923 ] ], [ [ 10.570519388331064, 47.26580357384131 ], [ -4.636520398533236, 79.55327514595704 ] ], [ [ -16.030245501335045, 72.14013360167343 ], [ -42.2579000675849, 35.91614809356616 ] ], [ [ -54.14817763816953, 28.787256113199756 ], [ -30.130167505967023, 43.65362270802603 ] ], [ [ 0.9689390134056168, 89.77280718145414 ], [ -51.7397014189765, 10.508955984076508 ] ], [ [ 45.57403891363475, 37.87260513020488 ], [ -16.29096504035897, 52.5129725523241 ] ], [ [ 84.71212683503936, 7.867506166284488 ], [ 92.53855754238936, 64.03510197439945 ] ], [ [ -84.41439896655463, 88.96200770637468 ], [ -66.96944295716409, 82.70379990742892 ] ] ], "__sub__": [ [ [ -33.566615024219544, 80.7754605870885 ], [ 45.22876958268202, 16.35857759707424 ] ], [ [ 95.58346684117234, 47.87516151791097 ], [ -8.931720721668924, 57.810574147905214 ] ], [ [ 91.70031736586236, 47.7922251392611 ], [ 84.91298115132128, 92.56526311400324 ] ], [ [ 78.42117157031748, 84.46494220705598 ], [ 28.185446755703083, 54.63905698397456 ] ], [ [ 10.840868364778643, 79.2057524717243 ], [ 16.57023223925816, 74.21592471218742 ] ], [ [ -17.92652045123255, 27.574368170393114 ], [ -37.38089846139847, 60.95338303501627 ] ], [ [ -24.9834908441251, 92.10528392261473 
], [ 0.29470230730059654, 42.99153339163875 ] ], [ [ -14.883491546417844, 75.86624542970301 ], [ -19.32971653687261, 85.2635140330157 ] ], [ [ 86.56566421855726, 72.46776637450407 ], [ -26.579416974756114, 76.5141673203135 ] ], [ [ -27.321094048451585, 2.805470405749222 ], [ 61.008527920935734, 57.35655246672869 ] ] ], "__rsub__": [ [ [ -48.385045472017055, 59.68437188627467 ], [ -34.255650232296105, 34.33461645166371 ] ], [ [ 54.75354669546749, 28.466500298822783 ], [ 61.73714299586618, 17.879669278925814 ] ], [ [ 29.15417944361235, 15.491592741834515 ], [ -81.09542707829378, 40.82294755464607 ] ], [ [ 25.18373596111509, 73.86147730094272 ], [ 9.003790832376396, 59.895064759780325 ] ], [ [ 15.085900219940925, 11.806849686308961 ], [ 53.3754743000338, 82.2322395885899 ] ], [ [ -20.410733845085446, 92.28335211561752 ], [ 66.279176153805, 71.3910003153406 ] ], [ [ -36.954991307609376, 34.61373711554876 ], [ -96.8206724269661, 0.4845513610238106 ] ], [ [ 72.65791052433909, 73.05527780861297 ], [ 85.23341640894176, 11.477497638574608 ] ], [ [ -24.003754811078906, 84.05198231110104 ], [ -57.784125432147526, 38.78657015190643 ] ], [ [ 80.34957357085551, 62.05325359972276 ], [ -97.90643091756654, 20.389330402180118 ] ] ], "__mul__": [ [ [ -14.21029432408676, 84.13146364183797 ], [ -90.35446099561779, 3.415805654664006 ] ], [ [ -71.7120910175769, 31.44778948644499 ], [ -57.49308618625331, 89.06749401739212 ] ], [ [ -72.97137590384838, 91.28945461369814 ], [ -28.774571671079926, 5.127452428789681 ] ], [ [ -50.14136137965761, 98.186579880004 ], [ -36.271783551901905, 67.50040487592469 ] ], [ [ 18.426453813123643, 40.03389874833568 ], [ 81.95777394581745, 93.14426811710375 ] ], [ [ -99.66292297782446, 37.08647660869768 ], [ -19.120539513872956, 89.54026743837092 ] ], [ [ -42.57312251073324, 99.20934248137064 ], [ -3.6661701105389852, 62.03724954391823 ] ], [ [ -55.168305258320615, 89.33231561071182 ], [ -8.869166748490741, 56.16969544865993 ] ], [ [ 77.08852799253464, 
98.7387558500092 ], [ -48.2123241252812, 52.48501139726537 ] ], [ [ 88.95891965671962, 76.21028937793504 ], [ -73.6154232432523, 69.55765486681209 ] ] ], "__rmul__": [ [ [ -64.40877124189058, 60.78081766968877 ], [ 1.4470430725608878, 17.993492604808313 ] ], [ [ -17.781365487888664, 45.66554129079676 ], [ 78.12607641975961, 78.49286583360599 ] ], [ [ 18.160475591779274, 29.457531669278282 ], [ -14.419170535612253, 3.9107660362597496 ] ], [ [ -38.43568944983502, 66.99761201444059 ], [ -49.984442562963636, 3.720397974304135 ] ], [ [ 48.16577997720904, 65.1472209002918 ], [ -27.66555447192765, 67.14857996584837 ] ], [ [ -22.710903957000255, 67.36984047037224 ], [ -70.00374305633754, 0.9476294489061843 ] ], [ [ -72.71509251605113, 58.879176845045166 ], [ -3.6393981296912443, 96.35261729747911 ] ], [ [ -70.05750228694774, 0.8685144590405591 ], [ 49.261259337545255, 55.66359703184265 ] ], [ [ 43.884315142444876, 31.745557120282875 ], [ -10.431999854052592, 26.228803885432338 ] ], [ [ -84.62383842596353, 7.138733047603074 ], [ -88.76595487390892, 28.604181061896682 ] ] ], "__truediv__": [ [ [ -51.18663684475606, 34.84670823330749 ], [ -40.66345841621326, 47.48488499868421 ] ], [ [ -21.486182569717897, 86.26161221969954 ], [ 46.851499952400786, 60.596572733801935 ] ], [ [ 15.27967047116185, 8.774900255957995 ], [ -18.64302884476703, 71.87612810181577 ] ], [ [ 71.05057310941118, 89.3380760941515 ], [ -29.938964846214503, 91.45781923315627 ] ], [ [ 55.487367800679124, 54.56384556816576 ], [ 73.4050038132618, 44.610868513683734 ] ], [ [ -28.45891982660511, 90.67135260015328 ], [ -47.541038243610046, 42.39646766500723 ] ], [ [ 66.5212224203637, 49.60332256903178 ], [ -66.56939592402415, 76.24704794130479 ] ], [ [ -53.283184765229706, 43.75183253248639 ], [ -21.237774767690894, 42.75679474726496 ] ], [ [ 32.78632977503298, 26.59933754830507 ], [ 39.88268787803224, 46.50064967723759 ] ], [ [ -85.77024560511477, 3.041349210555633 ], [ -33.0694946992881, 94.26299244687442 ] ] ], 
"__rtruediv__": [ [ [ -51.558214830419516, 67.96587002288966 ], [ -15.615726168242517, 76.56142406403326 ] ], [ [ 69.9719069242474, 97.11575354507596 ], [ -53.766186098928785, 88.87539659343588 ] ], [ [ 52.51295882077713, 63.92725859487227 ], [ 76.2044675516118, 61.37521595092878 ] ], [ [ 88.00868874375288, 50.59078815002237 ], [ 76.2880627778369, 60.86830482020721 ] ], [ [ 92.18815269054562, 54.57166431751084 ], [ -42.40329960576121, 92.13516191132295 ] ], [ [ 57.97187792362851, 16.578250220081227 ], [ -87.09094531751751, 45.09943071023176 ] ], [ [ -95.49757939661076, 36.21876994117046 ], [ 99.96153561629114, 88.94820805750103 ] ], [ [ -0.4781964449222471, 22.89624909415896 ], [ 39.669264754075954, 29.793423719913637 ] ], [ [ 63.38656974139093, 68.00949332477293 ], [ 99.85933035195612, 30.782846378196137 ] ], [ [ -30.07602400157458, 60.90895263920093 ], [ -69.43317991401699, 45.4044159577097 ] ] ], "__pow__": [ [ [ 66.09572084325436, 83.76847568722823 ], [ 74.33994676894133, 18.84336240472423 ] ], [ [ 94.39372309675775, 41.812675591373704 ], [ 35.1903664220016, 76.48998868822176 ] ], [ [ 26.975866095374982, 22.649640993786846 ], [ -32.50941650165362, 48.99939993157629 ] ], [ [ 35.514904644226064, 77.43125857817313 ], [ 82.45349521680757, 12.680577773764268 ] ], [ [ 63.787257658780796, 27.62165350490563 ], [ 90.99799369635701, 43.75426292611635 ] ], [ [ 50.62388500618309, 64.93125985905428 ], [ -64.93481113906353, 58.14168460773166 ] ], [ [ 86.50434255974938, 80.91482333770757 ], [ -42.09635740790212, 38.16372561193941 ] ], [ [ 98.21887137588942, 45.28189475186926 ], [ 28.44013422866351, 65.82393373982453 ] ], [ [ 87.87275208274296, 22.614455428779646 ], [ 80.10160553606181, 34.645921759055675 ] ], [ [ 56.34116651799036, 73.69554099317848 ], [ -3.936927926965609, 76.36977235867654 ] ] ], "__rpow__": [ [ [ -92.62435102026802, 7.409570792637266 ], [ 27.16591726614739, 31.051221744884227 ] ], [ [ 97.55217016604146, 37.422258750070924 ], [ 53.36925876360314, 
60.952052545591116 ] ], [ [ -80.95690245197133, 40.56227591804994 ], [ 89.48368078109254, 97.35173819872749 ] ], [ [ 78.593320941857, 58.070015349394346 ], [ 77.30551556049988, 51.92187014060751 ] ], [ [ -60.664155429348334, 11.033664104369723 ], [ 27.78995074898522, 50.04001527749108 ] ], [ [ -80.88161169797674, 95.01501385659424 ], [ 21.522114263959338, 4.295069764070913 ] ], [ [ -86.01160768385874, 26.01474177672919 ], [ 91.83908103938077, 31.848429819063874 ] ], [ [ 63.27570619850309, 19.23259936086279 ], [ 90.04812396154172, 64.9322144362053 ] ], [ [ 84.78337217683051, 39.17159199586301 ], [ 41.2276692141854, 76.68414116819076 ] ], [ [ 14.611213886028736, 44.074207684575875 ], [ 94.79561879817825, 45.81079102809578 ] ] ] } uncertainties-3.2.3/tests/cases/umath_function_cases.json000066400000000000000000001073721500152063300237350ustar00rootroot00000000000000{ "asinh": [ [ [ -65.04711551601186, 8.196204796683782 ] ], [ [ -95.98491692898313, 3.833062355484995 ] ], [ [ -34.192019282760185, 17.33792554520186 ] ], [ [ -43.34186803743747, 83.38758236689901 ] ], [ [ -10.685655630511121, 28.20777285164612 ] ], [ [ 22.12799233002616, 14.92898219535589 ] ], [ [ -71.45323147093794, 21.26443345799075 ] ], [ [ 37.54943955050604, 94.29521401305556 ] ], [ [ 28.635332285239542, 60.85446299415287 ] ], [ [ 22.278553467660657, 1.3690865611733383 ] ] ], "atan": [ [ [ 49.08864986324261, 60.513540243522556 ] ], [ [ -49.712066456904225, 15.449895468974617 ] ], [ [ -62.404807050147596, 66.98385288739583 ] ], [ [ 53.698444740187114, 41.00601213157869 ] ], [ [ 33.52578289145225, 15.439840817833229 ] ], [ [ -3.8044017786586153, 0.8904172302929503 ] ], [ [ -59.771857732418155, 20.34163519010924 ] ], [ [ -25.942642670218575, 43.99791159100366 ] ], [ [ -83.32734126170398, 10.649968625096061 ] ], [ [ -35.06040268843904, 6.814609672123351 ] ] ], "cos": [ [ [ -10.44542952960181, 9.96098544187336 ] ], [ [ 23.97304114785956, 40.84558332736524 ] ], [ [ -66.1737787647941, 60.890795060865635 ] ], 
[ [ -70.10563440020528, 97.21469977221369 ] ], [ [ 88.14737909638032, 46.596278935341076 ] ], [ [ -46.124198647244306, 67.61483110800756 ] ], [ [ 28.519365883648618, 10.238522099233672 ] ], [ [ 79.02051036882546, 32.632001494114014 ] ], [ [ -60.228700313281806, 22.672866745892094 ] ], [ [ 92.63036369070474, 34.97609626729607 ] ] ], "cosh": [ [ [ 68.83976149557515, 37.56320463532011 ] ], [ [ -53.36934120155128, 44.974232217158125 ] ], [ [ -43.2758934306263, 66.75205128609662 ] ], [ [ -40.24107103707859, 66.87919204643588 ] ], [ [ -99.16619515270726, 86.68121917493109 ] ], [ [ -1.4439326932447187, 78.77480832642243 ] ], [ [ -59.200224663863054, 74.63850632045343 ] ], [ [ 29.98756012924514, 73.70166407706957 ] ], [ [ -44.23099585542411, 41.79067082095712 ] ], [ [ 17.215170806459227, 9.003362713987983 ] ] ], "degrees": [ [ [ -27.39321185651184, 54.97717770339894 ] ], [ [ 72.80779572760812, 0.7838349848705906 ] ], [ [ -47.10926244714155, 72.64748549196126 ] ], [ [ -37.14170408341568, 85.60650746371505 ] ], [ [ -72.9284502122705, 91.19400398557522 ] ], [ [ 92.3453220301985, 20.731115130977873 ] ], [ [ 88.95827283377272, 25.737741626322087 ] ], [ [ 4.5724488791494196, 58.46678747538312 ] ], [ [ -38.79379682927977, 60.34001933715195 ] ], [ [ 76.85960641106169, 48.37000448400808 ] ] ], "erf": [ [ [ -83.20354457834935, 60.6180491448666 ] ], [ [ 88.39036006577146, 78.16585482617539 ] ], [ [ 73.16934201935823, 58.19696363433855 ] ], [ [ -96.70531949280206, 62.26300241893101 ] ], [ [ -28.44536081912932, 93.58630883484864 ] ], [ [ 34.83276247682264, 94.84012873368258 ] ], [ [ 68.87562722936556, 51.83452801235975 ] ], [ [ 90.4196059302324, 82.8065129681794 ] ], [ [ 35.85784345529862, 16.683899141396108 ] ], [ [ -15.58470519073991, 18.944315239941268 ] ] ], "erfc": [ [ [ -38.29683901866392, 50.7593513208111 ] ], [ [ -84.46632391406999, 14.183730325773048 ] ], [ [ -62.16497302165409, 77.57121783632644 ] ], [ [ -74.84465896316016, 84.767615582884 ] ], [ [ 27.622792247084675, 
50.25345358546262 ] ], [ [ -22.251620815652558, 33.92059276551419 ] ], [ [ -88.14820164954523, 37.95440886091799 ] ], [ [ 59.53275360883333, 24.446475178944148 ] ], [ [ 17.188020949089847, 92.45771188490139 ] ], [ [ 53.88812658260042, 7.634606045543957 ] ] ], "exp": [ [ [ -29.961892167069593, 54.08241176874278 ] ], [ [ 17.52203823669703, 71.42507537131438 ] ], [ [ -25.907404988809816, 76.05973781954066 ] ], [ [ 27.65803628375909, 98.29939812792261 ] ], [ [ -66.80590139385404, 45.71307117420562 ] ], [ [ 31.516994411255695, 57.86122702495518 ] ], [ [ 99.75078830679851, 90.52566199789037 ] ], [ [ -43.86961920198482, 43.271201729082435 ] ], [ [ 26.99533681530461, 93.50316508643822 ] ], [ [ 61.32702903977571, 7.97074108214969 ] ] ], "expm1": [ [ [ -35.29047598849611, 99.61529825707879 ] ], [ [ 31.216856493203437, 83.27330037596717 ] ], [ [ 0.05441546480066961, 49.240682418745806 ] ], [ [ -76.27369367592414, 56.45319851102075 ] ], [ [ -55.07117049240369, 64.0622893503791 ] ], [ [ -21.145619768297365, 7.728793780427146 ] ], [ [ 94.8434238917406, 53.63862297912463 ] ], [ [ -77.30057920614826, 97.82312155280253 ] ], [ [ 45.722905640975824, 39.711875936974394 ] ], [ [ 52.93994801299931, 79.73322319450575 ] ] ], "gamma": [ [ [ 78.1046770853387, 3.661391035056749 ] ], [ [ -4.970036270057562, 14.585292757408919 ] ], [ [ 97.47095759775556, 4.312702303330729 ] ], [ [ -49.94445913488714, 40.89441690046608 ] ], [ [ -78.65441371215177, 73.40547956690492 ] ], [ [ -2.978173071715844, 90.88209866623477 ] ], [ [ -31.3691405370719, 96.20547897326477 ] ], [ [ -92.3683904216717, 15.858598092287624 ] ], [ [ -73.03389131230098, 62.65866180499078 ] ], [ [ -99.79328842638839, 6.590907922182021 ] ] ], "lgamma": [ [ [ -42.253118536089886, 41.14466759082439 ] ], [ [ -27.565154285214533, 56.893252596160146 ] ], [ [ -81.3342208215102, 89.28070563219251 ] ], [ [ 0.7600463416793843, 81.29710916800877 ] ], [ [ 20.213121385031968, 39.05381126072999 ] ], [ [ -20.30902865134057, 95.08903385805571 ] ], [ 
[ -5.504440721438186, 48.54680160631044 ] ], [ [ 12.379441492658302, 56.796768253322085 ] ], [ [ -5.254223733251791, 12.016755818357916 ] ], [ [ -9.897055476948239, 72.33102573855241 ] ] ], "radians": [ [ [ -70.33418616686633, 55.408249565819304 ] ], [ [ 9.031202912309737, 68.77591744836849 ] ], [ [ 10.975285558774644, 42.18349754170565 ] ], [ [ -56.318161258133024, 38.24696968873027 ] ], [ [ 90.0032851462561, 13.591997510752908 ] ], [ [ 55.16627061816598, 50.08400354313963 ] ], [ [ -93.39793507011882, 32.13091060770441 ] ], [ [ -17.253894454031453, 79.64915821727028 ] ], [ [ 63.66015933456015, 5.980001922232336 ] ], [ [ 9.237762719794844, 95.66468169077912 ] ] ], "sin": [ [ [ -28.670939129374418, 12.631735469042981 ] ], [ [ 34.24527121287042, 89.15892904494581 ] ], [ [ 59.32073119239777, 52.988447116272084 ] ], [ [ -75.12098359975252, 13.14896826777967 ] ], [ [ 86.17366385270827, 30.79630607070062 ] ], [ [ 63.49624167577642, 47.306268449846925 ] ], [ [ -93.19258715773108, 29.258564616909656 ] ], [ [ 63.52182091967501, 96.7216758883016 ] ], [ [ 15.696156599421698, 75.16358627832209 ] ], [ [ -14.224541444308386, 59.24068198853746 ] ] ], "sinh": [ [ [ -59.10194239838171, 52.0742366429454 ] ], [ [ -38.69607405992917, 90.79202781455706 ] ], [ [ 22.72345237397404, 60.04194612995719 ] ], [ [ -77.81172865298127, 82.34244427194852 ] ], [ [ -26.23230872509943, 27.06499845406316 ] ], [ [ 54.696526931543275, 68.59670754405614 ] ], [ [ 79.25158990446616, 24.336788781489393 ] ], [ [ 63.719490619221915, 87.30879266549567 ] ], [ [ 41.298626524034944, 51.61805618721377 ] ], [ [ -25.866543978260708, 14.560986272250165 ] ] ], "tan": [ [ [ -65.96734970175295, 24.024736836778803 ] ], [ [ 95.17157687218187, 2.6395634108068 ] ], [ [ -23.115761831292446, 34.4100613965083 ] ], [ [ -42.96947698651658, 48.44148555705073 ] ], [ [ 47.97315912055154, 63.10017921738273 ] ], [ [ -43.765682244246705, 93.23469778854525 ] ], [ [ 61.94833993204958, 59.84647746282775 ] ], [ [ -10.870168161106662, 
82.58740489351374 ] ], [ [ 41.634230381205555, 0.987475475004318 ] ], [ [ 12.095537159359765, 64.32274973836327 ] ] ], "tanh": [ [ [ 63.39674454226309, 69.48981789212412 ] ], [ [ 23.77940734065389, 96.26279019121175 ] ], [ [ -25.426097953507366, 68.38919866680236 ] ], [ [ 14.770058990991657, 75.723298475845 ] ], [ [ -18.653797476326517, 74.18153930013742 ] ], [ [ -75.7788958209118, 93.96625055746412 ] ], [ [ 47.08601186454305, 39.6331115775782 ] ], [ [ -16.54930386893861, 23.902416610374654 ] ], [ [ -8.35893515343777, 7.963671036025888 ] ], [ [ -39.160796292727376, 95.61819342735203 ] ] ], "log": [ [ [ 36.008006573567776, 89.85903631666804 ], [ 95.53615930342254, 17.926769923409115 ] ], [ [ 40.969490821667534, 43.31270588812471 ], [ 79.79183836639659, 89.14823262730935 ] ], [ [ 10.47608570457249, 67.15740248961815 ], [ 24.085005927408286, 88.36217621082189 ] ], [ [ 98.64002335321567, 43.95001601569709 ], [ 56.011528274825864, 81.72515395269171 ] ], [ [ 25.675802504037325, 33.13653549234643 ], [ 37.9737662780015, 90.63645976178888 ] ], [ [ 90.64149566548545, 96.71639852899354 ], [ 65.78965949154407, 3.768045081937632 ] ], [ [ 49.701957760691805, 34.08993470174768 ], [ 79.17465392386124, 13.797012592664814 ] ], [ [ 24.91144559658107, 73.44831819762338 ], [ 45.11721257229973, 16.803277118670536 ] ], [ [ 82.3947533392365, 86.18245320238465 ], [ 69.13595284495648, 37.62641920651853 ] ], [ [ 46.84439761452146, 56.37619799839243 ], [ 57.73912895614494, 80.48063960196463 ] ] ], "log1p": [ [ [ 10.953807523608216, 10.814151874930445 ] ], [ [ 81.03904129946102, 46.58878543617191 ] ], [ [ 22.914012328587695, 3.97479678124405 ] ], [ [ 9.530298855402696, 29.565846583777187 ] ], [ [ 23.140627472025265, 75.95184627090954 ] ], [ [ 56.50314827372173, 61.79630972992697 ] ], [ [ 20.632696936743645, 35.20619049006575 ] ], [ [ 57.420044687333935, 24.722587585098754 ] ], [ [ 64.04346894681383, 23.92702850050672 ] ], [ [ 76.61000651472597, 72.16854018252434 ] ] ], "log10": [ [ [ 
79.2864399699313, 87.71577031747503 ] ], [ [ 59.03507847348369, 20.26879649394194 ] ], [ [ 81.77301334343807, 82.40252744804589 ] ], [ [ 24.39630095653136, 77.7749296744897 ] ], [ [ 63.40345787970574, 98.09294895668769 ] ], [ [ 91.90572138557447, 42.703097768127385 ] ], [ [ 69.04100497634063, 35.551044861779836 ] ], [ [ 13.624784679864844, 85.93782845524998 ] ], [ [ 41.433465079564144, 99.46740989350089 ] ], [ [ 10.679648941175957, 85.68118440916852 ] ] ], "sqrt": [ [ [ 55.591222839468976, 14.069249125659145 ] ], [ [ 34.627571681904335, 85.06293610803016 ] ], [ [ 70.62186574007933, 5.787023767542077 ] ], [ [ 34.216161546907465, 76.85688425581728 ] ], [ [ 38.3895122736014, 32.73510385765985 ] ], [ [ 16.856831960402918, 46.800642602549146 ] ], [ [ 1.3933264766970432, 72.16050142760318 ] ], [ [ 8.840524640724269, 98.15198529419816 ] ], [ [ 37.39148875046791, 61.06572693466741 ] ], [ [ 80.98560643634696, 2.4777389334587507 ] ] ], "acos": [ [ [ 0.5745729187648279, 69.74930527699318 ] ], [ [ 0.0505878960053594, 73.43228822601499 ] ], [ [ 0.21455240815298882, 79.58788129571442 ] ], [ [ -0.47862813985362873, 69.11321954356346 ] ], [ [ -0.2963392526048292, 61.04494833957011 ] ], [ [ 0.36586428287973294, 35.89838347506694 ] ], [ [ -0.7924203151075029, 53.049666897242055 ] ], [ [ 0.8154794275686374, 99.09014443733189 ] ], [ [ -0.6650595935385306, 20.808292749890512 ] ], [ [ 0.5401631964007902, 44.19838979755113 ] ] ], "asin": [ [ [ 0.22627682107042335, 98.768942656348 ] ], [ [ 0.09702395309546419, 17.49363410353315 ] ], [ [ 0.6661323476086958, 53.49513454801358 ] ], [ [ 0.08009327183290171, 94.54321818835737 ] ], [ [ -0.6277576328850889, 16.887739618926524 ] ], [ [ -0.6456266544120572, 82.28931304520057 ] ], [ [ -0.7083727581044355, 5.163558605187024 ] ], [ [ -0.6580362653081369, 45.54641915563757 ] ], [ [ -0.9705738499441547, 22.68792650865136 ] ], [ [ -0.18426698356700544, 99.6107555765626 ] ] ], "atanh": [ [ [ -0.06375643928203023, 29.995560092416362 ] ], [ [ 
-0.8193191510099629, 57.96393360262796 ] ], [ [ -0.38199564360070704, 8.086304246985254 ] ], [ [ 0.1261927983876403, 0.5272157797109545 ] ], [ [ 0.19522977751860093, 33.613272287132425 ] ], [ [ -0.6723452888956247, 73.04580565634046 ] ], [ [ -0.6099762410879246, 25.59095872046996 ] ], [ [ -0.2648935836667652, 43.05521372250368 ] ], [ [ 0.7402494125103309, 60.934786914929006 ] ], [ [ -0.8194076076584587, 46.79154337150775 ] ] ], "acosh": [ [ [ 65.68327946554803, 11.486805557300904 ] ], [ [ 72.46375623719894, 93.08384610250869 ] ], [ [ 76.64827660812554, 86.51608834267515 ] ], [ [ 50.64564933712884, 92.15104033170259 ] ], [ [ 46.81193725776084, 20.694424470626505 ] ], [ [ 22.03359808265206, 96.0650698159477 ] ], [ [ 92.8454035350182, 75.27360150282128 ] ], [ [ 16.915178558624117, 11.90238717743104 ] ], [ [ 66.91958918506394, 78.21152477129095 ] ], [ [ 20.208433005453887, 0.7884762002405843 ] ] ], "atan2": [ [ [ 5.345336080187863, 44.30887884739082 ], [ -34.93702893462121, 56.856602829054346 ] ], [ [ 94.06909202798442, 33.36538500357922 ], [ 26.183654659625105, 27.027759021022636 ] ], [ [ -80.83455210769273, 90.23361936672649 ], [ -12.647198073042063, 78.34865648499367 ] ], [ [ -13.501557073083646, 53.22034375806718 ], [ 9.7281544851418, 19.35259036372663 ] ], [ [ -14.84909469196198, 16.622757561027424 ], [ 24.39313391851134, 27.028191930126145 ] ], [ [ 51.870632714187565, 17.42737206142676 ], [ -5.758812347567115, 91.67282577370561 ] ], [ [ -43.14204127092511, 55.32319569528944 ], [ -49.74327234145308, 8.89642636837047 ] ], [ [ 52.99128772498247, 34.554837464953266 ], [ 12.488076436022368, 49.50154375425132 ] ], [ [ -21.18966291754974, 63.34805696540988 ], [ 95.94088011077221, 76.47425376162904 ] ], [ [ -90.65807311117884, 81.37558722667644 ], [ 55.882790636595644, 38.20434469058278 ] ] ], "pow": [ [ [ 15.97842039222985, 26.860370884719476 ], [ -14.538827989473504, 39.87819676818087 ] ], [ [ 82.15984903975783, 63.35536409353022 ], [ -9.764697413095334, 
49.67400721624159 ] ], [ [ 53.58015618096894, 97.92509030116993 ], [ 29.484437331158745, 95.64911934219106 ] ], [ [ 61.016382838892305, 77.4258439279853 ], [ 98.22265252229377, 41.37745097455555 ] ], [ [ 90.23296383518102, 27.3297220939263 ], [ -69.34452610081124, 65.34668500204923 ] ], [ [ 52.76043967290263, 48.23875906802893 ], [ -19.071988083657757, 53.599120326530404 ] ], [ [ 56.47516826965088, 35.55277031609961 ], [ 84.83583318066067, 22.24342523683238 ] ], [ [ 24.30477494506293, 6.581863045609893 ], [ -53.52444258166729, 98.70058847345483 ] ], [ [ 55.918833775933784, 51.86179823417698 ], [ -2.24891133521146, 5.469712470309684 ] ], [ [ 89.92509745593873, 83.40273397060614 ], [ -41.87867012451241, 17.625317495049785 ] ] ], "hypot": [ [ [ 0.6112467005825462, 40.92389929924891 ], [ 63.86069678366384, 59.90803408432661 ] ], [ [ 33.24460374248037, 51.14103383977932 ], [ 84.4741055686211, 9.259576197807895 ] ], [ [ 89.75689539684524, 63.419731403557044 ], [ 26.294688038330648, 50.78980711358334 ] ], [ [ 87.12973983416504, 65.5562958710385 ], [ 48.47560466849165, 80.6487755756807 ] ], [ [ 96.8888610724462, 83.11516779094555 ], [ 64.514268439172, 19.36912365755431 ] ], [ [ 2.225730972856066, 95.86515570741977 ], [ 75.91191573319841, 57.47330423634558 ] ], [ [ 25.822663182194216, 16.436458532575205 ], [ 72.67748916730963, 84.03405603193278 ] ], [ [ 10.739910075232329, 42.42512417381976 ], [ 16.94697295819758, 35.40398414485182 ] ], [ [ 67.21668697675604, 61.045988781248674 ], [ 95.01466179170039, 16.394240083515843 ] ], [ [ 32.61018185055363, 33.21255855913563 ], [ 18.504058467573138, 71.67162926379548 ] ] ] } uncertainties-3.2.3/tests/helpers.py000066400000000000000000000057711500152063300175570ustar00rootroot00000000000000from math import isclose, isnan, isinf import uncertainties.core as uncert_core def nan_close(first, second): if isnan(first): return isnan(second) else: return isclose(first, second) ############################################################### # 
TODO: move to uncertainties/testing.py ############################################################################### # Utilities for unit testing def numbers_close(x, y, tolerance=1e-6): """ Returns True if the given floats are close enough. The given tolerance is the relative difference allowed, or the absolute difference, if one of the numbers is 0. NaN is allowed: it is considered close to itself. """ # !!! Python 3.5+ has math.isclose(): maybe it could be used here. # Instead of using a try and ZeroDivisionError, we do a test, # NaN could appear silently: if x != 0 and y != 0: if isinf(x): return isinf(y) elif isnan(x): return isnan(y) else: # Symmetric form of the test: return 2 * abs(x - y) / (abs(x) + abs(y)) < tolerance else: # Either x or y is zero return abs(x or y) < tolerance def ufloats_close(x, y, tolerance=1e-6): """ Tests if two numbers with uncertainties are close, as random variables: this is stronger than testing whether their nominal value and standard deviation are close. The tolerance is applied to both the nominal value and the standard deviation of the difference between the numbers. """ diff = x - y return numbers_close(diff.nominal_value, 0, tolerance) and numbers_close( diff.std_dev, 0, tolerance ) ############################################################################### try: import numpy # noqa except ImportError: pass else: def uarrays_close(m1, m2, precision=1e-4): """ Returns True iff m1 and m2 are almost equal, where elements can be either floats or AffineScalarFunc objects. Two independent AffineScalarFunc objects are deemed equal if both their nominal value and uncertainty are equal (up to the given precision). m1, m2 -- NumPy arrays. precision -- precision passed through to uncertainties.test_uncertainties.numbers_close(). """ # ! numpy.allclose() is similar to this function, but does not # work on arrays that contain numbers with uncertainties, because # of the isinf() function. 
for elmt1, elmt2 in zip(m1.flat, m2.flat): # For a simpler comparison, both elements are # converted to AffineScalarFunc objects: elmt1 = uncert_core.to_affine_scalar(elmt1) elmt2 = uncert_core.to_affine_scalar(elmt2) if not numbers_close(elmt1.nominal_value, elmt2.nominal_value, precision): return False if not numbers_close(elmt1.std_dev, elmt2.std_dev, precision): return False return True uncertainties-3.2.3/tests/test_formatting.py000066400000000000000000000550711500152063300213240ustar00rootroot00000000000000import pytest from uncertainties import ufloat, ufloat_fromstr, formatting from helpers import numbers_close def test_PDG_precision(): """ Test of the calculation of the number of significant digits for the uncertainty. """ # The 3 cases of the rounding rules are covered in each case: tests = { # Very big floats: 1.7976931348623157e308: (2, 1.7976931348623157e308), 0.5e308: (1, 0.5e308), 0.9976931348623157e308: (2, 1e308), # Very small floats: 1.3e-323: (2, 1.3e-323), 5e-324: (1, 5e-324), 9.99e-324: (2, 1e-323), } for std_dev, result in tests.items(): assert formatting.PDG_precision(std_dev) == result def test_small_float(): """ Make sure that very small floats do not error, even though printing as str causes the number to be rounded to the nearest 324. Suggested by issue #135. 
""" a = 1e-324 b = 3e-324 assert a < b str(ufloat(a, 0.0)) str(ufloat(b, 0.0)) def test_repr(): """Test the representation of numbers with uncertainty.""" # The uncertainty is a power of 2, so that it can be exactly # represented: x = ufloat(3.14159265358979, 0.25) assert repr(x) == "3.14159265358979+/-0.25" x = ufloat(3.14159265358979, 0) assert repr(x) == "3.14159265358979+/-0" # Tagging: x = ufloat(3, 1, "length") assert repr(x) == "< length = 3.0+/-1.0 >" # The way NaN is formatted with F, E and G depends on the version of Python (NAN for # Python 2.5+ at least): NaN_EFG = "%F" % float("nan") Inf_EFG = "%F" % float("inf") # Tests of each point of the docstring of # AffineScalarFunc.__format__() in turn, mostly in the same order. # The LaTeX tests do not use the customization of # uncert_core.GROUP_SYMBOLS and uncert_core.EXP_PRINT: this # way, problems in the customization themselves are caught. formatting_cases = [ # (Nominal value, uncertainty): {format: result,...} # Usual float formatting, and individual widths, etc.: (3.1415, 0.0001, "*^+7.2f", "*+3.14*+/-*0.00**"), (3.1415, 0.0001, "+07.2f", "+003.14+/-0000.00"), # fill (3.1415, 0.0001, ">10f", " 3.14150+/- 0.00010"), # Width and align (3.1415, 0.0001, "11.3e", " 3.142e+00+/- 0.000e+00"), # Duplicated exponent (3.1415, 0.0001, "1.4e", "3.1415e+00+/-0.0001e+00"), # Forced double exponent # Full generalization of float formatting: (3.1415, 0.0001, "+09.2uf", "+03.14150+/-000.00010"), (3.1415, 0.0001, "*^+9.2uf", "+3.14150*+/-*0.00010*"), (3.1415, 0.0001, ">9f", " 3.14150+/- 0.00010"), # Width and align # Number of digits of the uncertainty fixed: (123.456789, 0.00123, ".1uf", "123.457+/-0.001"), (123.456789, 0.00123, ".2uf", "123.4568+/-0.0012"), (123.456789, 0.00123, ".3uf", "123.45679+/-0.00123"), (123.456789, 0.00123, ".2ue", "(1.234568+/-0.000012)e+02"), # Sign handling: (-123.456789, 0.00123, ".1uf", "-123.457+/-0.001"), (-123.456789, 0.00123, ".2uf", "-123.4568+/-0.0012"), (-123.456789, 0.00123, 
".3uf", "-123.45679+/-0.00123"), (-123.456789, 0.00123, ".2ue", "(-1.234568+/-0.000012)e+02"), # Uncertainty larger than the nominal value: (12.3, 456.78, "", "(0+/-5)e+02"), (12.3, 456.78, ".1uf", "12+/-457"), (12.3, 456.78, ".4uf", "12.3+/-456.8"), # ... Same thing, but with an exponent: (12.3, 456.78, ".1ue", "(0+/-5)e+02"), (12.3, 456.78, ".4ue", "(0.123+/-4.568)e+02"), (12.3, 456.78, ".1ueS", "0(5)e+02"), (23456.789123, 1234.56789123, ".6gS", "23456.8(1234.6)"), # Test of the various float formats: the nominal value should have a similar # representation as if it were directly represented as a float: (1234567.89, 0.1, ".0e", "(1+/-0)e+06"), (1234567.89, 0.1, "e", "(1.23456789+/-0.00000010)e+06"), (1234567.89, 0.1, "E", "(1.23456789+/-0.00000010)E+06"), (1234567.89, 0.1, "f", "1234567.89+/-0.10"), (1234567.89, 0.1, "F", "1234567.89+/-0.10"), (1234567.89, 0.1, "g", "1234567.89+/-0.10"), (1234567.89, 0.1, "G", "1234567.89+/-0.10"), (1234567.89, 0.1, "%", "(123456789+/-10)%"), (1234567.89, 4.3, "g", "1234568+/-4"), # Case where g triggers the exponent notation (1234567.89, 43, "g", "(1.23457+/-0.00004)e+06"), (1234567.89, 43, "G", "(1.23457+/-0.00004)E+06"), (3.1415, 0.0001, "+09.2uf", "+03.14150+/-000.00010"), pytest.param( 1234.56789, 0.1, ".0f", "1235+/-0.", marks=pytest.mark.xfail(reason="Bug: missing trailing decimal."), ), # Approximate error indicated with "." (1234.56789, 0.1, "e", "(1.23457+/-0.00010)e+03"), (1234.56789, 0.1, "E", "(1.23457+/-0.00010)E+03"), (1234.56789, 0.1, "f", "1234.57+/-0.10"), (1234.56789, 0.1, "F", "1234.57+/-0.10"), (1234.56789, 0.1, "%", "(123457+/-10)%"), # Percent notation: # Because '%' does 0.0055*100, the value 0.5499999999999999 is obtained, which # rounds to 0.5. The original rounded value is 0.006. 
(0.42, 0.0055, ".1u%", "(42.0+/-0.5)%"), (0.42, 0.0055, ".1u%S", "42.0(5)%"), (0.42, 0.0055, "%P", "(42.0±0.5)%"), # Particle Data Group automatic convention, including limit cases: (1.2345678, 0.354, "", "1.23+/-0.35"), (1.2345678, 0.3549, "", "1.23+/-0.35"), (1.2345678, 0.355, "", "1.2+/-0.4"), (1.5678, 0.355, "", "1.6+/-0.4"), (1.2345678, 0.09499, "", "1.23+/-0.09"), (1.2345678, 0.095, "", "1.23+/-0.10"), # Automatic extension of the uncertainty up to the decimal point: (1000, 123, ".1uf", "1000+/-123"), # The nominal value has 1 <= mantissa < 10. The precision is the number of # significant digits of the uncertainty: (1000, 123, ".1ue", "(1.0+/-0.1)e+03"), # Spectroscopic notation: (-1.23, 3.4, "S", "-1.2(3.4)"), (-1.23, 3.4, ".2ufS", "-1.2(3.4)"), (-1.23, 3.4, ".3ufS", "-1.23(3.40)"), (-123.456, 0.123, "S", "-123.46(12)"), (-123.456, 0.123, ".1ufS", "-123.5(1)"), (-123.456, 0.123, ".2ufS", "-123.46(12)"), (-123.456, 0.123, ".3ufS", "-123.456(123)"), (-123.456, 0.567, "S", "-123.5(6)"), (-123.456, 0.567, ".1ufS", "-123.5(6)"), (-123.456, 0.567, ".2ufS", "-123.46(57)"), (-123.456, 0.567, ".3ufS", "-123.456(567)"), # The decimal point shows that the uncertainty is not exact (-123.456, 0.004, ".2fS", "-123.46(0.00)"), # LaTeX notation: (1234.56789, 0.1, "eL", r"\left(1.23457 \pm 0.00010\right) \times 10^{3}"), (1234.56789, 0.1, "EL", r"\left(1.23457 \pm 0.00010\right) \times 10^{3}"), (1234.56789, 0.1, "fL", r"1234.57 \pm 0.10"), (1234.56789, 0.1, "FL", r"1234.57 \pm 0.10"), (1234.56789, 0.1, "fL", r"1234.57 \pm 0.10"), (1234.56789, 0.1, "FL", r"1234.57 \pm 0.10"), (1234.56789, 0.1, "%L", r"\left(123457 \pm 10\right) \%"), # ... combined with the spectroscopic notation: (-1.23, 3.4, "SL", "-1.2(3.4)"), (-1.23, 3.4, "LS", "-1.2(3.4)"), (-1.23, 3.4, ".2ufSL", "-1.2(3.4)"), (-1.23, 3.4, ".2ufLS", "-1.2(3.4)"), # Special cases for the uncertainty (0, nan) and format strings # (extension S, L, U,..., global width, etc.). 
(-1.2e-12, 0, "12.2gPL", " -1.2×10⁻¹²± 0"), (-1.2e-12, 0, "13S", " -1.2(0)e-12"), (-1.2e-12, 0, "10P", "-1.2×10⁻¹²± 0"), (-1.2e-12, 0, "L", r"\left(-1.2 \pm 0\right) \times 10^{-12}"), # No factored exponent, LaTeX (-1.2e-12, 0, "1L", r"-1.2 \times 10^{-12} \pm 0"), (-1.2e-12, 0, "SL", r"-1.2(0) \times 10^{-12}"), (-1.2e-12, 0, "SP", "-1.2(0)×10⁻¹²"), ( -1.2e-12, float("nan"), ".2uG", "(-1.2+/-%s)E-12" % NaN_EFG, ), # u ignored, format used (-1.2e-12, float("nan"), "15GS", " -1.2(%s)E-12" % NaN_EFG), (-1.2e-12, float("nan"), "SL", r"-1.2(\mathrm{nan}) \times 10^{-12}"), # LaTeX NaN # Pretty-print priority, but not for NaN: (-1.2e-12, float("nan"), "PSL", r"-1.2(\mathrm{nan})×10⁻¹²"), ( -1.2e-12, float("nan"), "L", r"\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}", ), # Uppercase NaN and LaTeX: ( -1.2e-12, float("nan"), ".1EL", (r"\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}" % NaN_EFG), ), (-1.2e-12, float("nan"), "10", " -1.2e-12+/- nan"), (-1.2e-12, float("nan"), "15S", " -1.2(nan)e-12"), # Character (Unicode) strings: (3.14e-10, 0.01e-10, "P", "(3.140±0.010)×10⁻¹⁰"), # PDG rules: 2 digits # Pretty-print has higher priority (3.14e-10, 0.01e-10, "PL", "(3.140±0.010)×10⁻¹⁰"), # Truncated non-zero uncertainty: (3.14e-10, 0.01e-10, ".1e", "(3.1+/-0.0)e-10"), (3.14e-10, 0.01e-10, ".1eS", "3.1(0.0)e-10"), # Some special cases: (1, float("nan"), "g", "1+/-nan"), (1, float("nan"), "G", "1+/-%s" % NaN_EFG), (1, float("nan"), "%", "(100.000000+/-nan)%"), (1, float("nan"), "+05g", "+0001+/-00nan"), # 5 is the *minimal* width, 6 is the default number of digits after the decimal # point: (1, float("nan"), "+05%", "(+100.000000+/-00nan)%"), # There is a difference between '{}'.format(1.) and '{:g}'.format(1.), which is not # fully obvious in the documentation, which indicates that a None format type is # like g. 
The reason is that the empty format string is actually interpreted as # str(), and that str() does not have to behave like g # ('{}'.format(1.234567890123456789) and '{:g}'.format(1.234567890123456789) are # different). (1, float("nan"), "", "1.0+/-nan"), # This is ugly, but consistent with '{:+05}'.format(float('nan')) and format(1.) # (which differs from format(1)!): (1, float("nan"), "+05", "+01.0+/-00nan"), (9.9, 0.1, ".1ue", "(9.9+/-0.1)e+00"), (9.9, 0.1, ".0fS", "10(0.)"), # The precision has an effect on the exponent, like for floats: (9.99, 0.1, ".2ue", "(9.99+/-0.10)e+00"), # Same exponent as for 9.99 alone (9.99, 0.1, ".1ue", "(1.00+/-0.01)e+01"), # Same exponent as for 9.99 alone # 0 uncertainty: nominal value displayed like a float: (1.2345, 0, ".2ue", "(1.23+/-0)e+00"), (1.2345, 0, "1.2ue", "1.23e+00+/-0"), # No factored exponent (1.2345, 0, ".2uf", "1.23+/-0"), (1.2345, 0, ".2ufS", "1.23(0)"), (1.2345, 0, ".2fS", "1.23(0)"), (1.2345, 0, "g", "1.2345+/-0"), (1.2345, 0, "", "1.2345+/-0"), # Alignment and filling characters: (3.1415e10, 0, "<15", "31415000000.0 +/-0 "), (3.1415e10, 0, "<20S", "31415000000.0(0) "), # Trying to trip the format parsing with a fill character which is an alignment # character: (3.1415e10, 0, "=>15", "==31415000000.0+/-==============0"), (1234.56789, 0, "1.2ue", "1.23e+03+/-0"), # u ignored (1234.56789, 0, "1.2e", "1.23e+03+/-0"), # Default precision = 6 (1234.56789, 0, "eL", r"\left(1.234568 \pm 0\right) \times 10^{3}"), (1234.56789, 0, "EL", r"\left(1.234568 \pm 0\right) \times 10^{3}"), (1234.56789, 0, "fL", r"1234.567890 \pm 0"), (1234.56789, 0, "FL", r"1234.567890 \pm 0"), (1234.56789, 0, "%L", r"\left(123456.789000 \pm 0\right) \%"), (1e5, 0, "g", "100000+/-0"), # A default precision of 6 is used because the uncertainty cannot be used for # defining a default precision (it doesvnot have a magnitude): (1e6, 0, "g", "(1+/-0)e+06"), (1e6 + 10, 0, "g", "(1.00001+/-0)e+06"), # Rounding of the uncertainty that "changes" the 
number of significant digits: (1, 0.994, ".3uf", "1.000+/-0.994"), (1, 0.994, ".2uf", "1.00+/-0.99"), (1, 0.994, ".1uf", "1+/-1"), # Discontinuity in the number of digits (12.3, 2.3, ".2ufS", "12.3(2.3)"), # Decimal point on the uncertainty (12.3, 2.3, ".1ufS", "12(2)"), # No decimal point on the uncertainty # Make defining the first significant digit problematic (0, 0, ".1f", "0.0+/-0"), # Simple float formatting (0, 0, "g", "0+/-0"), (1.2e-34, 5e-67, ".6g", "(1.20000+/-0.00000)e-34"), (1.2e-34, 5e-67, "13.6g", " 1.20000e-34+/- 0.00000e-34"), (1.2e-34, 5e-67, "13.6G", " 1.20000E-34+/- 0.00000E-34"), (1.2e-34, 5e-67, ".6GL", r"\left(1.20000 \pm 0.00000\right) \times 10^{-34}"), (1.2e-34, 5e-67, ".6GLp", r"\left(1.20000 \pm 0.00000\right) \times 10^{-34}"), (float("nan"), 100, "", "nan+/-100.0"), # Like '{}'.format(100.) (float("nan"), 100, "g", "nan+/-100"), # Like '{:g}'.format(100.) (float("nan"), 100, ".1e", "(nan+/-1.0)e+02"), # Similar to 1±nan (float("nan"), 100, ".1E", "(%s+/-1.0)E+02" % NaN_EFG), (float("nan"), 100, ".1ue", "(nan+/-1)e+02"), (float("nan"), 100, "10.1e", " nan+/- 1.0e+02"), # NaN *nominal value* (float("nan"), 1e8, "", "nan+/-100000000.0"), # Like '{}'.format(1e8) (float("nan"), 1e8, "g", "(nan+/-1)e+08"), # Like '{:g}'.format(1e8) (float("nan"), 1e8, ".1e", "(nan+/-1.0)e+08"), (float("nan"), 1e8, ".1E", "(%s+/-1.0)E+08" % NaN_EFG), (float("nan"), 1e8, ".1ue", "(nan+/-1)e+08"), ( float("nan"), 1e8, "10.1e", " nan+/- 1.0e+08", ), # 'nane+08' would be strange # NaN *nominal value* ( float("nan"), 123456789, "", "nan+/-123456789.0", ), # Similar to '{}'.format(123456789.) ( float("nan"), 123456789, "g", "(nan+/-1.23457)e+08", ), # Similar to '{:g}'.format(123456789.) 
(float("nan"), 123456789, ".1e", "(nan+/-1.2)e+08"), (float("nan"), 123456789, ".1E", "(%s+/-1.2)E+08" % NaN_EFG), (float("nan"), 123456789, ".1ue", "(nan+/-1)e+08"), ( float("nan"), 123456789, ".1ueL", r"\left(\mathrm{nan} \pm 1\right) \times 10^{8}", ), (float("nan"), 123456789, "10.1e", " nan+/- 1.2e+08"), (float("nan"), 123456789, "10.1eL", r"\mathrm{nan} \pm 1.2 \times 10^{8}"), # *Double* NaN (float("nan"), float("nan"), "", "nan+/-nan"), (float("nan"), float("nan"), ".1e", "nan+/-nan"), (float("nan"), float("nan"), ".1E", "%s+/-%s" % (NaN_EFG, NaN_EFG)), (float("nan"), float("nan"), ".1ue", "nan+/-nan"), ( float("nan"), float("nan"), "EL", r"\mathrm{%s} \pm \mathrm{%s}" % (NaN_EFG, NaN_EFG), ), # Inf *nominal value* (float("inf"), 100, "", "inf+/-100.0"), # Like '{}'.format(100.) (float("inf"), 100, "g", "inf+/-100"), # Like '{:g}'.format(100.) (float("inf"), 100, ".1e", "(inf+/-1.0)e+02"), # Similar to 1±inf (float("inf"), 100, ".1E", "(%s+/-1.0)E+02" % Inf_EFG), (float("inf"), 100, ".1ue", "(inf+/-1)e+02"), (float("inf"), 100, "10.1e", " inf+/- 1.0e+02"), # Inf *nominal value* (float("inf"), 1e8, "", "inf+/-100000000.0"), # Like '{}'.format(1e8) (float("inf"), 1e8, "g", "(inf+/-1)e+08"), # Like '{:g}'.format(1e8) (float("inf"), 1e8, ".1e", "(inf+/-1.0)e+08"), (float("inf"), 1e8, ".1E", "(%s+/-1.0)E+08" % Inf_EFG), (float("inf"), 1e8, ".1ue", "(inf+/-1)e+08"), ( float("inf"), 1e8, "10.1e", " inf+/- 1.0e+08", ), # 'infe+08' would be strange # Inf *nominal value* ( float("inf"), 123456789, "", "inf+/-123456789.0", ), # Similar to '{}'.format(123456789.) ( float("inf"), 123456789, "g", "(inf+/-1.23457)e+08", ), # Similar to '{:g}'.format(123456789.) 
(float("inf"), 123456789, ".1e", "(inf+/-1.2)e+08"), (float("inf"), 123456789, ".1ep", "(inf+/-1.2)e+08"), (float("inf"), 123456789, ".1E", "(%s+/-1.2)E+08" % Inf_EFG), (float("inf"), 123456789, ".1ue", "(inf+/-1)e+08"), (float("inf"), 123456789, ".1ueL", r"\left(\infty \pm 1\right) \times 10^{8}"), (float("inf"), 123456789, ".1ueLp", r"\left(\infty \pm 1\right) \times 10^{8}"), (float("inf"), 123456789, "10.1e", " inf+/- 1.2e+08"), (float("inf"), 123456789, "10.1eL", r" \infty \pm 1.2 \times 10^{8}"), # *Double* Inf (float("inf"), float("inf"), "", "inf+/-inf"), (float("inf"), float("inf"), ".1e", "inf+/-inf"), (float("inf"), float("inf"), ".1E", "%s+/-%s" % (Inf_EFG, Inf_EFG)), (float("inf"), float("inf"), ".1ue", "inf+/-inf"), (float("inf"), float("inf"), "EL", r"\infty \pm \infty"), (float("inf"), float("inf"), "ELp", r"\left(\infty \pm \infty\right)"), # Like the tests for +infinity, but for -infinity: # Inf *nominal value* (float("-inf"), 100, "", "-inf+/-100.0"), # Like '{}'.format(100.) (float("-inf"), 100, "g", "-inf+/-100"), # Like '{:g}'.format(100.) (float("-inf"), 100, ".1e", "(-inf+/-1.0)e+02"), # Similar to 1±inf (float("-inf"), 100, ".1E", "(-%s+/-1.0)E+02" % Inf_EFG), (float("-inf"), 100, ".1ue", "(-inf+/-1)e+02"), (float("-inf"), 100, "10.1e", " -inf+/- 1.0e+02"), # Inf *nominal value* (float("-inf"), 1e8, "", "-inf+/-100000000.0"), # Like '{}'.format(1e8) (float("-inf"), 1e8, "g", "(-inf+/-1)e+08"), # Like '{:g}'.format(1e8) (float("-inf"), 1e8, ".1e", "(-inf+/-1.0)e+08"), (float("-inf"), 1e8, ".1E", "(-%s+/-1.0)E+08" % Inf_EFG), (float("-inf"), 1e8, ".1ue", "(-inf+/-1)e+08"), ( float("-inf"), 1e8, "10.1e", " -inf+/- 1.0e+08", ), # 'infe+08' would be strange # Inf *nominal value* ( float("-inf"), 123456789, "", "-inf+/-123456789.0", ), # Similar to '{}'.format(123456789.) ( float("-inf"), 123456789, "g", "(-inf+/-1.23457)e+08", ), # Similar to '{:g}'.format(123456789.) 
(float("-inf"), 123456789, ".1e", "(-inf+/-1.2)e+08"), (float("-inf"), 123456789, ".1E", "(-%s+/-1.2)E+08" % Inf_EFG), (float("-inf"), 123456789, ".1ue", "(-inf+/-1)e+08"), (float("-inf"), 123456789, ".1ueL", r"\left(-\infty \pm 1\right) \times 10^{8}"), (float("-inf"), 123456789, "10.1e", " -inf+/- 1.2e+08"), (float("-inf"), 123456789, "10.1eL", r" -\infty \pm 1.2 \times 10^{8}"), # *Double* Inf (float("-inf"), float("inf"), "", "-inf+/-inf"), (float("-inf"), float("inf"), ".1e", "-inf+/-inf"), (float("-inf"), float("inf"), ".1E", "-%s+/-%s" % (Inf_EFG, Inf_EFG)), (float("-inf"), float("inf"), ".1ue", "-inf+/-inf"), (float("-inf"), float("inf"), "EL", r"-\infty \pm \infty"), # The Particle Data Group convention trumps the "at least one digit past the decimal # point" for Python floats, but only with a non-zero uncertainty: (724.2, 26.4, "", "724+/-26"), (724.2, 26.4, "p", "(724+/-26)"), (724, 0, "", "724.0+/-0"), # More NaN and infinity, in particular with LaTeX and various options: (float("-inf"), float("inf"), "S", "-inf(inf)"), (float("-inf"), float("inf"), "LS", r"-\infty(\infty)"), (float("-inf"), float("inf"), "L", r"-\infty \pm \infty"), (float("-inf"), float("inf"), "LP", r"-\infty±\infty"), # The following is consistent with Python's own formatting, which depends on the # version of Python: formatting float("-inf") with format(..., "020") gives # '-0000000000000000inf' with Python 2.7, but # '-00000000000000.0inf' with Python 2.6. However, Python 2.6 gives the better, # Python 2.7 form when format()ting with "020g" instead, so this formatting would be # better, in principle, and similarly for "%020g" % ... Thus, Python's format() # breaks the official rule according to which no format type is equivalent to "g", # for floats. 
If the better behavior was needed, internal formatting could in # principle force the "g" formatting type when none is given; however, Python does # not actually fully treat the none format type in the same was as the "g" format, # so this solution cannot be used, as it would break other formatting behaviors in # this code. It is thus best to mimic the native behavior of none type formatting # (even if it does not look so good in Python 2.6). (float("-inf"), float("inf"), "020S", format(float("-inf"), "015") + "(inf)"), (-float("nan"), float("inf"), "S", "nan(inf)"), (-float("nan"), float("inf"), "LS", r"\mathrm{nan}(\infty)"), (-float("nan"), float("inf"), "L", r"\mathrm{nan} \pm \infty"), (-float("nan"), float("inf"), "LP", r"\mathrm{nan}±\infty"), # Leading zeroes in the shorthand notation: (-2, 3, "020S", "-000000000002.0(3.0)"), (1234.56789, 0.012, ",.1uf", "1,234.57+/-0.01"), # ',' format option: introduced in Python 2.7 (123456.789123, 1234.5678, ",f", "123,457+/-1,235"), # PDG convention (123456.789123, 1234.5678, ",.4f", "123,456.7891+/-1,234.5678"), ] @pytest.mark.parametrize("val, std_dev, fmt_spec, expected_str", formatting_cases) def test_format(val, std_dev, fmt_spec, expected_str): """Test the formatting of numbers with uncertainty.""" x = ufloat(val, std_dev) actual_str = format(x, fmt_spec) assert actual_str == expected_str if not fmt_spec: assert actual_str == str(x) # Parsing back into a number with uncertainty (unless theLaTeX or comma notation is # used): if ( not set(fmt_spec).intersection("L,*%") # * = fill with * and "0nan" not in actual_str.lower() and "0inf" not in actual_str.lower() and "=====" not in actual_str ): x_back = ufloat_fromstr(actual_str) """ The original number and the new one should be consistent with each other. The nominal value can be rounded to 0 when the uncertainty is larger (because p digits on the uncertainty can still show 0.00... for the nominal value). 
The relative error is infinite, so this should not cause an error: """ if x_back.nominal_value: assert numbers_close(x.nominal_value, x_back.nominal_value, 2.4e-1) # If the uncertainty is zero, then the relative change can be large: assert numbers_close(x.std_dev, x_back.std_dev, 3e-1) def test_unicode_format(): """Test of the unicode formatting of numbers with uncertainties""" x = ufloat(3.14159265358979, 0.25) assert isinstance("Résultat = %s" % x.format(""), str) assert isinstance("Résultat = %s" % x.format("P"), str) def test_custom_pretty_print_and_latex(): """Test of the pretty-print and LaTeX format customizations""" x = ufloat(2, 0.1) * 1e-11 # We will later restore the defaults: PREV_CUSTOMIZATIONS = { var: getattr(formatting, var).copy() for var in ["PM_SYMBOLS", "MULT_SYMBOLS", "GROUP_SYMBOLS"] } # Customizations: for format in ["pretty-print", "latex"]: formatting.PM_SYMBOLS[format] = " ± " formatting.MULT_SYMBOLS[format] = "⋅" formatting.GROUP_SYMBOLS[format] = ("[", "]") assert "{:P}".format(x) == "[2.00 ± 0.10]⋅10⁻¹¹" assert "{:L}".format(x) == "[2.00 ± 0.10] ⋅ 10^{-11}" # We restore the defaults: for var, setting in PREV_CUSTOMIZATIONS.items(): setattr(formatting, var, setting) uncertainties-3.2.3/tests/test_performance.py000066400000000000000000000033671500152063300214540ustar00rootroot00000000000000from math import log10 import time import timeit import pytest from uncertainties import ufloat def repeated_summation(num): """ generate and sum many floats together, then calculate the standard deviation of the output. Under the lazy expansion algorithm, the uncertainty remains non-expanded until a request is made to calculate the standard deviation. 
""" result = sum(ufloat(1, 0.1) for _ in range(num)).std_dev return result def test_repeated_summation_complexity(): """ Test that the execution time is linear in summation length """ approx_execution_time_per_n = 10e-6 # 10 us target_test_duration = 1 # 1 s n_list = [10, 100, 1000, 10000, 100000] t_list = [] for n in n_list: """ Choose the number of repetitions so that the test takes target_test_duration assuming the timing of a single run is approximately N * approx_execution_time_per_n """ # Choose the number of repetitions so that the test single_rep_duration = n * approx_execution_time_per_n num_reps = int(target_test_duration / single_rep_duration) t_tot = timeit.timeit( lambda: repeated_summation(n), number=num_reps, timer=time.process_time, ) t_single = t_tot / num_reps t_list.append(t_single) n0 = n_list[0] t0 = t_list[0] for n, t in zip(n_list[1:], t_list[1:]): # Check that the plot of t vs n is linear on a log scale to within 10% # See PR 275 assert 0.9 * log10(n / n0) < log10(t / t0) < 1.1 * log10(n / n0) @pytest.mark.parametrize("num", (10, 100, 1000, 10000, 100000)) @pytest.mark.benchmark def test_repeated_summation_speed(num): repeated_summation(num) uncertainties-3.2.3/tests/test_power.py000066400000000000000000000121241500152063300202760ustar00rootroot00000000000000from math import pow as math_pow import pytest from uncertainties import ufloat from uncertainties.ops import pow_deriv_0, pow_deriv_1 from uncertainties.umath_core import pow as umath_pow from helpers import nan_close pow_deriv_cases = [ (0.5, 2, 1.0, -0.17328679513998632), (0.5, 1.5, 1.0606601717798214, -0.2450645358671368), (0.5, 0, 0.0, -0.6931471805599453), (0.5, -1.5, -8.485281374238571, -1.9605162869370945), (0.5, -2, -16.0, -2.772588722239781), (0, 2, 0, 0), (0, 1.5, float("nan"), 0), (0, 0, 0, float("nan")), (0, -0.5, float("nan"), float("nan")), (0, -2, float("nan"), float("nan")), (-0.5, 2, -1.0, float("nan")), (-0.5, 1.5, float("nan"), float("nan")), (-0.5, 0, -0.0, 
float("nan")), (-0.5, -1.5, float("nan"), float("nan")), (-0.5, -2, 16.0, float("nan")), ] @pytest.mark.parametrize("x, y, x_deriv_expected, y_deriv_expected", pow_deriv_cases) def test_pow_deriv_0(x, y, x_deriv_expected, y_deriv_expected): x_deriv_actual = pow_deriv_0(x, y) assert nan_close(x_deriv_actual, x_deriv_expected) y_deriv_actual = pow_deriv_1(x, y) assert nan_close(y_deriv_actual, y_deriv_expected) zero = ufloat(0, 0.1) zero2 = ufloat(0, 0.1) one = ufloat(1, 0.1) two = ufloat(2, 0.2) positive = ufloat(0.3, 0.01) positive2 = ufloat(0.3, 0.01) negative = ufloat(-0.3, 0.01) integer = ufloat(-3, 0) non_int_larger_than_one = ufloat(3.1, 0.01) positive_smaller_than_one = ufloat(0.3, 0.01) power_derivative_cases = ( (negative, integer, -370.37037037037044, float("nan")), (negative, one, 1.0, float("nan")), (negative, zero, 0.0, float("nan")), (zero, non_int_larger_than_one, float("nan"), 0.0), (zero, one, 1.0, 0.0), (zero, two, 0.0, 0.0), (zero, positive_smaller_than_one, float("nan"), 0.0), (zero, zero2, 0.0, float("nan")), (positive, positive2, 0.696845301935949, -0.8389827923531782), (positive, zero, 0.0, -1.2039728043259361), (positive, negative, -1.4350387341664474, -1.7277476090907193), ) @pytest.mark.parametrize( "first_ufloat, second_ufloat, first_der, second_der", power_derivative_cases, ) def test_power_derivatives(first_ufloat, second_ufloat, first_der, second_der): result = pow(first_ufloat, second_ufloat) first_der_result = result.derivatives[first_ufloat] second_der_result = result.derivatives[second_ufloat] assert nan_close(first_der_result, first_der) assert nan_close(second_der_result, second_der) result = umath_pow(first_ufloat, second_ufloat) first_der_result = result.derivatives[first_ufloat] second_der_result = result.derivatives[second_ufloat] assert nan_close(first_der_result, first_der) assert nan_close(second_der_result, second_der) zero = ufloat(0, 0) one = ufloat(1, 0) p = ufloat(0.3, 0.01) power_float_result_cases = [ (0, p, 0), 
(zero, p, 0), (float("nan"), zero, 1), (one, float("nan"), 1), (p, 0, 1), (zero, 0, 1), (-p, 0, 1), (-10.3, zero, 1), (0, zero, 1), (0.3, zero, 1), (-p, zero, 1), (zero, zero, 1), (p, zero, 1), (one, -3, 1), (one, -3.1, 1), (one, 0, 1), (one, 3, 1), (one, 3.1, 1), (one, -p, 1), (one, zero, 1), (one, p, 1), (1, -p, 1), (1, zero, 1), (1, p, 1), ] @pytest.mark.parametrize( "first_ufloat, second_ufloat, result_float", power_float_result_cases, ) def test_power_float_result_cases(first_ufloat, second_ufloat, result_float): for op in [pow, umath_pow]: assert op(first_ufloat, second_ufloat) == result_float power_reference_cases = [ (ufloat(-1.1, 0.1), -9), (ufloat(-1, 0), 9), (ufloat(-1.1, 0), 9), ] @pytest.mark.parametrize("first_ufloat, second_float", power_reference_cases) def test_power_wrt_ref(first_ufloat, second_float): test_op_ref_op_pairs = [(pow, pow), (umath_pow, math_pow)] for test_op, ref_op in test_op_ref_op_pairs: test_result = test_op(first_ufloat, second_float).n ref_result = ref_op(first_ufloat.n, second_float) assert test_result == ref_result positive = ufloat(0.3, 0.01) negative = ufloat(-0.3, 0.01) power_exception_cases = [ (ufloat(0, 0), negative, ZeroDivisionError), (ufloat(0, 0.1), negative, ZeroDivisionError), (negative, positive, ValueError), ] @pytest.mark.parametrize("first_ufloat, second_ufloat, exc_type", power_exception_cases) def test_power_exceptions(first_ufloat, second_ufloat, exc_type): with pytest.raises(exc_type): pow(first_ufloat, second_ufloat) """ math.pow raises ValueError in these cases, in contrast to pow which raises ZeroDivisionError so these test cases are slightly different than those that appear for test_power_exceptions in test_uncertainties.py. 
""" umath_power_exception_cases = [ (ufloat(0, 0), negative, ValueError), (ufloat(0, 0.1), negative, ValueError), (negative, positive, ValueError), ] @pytest.mark.parametrize( "first_ufloat, second_ufloat, exc_type", umath_power_exception_cases, ) def test_umath_power_exceptions(first_ufloat, second_ufloat, exc_type): with pytest.raises(exc_type): umath_pow(first_ufloat, second_ufloat) uncertainties-3.2.3/tests/test_ulinalg.py000066400000000000000000000050461500152063300206020ustar00rootroot00000000000000# Some tests are already performed in test_unumpy (unumpy contains a # matrix inversion, for instance). They are not repeated here. try: import numpy except ImportError: import sys sys.exit() # There is no reason to test the interface to NumPy from uncertainties import unumpy, ufloat from helpers import uarrays_close def test_list_inverse(): "Test of the inversion of a square matrix" mat_list = [[1, 1], [1, 0]] # numpy.linalg.inv(mat_list) does calculate the inverse even # though mat_list is a list of lists (and not a matrix). Can # ulinalg do the same? 
Here is a test: mat_list_inv = unumpy.ulinalg.inv(mat_list) # More type testing: mat_matrix = numpy.asmatrix(mat_list) assert isinstance( unumpy.ulinalg.inv(mat_matrix), type(numpy.linalg.inv(mat_matrix)) ) # unumpy.ulinalg should behave in the same way as numpy.linalg, # with respect to types: mat_list_inv_numpy = numpy.linalg.inv(mat_list) assert type(mat_list_inv) == type(mat_list_inv_numpy) # The resulting matrix does not have to be a matrix that can # handle uncertainties, because the input matrix does not have # uncertainties: assert not isinstance(mat_list_inv, unumpy.matrix) # Individual element check: assert isinstance(mat_list_inv[1, 1], float) assert mat_list_inv[1, 1] == -1 x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat = unumpy.matrix([[x, x], [y, 0]]) # Internal consistency: ulinalg.inv() must coincide with the # unumpy.matrix inverse, for square matrices (.I is the # pseudo-inverse, for non-square matrices, but inv() is not). assert uarrays_close(unumpy.ulinalg.inv(mat), mat.I) def test_list_pseudo_inverse(): "Test of the pseudo-inverse" x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat = unumpy.matrix([[x, x], [y, 0]]) # Internal consistency: the inverse and the pseudo-inverse yield # the same result on square matrices: assert uarrays_close(mat.I, unumpy.ulinalg.pinv(mat), 1e-4) assert uarrays_close( unumpy.ulinalg.inv(mat), # Support for the optional pinv argument is # tested: unumpy.ulinalg.pinv(mat, 1e-15), 1e-4, ) # Non-square matrices: x = ufloat(1, 0.1) y = ufloat(2, 0.1) mat1 = unumpy.matrix([[x, y]]) # "Long" matrix mat2 = unumpy.matrix([[x, y], [1, 3 + x], [y, 2 * x]]) # "Tall" matrix # Internal consistency: assert uarrays_close(mat1.I, unumpy.ulinalg.pinv(mat1, 1e-10)) assert uarrays_close(mat2.I, unumpy.ulinalg.pinv(mat2, 1e-8)) uncertainties-3.2.3/tests/test_umath.py000066400000000000000000000225111500152063300202610ustar00rootroot00000000000000import json import inspect import math from math import isnan from pathlib import Path import pytest from 
uncertainties import ufloat import uncertainties.core as uncert_core import uncertainties.umath_core as umath_core from uncertainties.ops import partial_derivative from helpers import numbers_close ############################################################################### # Unit tests umath_function_cases_json_path = Path( Path(__file__).parent, "cases", "umath_function_cases.json", ) with open(umath_function_cases_json_path, "r") as f: umath_function_cases_dict = json.load(f) ufloat_cases_list = [] for func_name, ufloat_tuples_list in umath_function_cases_dict.items(): for ufloat_tuples in ufloat_tuples_list: ufloat_cases_list.append((func_name, ufloat_tuples)) @pytest.mark.parametrize( "func_name, ufloat_tuples", ufloat_cases_list, ids=lambda x: str(x), ) def test_umath_function_derivatives(func_name, ufloat_tuples): ufloat_arg_list = [] for nominal_value, std_dev in ufloat_tuples: ufloat_arg_list.append(ufloat(nominal_value, std_dev)) float_arg_list = [arg.n for arg in ufloat_arg_list] func = getattr(umath_core, func_name) result = func(*ufloat_arg_list) for arg_num, arg in enumerate(ufloat_arg_list): ufloat_deriv_value = result.derivatives[arg] numerical_deriv_func = partial_derivative(func, arg_num) numerical_deriv_value = numerical_deriv_func(*float_arg_list) assert math.isclose( ufloat_deriv_value, numerical_deriv_value, rel_tol=1e-6, abs_tol=1e-6, ) def test_compound_expression(): """ Test equality between different formulas. 
""" x = ufloat(3, 0.1) # Prone to numerical errors (but not much more than floats): assert umath_core.tan(x) == umath_core.sin(x) / umath_core.cos(x) def test_numerical_example(): "Test specific numerical examples" x = ufloat(3.14, 0.01) result = umath_core.sin(x) # In order to prevent big errors such as a wrong, constant value # for all analytical and numerical derivatives, which would make # test_fixed_derivatives_math_funcs() succeed despite incorrect # calculations: assert ( "%.6f +/- %.6f" % (result.nominal_value, result.std_dev) == "0.001593 +/- 0.010000" ) # Regular calculations should still work: assert "%.11f" % umath_core.sin(3) == "0.14112000806" def test_monte_carlo_comparison(): """ Full comparison to a Monte-Carlo calculation. Both the nominal values and the covariances are compared between the direct calculation performed in this module and a Monte-Carlo simulation. """ try: import numpy import numpy.random except ImportError: import warnings warnings.warn("Test not performed because NumPy is not available") return # Works on numpy.arrays of Variable objects (whereas umath_core.sin() # does not): sin_uarray_uncert = numpy.vectorize(umath_core.sin, otypes=[object]) # Example expression (with correlations, and multiple variables combined # in a non-linear way): def function(x, y): """ Function that takes two NumPy arrays of the same size. """ # The uncertainty due to x is about equal to the uncertainty # due to y: return 10 * x**2 - x * sin_uarray_uncert(y**3) x = ufloat(0.2, 0.01) y = ufloat(10, 0.001) function_result_this_module = function(x, y) nominal_value_this_module = function_result_this_module.nominal_value # Covariances "f*f", "f*x", "f*y": covariances_this_module = numpy.array( uncert_core.covariance_matrix((x, y, function_result_this_module)) ) def monte_carlo_calc(n_samples): """ Calculate function(x, y) on n_samples samples and returns the median, and the covariances between (x, y, function(x, y)). 
""" # Result of a Monte-Carlo simulation: x_samples = numpy.random.normal(x.nominal_value, x.std_dev, n_samples) y_samples = numpy.random.normal(y.nominal_value, y.std_dev, n_samples) # !! astype() is a fix for median() in NumPy 1.8.0: function_samples = function(x_samples, y_samples).astype(float) cov_mat = numpy.cov([x_samples, y_samples], function_samples) return (numpy.median(function_samples), cov_mat) (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000) ## Comparison between both results: # The covariance matrices must be close: # We rely on the fact that covariances_samples very rarely has # null elements: # !!! The test could be done directly with NumPy's comparison # tools, no? See assert_allclose, assert_array_almost_equal_nulp # or assert_array_max_ulp. This is relevant for all vectorized # occurrences of numbers_close. assert numpy.vectorize(numbers_close)( covariances_this_module, covariances_samples, 0.06 ).all(), ( "The covariance matrices do not coincide between" " the Monte-Carlo simulation and the direct calculation:\n" "* Monte-Carlo:\n%s\n* Direct calculation:\n%s" % (covariances_samples, covariances_this_module) ) # The nominal values must be close: assert numbers_close( nominal_value_this_module, nominal_value_samples, # The scale of the comparison depends on the standard # deviation: the nominal values can differ by a fraction of # the standard deviation: math.sqrt(covariances_samples[2, 2]) / abs(nominal_value_samples) * 0.5, ), ( "The nominal value (%f) does not coincide with that of" " the Monte-Carlo simulation (%f), for a standard deviation of %f." 
% ( nominal_value_this_module, nominal_value_samples, math.sqrt(covariances_samples[2, 2]), ) ) def test_math_module(): "Operations with the math module" x = ufloat(-1.5, 0.1) # The exponent must not be differentiated, when calculating the # following (the partial derivative with respect to the exponent # is not defined): assert (x**2).nominal_value == 2.25 # Regular operations are chosen to be unchanged: assert isinstance(umath_core.sin(3), float) # factorial() must not be "damaged" by the umath_core module, so as # to help make it a drop-in replacement for math (even though # factorial() does not work on numbers with uncertainties # because it is restricted to integers, as for # math.factorial()): assert umath_core.factorial(4) == 24 # fsum is special because it does not take a fixed number of # variables: assert umath_core.fsum([x, x]).nominal_value == -3 # Functions that give locally constant results are tested: they # should give the same result as their float equivalent: for name in umath_core.locally_cst_funcs: try: func = getattr(umath_core, name) except AttributeError: continue # Not in the math module, so not in umath_core either assert func(x) == func(x.nominal_value) # The type should be left untouched. 
For example, isnan() # should always give a boolean: assert isinstance(func(x), type(func(x.nominal_value))) # The same exceptions should be generated when numbers with uncertainties # are used: # The type of the expected exception is first determined, because # it varies between versions of Python (OverflowError in Python # 2.6+, ValueError in Python 2.5,...): try: math.log(0) except Exception as err_math: # Python 3 does not make exceptions local variables: they are # restricted to their except block: err_math_args = err_math.args exception_class = err_math.__class__ try: umath_core.log(0) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception("%s exception expected" % exception_class.__name__) try: umath_core.log(ufloat(0, 0)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception("%s exception expected" % exception_class.__name__) try: umath_core.log(ufloat(0, 1)) except exception_class as err_ufloat: assert err_math_args == err_ufloat.args else: raise Exception("%s exception expected" % exception_class.__name__) def test_hypot(): """ Special cases where derivatives cannot be calculated: """ x = ufloat(0, 1) y = ufloat(0, 2) # Derivatives that cannot be calculated simply return NaN, with no # exception being raised, normally: result = umath_core.hypot(x, y) assert isnan(result.derivatives[x]) assert isnan(result.derivatives[y]) @pytest.mark.parametrize("function_name", umath_core.deprecated_functions) def test_deprecated_function(function_name): num_args = len(inspect.signature(getattr(math, function_name)).parameters) args = [ufloat(1, 0.1)] if num_args == 1: if function_name == "factorial": args[0] = 6 else: if function_name == "ldexp": args.append(3) else: args.append(ufloat(-12, 2.4)) with pytest.warns(FutureWarning, match="will be removed"): getattr(umath_core, function_name)(*args) 
uncertainties-3.2.3/tests/test_uncertainties.py000066400000000000000000001235221500152063300220240ustar00rootroot00000000000000import copy import json import inspect import math from pathlib import Path import random # noqa import pytest import uncertainties.core as uncert_core from uncertainties.core import ( ufloat, AffineScalarFunc, ufloat_fromstr, deprecated_methods, ) from uncertainties import ( umath, correlated_values, correlated_values_norm, correlation_matrix, ) from uncertainties.ops import partial_derivative from helpers import ( numbers_close, ufloats_close, ) try: import numpy as np except ImportError: np = None def test_value_construction(): """ Tests the various means of constructing a constant number with uncertainty *without a string* (see test_ufloat_fromstr(), for this). """ ## Simple construction: x = ufloat(3, 0.14) assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag is None # ... with tag as positional argument: x = ufloat(3, 0.14, "pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag == "pi" # ... 
with tag keyword: x = ufloat(3, 0.14, tag="pi") assert x.nominal_value == 3 assert x.std_dev == 0.14 assert x.tag == "pi" # Negative standard deviations should be caught in a nice way # (with the right exception): try: x = ufloat(3, -0.1) except uncert_core.NegativeStdDev: pass ## Incorrect forms should not raise any deprecation warning, but ## raise an exception: try: ufloat(1) # Form that has never been allowed except TypeError: pass else: raise Exception("An exception should be raised") def test_ufloat_fromstr(): "Input of numbers with uncertainties as a string" # String representation, and numerical values: tests = { "-1.23(3.4)": (-1.23, 3.4), # (Nominal value, error) " -1.23(3.4) ": (-1.23, 3.4), # Spaces ignored "-1.34(5)": (-1.34, 0.05), "1(6)": (1, 6), "3(4.2)": (3, 4.2), "-9(2)": (-9, 2), "1234567(1.2)": (1234567, 1.2), "12.345(15)": (12.345, 0.015), "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6), "0.29": (0.29, 0.01), "31.": (31, 1), "-31.": (-31, 1), # The following tests that the ufloat() routine does # not consider '31' like the tuple ('3', '1'), which would # make it expect two numbers (instead of 2 1-character # strings): "31": (31, 1), "-3.1e10": (-3.1e10, 0.1e10), "169.0(7)": (169, 0.7), "-0.1+/-1": (-0.1, 1), "-13e-2+/-1e2": (-13e-2, 1e2), "-14.(15)": (-14, 15), "-100.0(15)": (-100, 1.5), "14.(15)": (14, 15), # Global exponent: "(3.141+/-0.001)E+02": (314.1, 0.1), ## Pretty-print notation: # ± sign, global exponent (not pretty-printed): "(3.141±0.001)E+02": (314.1, 0.1), # ± sign, individual exponent: "3.141E+02±0.001e2": (314.1, 0.1), # ± sign, times symbol, superscript (= full pretty-print): "(3.141 ± 0.001) × 10²": (314.1, 0.1), ## Others # Forced parentheses: "(2 +/- 0.1)": (2, 0.1), # NaN uncertainty: "(3.141±nan)E+02": (314.1, float("nan")), "3.141e+02+/-nan": (314.1, float("nan")), "3.4(nan)e10": (3.4e10, float("nan")), # NaN value: "nan+/-3.14e2": (float("nan"), 314), # "Double-floats" "(-3.1415 +/- 1e-4)e+200": (-3.1415e200, 1e196), 
"(-3.1415e-10 +/- 1e-4)e+200": (-3.1415e190, 1e196), # Special float representation: "-3(0.)": (-3, 0), } for representation, values in tests.items(): # We test the fact that surrounding spaces are removed: representation = " {} ".format(representation) # Without tag: num = ufloat_fromstr(representation) assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag is None # With a tag as positional argument: num = ufloat_fromstr(representation, "test variable") assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == "test variable" # With a tag as keyword argument: num = ufloat_fromstr(representation, tag="test variable") assert numbers_close(num.nominal_value, values[0]) assert numbers_close(num.std_dev, values[1]) assert num.tag == "test variable" ############################################################################### ufloat_method_cases_json_path = Path( Path(__file__).parent, "cases", "ufloat_method_cases.json", ) with open(ufloat_method_cases_json_path, "r") as f: ufloat_method_cases_dict = json.load(f) ufloat_cases_list = [] for func_name, ufloat_tuples_list in ufloat_method_cases_dict.items(): for ufloat_tuples in ufloat_tuples_list: ufloat_cases_list.append((func_name, ufloat_tuples)) @pytest.mark.parametrize( "func_name, ufloat_tuples", ufloat_cases_list, ids=lambda x: str(x), ) def test_ufloat_method_derivativs(func_name, ufloat_tuples): ufloat_arg_list = [] for nominal_value, std_dev in ufloat_tuples: ufloat_arg_list.append(ufloat(nominal_value, std_dev)) float_arg_list = [arg.n for arg in ufloat_arg_list] """ Because of how the UFloat methods are wrapped, we must use the bound version of the methods to calculate the resulting UFloat but we must use the unbound version to extract the numerical partial derivative. 
""" bound_func = getattr(ufloat_arg_list[0], func_name) unbound_func = getattr(AffineScalarFunc, func_name) result = bound_func(*ufloat_arg_list[1:]) for arg_num, arg in enumerate(ufloat_arg_list): ufloat_deriv_value = result.derivatives[arg] numerical_deriv_func = partial_derivative(unbound_func, arg_num) numerical_deriv_value = numerical_deriv_func(*float_arg_list) assert math.isclose( ufloat_deriv_value, numerical_deriv_value, rel_tol=1e-6, abs_tol=1e-6, ) def test_copy(): "Standard copy module integration" import gc x = ufloat(3, 0.1) assert x == x y = copy.copy(x) assert x != y assert not (x == y) assert y in y.derivatives.keys() # y must not copy the dependence on x z = copy.deepcopy(x) assert x != z # Copy tests on expressions: t = x + 2 * z # t depends on x: assert x in t.derivatives # The relationship between the copy of an expression and the # original variables should be preserved: t_copy = copy.copy(t) # Shallow copy: the variables on which t depends are not copied: assert x in t_copy.derivatives assert uncert_core.covariance_matrix([t, z]) == uncert_core.covariance_matrix( [t_copy, z] ) # However, the relationship between a deep copy and the original # variables should be broken, since the deep copy created new, # independent variables: t_deepcopy = copy.deepcopy(t) assert x not in t_deepcopy.derivatives assert uncert_core.covariance_matrix([t, z]) != uncert_core.covariance_matrix( [t_deepcopy, z] ) # Test of implementations with weak references: # Weak references: destroying a variable should never destroy the # integrity of its copies (which would happen if the copy keeps a # weak reference to the original, in its derivatives member: the # weak reference to the original would become invalid): del x gc.collect() assert y in list(y.derivatives.keys()) ## Classes for the pickling tests (put at the module level, so that ## they can be unpickled): # Subclass without slots: class NewVariable_dict(uncert_core.Variable): pass # Subclass with slots defined by 
a tuple: class NewVariable_slots_tuple(uncert_core.Variable): __slots__ = ("new_attr",) # Subclass with slots defined by a string: class NewVariable_slots_str(uncert_core.Variable): __slots__ = "new_attr" def test_pickling(): "Standard pickle module integration." import pickle x = ufloat(2, 0.1) x_unpickled = pickle.loads(pickle.dumps(x)) assert x != x_unpickled # Pickling creates copies ## Tests with correlations and AffineScalarFunc objects: f = 2 * x assert isinstance(f, AffineScalarFunc) (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x))) # Correlations must be preserved: assert f_unpickled - x_unpickled2 - x_unpickled2 == 0 ## Tests with subclasses: for subclass in (NewVariable_dict, NewVariable_slots_tuple, NewVariable_slots_str): x = subclass(3, 0.14) # Pickling test with possibly uninitialized slots: pickle.loads(pickle.dumps(x)) # Unpickling test: x.new_attr = "New attr value" x_unpickled = pickle.loads(pickle.dumps(x)) # Must exist (from the slots of the parent class): x_unpickled.nominal_value x_unpickled.new_attr # Must exist ## # Corner case test: when an attribute is present both in __slots__ # and in __dict__, it is first looked up from the slots # (references: # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors, # http://stackoverflow.com/a/15139208/42973). 
As a consequence, # the pickling process must pickle the correct value (i.e., not # the value from __dict__): x = NewVariable_dict(3, 0.14) x._nominal_value = "in slots" # Corner case: __dict__ key which is also a slot name (it is # shadowed by the corresponding slot, so this is very unusual, # though): x.__dict__["_nominal_value"] = "in dict" # Additional __dict__ attribute: x.dict_attr = "dict attribute" x_unpickled = pickle.loads(pickle.dumps(x)) # We make sure that the data is still there and untouched: assert x_unpickled._nominal_value == "in slots" assert x_unpickled.__dict__ == x.__dict__ ## # Corner case that should have no impact on the code but which is # not prevented by the documentation: case of constant linear # terms (the potential gotcha is that if the linear_combo # attribute is empty, __getstate__()'s result could be false, and # so __setstate__() would not be called and the original empty # linear combination would not be set in linear_combo. x = uncert_core.LinearCombination({}) assert pickle.loads(pickle.dumps(x)).linear_combo == {} def test_int_div(): "Integer division" # We perform all operations on floats, because derivatives can # otherwise be meaningless: x = ufloat(3.9, 2) // 2 assert x.nominal_value == 1.0 # All errors are supposed to be small, so the ufloat() # in x violates the assumption. Therefore, the following is # correct: assert x.std_dev == 0.0 def test_comparison_ops(): "Test of comparison operators" # Operations on quantities equivalent to Python numbers must still # be correct: a = ufloat(-3, 0) b = ufloat(10, 0) c = ufloat(10, 0) assert a < b assert a < 3 assert 3 < b # This is first given to int.__lt__() assert b == c x = ufloat(3, 0.1) # One constraint is that usual Python code for inequality testing # still work in a reasonable way (for instance, it is generally # desirable that functions defined by different formulas on # different intervals can still do "if 0 < x < 1:...". 
This # supposes again that errors are "small" (as for the estimate of # the standard error). assert x > 1 # The limit case is not obvious: assert not (x >= 3) assert not (x < 3) assert x == x # Comparaison between Variable and AffineScalarFunc: assert x == x + 0 # Comparaison between 2 _different_ AffineScalarFunc objects # representing the same value: assert x / 2 == x / 2 # With uncorrelated result that have the same behavior (value and # standard error): assert 2 * ufloat(1, 0.1) != ufloat(2, 0.2) # Comparaison between 2 _different_ Variable objects # that are uncorrelated: assert x != ufloat(3, 0.1) assert x != ufloat(3, 0.2) # Comparison to other types should work: assert x is not None # Not comparable assert x - x == 0 # Comparable, even though the types are different assert x != [1, 2] #################### # Checks of the semantics of logical operations: they return True # iff they are always True when the parameters vary in an # infinitesimal interval inside sigma (sigma == 0 is a special # case): def test_all_comparison_ops(x, y): """ Takes two Variable objects. Fails if any comparison operation fails to follow the proper semantics: a comparison only returns True if the correspond float comparison results are True for all the float values taken by the variables (of x and y) when they vary in an infinitesimal neighborhood within their uncertainty. This test is stochastic: it may, exceptionally, fail for correctly implemented comparison operators. """ def random_float(var): """ Returns a random value for Variable var, in an infinitesimal interval withing its uncertainty. The case of a zero uncertainty is special. 
""" return (random.random() - 0.5) * min(var.std_dev, 1e-5) + var.nominal_value # All operations are tested: for op in ["__%s__" % name for name in ("ne", "eq", "lt", "le", "gt", "ge")]: try: float_func = getattr(float, op) except AttributeError: # Python 2.3's floats don't have __ne__ continue # Determination of the correct truth value of func(x, y): sampled_results = [] # The "main" value is an important particular case, and # the starting value for the final result # (correct_result): sampled_results.append(float_func(x.nominal_value, y.nominal_value)) for check_num in range(50): # Many points checked sampled_results.append(float_func(random_float(x), random_float(y))) min_result = min(sampled_results) max_result = max(sampled_results) if min_result == max_result: correct_result = min_result else: # Almost all results must be True, for the final value # to be True: num_min_result = sampled_results.count(min_result) # 1 exception is considered OK: correct_result = num_min_result == 1 try: assert correct_result == getattr(x, op)(y) except AssertionError: print("Sampling results:", sampled_results) raise Exception( "Semantic value of %s %s (%s) %s not" " correctly reproduced." % (x, op, y, correct_result) ) # With different numbers: test_all_comparison_ops(ufloat(3, 0.1), ufloat(-2, 0.1)) test_all_comparison_ops( ufloat(0, 0), # Special number ufloat(1, 1), ) test_all_comparison_ops( ufloat(0, 0), # Special number ufloat(0, 0.1), ) # With identical numbers: test_all_comparison_ops(ufloat(0, 0), ufloat(0, 0)) test_all_comparison_ops(ufloat(1, 1), ufloat(1, 1)) def test_logic(): "Boolean logic: __nonzero__, bool." x = ufloat(3, 0) y = ufloat(0, 0) z = ufloat(0, 0.1) t = ufloat(-1, 2) assert bool(x) assert not bool(y) assert bool(z) assert bool(t) # Only infinitseimal neighborhood are used def test_basic_access_to_data(): "Access to data from Variable and AffineScalarFunc objects." 
x = ufloat(3.14, 0.01, "x var") assert x.tag == "x var" assert x.nominal_value == 3.14 assert x.std_dev == 0.01 # Case of AffineScalarFunc objects: y = x + 0 assert type(y) == AffineScalarFunc assert y.nominal_value == 3.14 assert y.std_dev == 0.01 # Details on the sources of error: a = ufloat(-1, 0.001) y = 2 * x + 3 * x + 2 + a error_sources = y.error_components() assert len(error_sources) == 2 # 'a' and 'x' assert error_sources[x] == 0.05 assert error_sources[a] == 0.001 # Derivative values should be available: assert y.derivatives[x] == 5 # Modification of the standard deviation of variables: x.std_dev = 1 assert y.error_components()[x] == 5 # New error contribution! # Calculated values with uncertainties should not have a settable # standard deviation: y = 2 * x try: y.std_dev = 1 except AttributeError: pass else: raise Exception("std_dev should not be settable for calculated results") # Calculation of deviations in units of the standard deviations: assert 10 / x.std_dev == x.std_score(10 + x.nominal_value) # "In units of the standard deviation" is not always meaningful: x.std_dev = 0 try: x.std_score(1) except ValueError: pass # Normal behavior def test_correlations(): "Correlations between variables" a = ufloat(1, 0) x = ufloat(4, 0.1) y = x * 2 + a # Correlations cancel "naive" additions of uncertainties: assert y.std_dev != 0 normally_zero = y - (x * 2 + 1) assert normally_zero.nominal_value == 0 assert normally_zero.std_dev == 0 def test_no_coercion(): """ Coercion of Variable object to a simple float. The coercion should be impossible, like for complex numbers. """ x = ufloat(4, 1) try: assert float(x) == 4 except TypeError: pass else: raise Exception("Conversion to float() should fail with TypeError") def test_wrapped_func_no_args_no_kwargs(): """ Wrap a function that takes only positional-or-keyword parameters. 
""" def f_auto_unc(x, y): return 2 * x + umath.sin(y) # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y): assert not isinstance(x, uncert_core.UFloat) assert not isinstance(y, uncert_core.UFloat) return f_auto_unc(x, y) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x)) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: f_wrapped = uncert_core.wrap(f, [None]) # No derivative for y assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x)) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap(f, [lambda x, y: 2, lambda x, y: math.cos(y)]) assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x)) ## Automatic additional derivatives for non-defined derivatives: f_wrapped = uncert_core.wrap(f, [lambda x, y: 2]) # No derivative for y assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x)) def test_wrapped_func_args_no_kwargs(): """ Wrap a function that takes only positional-or-keyword and var-positional parameters. 
""" def f_auto_unc(x, y, *args): return 2 * x + umath.sin(y) + 3 * args[1] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, *args): assert not any( isinstance(value, uncert_core.UFloat) for value in [x, y] + list(args) ) return f_auto_unc(x, y, *args) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) s = "string arg" z = uncert_core.ufloat(100, 3) args = [s, z, s] # var-positional parameters ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: f_wrapped = uncert_core.wrap(f, [None]) # No derivative for y assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap( f, [ lambda x, y, *args: 2, lambda x, y, *args: math.cos(y), None, lambda x, y, *args: 3, ], ) assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y: f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2]) assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args)) def test_wrapped_func_no_args_kwargs(): """ Wrap a function that takes only positional-or-keyword and var-keyword parameters. 
""" def f_auto_unc(x, y, **kwargs): return 2 * x + umath.sin(y) + 3 * kwargs["z"] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, **kwargs): assert not any( isinstance(value, uncert_core.UFloat) for value in [x, y] + list(kwargs.values()) ) return f_auto_unc(x, y, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) s = "string arg" z = uncert_core.ufloat(100, 3) kwargs = {"s": s, "z": z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None]) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {"z": None}) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {"z": lambda x, y, **kwargs: 3}) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = 
uncert_core.wrap( f, [lambda x, y, **kwargs: 2, lambda x, y, **kwargs: math.cos(y)], {"z:": lambda x, y, **kwargs: 3}, ) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, **kwargs: 2]) assert ufloats_close(f_auto_unc(x, y, **kwargs), f_wrapped(x, y, **kwargs)) # Call with keyword arguments: assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs), f_wrapped(y=y, x=x, **kwargs)) def test_wrapped_func_args_kwargs(): """ Wrap a function that takes positional-or-keyword, var-positional and var-keyword parameters. """ def f_auto_unc(x, y, *args, **kwargs): return 2 * x + umath.sin(y) + 4 * args[1] + 3 * kwargs["z"] # Like f_auto_unc, but does not accept numbers with uncertainties: def f(x, y, *args, **kwargs): assert not any( isinstance(value, uncert_core.UFloat) for value in [x, y] + list(args) + list(kwargs.values()) ) return f_auto_unc(x, y, *args, **kwargs) x = uncert_core.ufloat(1, 0.1) y = uncert_core.ufloat(10, 2) t = uncert_core.ufloat(1000, 4) s = "string arg" z = uncert_core.ufloat(100, 3) args = [s, t, s] kwargs = {"u": s, "z": z} # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: f_wrapped = uncert_core.wrap(f) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None, None, None, lambda x, y, *args, **kwargs: 4]) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) # No derivative for 
positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {"z": None}) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: f_wrapped = uncert_core.wrap(f, [None], {"z": lambda x, y, *args, **kwargs: 3}) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) ### Explicit derivatives: ## Fully defined derivatives: f_wrapped = uncert_core.wrap( f, [lambda x, y, *args, **kwargs: 2, lambda x, y, *args, **kwargs: math.cos(y)], {"z:": lambda x, y, *args, **kwargs: 3}, ) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: f_wrapped = uncert_core.wrap(f, [lambda x, y, *args, **kwargs: 2]) assert ufloats_close( f_auto_unc(x, y, *args, **kwargs), f_wrapped(x, y, *args, **kwargs), tolerance=1e-5, ) def test_wrapped_func(): """ Test uncertainty-aware functions obtained through wrapping. 
""" ######################################## # Function which can automatically handle numbers with # uncertainties: def f_auto_unc(angle, *list_var): return umath.cos(angle) + sum(list_var) def f(angle, *list_var): # We make sure that this function is only ever called with # numbers with no uncertainty (since it is wrapped): assert not isinstance(angle, uncert_core.UFloat) assert not any(isinstance(arg, uncert_core.UFloat) for arg in list_var) return f_auto_unc(angle, *list_var) f_wrapped = uncert_core.wrap(f) my_list = [1, 2, 3] ######################################## # Test of a wrapped function that only calls the original # function: it should obtain the exact same result: assert f_wrapped(0, *my_list) == f(0, *my_list) # 1 == 1 +/- 0, so the type must be checked too: assert isinstance(f_wrapped(0, *my_list), type(f(0, *my_list))) ######################################## # Call with uncertainties: angle = uncert_core.ufloat(1, 0.1) list_value = uncert_core.ufloat(3, 0.2) # The random variables must be the same (full correlation): assert ufloats_close(f_wrapped(angle, *[1, angle]), f_auto_unc(angle, *[1, angle])) assert ufloats_close( f_wrapped(angle, *[list_value, angle]), f_auto_unc(angle, *[list_value, angle]) ) ######################################## # Non-numerical arguments, and explicit and implicit derivatives: def f(x, y, z, t, u): return x + 2 * z + 3 * t + 4 * u f_wrapped = uncert_core.wrap( f, [lambda *args: 1, None, lambda *args: 2, None] ) # No deriv. for u assert f_wrapped(10, "string argument", 1, 0, 0) == 12 x = uncert_core.ufloat(10, 1) assert numbers_close( f_wrapped(x, "string argument", x, x, x).std_dev, (1 + 2 + 3 + 4) * x.std_dev ) def test_wrap_with_kwargs(): """ Tests wrap() on functions with keyword arguments. Includes both wrapping a function that takes optional keyword arguments and calling a wrapped function with keyword arguments (optional or not). 
""" # Version of f() that automatically works with numbers with # uncertainties: def f_auto_unc(x, y, *args, **kwargs): return x + umath.sin(y) + 2 * args[0] + 3 * kwargs["t"] # We also add keyword arguments in the function which is wrapped: def f(x, y, *args, **kwargs): # We make sure that f is not called directly with a number with # uncertainty: for value in [x, y] + list(args) + list(kwargs.values()): assert not isinstance(value, uncert_core.UFloat) return f_auto_unc(x, y, *args, **kwargs) f_wrapped = uncert_core.wrap(f) x = ufloat(1, 0.1) y = ufloat(10, 0.11) z = ufloat(100, 0.111) t = ufloat(0.1, 0.1111) assert ufloats_close( f_wrapped(x, y, z, t=t), f_auto_unc(x, y, z, t=t), tolerance=1e-5 ) ######################################## # We make sure that analytical derivatives are indeed used. We # also test the automatic handling of additional *args arguments # beyond the number of supplied derivatives. f_wrapped2 = uncert_core.wrap(f, [None, lambda x, y, *args, **kwargs: math.cos(y)]) # The derivatives must be perfectly identical: # The *args parameter of f() is given as a keyword argument, so as # to try to confuse the code: assert ( f_wrapped2(x, y, z, t=t).derivatives[y] == f_auto_unc(x, y, z, t=t).derivatives[y] ) # Derivatives supplied through the keyword-parameter dictionary of # derivatives, and also derivatives supplied for the # var-positional arguments (*args[0]): f_wrapped3 = uncert_core.wrap( f, [None, None, lambda x, y, *args, **kwargs: 2], {"t": lambda x, y, *args, **kwargs: 3}, ) # The derivatives should be exactly the same, because they are # obtained with the exact same analytic formula: assert ( f_wrapped3(x, y, z, t=t).derivatives[z] == f_auto_unc(x, y, z, t=t).derivatives[z] ) assert ( f_wrapped3(x, y, z, t=t).derivatives[t] == f_auto_unc(x, y, z, t=t).derivatives[t] ) ######################################## # Making sure that user-supplied derivatives are indeed called: class FunctionCalled(Exception): """ Raised to signal that a 
function is indeed called. """ pass def failing_func(x, y, *args, **kwargs): raise FunctionCalled f_wrapped4 = uncert_core.wrap(f, [None, failing_func], {"t": failing_func}) try: f_wrapped4(x, 3.14, z, t=t) except FunctionCalled: pass else: raise Exception("User-supplied derivative should be called") try: f_wrapped4(x, y, z, t=3.14) except FunctionCalled: pass else: raise Exception("User-supplied derivative should be called") try: f_wrapped4(x, 3.14, z, t=3.14) except FunctionCalled: raise Exception("User-supplied derivative should *not* be called") ############################################################################### def test_access_to_std_dev(): "Uniform access to the standard deviation" x = ufloat(1, 0.1) y = 2 * x # std_dev for Variable and AffineScalarFunc objects: assert uncert_core.std_dev(x) == x.std_dev assert uncert_core.std_dev(y) == y.std_dev # std_dev for other objects: assert uncert_core.std_dev([]) == 0 assert uncert_core.std_dev(None) == 0 ############################################################################### def test_covariances(): "Covariance matrix" x = ufloat(1, 0.1) y = -2 * x + 10 z = -3 * x covs = uncert_core.covariance_matrix([x, y, z]) # Diagonal elements are simple: assert numbers_close(covs[0][0], 0.01) assert numbers_close(covs[1][1], 0.04) assert numbers_close(covs[2][2], 0.09) # Non-diagonal elements: assert numbers_close(covs[0][1], -0.02) ############################################################################### # The tests below require NumPy, which is an optional package: try: import numpy from helpers import uarrays_close except ImportError: pass else: def test_numpy_comparison(): "Comparison with a NumPy array." x = ufloat(1, 0.1) # Comparison with a different type: assert x != [x, x] # NumPy arrays can be compared, through element-wise # comparisons. Numbers with uncertainties should yield the # same kind of results as pure floats (i.e., a NumPy array, # etc.). 
# We test the comparison operators both for the uncertainties # package *and* the NumPy package: # Equalities, etc.: assert len(x == numpy.arange(10)) == 10 assert len(numpy.arange(10) == x) == 10 assert len(x != numpy.arange(10)) == 10 assert len(numpy.arange(10) != x) == 10 assert len(x == numpy.array([x, x, x])) == 3 assert len(numpy.array([x, x, x]) == x) == 3 assert numpy.all(x == numpy.array([x, x, x])) # Inequalities: assert len(x < numpy.arange(10)) == 10 assert len(numpy.arange(10) > x) == 10 assert len(x <= numpy.arange(10)) == 10 assert len(numpy.arange(10) >= x) == 10 assert len(x > numpy.arange(10)) == 10 assert len(numpy.arange(10) < x) == 10 assert len(x >= numpy.arange(10)) == 10 assert len(numpy.arange(10) <= x) == 10 # More detailed test, that shows that the comparisons are # meaningful (x >= 0, but not x <= 1): assert numpy.all((x >= numpy.arange(3)) == [True, False, False]) def test_correlated_values(): """ Correlated variables. Test through the input of the (full) covariance matrix. """ u = uncert_core.ufloat(1, 0.1) cov = uncert_core.covariance_matrix([u]) # "1" is used instead of u.nominal_value because # u.nominal_value might return a float. 
The idea is to force # the new variable u2 to be defined through an integer nominal # value: (u2,) = uncert_core.correlated_values([1], cov) expr = 2 * u2 # Calculations with u2 should be possible, like with u # noqa #################### # Covariances between output and input variables: x = ufloat(1, 0.1) y = ufloat(2, 0.3) z = -3 * x + y covs = uncert_core.covariance_matrix([x, y, z]) # Test of the diagonal covariance elements: assert uarrays_close( numpy.array([v.std_dev**2 for v in (x, y, z)]), numpy.array(covs).diagonal() ) # "Inversion" of the covariance matrix: creation of new # variables: (x_new, y_new, z_new) = uncert_core.correlated_values( [x.nominal_value, y.nominal_value, z.nominal_value], covs, tags=["x", "y", "z"], ) # Even the uncertainties should be correctly reconstructed: assert uarrays_close(numpy.array((x, y, z)), numpy.array((x_new, y_new, z_new))) # ... and the covariances too: assert uarrays_close( numpy.array(covs), numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new])), ) assert uarrays_close(numpy.array([z_new]), numpy.array([-3 * x_new + y_new])) #################### # ... 
as well as functional relations: u = ufloat(1, 0.05) v = ufloat(10, 0.1) sum_value = u + 2 * v # Covariance matrices: cov_matrix = uncert_core.covariance_matrix([u, v, sum_value]) # Correlated variables can be constructed from a covariance # matrix, if NumPy is available: (u2, v2, sum2) = uncert_core.correlated_values( [x.nominal_value for x in [u, v, sum_value]], cov_matrix ) # uarrays_close() is used instead of numbers_close() because # it compares uncertainties too: assert uarrays_close(numpy.array([u]), numpy.array([u2])) assert uarrays_close(numpy.array([v]), numpy.array([v2])) assert uarrays_close(numpy.array([sum_value]), numpy.array([sum2])) assert uarrays_close(numpy.array([0]), numpy.array([sum2 - (u2 + 2 * v2)])) # Spot checks of the correlation matrix: corr_matrix = uncert_core.correlation_matrix([u, v, sum_value]) assert numbers_close(corr_matrix[0, 0], 1) assert numbers_close(corr_matrix[1, 2], 2 * v.std_dev / sum_value.std_dev) #################### # Test of numerical robustness despite wildly different # orders of magnitude (see # https://github.com/lebigot/uncertainties/issues/95): cov = numpy.diag([1e-70, 1e-70, 1e10]) cov[0, 1] = cov[1, 0] = 0.9e-70 cov[[0, 1], 2] = -3e-34 cov[2, [0, 1]] = -3e-34 variables = uncert_core.correlated_values([0] * 3, cov) # Since the numbers are very small, we need to compare them # in a stricter way, that handles the case of a 0 variance # in `variables`: assert numbers_close( 1e66 * cov[0, 0], 1e66 * variables[0].s ** 2, tolerance=1e-5 ) assert numbers_close( 1e66 * cov[1, 1], 1e66 * variables[1].s ** 2, tolerance=1e-5 ) #################### # 0 variances are a bit special, since the correlation matrix # cannot be calculated naively, so we test that there is no # specific problem in this case: cov = numpy.diag([0, 0, 10]) nom_values = [1, 2, 3] variables = uncert_core.correlated_values(nom_values, cov) for variable, nom_value, variance in zip(variables, nom_values, cov.diagonal()): assert numbers_close(variable.n, 
nom_value) assert numbers_close(variable.s**2, variance) assert uarrays_close(cov, numpy.array(uncert_core.covariance_matrix(variables))) def test_correlated_values_correlation_mat(): """ Tests the input of correlated value. Test through their correlation matrix (instead of the covariance matrix). """ x = ufloat(1, 0.1) y = ufloat(2, 0.3) z = -3 * x + y cov_mat = uncert_core.covariance_matrix([x, y, z]) std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal()) corr_mat = cov_mat / std_devs / std_devs[numpy.newaxis].T # We make sure that the correlation matrix is indeed diagonal: assert (corr_mat - corr_mat.T).max() <= 1e-15 # We make sure that there are indeed ones on the diagonal: assert (corr_mat.diagonal() - 1).max() <= 1e-15 # We try to recover the correlated variables through the # correlation matrix (not through the covariance matrix): nominal_values = [v.nominal_value for v in (x, y, z)] std_devs = [v.std_dev for v in (x, y, z)] x2, y2, z2 = uncert_core.correlated_values_norm( list(zip(nominal_values, std_devs)), corr_mat ) # uarrays_close() is used instead of numbers_close() because # it compares uncertainties too: # Test of individual variables: assert uarrays_close(numpy.array([x]), numpy.array([x2])) assert uarrays_close(numpy.array([y]), numpy.array([y2])) assert uarrays_close(numpy.array([z]), numpy.array([z2])) # Partial correlation test: assert uarrays_close(numpy.array([0]), numpy.array([z2 - (-3 * x2 + y2)])) # Test of the full covariance matrix: assert uarrays_close( numpy.array(cov_mat), numpy.array(uncert_core.covariance_matrix([x2, y2, z2])), ) @pytest.mark.skipif( np is not None, reason="This test is only run when numpy is not installed.", ) def test_no_numpy(): nom_values = [1, 2, 3] std_devs = [0.1, 0.2, 0.3] cov = [ [1, 0, 0], [0, 1, 0], [0, 0, 1], ] with pytest.raises( NotImplementedError, match="not able to import numpy", ): _ = correlated_values(nom_values, cov) with pytest.raises( NotImplementedError, match="not able to import numpy", ): _ 
= correlated_values_norm( list(zip(nom_values, std_devs)), cov, ) x = ufloat(1, 0.1) y = ufloat(2, 0.2) z = ufloat(3, 0.3) with pytest.raises( NotImplementedError, match="not able to import numpy", ): _ = correlation_matrix([x, y, z]) @pytest.mark.parametrize("method_name", deprecated_methods) def test_deprecated_method(method_name): x = ufloat(1, 0.1) y = ufloat(-12, 2.4) num_args = len(inspect.signature(getattr(float, method_name)).parameters) with pytest.warns(FutureWarning, match="will be removed"): if num_args == 1: getattr(x, method_name)() else: getattr(x, method_name)(y) def test_zero_std_dev_warn(): with pytest.warns(UserWarning, match="std_dev==0.*unexpected results"): ufloat(1, 0) uncertainties-3.2.3/tests/test_unumpy.py000066400000000000000000000227771500152063300205160ustar00rootroot00000000000000try: import numpy except ImportError: import sys sys.exit() # There is no reason to test the interface to NumPy import uncertainties import uncertainties.core as uncert_core from uncertainties import ufloat, unumpy from uncertainties.unumpy import core from helpers import numbers_close, uarrays_close def test_numpy(): """ Interaction with NumPy, including matrix inversion, correlated_values, and calculation of the mean. 
""" arr = numpy.arange(3) num = ufloat(3.14, 0.01) # NumPy arrays can be multiplied by Variable objects, # whatever the order of the operands: prod1 = arr * num prod2 = num * arr # Additional check: assert (prod1 == prod2).all() # Operations with arrays work (they are first handled by NumPy, # then by this module): prod1 * prod2 # This should be calculable assert not (prod1 - prod2).any() # All elements must be 0 # Comparisons work too: # Usual behavior: assert len(arr[arr > 1.5]) == 1 # Comparisons with Variable objects: assert len(arr[arr > ufloat(1.5, 0.1)]) == 1 assert len(prod1[prod1 < prod1 * prod2]) == 2 # The following can be calculated (special NumPy abs() function): numpy.abs(arr + ufloat(-1, 0.1)) # The following does not completely work, because NumPy does not # implement numpy.exp on an array of general objects, apparently: assert numpy.exp(arr).all() # All elements > 0 # Equivalent with an array of AffineScalarFunc objects: try: numpy.exp(arr + ufloat(0, 0)) except (AttributeError, TypeError): # In numpy<1.17, an AttributeError is raised in this situation. 
This was # considered a bug however, and in numpy 1.17 it was changed to a # TypeError (see PR #12700 in numpy repository) pass else: raise Exception("numpy.exp unexpectedly worked") # Calculation of the mean, global and with a specific axis: arr_floats = numpy.random.random((10, 3, 5)) arr = unumpy.uarray(arr_floats, arr_floats / 100) assert arr.mean(axis=0).shape == (3, 5) assert arr.mean(axis=1).shape == (10, 5) arr.mean() # Global mean def test_matrix(): "Matrices of numbers with uncertainties" # Matrix inversion: # Matrix with a mix of Variable objects and regular # Python numbers: m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)]]) m_nominal_values = unumpy.nominal_values(m) # Test of the nominal_value attribute: assert numpy.all(m_nominal_values == m.nominal_values) assert type(m[0, 0]) == uncert_core.Variable # Test of scalar multiplication, both sides: 3 * m m * 3 def derivatives_close(x, y): """ Returns True iff the AffineScalarFunc objects x and y have derivatives that are close to each other (they must depend on the same variables). 
""" # x and y must depend on the same variables: if set(x.derivatives) != set(y.derivatives): return False # Not the same variables return all( numbers_close(x.derivatives[var], y.derivatives[var]) for var in x.derivatives ) def test_inverse(): "Tests of the matrix inverse" m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)]]) m_nominal_values = unumpy.nominal_values(m) # "Regular" inverse matrix, when uncertainties are not taken # into account: m_no_uncert_inv = m_nominal_values.I # The matrix inversion should not yield numbers with uncertainties: assert m_no_uncert_inv.dtype == numpy.dtype(float) # Inverse with uncertainties: m_inv_uncert = m.I # AffineScalarFunc elements # The inverse contains uncertainties: it must support custom # operations on matrices with uncertainties: assert isinstance(m_inv_uncert, unumpy.matrix) assert type(m_inv_uncert[0, 0]) == uncert_core.AffineScalarFunc # Checks of the numerical values: the diagonal elements of the # inverse should be the inverses of the diagonal elements of # m (because we started with a triangular matrix): assert numbers_close( 1 / m_nominal_values[0, 0], m_inv_uncert[0, 0].nominal_value ), "Wrong value" assert numbers_close( 1 / m_nominal_values[1, 1], m_inv_uncert[1, 1].nominal_value ), "Wrong value" #################### # Checks of the covariances between elements: x = ufloat(10, 1) m = unumpy.matrix([[x, x], [0, 3 + 2 * x]]) m_inverse = m.I # Check of the properties of the inverse: m_double_inverse = m_inverse.I # The initial matrix should be recovered, including its # derivatives, which define covariances: assert numbers_close(m_double_inverse[0, 0].nominal_value, m[0, 0].nominal_value) assert numbers_close(m_double_inverse[0, 0].std_dev, m[0, 0].std_dev) assert uarrays_close(m_double_inverse, m) # Partial test: assert derivatives_close(m_double_inverse[0, 0], m[0, 0]) assert derivatives_close(m_double_inverse[1, 1], m[1, 1]) #################### # Tests of covariances during the inversion: # There 
are correlations if both the next two derivatives are # not zero: assert m_inverse[0, 0].derivatives[x] assert m_inverse[0, 1].derivatives[x] # Correlations between m and m_inverse should create a perfect # inversion: assert uarrays_close(m * m_inverse, numpy.eye(m.shape[0])) def test_wrap_array_func(): """ Test of numpy.wrap_array_func(), with optional arguments and keyword arguments. """ # Function that works with numbers with uncertainties in mat (if # mat is an uncertainties.unumpy.matrix): def f_unc(mat, *args, **kwargs): return mat.I + args[0] * kwargs["factor"] # Test with optional arguments and keyword arguments: def f(mat, *args, **kwargs): # This function is wrapped: it should only be called with pure # numbers: assert not any(isinstance(v, uncert_core.UFloat) for v in mat.flat) return f_unc(mat, *args, **kwargs) # Wrapped function: f_wrapped = core.wrap_array_func(f) ########## # Full rank rectangular matrix: m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)], [1, -3.1]]) # Numerical and package (analytical) pseudo-inverses: they must be # the same: m_f_wrapped = f_wrapped(m, 2, factor=10) m_f_unc = f_unc(m, 2, factor=10) assert uarrays_close(m_f_wrapped, m_f_unc) def test_pseudo_inverse(): "Tests of the pseudo-inverse" # Numerical version of the pseudo-inverse: pinv_num = core.wrap_array_func(numpy.linalg.pinv) ########## # Full rank rectangular matrix: m = unumpy.matrix([[ufloat(10, 1), -3.1], [0, ufloat(3, 0)], [1, -3.1]]) # Numerical and package (analytical) pseudo-inverses: they must be # the same: rcond = 1e-8 # Test of the second argument to pinv() m_pinv_num = pinv_num(m, rcond) m_pinv_package = core.pinv(m, rcond) assert uarrays_close(m_pinv_num, m_pinv_package) ########## # Example with a non-full rank rectangular matrix: vector = [ufloat(10, 1), -3.1, 11] m = unumpy.matrix([vector, vector]) m_pinv_num = pinv_num(m, rcond) m_pinv_package = core.pinv(m, rcond) assert uarrays_close(m_pinv_num, m_pinv_package) ########## # Example with a 
non-full-rank square matrix: m = unumpy.matrix([[ufloat(10, 1), 0], [3, 0]]) m_pinv_num = pinv_num(m, rcond) m_pinv_package = core.pinv(m, rcond) assert uarrays_close(m_pinv_num, m_pinv_package) def test_broadcast_funcs(): """ Test of mathematical functions that work with NumPy arrays of numbers with uncertainties. """ x = ufloat(0.2, 0.1) arr = numpy.array([x, 2 * x]) assert unumpy.cos(arr)[1] == uncertainties.umath.cos(arr[1]) # Some functions do not bear the same name in the math module and # in NumPy (acos instead of arccos, etc.): assert unumpy.arccos(arr)[1] == uncertainties.umath.acos(arr[1]) # The acos() function should not exist in unumpy because the function # should have been renamed to arccos(). Starting with numpy 2 numpy.acos() # is an alias to numpy.arccos(). If similar aliases are added to unumpy, # the following tests can be removed. assert not hasattr(unumpy, "acos") # Test of the __all__ variable: assert "acos" not in unumpy.__all__ def test_array_and_matrix_creation(): "Test of custom array creation" arr = unumpy.uarray([1, 2], [0.1, 0.2]) assert arr[1].nominal_value == 2 assert arr[1].std_dev == 0.2 # Same thing for matrices: mat = unumpy.umatrix([1, 2], [0.1, 0.2]) assert mat[0, 1].nominal_value == 2 assert mat[0, 1].std_dev == 0.2 def test_component_extraction(): "Extracting the nominal values and standard deviations from an array" arr = unumpy.uarray([1, 2], [0.1, 0.2]) assert numpy.all(unumpy.nominal_values(arr) == [1, 2]) assert numpy.all(unumpy.std_devs(arr) == [0.1, 0.2]) # unumpy matrices, in addition, should have nominal_values that # are simply numpy matrices (not unumpy ones, because they have no # uncertainties): mat = unumpy.matrix(arr) assert numpy.all(unumpy.nominal_values(mat) == [1, 2]) assert numpy.all(unumpy.std_devs(mat) == [0.1, 0.2]) assert type(unumpy.nominal_values(mat)) == numpy.matrix def test_array_comparisons(): "Test of array and matrix comparisons" arr = unumpy.uarray([1, 2], [1, 4]) assert numpy.all((arr == 
[arr[0], 4]) == [True, False]) # For matrices, 1D arrays are converted to 2D arrays: mat = unumpy.umatrix([1, 2], [1, 4]) assert numpy.all((mat == [mat[0, 0], 4]) == [True, False]) uncertainties-3.2.3/uncertainties/000077500000000000000000000000001500152063300172445ustar00rootroot00000000000000uncertainties-3.2.3/uncertainties/__init__.py000066400000000000000000000220601500152063300213550ustar00rootroot00000000000000#!! Whenever the documentation below is updated, setup.py should be # checked for consistency. """ Calculations with full error propagation for quantities with uncertainties. Derivatives can also be calculated. Web user guide: https://pythonhosted.org/uncertainties/. Example of possible calculation: (0.2 +/- 0.01)**2 = 0.04 +/- 0.004. Correlations between expressions are correctly taken into account (for instance, with x = 0.2+/-0.01, 2*x-x-x is exactly zero, as is y-x-x with y = 2*x). Examples: import uncertainties from uncertainties import ufloat from uncertainties.umath import * # sin(), etc. # Mathematical operations: x = ufloat(0.20, 0.01) # x = 0.20+/-0.01 x = ufloat_fromstr("0.20+/-0.01") # Other representation x = ufloat_fromstr("0.20(1)") # Other representation # Implicit uncertainty of +/-1 on the last digit: x = ufloat_fromstr("0.20") print x**2 # Square: prints "0.040+/-0.004" print sin(x**2) # Prints "0.0399...+/-0.00399..." print x.std_score(0.17) # Prints "-3.0": deviation of -3 sigmas # Access to the nominal value, and to the uncertainty: square = x**2 # Square print square # Prints "0.040+/-0.004" print square.nominal_value # Prints "0.04" print square.std_dev # Prints "0.004..." 
print square.derivatives[x] # Partial derivative: 0.4 (= 2*0.20) # Correlations: u = ufloat(1, 0.05, "u variable") # Tag v = ufloat(10, 0.1, "v variable") sum_value = u+v u.std_dev = 0.1 # Standard deviations can be updated on the fly print sum_value - u - v # Prints "0+/-0" (exact result) # List of all sources of error: print sum_value # Prints "11.00+/-0.14" for (var, error) in sum_value.error_components().iteritems(): print "%s: %f" % (var.tag, error) # Individual error components # Covariance matrices: cov_matrix = uncertainties.covariance_matrix([u, v, sum_value]) print cov_matrix # 3x3 matrix # Correlated variables can be constructed from a covariance matrix, if # NumPy is available: (u2, v2, sum2) = uncertainties.correlated_values([1, 10, 11], cov_matrix) print u2 # Value and uncertainty of u: correctly recovered (1.00+/-0.10) print uncertainties.covariance_matrix([u2, v2, sum2]) # == cov_matrix - The main function provided by this module is ufloat, which creates numbers with uncertainties (Variable objects). Variable objects can be used as if they were regular Python numbers. The main attributes and methods of Variable objects are defined in the documentation of the Variable class. - Valid operations on numbers with uncertainties include basic mathematical functions (addition, etc.). Most operations from the standard math module (sin, etc.) can be applied on numbers with uncertainties by using their generalization from the uncertainties.umath module: from uncertainties.umath import sin print sin(ufloat_fromstr("1+/-0.01")) # 0.841+/-0.005 print sin(1) # umath.sin() also works on floats, exactly like math.sin() Logical operations (>, ==, etc.) are also supported. Basic operations on NumPy arrays or matrices of numbers with uncertainties can be performed: 2*numpy.array([ufloat(1, 0.01), ufloat(2, 0.1)]) More complex operations on NumPy arrays can be performed through the dedicated uncertainties.unumpy sub-module (see its documentation). 
Calculations that are performed through non-Python code (Fortran, C, etc.) can handle numbers with uncertainties instead of floats through the provided wrap() wrapper: import uncertainties # wrapped_f is a version of f that can take arguments with # uncertainties, even if f only takes floats: wrapped_f = uncertainties.wrap(f) If some derivatives of the wrapped function f are known (analytically, or numerically), they can be given to wrap()--see the documentation for wrap(). - Utility functions are also provided: the covariance matrix between random variables can be calculated with covariance_matrix(), or used as input for the definition of correlated quantities (correlated_values() function--defined only if the NumPy module is available). - Mathematical expressions involving numbers with uncertainties generally return AffineScalarFunc objects, which also print as a value with uncertainty. Their most useful attributes and methods are described in the documentation for AffineScalarFunc. Note that Variable objects are also AffineScalarFunc objects. UFloat is an alias for AffineScalarFunc, provided as a convenience: testing whether a value carries an uncertainty handled by this module should be done with insinstance(my_value, UFloat). - Mathematically, numbers with uncertainties are, in this package, probability distributions. These probabilities are reduced to two numbers: a nominal value and an uncertainty. Thus, both variables (Variable objects) and the result of mathematical operations (AffineScalarFunc objects) contain these two values (respectively in their nominal_value and std_dev attributes). The uncertainty of a number with uncertainty is simply defined in this package as the standard deviation of the underlying probability distribution. The numbers with uncertainties manipulated by this package are assumed to have a probability distribution mostly contained around their nominal value, in an interval of about the size of their standard deviation. 
This should cover most practical cases. A good choice of nominal value for a number with uncertainty is thus the median of its probability distribution, the location of highest probability, or the average value. - When manipulating ensembles of numbers, some of which contain uncertainties, it can be useful to access the nominal value and uncertainty of all numbers in a uniform manner: x = ufloat_fromstr("3+/-0.1") print nominal_value(x) # Prints 3 print std_dev(x) # Prints 0.1 print nominal_value(3) # Prints 3: nominal_value works on floats print std_dev(3) # Prints 0: std_dev works on floats - Probability distributions (random variables and calculation results) are printed as: nominal value +/- standard deviation but this does not imply any property on the nominal value (beyond the fact that the nominal value is normally inside the region of high probability density), or that the probability distribution of the result is symmetrical (this is rarely strictly the case). - Linear approximations of functions (around the nominal values) are used for the calculation of the standard deviation of mathematical expressions with this package. The calculated standard deviations and nominal values are thus meaningful approximations as long as the functions involved have precise linear expansions in the region where the probability distribution of their variables is the largest. It is therefore important that uncertainties be small. Mathematically, this means that the linear term of functions around the nominal values of their variables should be much larger than the remaining higher-order terms over the region of significant probability. For instance, sin(0+/-0.01) yields a meaningful standard deviation since it is quite linear over 0+/-0.01. However, cos(0+/-0.01) yields an approximate standard deviation of 0 (because the cosine is not well approximated by a line around 0), which might not be precise enough for all applications. - Comparison operations (``>``, ``==``, etc.) 
on numbers with uncertainties have a pragmatic semantics, in this package: numbers with uncertainties can be used wherever Python numbers are used, most of the time with a result identical to the one that would be obtained with their nominal value only. However, since the objects defined in this module represent probability distributions and not pure numbers, comparison operator are interpreted in a specific way. The result of a comparison operation (``==``, ``>``, etc.) is defined so as to be essentially consistent with the requirement that uncertainties be small: the value of a comparison operation is True only if the operation yields True for all infinitesimal variations of its random variables, except, possibly, for an infinitely small number of cases. Example: "x = 3.14; y = 3.14" is such that x == y but x = ufloat(3.14, 0.01) y = ufloat(3.14, 0.01) is not such that x == y, since x and y are independent random variables that almost never give the same value. However, x == x still holds. The boolean value (bool(x), ``if x...``) of a number with uncertainty x is the result of x != 0. - The uncertainties package is for Python 2.3 and above. - This package contains tests. They can be run either manually or automatically with the nose unit testing framework (nosetests). (c) 2009-2024 by Eric O. LEBIGOT (EOL) . Please use the Github project at https://github.com/lmfit/uncertainties for bug reports, feature requests, or feedback. This software is released under the BSD license. """ from .core import * # noqa from .core import __all__ # noqa For a correct help(uncertainties) from .version import __version__, __version_tuple__ # noqa # for backward compatibility __version_info__ = __version_tuple__ __author__ = "Eric O. LEBIGOT (EOL) " uncertainties-3.2.3/uncertainties/core.py000066400000000000000000001123141500152063300205500ustar00rootroot00000000000000# coding=utf-8 """ Main module for the uncertainties package, with internal functions. 
""" # The idea behind this module is to replace the result of mathematical # operations by a local approximation of the defining function. For # example, sin(0.2+/-0.01) becomes the affine function # (AffineScalarFunc object) whose nominal value is sin(0.2) and # whose variations are given by sin(0.2+delta) = 0.98...*delta. # Uncertainties can then be calculated by using this local linear # approximation of the original function. from __future__ import division # Many analytical derivatives depend on this from builtins import str, zip, range, object import functools from math import sqrt, isfinite # Optimization: no attribute look-up from warnings import warn import copy import collections from uncertainties.formatting import format_ufloat from uncertainties.parsing import str_to_number_with_uncert from . import ops from uncertainties.ops import ( _wrap, set_doc, nan_if_exception, modified_operators, modified_ops_with_reflection, ) # Attributes that are always exported (some other attributes are # exported only if the NumPy module is available...): __all__ = [ # All sub-modules and packages are not imported by default, # in particular because NumPy might be unavailable. "ufloat", # Main function: returns a number with uncertainty "ufloat_fromstr", # Important function: returns a number with uncertainty # Uniform access to nominal values and standard deviations: "nominal_value", "std_dev", # Utility functions (more are exported if NumPy is present): "covariance_matrix", # Class for testing whether an object is a number with # uncertainty. Not usually created by users (except through the # Variable subclass), but possibly manipulated by external code # ['derivatives()' method, etc.]. 
"UFloat", "Variable", # Wrapper for allowing non-pure-Python function to handle # quantitities with uncertainties: "wrap", # used internally and will be removed by linter if not here "nan_if_exception", "modified_operators", "modified_ops_with_reflection", "correlated_values", "correlated_values_norm", "correlation_matrix", ] ############################################################################### ## Definitions that depend on the availability of NumPy: try: import numpy except ImportError: numpy = None def correlated_values(nom_values, covariance_mat, tags=None): """ Return numbers with uncertainties (AffineScalarFunc objects) that correctly reproduce the given covariance matrix, and have the given (float) values as their nominal value. The correlated_values_norm() function returns the same result, but takes a correlation matrix instead of a covariance matrix. The list of values and the covariance matrix must have the same length, and the matrix must be a square (symmetric) one. The numbers with uncertainties returned depend on newly created, independent variables (Variable objects). nom_values -- sequence with the nominal (real) values of the numbers with uncertainties to be returned. covariance_mat -- full covariance matrix of the returned numbers with uncertainties. For example, the first element of this matrix is the variance of the first number with uncertainty. This matrix must be a NumPy array-like (list of lists, NumPy array, etc.). tags -- if 'tags' is not None, it must list the tag of each new independent variable. This function raises NotImplementedError if numpy cannot be imported. """ if numpy is None: msg = ( "uncertainties was not able to import numpy so " "correlated_values is unavailable." ) raise NotImplementedError(msg) # !!! It would in principle be possible to handle 0 variance # variables by first selecting the sub-matrix that does not contain # such variables (with the help of numpy.ix_()), and creating # them separately. 
std_devs = numpy.sqrt(numpy.diag(covariance_mat)) # For numerical stability reasons, we go through the correlation # matrix, because it is insensitive to any change of scale in the # quantities returned. However, care must be taken with 0 variance # variables: calculating the correlation matrix cannot be simply done # by dividing by standard deviations. We thus use specific # normalization values, with no null value: norm_vector = std_devs.copy() norm_vector[norm_vector == 0] = 1 return correlated_values_norm( # !! The following zip() is a bit suboptimal: correlated_values() # separates back the nominal values and the standard deviations: list(zip(nom_values, std_devs)), covariance_mat / norm_vector / norm_vector[:, numpy.newaxis], tags, ) def correlated_values_norm(values_with_std_dev, correlation_mat, tags=None): """ Return correlated values like correlated_values(), but takes instead as input: - nominal (float) values along with their standard deviation, and - a correlation matrix (i.e. a normalized covariance matrix). values_with_std_dev -- sequence of (nominal value, standard deviation) pairs. The returned, correlated values have these nominal values and standard deviations. correlation_mat -- correlation matrix between the given values, except that any value with a 0 standard deviation must have its correlations set to 0, with a diagonal element set to an arbitrary value (something close to 0-1 is recommended, for a better numerical precision). When no value has a 0 variance, this is the covariance matrix normalized by standard deviations, and thus a symmetric matrix with ones on its diagonal. This matrix must be an NumPy array-like (list of lists, NumPy array, etc.). tags -- like for correlated_values(). This function raises NotImplementedError if numpy cannot be imported. """ if numpy is None: msg = ( "uncertainties was not able to import numpy so " "correlated_values_norm is unavailable." 
) raise NotImplementedError(msg) # If no tags were given, we prepare tags for the newly created # variables: if tags is None: tags = (None,) * len(values_with_std_dev) (nominal_values, std_devs) = numpy.transpose(values_with_std_dev) # We diagonalize the correlation matrix instead of the # covariance matrix, because this is generally more stable # numerically. In fact, the covariance matrix can have # coefficients with arbitrary values, through changes of units # of its input variables. This creates numerical instabilities. # # The covariance matrix is diagonalized in order to define # the independent variables that model the given values: (variances, transform) = numpy.linalg.eigh(correlation_mat) # Numerical errors might make some variances negative: we set # them to zero: variances[variances < 0] = 0.0 # Creation of new, independent variables: # We use the fact that the eigenvectors in 'transform' are # special: 'transform' is unitary: its inverse is its transpose: variables = tuple( # The variables represent "pure" uncertainties: Variable(0, sqrt(variance), tag) for (variance, tag) in zip(variances, tags) ) # The coordinates of each new uncertainty as a function of the # new variables must include the variable scale (standard deviation): transform *= std_devs[:, numpy.newaxis] # Representation of the initial correlated values: values_funcs = tuple( AffineScalarFunc(value, LinearCombination(dict(zip(variables, coords)))) for (coords, value) in zip(transform, nominal_values) ) return values_funcs def correlation_matrix(nums_with_uncert): """ Return the correlation matrix of the given sequence of numbers with uncertainties, as a NumPy array of floats. This function raises NotImplementedError if numpy cannot be imported. """ if numpy is None: msg = ( "uncertainties was not able to import numpy so " "correlation_matrix is unavailable." 
) raise NotImplementedError(msg) cov_mat = numpy.array(covariance_matrix(nums_with_uncert)) std_devs = numpy.sqrt(cov_mat.diagonal()) return cov_mat / std_devs / std_devs[numpy.newaxis].T ######################################## class LinearCombination(object): """ Linear combination of Variable differentials. The linear_combo attribute can change formally, but its value always remains the same. Typically, the linear combination can thus be expanded. The expanded form of linear_combo is a mapping from Variables to the coefficient of their differential. """ # ! Invariant: linear_combo is represented internally exactly as # the linear_combo argument to __init__(): __slots__ = "linear_combo" def __init__(self, linear_combo): """ linear_combo can be modified by the object, during its lifetime. This allows the object to change its internal representation over time (for instance by expanding the linear combination and replacing the original expression with the expanded one). linear_combo -- if linear_combo is a dict, then it represents an expanded linear combination and must map Variables to the coefficient of their differential. Otherwise, it should be a list of (coefficient, LinearCombination) pairs (that represents a linear combination expression). """ self.linear_combo = linear_combo def __bool__(self): """ Return True only if the linear combination is non-empty, i.e. if the linear combination contains any term. """ return bool(self.linear_combo) def expanded(self): """ Return True if and only if the linear combination is expanded. """ return isinstance(self.linear_combo, dict) def expand(self): """ Expand the linear combination. The expansion is a collections.defaultdict(float). This should only be called if the linear combination is not yet expanded. """ # The derivatives are built progressively by expanding each # term of the linear combination until there is no linear # combination to be expanded. 
# Final derivatives, constructed progressively: derivatives = collections.defaultdict(float) while self.linear_combo: # The list of terms is emptied progressively # One of the terms is expanded or, if no expansion is # needed, simply added to the existing derivatives. # # Optimization note: since Python's operations are # left-associative, a long sum of Variables can be built # such that the last term is essentially a Variable (and # not a NestedLinearCombination): popping from the # remaining terms allows this term to be quickly put in # the final result, which limits the number of terms # remaining (and whose size can temporarily grow): (main_factor, main_expr) = self.linear_combo.pop() # print "MAINS", main_factor, main_expr if main_expr.expanded(): for var, factor in main_expr.linear_combo.items(): derivatives[var] += main_factor * factor else: # Non-expanded form for factor, expr in main_expr.linear_combo: # The main_factor is applied to expr: self.linear_combo.append((main_factor * factor, expr)) # print "DERIV", derivatives self.linear_combo = derivatives def __getstate__(self): # Not false, otherwise __setstate__() will not be called: return (self.linear_combo,) def __setstate__(self, state): (self.linear_combo,) = state class AffineScalarFunc(object): """ Affine functions that support basic mathematical operations (addition, etc.). Such functions can for instance be used for representing the local (linear) behavior of any function. This class can also be used to represent constants. The variables of affine scalar functions are Variable objects. AffineScalarFunc objects include facilities for calculating the 'error' on the function, from the uncertainties on its variables. Main attributes and methods: - nominal_value, std_dev: value at the origin / nominal value, and standard deviation. The standard deviation can be NaN or infinity. - n, s: abbreviations for nominal_value and std_dev. 
- error_components(): error_components()[x] is the error due to Variable x. - derivatives: derivatives[x] is the (value of the) derivative with respect to Variable x. This attribute is a Derivatives dictionary whose keys are the Variable objects on which the function depends. The values are the numerical values of the derivatives. All the Variable objects on which the function depends are in 'derivatives'. - std_score(x): position of number x with respect to the nominal value, in units of the standard deviation. """ # To save memory in large arrays: __slots__ = ("_nominal_value", "_linear_part") # !! Fix for mean() in NumPy 1.8.0: class dtype(object): type = staticmethod(lambda value: value) #! The code could be modified in order to accommodate for non-float # nominal values. This could for instance be done through # the operator module: instead of delegating operations to # float.__*__ operations, they could be delegated to # operator.__*__ functions (while taking care of properly handling # reverse operations: __radd__, etc.). def __init__(self, nominal_value, linear_part): """ nominal_value -- value of the function when the linear part is zero. linear_part -- LinearCombination that describes the linear part of the AffineScalarFunc. """ # ! A technical consistency requirement is that the # linear_part can be nested inside a NestedLinearCombination # (because this is how functions on AffineScalarFunc calculate # their result: by constructing nested expressions for them). # Defines the value at the origin: # Only float-like values are handled. One reason is that it # does not make sense for a scalar function to be affine to # not yield float values. Another reason is that it would not # make sense to have a complex nominal value, here (it would # not be handled correctly at all): converting to float should # be possible. 
self._nominal_value = float(nominal_value) # In order to have a linear execution time for long sums, the # _linear_part is generally left as is (otherwise, each # successive term would expand to a linearly growing sum of # terms: efficiently handling such terms [so, without copies] # is not obvious, when the algorithm should work for all # functions beyond sums). if not isinstance(linear_part, LinearCombination): linear_part = LinearCombination(linear_part) self._linear_part = linear_part # The following prevents the 'nominal_value' attribute from being # modified by the user: @property def nominal_value(self): "Nominal value of the random number." return self._nominal_value # Abbreviation (for formulas, etc.): n = nominal_value ############################################################ # Making derivatives a property gives the user a clean syntax, # which is consistent with derivatives becoming a dictionary. @property def derivatives(self): """ Return a mapping from each Variable object on which the function (self) depends to the value of the derivative with respect to that variable. This mapping should not be modified. Derivative values are always floats. This mapping is cached, for subsequent calls. """ if not self._linear_part.expanded(): self._linear_part.expand() # Attempts to get the contribution of a variable that the # function does not depend on raise a KeyError: self._linear_part.linear_combo.default_factory = None return self._linear_part.linear_combo ######################################## # Uncertainties handling: def error_components(self): """ Individual components of the standard deviation of the affine function (in absolute value), returned as a dictionary with Variable objects as keys. The returned variables are the independent variables that the affine function depends on. This method assumes that the derivatives contained in the object take scalar values (and are not a tuple, like what math.frexp() returns, for instance). 
""" # Calculation of the variance: error_components = {} for variable, derivative in self.derivatives.items(): # print "TYPE", type(variable), type(derivative) # Individual standard error due to variable: # 0 is returned even for a NaN derivative (in this case no # multiplication by the derivative is performed): an exact # variable obviously leads to no uncertainty in the # functions that depend on it. if variable._std_dev == 0: # !!! Shouldn't the errors always be floats, as a # convention of this module? error_components[variable] = 0 else: error_components[variable] = abs(derivative * variable._std_dev) return error_components @property def std_dev(self): """ Standard deviation of the affine function. This method assumes that the function returns scalar results. This returned standard deviation depends on the current standard deviations [std_dev] of the variables (Variable objects) involved. """ #! It would be possible to not allow the user to update the # std dev of Variable objects, in which case AffineScalarFunc # objects could have a pre-calculated or, better, cached # std_dev value (in fact, many intermediate AffineScalarFunc do # not need to have their std_dev calculated: only the final # AffineScalarFunc returned to the user does). return float(sqrt(sum(delta**2 for delta in self.error_components().values()))) # Abbreviation (for formulas, etc.): s = std_dev def __repr__(self): # Not putting spaces around "+/-" helps with arrays of # Variable, as each value with an uncertainty is a # block of signs (otherwise, the standard deviation can be # mistaken for another element of the array). 
std_dev = self.std_dev # Optimization, since std_dev is calculated # A zero standard deviation is printed because otherwise, # ufloat_fromstr() does not correctly parse back the value # ("1.23" is interpreted as "1.23(1)"): if std_dev: std_dev_str = repr(std_dev) else: std_dev_str = "0" return "%r+/-%s" % (self.nominal_value, std_dev_str) def __str__(self): # An empty format string and str() usually return the same # string # (http://docs.python.org/2/library/string.html#format-specification-mini-language): return self.format("") @set_doc(format_ufloat.__doc__) def __format__(self, format_spec): return format_ufloat(self, format_spec) @set_doc(""" Return the same result as self.__format__(format_spec), or equivalently as the format(self, format_spec) of Python 2.6+. This method is meant to be used for formatting numbers with uncertainties in Python < 2.6, with '... %s ...' % num.format('.2e'). """) def format(self, format_spec): return format_ufloat(self, format_spec) def std_score(self, value): """ Return 'value' - nominal value, in units of the standard deviation. Raises a ValueError exception if the standard deviation is zero. """ try: # The ._nominal_value is a float: there is no integer division, # here: return (value - self._nominal_value) / self.std_dev except ZeroDivisionError: raise ValueError("The standard deviation is zero:" " undefined result") def __deepcopy__(self, memo): """ Hook for the standard copy module. The returned AffineScalarFunc is a completely fresh copy, which is fully independent of any variable defined so far. New variables are specially created for the returned AffineScalarFunc object. """ return AffineScalarFunc(self._nominal_value, copy.deepcopy(self._linear_part)) def __getstate__(self): """ Hook for the pickle module. The slot attributes of the parent classes are returned, as well as those of the __dict__ attribute of the object (if any). 
""" # In general (case where this class is subclassed), data # attribute are stored in two places: possibly in __dict_, and # in slots. Data from both locations is returned by this # method. all_attrs = {} # Support for subclasses that do not use __slots__ (except # through inheritance): instances have a __dict__ # attribute. The keys in this __dict__ are shadowed by the # slot attribute names (reference: # http://stackoverflow.com/questions/15139067/attribute-access-in-python-first-slots-then-dict/15139208#15139208). # The method below not only preserves this behavior, but also # saves the full contents of __dict__. This is robust: # unpickling gives back the original __dict__ even if __dict__ # contains keys that are shadowed by slot names: try: all_attrs["__dict__"] = self.__dict__ except AttributeError: pass # All the slot attributes are gathered. # Classes that do not define __slots__ have the __slots__ of # one of their parents (the first parent with their own # __slots__ in MRO). This is why the slot names are first # gathered (with repetitions removed, in general), and their # values obtained later. all_slots = set() for cls in type(self).mro(): # In the diamond inheritance pattern, some parent classes # may not have __slots__: slot_names = getattr(cls, "__slots__", ()) # Slot names can be given in various forms (string, # sequence, iterable): if isinstance(slot_names, str): all_slots.add(slot_names) # Single name else: all_slots.update(slot_names) # The slot values are stored: for name in all_slots: try: # !! It might happen that '__dict__' is itself a slot # name. In this case, its value is saved # again. Alternatively, the loop could be done on # all_slots - {'__dict__'}: all_attrs[name] = getattr(self, name) except AttributeError: pass # Undefined slot attribute return all_attrs def __setstate__(self, data_dict): """ Hook for the pickle module. 
""" for name, value in data_dict.items(): # Contrary to the default __setstate__(), this does not # necessarily save to the instance dictionary (because the # instance might contain slots): setattr(self, name, value) ops.add_arithmetic_ops(AffineScalarFunc) ops.add_comparative_ops(AffineScalarFunc) to_affine_scalar = AffineScalarFunc._to_affine_scalar # Nicer name, for users: isinstance(ufloat(...), UFloat) is # True. Also: isinstance(..., UFloat) is the test for "is this a # number with uncertainties from the uncertainties package?": UFloat = AffineScalarFunc def wrap(f, derivatives_args=None, derivatives_kwargs=None): """Wrap a function f into one that accepts Variables. The function f must return a float or integer value. The returned wrapped function will return values with both uncertainties and correlations, but can be used as a drop-in replacement for the original function. Arguments: ---------- derivatives_args: list or iterable list or tupleof derivative functionss or None with respect to the positional arguments of `f`. See Note 1. derivatives_kwargs: dictionary dict of derivative functionss or None with respect to the keyword arguments of `f`. See Note 1. Notes: ------- 1. Each function must be the partial derivative of f with respect to the corresponding positional parameters, and must have the same signature as ``f``. `derivative_args` hold derivitative functions for positional arguments (include `*varargs`), while `derivative_kwargs` holds derivitative functions for keyword arguments (include `**kwargs`). If an entry is `None` or not supplied, and if the argument value isa numeric Variable, a numerical derivative will be used. Non-numeric are ignored. 2. If derivatives are meaningless or the function is not function is not differentiable, the derivative funcion should return NaN for values for which the the function is not differentiable. 
Example: -------- To wrap `sin`, one could do >>> from uncertainties import wrap, umath >>> import math >>> usin_a = wrap(math.sin) # uses numerical derivative >>> usin_b = wrap(math.sin, [math.cos]) # use analytic derivative >>> usin_c = umath.sin # builtin, same as usin_2 These will all give the same result. """ return _wrap( AffineScalarFunc, f, derivatives_args=derivatives_args, derivatives_kwargs=derivatives_kwargs, ) ############################################################################### class NegativeStdDev(Exception): """Raise for a negative standard deviation""" pass class Variable(AffineScalarFunc): """ Representation of a float-like scalar Variable with its uncertainty. Variables are independent from each other, but correlations between them are handled through the AffineScalarFunc class. """ # To save memory in large arrays: __slots__ = ("_std_dev", "tag") def __init__(self, value, std_dev, tag=None): """ The nominal value and the standard deviation of the variable are set. The value is converted to float. The standard deviation std_dev can be NaN. It should normally be a float or an integer. 'tag' is a tag that the user can associate to the variable. This is useful for tracing variables. The meaning of the nominal value is described in the main module documentation. """ #! The value, std_dev, and tag are assumed by __copy__() not to # be copied. Either this should be guaranteed here, or __copy__ # should be updated. # Only float-like values are handled. One reason is that the # division operator on integers would not produce a # differentiable functions: for instance, Variable(3, 0.1)/2 # has a nominal value of 3/2 = 1, but a "shifted" value # of 3.1/2 = 1.55. value = float(value) # If the variable changes by dx, then the value of the affine # function that gives its value changes by 1*dx: # ! Memory cycles are created. However, they are garbage # collected, if possible. Using a weakref.WeakKeyDictionary # takes much more memory. 
Thus, this implementation chooses # more cycles and a smaller memory footprint instead of no # cycles and a larger memory footprint. super(Variable, self).__init__(value, LinearCombination({self: 1.0})) self.std_dev = std_dev # Assignment through a Python property self.tag = tag @property def std_dev(self): return self._std_dev # Standard deviations can be modified (this is a feature). # AffineScalarFunc objects that depend on the Variable have their # std_dev automatically modified (recalculated with the new # std_dev of their Variables): @std_dev.setter def std_dev(self, std_dev): # We force the error to be float-like. Since it is considered # as a standard deviation, it must be either positive or NaN: # (Note: if NaN < 0 is False, there is no need to test # separately for NaN. But this is not guaranteed, even if it # should work on most platforms.) if std_dev < 0 and isfinite(std_dev): raise NegativeStdDev("The standard deviation cannot be negative") self._std_dev = float(std_dev) # The following method is overridden so that we can represent the tag: def __repr__(self): num_repr = super(Variable, self).__repr__() if self.tag is None: return num_repr else: return "< %s = %s >" % (self.tag, num_repr) def __hash__(self): # All Variable objects are by definition independent # variables, so they never compare equal; therefore, their # id() are allowed to differ # (http://docs.python.org/reference/datamodel.html#object.__hash__): return id(self) def __copy__(self): """ Hook for the standard copy module. """ # !!!!!! The comment below might not be valid anymore now that # Variables do not contain derivatives anymore. # This copy implicitly takes care of the reference of the # variable to itself (in self.derivatives): the new Variable # object points to itself, not to the original Variable. # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html #! 
The following assumes that the arguments to Variable are # *not* copied upon construction, since __copy__ is not supposed # to copy "inside" information: return Variable(self.nominal_value, self.std_dev, self.tag) def __deepcopy__(self, memo): """ Hook for the standard copy module. A new variable is created. """ # This deep copy implicitly takes care of the reference of the # variable to itself (in self.derivatives): the new Variable # object points to itself, not to the original Variable. # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html return self.__copy__() ############################################################################### # Utilities def nominal_value(x): """ Return the nominal value of x if it is a quantity with uncertainty (i.e., an AffineScalarFunc object); otherwise, returns x unchanged. This utility function is useful for transforming a series of numbers, when only some of them generally carry an uncertainty. """ if isinstance(x, AffineScalarFunc): return x.nominal_value else: return x def std_dev(x): """ Return the standard deviation of x if it is a quantity with uncertainty (i.e., an AffineScalarFunc object); otherwise, returns the float 0. This utility function is useful for transforming a series of numbers, when only some of them generally carry an uncertainty. """ if isinstance(x, AffineScalarFunc): return x.std_dev else: return 0.0 def covariance_matrix(nums_with_uncert): """ Return a matrix that contains the covariances between the given sequence of numbers with uncertainties (AffineScalarFunc objects). The resulting matrix implicitly depends on their ordering in 'nums_with_uncert'. The covariances are floats (never int objects). The returned covariance matrix is the exact linear approximation result, if the nominal values of the numbers with uncertainties and of their variables are their mean. Otherwise, the returned covariance matrix should be close to its linear approximation value. 
def covariance_matrix(nums_with_uncert):
    """
    Return the covariance matrix of the given sequence of numbers with
    uncertainties (AffineScalarFunc objects), as a list of lists of
    floats (never int objects).

    The entry at [i][j] is the covariance between elements i and j of
    'nums_with_uncert', so the result implicitly depends on the
    ordering of the sequence.

    The returned matrix is the exact linear-approximation result when
    the nominal values of the numbers and of their variables are their
    means; otherwise it should be close to that value.
    """
    # See PSI.411 in EOL's notes.

    # Lower triangle (diagonal included), built row by row:
    rows = []
    for row_index, num_row in enumerate(nums_with_uncert):
        row_derivatives = num_row.derivatives  # Optimization
        row_vars = set(row_derivatives)
        row = []
        for num_col in nums_with_uncert[: row_index + 1]:
            col_derivatives = num_col.derivatives  # Optimization
            # Only the variables common to both numbers contribute to
            # their covariance.  The explicit 0.0 start value keeps the
            # result a float even when no variable is shared (sum()
            # with no terms would otherwise return an integer):
            row.append(
                sum(
                    (
                        row_derivatives[var] * col_derivatives[var] * var._std_dev**2
                        for var in row_vars.intersection(col_derivatives)
                    ),
                    0.0,
                )
            )
        rows.append(row)

    # Mirror the lower triangle into the upper one, so that every row
    # spans the full matrix width:
    size = len(rows)
    for diag_index, row in enumerate(rows):
        row.extend(rows[other][diag_index] for other in range(diag_index + 1, size))

    return rows


def ufloat_fromstr(representation, tag=None):
    """
    Create an uncertainties Variable from a string representation.
    Several representation formats are supported.

    Arguments:
    ----------
    representation: string
        string representation of a value with uncertainty
    tag:   string or `None`
        optional tag for tracing and organizing Variables ['None']

    Returns:
    --------
    uncertainties Variable.

    Notes:
    --------
    1. Invalid representations raise a ValueError.

    2. Using the form "nominal(std)" where "std" is an integer creates
       a Variable with "std" giving the least significant digit(s).
       That is, "1.25(3)" is the same as `ufloat(1.25, 0.03)`,
       while "1.25(3.)" is the same as `ufloat(1.25, 3.)`

    3. If the representation does not contain an uncertainty, an
       uncertainty of 1 in the least significant digit is assigned to
       the nominal value.  For nominal values corresponding to "nan",
       an uncertainty of 1 is assigned.

    Examples:
    -----------
    >>> from uncertainties import ufloat_fromstr
    >>> x = ufloat_fromstr("12.58+/-0.23")  # = ufloat(12.58, 0.23)
    >>> x = ufloat_fromstr("12.58 ± 0.23")  # = ufloat(12.58, 0.23)
    >>> x = ufloat_fromstr("3.85e5 +/- 2.3e4")  # = ufloat(3.8e5, 2.3e4)
    >>> x = ufloat_fromstr("(38.5 +/- 2.3)e4")  # = ufloat(3.8e5, 2.3e4)
    >>> x = ufloat_fromstr("72.1(2.2)")  # = ufloat(72.1, 2.2)
    >>> x = ufloat_fromstr("72.15(4)")  # = ufloat(72.15, 0.04)
    >>> x = ufloat_fromstr("680(41)e-3")  # = ufloat(0.68, 0.041)
    >>> x = ufloat_fromstr("23.2")  # = ufloat(23.2, 0.1)
    >>> x = ufloat_fromstr("23.29")  # = ufloat(23.29, 0.01)
    >>> x = ufloat_fromstr("nan")  # = ufloat(numpy.nan, 1.0)
    >>> x = ufloat_fromstr("680.3(nan)")  # = ufloat(680.3, numpy.nan)
    """
    stripped = representation.strip()
    nominal, uncertainty = str_to_number_with_uncert(stripped)
    return ufloat(nominal, uncertainty, tag)
""" if std_dev == 0: warn("Using UFloat objects with std_dev==0 may give unexpected results.") return Variable(nominal_value, std_dev, tag=tag) # Deprecated UFloat methods def deprecation_wrapper(func, msg): @functools.wraps(func) def wrapped(*args, **kwargs): warn(msg, FutureWarning, stacklevel=2) return func(*args, **kwargs) return wrapped deprecated_methods = [ "__floordiv__", "__mod__", "__abs__", "__trunc__", "__lt__", "__gt__", "__le__", "__ge__", ] for method_name in deprecated_methods: message = ( f"AffineScalarFunc.{method_name}() is deprecated. It will be removed in a future " f"release." ) setattr( AffineScalarFunc, method_name, deprecation_wrapper(getattr(AffineScalarFunc, method_name), message), ) uncertainties-3.2.3/uncertainties/formatting.py000066400000000000000000001233041500152063300217730ustar00rootroot00000000000000from math import isinf, isnan, isfinite import math import re def first_digit(value): """ Return the first digit position of the given value, as an integer. 0 is the digit just before the decimal point. Digits to the right of the decimal point have a negative position. Return 0 for a null value. """ try: return int(math.floor(math.log10(abs(value)))) except ValueError: # Case of value == 0 return 0 def PDG_precision(std_dev): """ Return the number of significant digits to be used for the given standard deviation, according to the rounding rules of the Particle Data Group (2010) (http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). Also returns the effective standard deviation to be used for display. """ exponent = first_digit(std_dev) # The first three digits are what matters: we get them as an # integer number in [100; 999). # # In order to prevent underflow or overflow when calculating # 10**exponent, the exponent is slightly modified first and a # factor to be applied after "removing" the new exponent is # defined. 
# # Furthermore, 10**(-exponent) is not used because the exponent # range for very small and very big floats is generally different. if exponent >= 0: # The -2 here means "take two additional digits": (exponent, factor) = (exponent - 2, 1) else: (exponent, factor) = (exponent + 1, 1000) digits = int(std_dev / 10.0**exponent * factor) # int rounds towards zero # Rules: if digits <= 354: return (2, std_dev) elif digits <= 949: return (1, std_dev) else: # The parentheses matter, for very small or very large # std_dev: return (2, 10.0**exponent * (1000 / factor)) # Definition of a basic (format specification only, no full-feature # format string) formatting function that works whatever the version # of Python. This function exists so that the more capable format() is # used instead of the % formatting operator, if available: robust_format = format # Exponent letter: the keys are the possible main_fmt_type values of # format_num(): EXP_LETTERS = {"f": "e", "F": "E"} def robust_align(orig_str, fill_char, align_option, width): """ Aligns the given string with the given fill character. orig_str -- string to be aligned (str or unicode object). fill_char -- if empty, space is used. align_option -- as accepted by format(). wdith -- string that contains the width. """ # print "ALIGNING", repr(orig_str), "WITH", fill_char+align_option, # print "WIDTH", width return format(orig_str, fill_char + align_option + width) # Maps some Unicode code points ("-", "+", and digits) to their # superscript version: TO_SUPERSCRIPT = { 0x2B: "⁺", 0x2D: "⁻", 0x30: "⁰", 0x31: "¹", 0x32: "²", 0x33: "³", 0x34: "⁴", 0x35: "⁵", 0x36: "⁶", 0x37: "⁷", 0x38: "⁸", 0x39: "⁹", } # Inverted TO_SUPERSCRIPT table, for use with unicode.translate(): # #! Python 2.7+ can use a dictionary comprehension instead: FROM_SUPERSCRIPT = {ord(sup): normal for (normal, sup) in TO_SUPERSCRIPT.items()} def to_superscript(value): """ Return a (Unicode) string with the given value as superscript characters. 
The value is formatted with the %d %-operator format. value -- integer. """ return ("%d" % value).translate(TO_SUPERSCRIPT) def nrmlze_superscript(number_str): """ Return a string with superscript digits transformed into regular digits. Non-superscript digits are not changed before the conversion. Thus, the string can also contain regular digits. ValueError is raised if the conversion cannot be done. number_str -- string to be converted (of type str, but also possibly, for Python 2, unicode, which allows this string to contain superscript digits). """ # !! Python 3 doesn't need this str(), which is only here for giving the # .translate() method to str objects in Python 2 (this str() comes # from the builtins module of the future package and is therefore # a subclass of unicode, in Python 2): return int(str(number_str).translate(FROM_SUPERSCRIPT)) PM_SYMBOLS = {"pretty-print": "±", "latex": r" \pm ", "default": "+/-"} # Multiplication symbol for pretty printing (so that pretty printing can # be customized): MULT_SYMBOLS = {"pretty-print": "×", "latex": r"\times"} # Function that transforms a numerical exponent produced by format_num() into # the corresponding string notation (for non-default modes): EXP_PRINT = { "pretty-print": lambda common_exp: "%s10%s" % (MULT_SYMBOLS["pretty-print"], to_superscript(common_exp)), "latex": lambda common_exp: r" %s 10^{%d}" % (MULT_SYMBOLS["latex"], common_exp), } # Symbols used for grouping (typically between parentheses) in format_num(): GROUP_SYMBOLS = { "pretty-print": ("(", ")"), # Because of possibly exponents inside the parentheses (case of a # specified field width), it is better to use auto-adjusting # parentheses. This has the side effect of making the part between # the parentheses non-breakable (the text inside parentheses in a # LaTeX math expression $...$ can be broken). 
"latex": (r"\left(", r"\right)"), "default": ("(", ")"), # Basic text mode } def format_num( nom_val_main, error_main, common_exp, fmt_parts, prec, main_pres_type, options ): """ Return a formatted number with uncertainty. Null errors (error_main) are displayed as the integer 0, with no decimal point. The formatting can be customized globally through the PM_SYMBOLS, MULT_SYMBOLS, GROUP_SYMBOLS and EXP_PRINT dictionaries, which contain respectively the symbol for ±, for multiplication, for parentheses, and a function that maps an exponent to something like "×10²" (using MULT_SYMBOLS). Each of these dictionary has (at least) a 'pretty-print' and a 'latex' key, that define the symbols to be used for these two output formats (the PM_SYMBOLS and GROUP_SYMBOLS also have a 'default' key for the default output format). For example, the defaults for the 'pretty-print' format are: - PM_SYMBOLS['pretty-print'] = '±' - MULT_SYMBOLS['pretty-print'] = '×' - GROUP_SYMBOLS['pretty-print'] = ( '(', ')' ) - EXP_PRINT['pretty-print']: see the source code. Arguments: nom_val_main, error_main -- nominal value and error, before using common_exp (e.g., "1.23e2" would have a main value of 1.23; similarly, "12.3+/-0.01" would have a main value of 12.3). common_exp -- common exponent to use. If None, no common exponent is used. fmt_parts -- mapping that contains at least the following parts of the format specification: fill, align, sign, zero, width, comma, type; the value are strings. These format specification parts are handled. The width is applied to each value, or, if the shorthand notation is used, globally. If the error is special (zero, NaN, inf), the parts are applied as much as possible to the nominal value. prec -- precision to use with the main_pres_type format type (see below). main_pres_type -- format presentation type, either "f" or "F". This defines how the mantissas, exponents and NaN/inf values are represented (in the same way as for float). 
None, the empty string, or "%" are not accepted. options -- options (as an object that support membership testing, like for instance a string). "P" is for pretty-printing ("±" between the nominal value and the error, superscript exponents, etc.). "L" is for a LaTeX output. "S" is for the shorthand notation 1.23(1). "p" is for making sure that the …±… part is surrounded by parentheses. "%" adds a final percent sign, and parentheses if the shorthand notation is not used. Options can be combined. The P option has priority over the L option (if both are given). For details, see the documentation for AffineScalarFunction.__format__(). """ # print (nom_val_main, error_main, common_exp, # fmt_parts, prec, main_pres_type, options) # If a decimal point were always present in zero rounded errors # that are not zero, the formatting would be difficult, in general # (because the formatting options are very general): an example # is'{:04.0f}'.format(0.1), which gives "0000" and would have to # give "000.". Another example is '{:<4.0f}'.format(0.1), which # gives "0 " but should give "0. ". This is cumbersome to # implement in the general case, because no format prints "0." # for 0. Furthermore, using the .0f format already brings the same # kind of difficulty: non-zero numbers can appear as the exact # integer zero, after rounding. The problem is not larger, for # numbers with an error. # # That said, it is good to indicate null errors explicitly when # possible: printing 3.1±0 with the default format prints 3.1+/-0, # which shows that the uncertainty is exactly zero. # The suffix of the result is calculated first because it is # useful for the width handling of the shorthand notation. # Printing type for parts of the result (exponent, parentheses), # taking into account the priority of the pretty-print mode over # the LaTeX mode. This setting does not apply to everything: for # example, NaN is formatted as \mathrm{nan} (or NAN) if the LaTeX # mode is required. 
if "P" in options: print_type = "pretty-print" elif "L" in options: print_type = "latex" else: print_type = "default" # Exponent part: if common_exp is None: exp_str = "" elif print_type == "default": # Case of e or E. The same convention as Python 2.7 # to 3.3 is used for the display of the exponent: exp_str = EXP_LETTERS[main_pres_type] + "%+03d" % common_exp else: exp_str = EXP_PRINT[print_type](common_exp) # Possible % sign: percent_str = "" if "%" in options: if "L" in options: # % is a special character, in LaTeX: it must be escaped. # # Using '\\' in the code instead of r'\' so as not to # confuse emacs's syntax highlighting: percent_str += " \\" percent_str += "%" #################### # Only true if the error should not have an exponent (has priority # over common_exp): special_error = not error_main or not isfinite(error_main) # Nicer representation of the main nominal part, with no trailing # zeros, when the error does not have a defined number of # significant digits: if special_error and fmt_parts["type"] in ("", "g", "G"): # The main part is between 1 and 10 because any possible # exponent is taken care of by common_exp, so it is # formatted without an exponent (otherwise, the exponent # would have to be handled for the LaTeX option): fmt_suffix_n = (fmt_parts["prec"] or "") + fmt_parts["type"] else: fmt_suffix_n = ".%d%s" % (prec, main_pres_type) # print "FMT_SUFFIX_N", fmt_suffix_n #################### # Calculation of the mostly final numerical part value_str (no % # sign, no global width applied). 
# Error formatting: if "S" in options: # Shorthand notation: # Calculation of the uncertainty part, uncert_str: if error_main == 0: # The error is exactly zero uncert_str = "0" elif isnan(error_main): uncert_str = robust_format(error_main, main_pres_type) if "L" in options: uncert_str = r"\mathrm{%s}" % uncert_str elif isinf(error_main): if "L" in options: uncert_str = r"\infty" else: uncert_str = robust_format(error_main, main_pres_type) else: # Error with a meaningful first digit (not 0, and real number) uncert = round(error_main, prec) # The representation uncert_str of the uncertainty (which will # be put inside parentheses) is calculated: # The uncertainty might straddle the decimal point: we # keep it as it is, in this case (e.g. 1.2(3.4), as this # makes the result easier to read); the shorthand # notation then essentially coincides with the +/- # notation: if first_digit(uncert) >= 0 and prec > 0: # This case includes a zero rounded error with digits # after the decimal point: uncert_str = "%.*f" % (prec, uncert) else: if uncert: # The round is important because 566.99999999 can # first be obtained when 567 is wanted (%d prints the # integer part, not the rounded value): uncert_str = "%d" % round(uncert * 10.0**prec) else: # The decimal point indicates a truncated float # (this is easy to do, in this case, since # fmt_prefix_e is ignored): uncert_str = "0." # End of the final number representation (width and alignment # not included). 
This string is important for the handling of # the width: value_end = "(%s)%s%s" % (uncert_str, exp_str, percent_str) any_exp_factored = True # Single exponent in the output ########## # Nominal value formatting: # Calculation of fmt_prefix_n (prefix for the format of the # main part of the nominal value): if fmt_parts["zero"] and fmt_parts["width"]: # Padding with zeros must be done on the nominal value alone: # Remaining width (for the nominal value): nom_val_width = max(int(fmt_parts["width"]) - len(value_end), 0) fmt_prefix_n = "%s%s%d%s" % ( fmt_parts["sign"], fmt_parts["zero"], nom_val_width, fmt_parts["comma"], ) else: # Any 'zero' part should not do anything: it is not # included fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n nom_val_str = robust_format(nom_val_main, fmt_prefix_n + fmt_suffix_n) ########## # Overriding of nom_val_str for LaTeX,; possibly based on the # existing value (for NaN vs nan): if "L" in options: if isnan(nom_val_main): nom_val_str = r"\mathrm{%s}" % nom_val_str elif isinf(nom_val_main): # !! It is wasteful, in this case, to replace # nom_val_str: could this be avoided while avoiding to # duplicate the formula for nom_val_str for the common # case (robust_format(...))? nom_val_str = r"%s\infty" % ("-" if nom_val_main < 0 else "") value_str = nom_val_str + value_end # Global width, if any: if fmt_parts["width"]: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): value_str = robust_align( value_str, fmt_parts["fill"], fmt_parts["align"] or ">", fmt_parts["width"], ) else: # +/- notation: # The common exponent is factored or not, depending on the # width. 
This gives nice columns for the nominal values and # the errors (no shift due to a varying exponent), when a need # is given: any_exp_factored = not fmt_parts["width"] # True when the error part has any exponent directly attached # (case of an individual exponent for both the nominal value # and the error, when the error is a non-0, real number). # The goal is to avoid the strange notation nane-10, and to # avoid the 0e10 notation for an exactly zero uncertainty, # because .0e can give this for a non-zero error (the goal is # to have a zero uncertainty be very explicit): error_has_exp = not any_exp_factored and not special_error # Like error_has_exp, but only for real number handling # (there is no special meaning to a zero nominal value): nom_has_exp = not any_exp_factored and isfinite(nom_val_main) # Prefix for the parts: if fmt_parts["width"]: # Individual widths # If zeros are needed, then the width is taken into # account now (before the exponent is added): if fmt_parts["zero"]: width = int(fmt_parts["width"]) # Remaining (minimum) width after including the # exponent: remaining_width = max(width - len(exp_str), 0) fmt_prefix_n = "%s%s%d%s" % ( fmt_parts["sign"], fmt_parts["zero"], remaining_width if nom_has_exp else width, fmt_parts["comma"], ) fmt_prefix_e = "%s%d%s" % ( fmt_parts["zero"], remaining_width if error_has_exp else width, fmt_parts["comma"], ) else: fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] fmt_prefix_e = fmt_parts["comma"] else: # Global width fmt_prefix_n = fmt_parts["sign"] + fmt_parts["comma"] fmt_prefix_e = fmt_parts["comma"] ## print "ANY_EXP_FACTORED", any_exp_factored ## print "ERROR_HAS_EXP", error_has_exp ## print "NOM_HAS_EXP", nom_has_exp #################### # Nominal value formatting: # !! The following fails with Python < 2.6 when the format is # not accepted by the % operator. 
This can happen when # special_error is true, as the format used for the nominal # value is essentially the format provided by the user, which # may be empty: # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n nom_val_str = robust_format(nom_val_main, fmt_prefix_n + fmt_suffix_n) # print "NOM_VAL_STR", nom_val_str #################### # Error formatting: # !! Note: .0f applied to a float has no decimal point, but # this does not appear to be documented # (http://docs.python.org/2/library/string.html#format-specification-mini-language). This # feature is used anyway, because it allows a possible comma # format parameter to be handled more conveniently than if the # 'd' format was used. # # The following uses a special integer representation of a # zero uncertainty: if error_main: # The handling of NaN/inf in the nominal value identical to # the handling of NaN/inf in the standard deviation: if ( not isfinite(nom_val_main) # Only some formats have a nicer representation: and fmt_parts["type"] in ("", "g", "G") ): # The error can be formatted independently: fmt_suffix_e = (fmt_parts["prec"] or "") + fmt_parts["type"] else: fmt_suffix_e = ".%d%s" % (prec, main_pres_type) else: fmt_suffix_e = ".0%s" % main_pres_type error_str = robust_format(error_main, fmt_prefix_e + fmt_suffix_e) ########## # Overriding of nom_val_str and error_str for LaTeX: if "L" in options: if isnan(nom_val_main): nom_val_str = r"\mathrm{%s}" % nom_val_str elif isinf(nom_val_main): nom_val_str = r"%s\infty" % ("-" if nom_val_main < 0 else "") if isnan(error_main): error_str = r"\mathrm{%s}" % error_str elif isinf(error_main): error_str = r"\infty" if nom_has_exp: nom_val_str += exp_str if error_has_exp: error_str += exp_str #################### # Final alignment of each field, if needed: if fmt_parts["width"]: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): 
effective_align = fmt_parts["align"] or ">" # robust_format() is used because it may handle alignment # options, where the % operator does not: nom_val_str = robust_align( nom_val_str, fmt_parts["fill"], effective_align, fmt_parts["width"] ) error_str = robust_align( error_str, fmt_parts["fill"], effective_align, fmt_parts["width"] ) #################### pm_symbol = PM_SYMBOLS[print_type] # Shortcut #################### # Construction of the final value, value_str, possibly with # grouping (typically inside parentheses): (LEFT_GROUPING, RIGHT_GROUPING) = GROUP_SYMBOLS[print_type] # The nominal value and the error might have to be explicitly # grouped together with parentheses, so as to prevent an # ambiguous notation. This is done in parallel with the # percent sign handling because this sign may too need # parentheses. if any_exp_factored and common_exp is not None: # Exponent value_str = "".join( ( LEFT_GROUPING, nom_val_str, pm_symbol, error_str, RIGHT_GROUPING, exp_str, percent_str, ) ) else: # No exponent value_str = "".join([nom_val_str, pm_symbol, error_str]) if percent_str: value_str = "".join( (LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str) ) elif "p" in options: value_str = "".join((LEFT_GROUPING, value_str, RIGHT_GROUPING)) return value_str def signif_dgt_to_limit(value, num_signif_d): """ Return the precision limit necessary to display value with num_signif_d significant digits. The precision limit is given as -1 for 1 digit after the decimal point, 0 for integer rounding, etc. It can be positive. """ fst_digit = first_digit(value) limit_no_rounding = fst_digit - num_signif_d + 1 # The number of significant digits of the uncertainty, when # rounded at this limit_no_rounding level, can be too large by 1 # (e.g., with num_signif_d = 1, 0.99 gives limit_no_rounding = -1, but # the rounded value at that limit is 1.0, i.e. has 2 # significant digits instead of num_signif_d = 1). 
We correct for # this effect by adjusting limit if necessary: rounded = round(value, -limit_no_rounding) fst_digit_rounded = first_digit(rounded) if fst_digit_rounded > fst_digit: # The rounded limit is fst_digit_rounded-num_signif_d+1; # but this can only be 1 above the non-rounded limit: limit_no_rounding += 1 return limit_no_rounding def format_ufloat(ufloat_to_format, format_spec): """ Formats a number with uncertainty. The format specification are the same as for format() for floats, as defined for Python 2.6+ (restricted to what the % operator accepts, if using an earlier version of Python), except that the n presentation type is not supported. In particular, the usual precision, alignment, sign flag, etc. can be used. The behavior of the various presentation types (e, f, g, none, etc.) is similar. Moreover, the format is extended: the number of digits of the uncertainty can be controlled, as is the way the uncertainty is indicated (with +/- or with the short-hand notation 3.14(1), in LaTeX or with a simple text string,...). Beyond the use of options at the end of the format specification, the main difference with floats is that a "u" just before the presentation type (f, e, g, none, etc.) activates the "uncertainty control" mode (e.g.: ".6u"). This mode is also activated when not using any explicit precision (e.g.: "g", "10f", "+010,e" format specifications). If the uncertainty does not have a meaningful number of significant digits (0 and NaN uncertainties), this mode is automatically deactivated. The nominal value and the uncertainty always use the same precision. This implies trailing zeros, in general, even with the g format type (contrary to the float case). However, when the number of significant digits of the uncertainty is not defined (zero or NaN uncertainty), it has no precision, so there is no matching. In this case, the original format specification is used for the nominal value (any "u" is ignored). 
Any precision (".p", where p is a number) is interpreted (if meaningful), in the uncertainty control mode, as indicating the number p of significant digits of the displayed uncertainty. Example: .1uf will return a string with one significant digit in the uncertainty (and no exponent). If no precision is given, the rounding rules from the Particle Data Group are used, if possible (http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). For example, the "f" format specification generally does not use the default 6 digits after the decimal point, but applies the PDG rules. A common exponent is used if an exponent is needed for the larger of the nominal value (in absolute value) and the standard deviation, unless this would result in a zero uncertainty being represented as 0e... or a NaN uncertainty as NaNe.... Thanks to this common exponent, the quantity that best describes the associated probability distribution has a mantissa in the usual 1-10 range. The common exponent is factored (as in "(1.2+/-0.1)e-5"). unless the format specification contains an explicit width (" 1.2e-5+/- 0.1e-5") (this allows numbers to be in a single column, when printing numbers over many lines). Specifying a minimum width of 1 is a way of forcing any common exponent to not be factored out. The fill, align, zero and width parameters of the format specification are applied individually to each of the nominal value and standard deviation or, if the shorthand notation is used, globally. The sign parameter of the format specification is only applied to the nominal value (since the standard deviation is positive). In the case of a non-LaTeX output, the returned string can normally be parsed back with ufloat_fromstr(). This however excludes cases where numbers use the "," thousands separator, for example. Options can be added, at the end of the format specification. 
Multiple options can be specified: - When "P" is present, the pretty-printing mode is activated: "±" separates the nominal value from the standard deviation, exponents use superscript characters, etc. - When "S" is present (like in .1uS), the short-hand notation 1.234(5) is used, indicating an uncertainty on the last digits; if the digits of the uncertainty straddle the decimal point, it uses a fixed-point notation, like in 12.3(4.5). - When "L" is present, the output is formatted with LaTeX. - "p" ensures that there are parentheses around the …±… part (no parentheses are added if some are already present, for instance because of an exponent or of a trailing % sign, etc.). This produces outputs like (1.0±0.2) or (1.0±0.2)e7, which can be useful for removing any ambiguity if physical units are added after the printed number. An uncertainty which is exactly zero is represented as the integer 0 (i.e. with no decimal point). The "%" format type forces the percent sign to be at the end of the returned string (it is not attached to each of the nominal value and the standard deviation). Some details of the formatting can be customized as described in format_num(). """ # Convention on limits "between" digits: 0 = exactly at the # decimal point, -1 = after the first decimal, 1 = before the # units digit, etc. # Convention on digits: 0 is units (10**0), 1 is tens, -1 is # tenths, etc. # This method does the format specification parsing, and # calculates the various parts of the displayed value # (mantissas, exponent, position of the last digit). The # formatting itself is delegated to format_num(). ######################################## # Format specification parsing: match = re.match( r""" (?P[^{}]??)(?P[<>=^]?) # fill cannot be { or } (?P[-+ ]?) (?P0?) (?P\d*) (?P,?) (?:\.(?P\d+))? (?Pu?) # Precision for the uncertainty? # The type can be omitted. Options must not go here: (?P[eEfFgG%]??) 
# n not supported (?P[PSLp]*) # uncertainties-specific flags $""", format_spec, re.VERBOSE, ) # Does the format specification look correct? if not match: raise ValueError( "Format specification %r cannot be used with object of type" " %r. Note that uncertainties-specific flags must be put at" " the end of the format string." # Sub-classes handled: % (format_spec, ufloat_to_format.__class__.__name__) ) # Effective format presentation type: f, e, g, etc., or None, # like in # https://docs.python.org/3.4/library/string.html#format-specification-mini-language. Contrary # to what is written in the documentation, it is not true that # None is "the same as 'g'": "{}".format() and "{:g}" do not # give the same result, on 31415000000.0. None is thus kept as # is instead of being replaced by "g". pres_type = match.group("type") or None # Shortcut: fmt_prec = match.group("prec") # Can be None ######################################## # Since the '%' (percentage) format specification can change # the value to be displayed, this value must first be # calculated. Calculating the standard deviation is also an # optimization: the standard deviation is generally # calculated: it is calculated only once, here: nom_val = ufloat_to_format.nominal_value std_dev = ufloat_to_format.std_dev # 'options' is the options that must be given to format_num(): options = set(match.group("options")) ######################################## # The '%' format is treated internally as a display option: it # should not be applied individually to each part: if pres_type == "%": # Because '%' does 0.0055*100, the value # 0.5499999999999999 is obtained, which rounds to 0.5. The # original rounded value is 0.006. The same behavior is # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'. # If a different behavior is needed, a solution to this # problem would be to do the rounding before the # multiplication. 
std_dev *= 100 nom_val *= 100 pres_type = "f" options.add("%") # At this point, pres_type is in eEfFgG or None (not %). ######################################## # Non-real values (nominal value or standard deviation) must # be handled in a specific way: real_values = [value for value in [abs(nom_val), std_dev] if isfinite(value)] # Calculation of digits_limit, which defines the precision of # the nominal value and of the standard deviation (it can be # None when it does not matter, like for NaN±NaN): # Reference value for the calculation of a possible exponent, # if needed: if pres_type in (None, "e", "E", "g", "G"): # Reference value for the exponent: the largest value # defines what the exponent will be (another convention # could have been chosen, like using the exponent of the # nominal value, irrespective of the standard deviation): try: exp_ref_value = max(real_values) except ValueError: # No non-NaN value: NaN±NaN… # No meaningful common exponent can be obtained: pass ## else: ## print "EXP_REF_VAL", exp_ref_value # Should the precision be interpreted like for a float, or # should the number of significant digits on the uncertainty # be controlled? if ( ( # Default behavior: number of significant digits on the # uncertainty controlled (if useful, i.e. only in # situations where the nominal value and the standard # error digits are truncated at the same place): (not fmt_prec and len(real_values) == 2) or match.group("uncert_prec") ) # Explicit control # The number of significant digits of the uncertainty must # be meaningful, otherwise the position of the significant # digits of the uncertainty does not have a clear # meaning. This gives us the *effective* uncertainty # control mode: and std_dev and isfinite(std_dev) ): # The number of significant digits on the uncertainty is # controlled. # The limit digits_limit on the digits of nom_val and std_dev # to be displayed is calculated. 
If the exponent notation is # used, this limit is generally different from the finally # displayed limit (e.g. 314.15+/-0.01 has digits_limit=-2, but # will be displayed with an exponent as (3.1415+/-0.0001)e+02, # which corresponds to 4 decimals after the decimal point, not # 2). # Number of significant digits to use: if fmt_prec: num_signif_d = int(fmt_prec) # Can only be non-negative if not num_signif_d: raise ValueError( "The number of significant digits" " on the uncertainty should be positive" ) else: (num_signif_d, std_dev) = PDG_precision(std_dev) digits_limit = signif_dgt_to_limit(std_dev, num_signif_d) else: # No control of the number of significant digits on the # uncertainty. ## print "PRECISION NOT BASED ON UNCERTAINTY" # The precision has the same meaning as for floats (it is # not the uncertainty that defines the number of digits). # The usual default precision is used (this is useful for # 3.141592±NaN with an "f" format specification, for # example): # # prec is the precision for the main parts of the final # format (in the sense of float formatting): # # https://docs.python.org/3.4/library/string.html#format-specification-mini-language if fmt_prec: prec = int(fmt_prec) elif pres_type is None: prec = 12 else: prec = 6 if pres_type in ("f", "F"): digits_limit = -prec else: # Format type in None, eEgG # We first calculate the number of significant digits # to be displayed (if possible): if pres_type in ("e", "E"): # The precision is the number of significant # digits required - 1 (because there is a single # digit before the decimal point, which is not # included in the definition of the precision with # the e/E format type): num_signif_digits = prec + 1 else: # Presentation type in None, g, G # Effective format specification precision: the rule # of # http://docs.python.org/2.7/library/string.html#format-specification-mini-language # is used: # The final number of significant digits to be # displayed is not necessarily obvious: trailing # zeros are 
removed (with the gG presentation # type), so num_signif_digits is the number of # significant digits if trailing zeros were not # removed. This quantity is relevant for the # rounding implied by the exponent test of the g/G # format: # 0 is interpreted like 1 (as with floats with a # gG presentation type): num_signif_digits = prec or 1 # The number of significant digits is important for # example for determining the exponent: ## print "NUM_SIGNIF_DIGITS", num_signif_digits digits_limit = ( signif_dgt_to_limit(exp_ref_value, num_signif_digits) if real_values else None ) ## print "DIGITS_LIMIT", digits_limit ####################################### # Common exponent notation: should it be used? use_exp is set # accordingly. If a common exponent should be used (use_exp is # True), 'common_exp' is set to the exponent that should be # used. if pres_type in ("f", "F"): use_exp = False else: # e, E, g, G, None # The rules from # https://docs.python.org/3.4/library/string.html#format-specification-mini-language # are applied. # Python's native formatting (whose result could be parsed # in order to determine whether a common exponent should # be used) is not used because there is shared information # between the nominal value and the standard error (same # last digit, common exponent) and extracting this # information from Python would entail parsing its # formatted string, which is in principle inefficient # (internally, Python performs calculations that yield a # string, and the string would be parsed back into # separate parts and numbers, which is in principle # unnecessary). # Should the scientific notation be used? The same rule as # for floats is used ("-4 <= exponent of rounded value < # p"), on the nominal value. 
if use_exp := real_values: # The number of significant digits of the reference value # rounded at digits_limit is exponent-digits_limit+1: common_exp = first_digit(round(exp_ref_value, -digits_limit)) common_factor = 10.0**common_exp # cases where this doesn't apply: too many digits when expressed like this, # or the common factor is way too small. if pres_type not in ("e", "E"): # g, G, None use_exp = not (-4 <= common_exp < common_exp - digits_limit + 1) and ( common_factor != 0.0 ) ######################################## # Calculation of signif_limit (position of the significant # digits limit in the final fixed point representations; this # is either a non-positive number, or None), of # nom_val_mantissa ("mantissa" for the nominal value, # i.e. value possibly corrected for a factorized exponent), # and std_dev_mantissa (similarly for the standard # deviation). common_exp is also set to None if no common # exponent should be used. if use_exp: nom_val_mantissa = nom_val / common_factor std_dev_mantissa = std_dev / common_factor # Limit for the last digit of the mantissas: signif_limit = digits_limit - common_exp else: # No common exponent common_exp = None nom_val_mantissa = nom_val std_dev_mantissa = std_dev signif_limit = digits_limit ## print "SIGNIF_LIMIT", signif_limit ######################################## # Format of the main (i.e. with no exponent) parts (the None # presentation type is similar to the g format type): main_pres_type = "fF"[(pres_type or "g").isupper()] # The precision of the main parts must be adjusted so as # to take into account the special role of the decimal # point: if signif_limit is not None: # If signif_limit is pertinent # The decimal point location is always included in the # printed digits (e.g., printing 3456 with only 2 # significant digits requires to print at least four # digits, like in 3456 or 3500). 
# # The max() is important for example for # 1234567.89123+/-12345.678 with the f format: in this # case, signif_limit is +3 (2 significant digits necessary # for the error, as per the PDG rules), but the (Python # float formatting) precision to be used for the main # parts is 0 (all digits must be shown). # # The 1 for the None pres_type represents "at least one # digit past the decimal point" of Python # (https://docs.python.org/3.4/library/string.html#format-specification-mini-language). This # is only applied for null uncertainties. prec = max(-signif_limit, 1 if pres_type is None and not std_dev else 0) ## print "PREC", prec ######################################## # print ( # "FORMAT_NUM parameters: nom_val_mantissa={}," # " std_dev_mantissa={}, common_exp={}," # " match.groupdict()={}, prec={}, main_pres_type={}," # " options={}".format( # nom_val_mantissa, std_dev_mantissa, common_exp, # match.groupdict(), # prec, # main_pres_type, # options)) # Final formatting: return format_num( nom_val_mantissa, std_dev_mantissa, common_exp, match.groupdict(), prec=prec, main_pres_type=main_pres_type, options=options, ) uncertainties-3.2.3/uncertainties/ops.py000066400000000000000000000740471500152063300204330ustar00rootroot00000000000000# This file contains code for AffineScalarFunc's arithmetic and comparative ops. from math import sqrt, log # Optimization: no attribute look-up import sys import itertools from inspect import getfullargspec import numbers # Some types known to not depend on Variable objects are put in # CONSTANT_TYPES. The most common types can be put in front, as this # may slightly improve the execution speed. 
# Types treated as constants (no dependence on Variable objects) by the
# wrapping machinery below:
FLOAT_LIKE_TYPES = (numbers.Number,)
CONSTANT_TYPES = FLOAT_LIKE_TYPES + (complex,)

try:
    import numpy
except ImportError:
    pass
else:
    # NumPy numbers do not depend on Variable objects:
    FLOAT_LIKE_TYPES += (numpy.generic,)
    CONSTANT_TYPES += FLOAT_LIKE_TYPES[-1:]


def set_doc(doc_string):
    """
    Decorator function that sets the docstring to the given text.

    It is useful for functions whose docstring is calculated
    (including string substitutions).
    """

    def set_doc_string(func):
        func.__doc__ = doc_string
        return func

    return set_doc_string


# Some operators can have undefined derivatives but still give
# meaningful values when some of their arguments have a zero
# uncertainty. Such operators return NaN when their derivative is
# not finite. This way, if the uncertainty of the associated
# variable is not 0, a NaN uncertainty is produced, which
# indicates an error; if the uncertainty is 0, then the total
# uncertainty can be returned as 0.

# Exception catching is used so as to not slow down regular
# operation too much:


def nan_if_exception(f):
    """
    Wrapper around f(x, y) that lets f return NaN when f raises one of
    a few numerical exceptions (ValueError, ZeroDivisionError,
    OverflowError).
    """

    def wrapped_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except (ValueError, ZeroDivisionError, OverflowError):
            return float("nan")

    return wrapped_f


def pow_deriv_0(x, y):
    """
    Partial derivative of x**y with respect to x.

    The closed-form formula below works if x is positive, or if y is
    an integer and x is negative, or if y is an integer, x is zero and
    y is greater than or equal to 1.  Outside these cases NaN is
    returned, except for x == 0 and y == 0, where the derivative is
    taken to be 0.
    """
    if x > 0 or (y % 1 == 0 and (x < 0 or y >= 1)):
        return y * x ** (y - 1)
    elif x == 0 and y == 0:
        return 0
    else:
        return float("nan")


def pow_deriv_1(x, y):
    """
    Partial derivative of x**y with respect to y: log(x)*x**y when x
    is positive, 0 when x == 0 and y > 0, and NaN otherwise (the
    derivative is not defined as a real number there).
    """
    if x > 0:
        return log(x) * x**y
    elif x == 0 and y > 0:
        return 0
    else:
        return float("nan")


def get_ops_with_reflection():
    """
    Return operators with a reflection, along with their partial
    derivatives.

    Operators are things like +, /, etc. Those considered here have two
    arguments and can be called through Python's reflected methods __r…__
    (e.g. __radd__).

    See the code for details.
    """

    # Operators with a reflection:

    # We do not include divmod(). This operator could be included, by
    # allowing its result (a tuple) to be differentiated, in
    # derivative_value(). However, a similar result can be achieved
    # by the user by calculating separately the division and the
    # result.

    # {operator(x, y): (derivative wrt x, derivative wrt y)}:

    # Note that unknown partial derivatives can be numerically
    # calculated by expressing them as something like
    # "partial_derivative(float.__...__, 1)(x, y)":

    # String expressions are used, so that reversed operators are easy
    # to code, and execute relatively efficiently:
    derivatives_list = {
        "add": ("1.", "1."),
        # 'div' is the '/' operator when __future__.division is not in
        # effect. Since '/' is applied to
        # AffineScalarFunc._nominal_value numbers, it is applied on
        # floats, and is therefore the "usual" mathematical division.
        "div": ("1/y", "-x/y**2"),
        "floordiv": ("0.", "0."),  # Non exact: there is a discontinuity
        # The derivative wrt the 2nd arguments is something like (..., x//y),
        # but it is calculated numerically, for convenience:
        "mod": ("1.", "partial_derivative(float.__mod__, 1)(x, y)"),
        "mul": ("y", "x"),
        "sub": ("1.", "-1."),
        "truediv": ("1/y", "-x/y**2"),
    }

    # Conversion to Python functions:
    ops_with_reflection = {}
    for op, derivatives in derivatives_list.items():
        ops_with_reflection[op] = [
            eval("lambda x, y: %s" % expr) for expr in derivatives
        ]

        # The reflected operator simply swaps the argument order, so the
        # derivative pair is reversed and the parameters renamed:
        ops_with_reflection["r" + op] = [
            eval("lambda y, x: %s" % expr) for expr in reversed(derivatives)
        ]

    # pow/rpow use the dedicated closed-form derivative helpers instead
    # of string expressions:
    ops_with_reflection["pow"] = [pow_deriv_0, pow_deriv_1]
    ops_with_reflection["rpow"] = [
        lambda y, x: pow_deriv_1(x, y),
        lambda y, x: pow_deriv_0(x, y),
    ]

    # Undefined derivatives are converted to NaN when the function
    # itself can be calculated:
    for op in ["pow"]:
        ops_with_reflection[op] = [
            nan_if_exception(func) for func in ops_with_reflection[op]
        ]

        ops_with_reflection["r" + op] = [
            nan_if_exception(func) for func in ops_with_reflection["r" + op]
        ]

    return ops_with_reflection


# Operators that have a reflection, along with their derivatives:
ops_with_reflection = get_ops_with_reflection()

# Some effectively modified operators (for the automated tests):
modified_operators = []
modified_ops_with_reflection = []


# !!! This code is not run by the tests. It would be nice to have
# it be tested.
def no_complex_result(func):
    """
    Return a function that does like func, but that raises a
    ValueError if the result is complex.
    """

    def no_complex_func(*args, **kwargs):
        # NOTE(review): the %-formatted string below is an expression
        # statement, not a docstring (a formatted string is not a plain
        # string literal), so no_complex_func.__doc__ remains None --
        # confirm whether setting __doc__ was intended.
        """
        Like %s, but raises a ValueError exception if the result
        is complex.
        """ % func.__name__

        value = func(*args, **kwargs)
        if isinstance(value, complex):
            raise ValueError(
                "The uncertainties module does not handle" " complex results"
            )
        else:
            return value

    return no_complex_func


# This module does not handle uncertainties on complex numbers:
# complex results for the nominal value of some operations cannot
# be calculated with an uncertainty:
custom_ops = {
    "pow": no_complex_result(float.__pow__),
    "rpow": no_complex_result(float.__rpow__),
}


def add_arithmetic_ops(cls):
    """
    Adds many operators (__add__, etc.) to the AffineScalarFunc class.
    """

    ########################################

    #! Derivatives are set to return floats. For one thing,
    # uncertainties generally involve floats, as they are based on
    # small variations of the parameters. It is also better to
    # protect the user from unexpected integer result that behave
    # badly with the division.

    ## Operators that return a numerical value:

    # Derivative of abs(x): sign of x (the value at x == 0 is
    # arbitrarily taken to be +1):
    def _simple_add_deriv(x):
        if x >= 0:
            return 1.0
        else:
            return -1.0

    # Single-argument operators that should be adapted from floats to
    # AffineScalarFunc objects, associated to their derivative:
    simple_numerical_operators_derivatives = {
        "abs": _simple_add_deriv,
        "neg": lambda x: -1.0,
        "pos": lambda x: 1.0,
        "trunc": lambda x: 0.0,
    }

    for op, derivative in iter(simple_numerical_operators_derivatives.items()):
        attribute_name = "__%s__" % op

        # float objects don't exactly have the same attributes between
        # different versions of Python (for instance, __trunc__ was
        # introduced with Python 2.6):
        try:
            setattr(
                cls,
                attribute_name,
                _wrap(cls, getattr(float, attribute_name), [derivative]),
            )
        except AttributeError:
            # Version of Python where floats don't have attribute_name:
            pass
        else:
            modified_operators.append(op)

    ########################################
    # Final definition of the operators for AffineScalarFunc objects:

    # Reversed versions (useful for float*AffineScalarFunc, for instance):
    for op, derivatives in ops_with_reflection.items():
        attribute_name = "__%s__" % op

        # float objects don't exactly have the same attributes between
        # different versions of Python (for instance, __div__ and
        # __rdiv__ were removed, in Python 3):
        try:
            if op not in custom_ops:
                func_to_wrap = getattr(float, attribute_name)
            else:
                func_to_wrap = custom_ops[op]
        except AttributeError:
            # Version of Python with floats that don't have attribute_name:
            pass
        else:
            setattr(cls, attribute_name, _wrap(cls, func_to_wrap, derivatives))
            modified_ops_with_reflection.append(op)

    ########################################
    # Conversions to pure numbers are meaningless. Note that the
    # behavior of float(1j) is similar.
    for coercion_type in ("complex", "int", "long", "float"):

        # NOTE(review): coercion_type is looked up when raise_error is
        # *called* (late-binding closure), so every generated method
        # names the last loop value ("float") in its message -- TODO
        # confirm this is intended.
        def raise_error(self):
            raise TypeError(
                "can't convert an affine function (%s)"
                " to %s; use x.nominal_value"
                # In case AffineScalarFunc is sub-classed:
                % (self.__class__, coercion_type)
            )

        setattr(cls, "__%s__" % coercion_type, raise_error)


class IndexableIter(object):
    """
    Iterable whose values can also be accessed through indexing.

    The input iterable values are cached.

    Some attributes:

    iterable -- iterable used for returning the elements one by one.

    returned_elements -- list with the elements directly accessible
    through indexing. Additional elements are obtained from
    self.iterable.

    none_converter -- function that takes an index and returns the
    value to be returned when None is obtained from the iterable
    (instead of None).
    """

    def __init__(self, iterable, none_converter=lambda index: None):
        """
        iterable -- iterable whose values will be returned.

        none_converter -- function applied to None returned
        values. The value that replaces None is none_converter(index),
        where index is the index of the element.
        """
        self.iterable = iterable
        self.returned_elements = []
        self.none_converter = none_converter

    def __getitem__(self, index):
        """
        Return the element at the given index, consuming (and caching)
        values from self.iterable as needed; None values from the
        iterable are replaced by self.none_converter(position).
        """
        returned_elements = self.returned_elements

        try:
            return returned_elements[index]

        except IndexError:  # Element not yet cached
            # All elements up to and including 'index' are pulled from
            # the iterable and cached:
            for pos in range(len(returned_elements), index + 1):
                value = next(self.iterable)

                if value is None:
                    value = self.none_converter(pos)

                returned_elements.append(value)

            return returned_elements[index]

    def __str__(self):
        # Only the already-cached elements are shown:
        return "<%s: [%s...]>" % (
            self.__class__.__name__,
            ", ".join(map(str, self.returned_elements)),
        )


def _wrap(cls, f, derivatives_args=None, derivatives_kwargs=None):
    """
    Wrap f so that it accepts cls (AffineScalarFunc-like) arguments
    and returns a cls instance that carries the linearized
    uncertainty, while behaving like f for constant arguments.

    cls -- class of the numbers with uncertainty (instances of cls are
    detected among the arguments of the wrapped function, and the
    result is built with cls).

    f -- function to wrap; it is only ever called with the nominal
    values of the arguments.

    derivatives_args -- optional iterable of derivatives of f with
    respect to its positional arguments (None entries are replaced by
    numerical differentiation).

    derivatives_kwargs -- optional mapping from keyword-argument name
    to the corresponding derivative (None values are replaced by
    numerical differentiation).
    """
    if derivatives_args is None:
        derivatives_args = []
    if derivatives_kwargs is None:
        derivatives_kwargs = {}
    derivatives_args_index = IndexableIter(
        # Automatic addition of numerical derivatives in case the
        # supplied derivatives_args is shorter than the number of
        # arguments in *args:
        itertools.chain(derivatives_args, itertools.repeat(None))
    )

    # Derivatives for keyword arguments (includes var-keyword
    # parameters **kwargs, but also var-or-keyword parameters, and
    # keyword-only parameters (Python 3):
    derivatives_all_kwargs = {}

    for name, derivative in derivatives_kwargs.items():
        # Optimization: None keyword-argument derivatives are converted
        # right away to derivatives (instead of doing this every time a
        # None derivative is encountered when calculating derivatives):
        if derivative is None:
            derivatives_all_kwargs[name] = partial_derivative(f, name)
        else:
            derivatives_all_kwargs[name] = derivative

    # When the wrapped function is called with keyword arguments that
    # map to positional-or-keyword parameters, their derivative is
    # looked for in derivatives_all_kwargs. We define these
    # additional derivatives:

    try:
        argspec = getfullargspec(f)
    except TypeError:
        # Some functions do not provide meta-data about their
        # arguments (see PEP 362). One cannot use keyword arguments
        # for positional-or-keyword parameters with them: nothing has
        # to be done:
        pass
    else:
        # With Python 3, there is no need to handle keyword-only
        # arguments (and therefore to use inspect.getfullargspec())
        # because they are already handled by derivatives_kwargs.
        for index, name in enumerate(argspec.args):
            # The following test handles the case of
            # positional-or-keyword parameter for which automatic
            # numerical differentiation is used: when the wrapped
            # function is called with a keyword argument for this
            # parameter, the numerical derivative must be calculated
            # with respect to the parameter name. In the other case,
            # where the wrapped function is called with a positional
            # argument, the derivative with respect to its index must
            # be used:
            derivative = derivatives_args_index[index]

            if derivative is None:
                derivatives_all_kwargs[name] = partial_derivative(f, name)
            else:
                derivatives_all_kwargs[name] = derivative

    # Optimization: None derivatives for the positional arguments are
    # converted to the corresponding numerical differentiation
    # function (instead of doing this over and over later every time a
    # None derivative is found):
    none_converter = lambda index: partial_derivative(f, index)  # noqa

    for index, derivative in enumerate(derivatives_args_index.returned_elements):
        if derivative is None:
            derivatives_args_index.returned_elements[index] = none_converter(index)

    # Future None values are also automatically converted:
    derivatives_args_index.none_converter = none_converter

    ## Wrapped function:

    #! Setting the doc string after "def f_with...()" does not
    # seem to work. We define it explicitly:
    @set_doc(
        """\
Version of %s(...) that returns an affine approximation
(AffineScalarFunc object), if its result depends on variables
(Variable objects). Otherwise, returns a simple constant (when
applied to constant arguments).

Warning: arguments of the function that are not AffineScalarFunc
objects must not depend on uncertainties.Variable objects in any
way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.

Original documentation:
%s"""
        % (f.__name__, f.__doc__)
    )
    def f_with_affine_output(*args, **kwargs):
        ########################################
        # The involved random variables must first be gathered, so
        # that they can be independently updated.

        # The arguments that contain an uncertainty (AffineScalarFunc
        # objects) are gathered, as positions or names; they will be
        # replaced by their nominal value in order to calculate
        # the necessary derivatives of f.

        pos_w_uncert = [
            index for (index, value) in enumerate(args) if isinstance(value, cls)
        ]
        names_w_uncert = [
            key for (key, value) in kwargs.items() if isinstance(value, cls)
        ]

        ########################################
        # Value of f() at the nominal value of the arguments with
        # uncertainty:

        # The usual behavior of f() is kept, if no number with
        # uncertainty is provided:
        if (not pos_w_uncert) and (not names_w_uncert):
            return f(*args, **kwargs)

        ### Nominal values of the (scalar) arguments:

        # !! Possible optimization: If pos_w_uncert is empty, there
        # is actually no need to create a mutable version of args and
        # one could do args_values = args. However, the wrapped
        # function is typically called with numbers with uncertainties
        # as positional arguments (i.e., pos_w_uncert is not empty),
        # so this "optimization" is not implemented here.

        ## Positional arguments:
        args_values = list(args)  # Now mutable: modified below

        # Arguments with an uncertainty are converted to their nominal
        # value:
        for index in pos_w_uncert:
            args_values[index] = args[index].nominal_value

        ## Keyword arguments:

        # For efficiency reasons, kwargs is not copied. Instead, its
        # values with uncertainty are modified:

        # The original values with uncertainties are needed: they are
        # saved in the following dictionary (which only contains
        # values with uncertainty):

        kwargs_uncert_values = {}

        for name in names_w_uncert:
            value_with_uncert = kwargs[name]
            # Saving for future use:
            kwargs_uncert_values[name] = value_with_uncert
            # The original dictionary is modified (for efficiency reasons):
            kwargs[name] = value_with_uncert.nominal_value

        f_nominal_value = f(*args_values, **kwargs)

        # If the value is not a float, then this code cannot provide
        # the result, as it returns a UFloat, which represents a
        # random real variable. This happens for instance when
        # ufloat()*numpy.array() is calculated: the
        # AffineScalarFunc.__mul__ operator, obtained through wrap(),
        # returns a NumPy array, not a float:
        if not isinstance(f_nominal_value, FLOAT_LIKE_TYPES):
            return NotImplemented

        ########################################

        # Calculation of the linear part of the function value,
        # defined by (coefficient, argument) pairs, where 'argument'
        # is an AffineScalarFunc (for all AffineScalarFunc found as
        # argument of f):
        linear_part = []

        for pos in pos_w_uncert:
            linear_part.append(
                (
                    # Coefficient:
                    derivatives_args_index[pos](*args_values, **kwargs),
                    # Linear part of the AffineScalarFunc expression:
                    args[pos]._linear_part,
                )
            )

        for name in names_w_uncert:
            # Optimization: caching of the automatic numerical
            # derivatives for keyword arguments that are
            # discovered. This gives a speedup when the original
            # function is called repeatedly with the same keyword
            # arguments:
            derivative = derivatives_all_kwargs.setdefault(
                name,
                # Derivative never needed before:
                partial_derivative(f, name),
            )

            linear_part.append(
                (
                    # Coefficient:
                    derivative(*args_values, **kwargs),
                    # Linear part of the AffineScalarFunc expression:
                    kwargs_uncert_values[name]._linear_part,
                )
            )

        # The function now returns the necessary linear approximation
        # to the function:
        return cls(f_nominal_value, linear_part)

    # NOTE(review): the docstring was already installed by the
    # @set_doc decorator above; this second application looks
    # redundant -- confirm.
    f_with_affine_output = set_doc(
        """\
Version of %s(...) that returns an affine approximation
(AffineScalarFunc object), if its result depends on variables
(Variable objects). Otherwise, returns a simple constant (when
applied to constant arguments).

Warning: arguments of the function that are not AffineScalarFunc
objects must not depend on uncertainties.Variable objects in any
way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.

Original documentation:
%s"""
        % (f.__name__, f.__doc__)
    )(f_with_affine_output)

    # It is easier to work with f_with_affine_output, which represents
    # a wrapped version of 'f', when it bears the same name as 'f':
    # ! __name__ is read-only, in Python 2.3:
    # NOTE(review): this sets a plain 'name' attribute, not '__name__'
    # (which is writable in Python 3) -- confirm whether '__name__'
    # was meant here.
    f_with_affine_output.name = f.__name__

    return f_with_affine_output


# Step constant for numerical derivatives in
# partial_derivative(). Value chosen so as to get better numerical
# results:
STEP_SIZE = sqrt(sys.float_info.epsilon)

# !! It would be possible to split the partial derivative calculation
# into two functions: one for positional arguments (case of integer
# arg_ref) and one for keyword arguments (case of string
# arg_ref). However, this would either duplicate the code for the
# numerical differentiation, or require a call, which is probably more
# expensive in time than the tests done here.
def partial_derivative(f, arg_ref):
    """
    Return a function that numerically computes the partial derivative
    of the function f with respect to the argument identified by
    arg_ref.

    arg_ref -- identifies the variable of differentiation.  When f is
    called as f(*args, **kwargs), an integer arg_ref is an index into
    args, while a string arg_ref is the name of a keyword argument in
    kwargs.
    """

    # Does the differentiation variable live among the keyword
    # arguments (string reference) or the positional ones (integer
    # index)?
    var_in_kwargs = isinstance(arg_ref, str)

    def partial_derivative_of_f(*args, **kwargs):
        """
        Numerical partial derivative, using the central-difference
        (-epsilon, +epsilon) scheme, which is more precise than the
        one-sided (0, +epsilon) scheme.
        """

        # Mutable view of the argument collection that holds the
        # variable to be shifted.  kwargs can be modified in place;
        # args is a tuple and is therefore converted to a list:
        if var_in_kwargs:
            shifted_args = kwargs
        else:
            shifted_args = list(args)

        def evaluate():
            # Calls f with the current (possibly shifted) arguments:
            if var_in_kwargs:
                return f(*args, **shifted_args)
            return f(*shifted_args, **kwargs)

        # The step is relative to the magnitude of the varied
        # parameter, so that the shift is not lost to finite
        # floating-point precision.  A zero parameter gets an
        # absolute step instead (arbitrary, but "small" compared
        # to 1):
        step = STEP_SIZE * abs(shifted_args[arg_ref]) or STEP_SIZE

        shifted_args[arg_ref] += step
        f_plus = evaluate()

        # Optimization: shifting down by 2*step reuses the same
        # container instead of building a fresh copy:
        shifted_args[arg_ref] -= 2 * step
        f_minus = evaluate()

        return (f_plus - f_minus) / 2 / step

    return partial_derivative_of_f


########################################

# Definition of boolean operators, that assume that self and
# y_with_uncert are AffineScalarFunc.
# The fact that uncertainties must be small is used, here: the # comparison functions are supposed to be constant for most values of # the random variables. # Even though uncertainties are supposed to be small, comparisons # between 3+/-0.1 and 3.0 are handled correctly (even though x == 3.0 is # not a constant function in the 3+/-0.1 interval). The comparison # between x and x is handled too, when x has an uncertainty. In fact, # as explained in the main documentation, it is possible to give a # useful meaning to the comparison operators, in these cases. def eq_on_aff_funcs(self, y_with_uncert): """ __eq__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ difference = self - y_with_uncert # Only an exact zero difference means that self and y are # equal numerically: return not (difference._nominal_value or difference.std_dev) def ne_on_aff_funcs(self, y_with_uncert): """ __ne__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return not eq_on_aff_funcs(self, y_with_uncert) def gt_on_aff_funcs(self, y_with_uncert): """ __gt__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return self._nominal_value > y_with_uncert._nominal_value def ge_on_aff_funcs(self, y_with_uncert): """ __ge__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return gt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert) def lt_on_aff_funcs(self, y_with_uncert): """ __lt__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. """ return self._nominal_value < y_with_uncert._nominal_value def le_on_aff_funcs(self, y_with_uncert): """ __le__ operator, assuming that both self and y_with_uncert are AffineScalarFunc objects. 
""" return lt_on_aff_funcs(self, y_with_uncert) or eq_on_aff_funcs(self, y_with_uncert) def add_comparative_ops(cls): def to_affine_scalar(x): """ Transforms x into a constant affine scalar function (AffineScalarFunc), unless it is already an AffineScalarFunc (in which case x is returned unchanged). Raises an exception unless x belongs to some specific classes of objects that are known not to depend on AffineScalarFunc objects (which then cannot be considered as constants). """ if isinstance(x, cls): return x if isinstance(x, CONSTANT_TYPES): # No variable => no derivative: return cls(x, {}) # Case of lists, etc. raise NotUpcast( "%s cannot be converted to a number with" " uncertainty" % type(x) ) cls._to_affine_scalar = to_affine_scalar def force_aff_func_args(func): """ Takes an operator op(x, y) and wraps it. The constructed operator returns func(x, to_affine_scalar(y)) if y can be upcast with to_affine_scalar(); otherwise, it returns NotImplemented. Thus, func() is only called on two AffineScalarFunc objects, if its first argument is an AffineScalarFunc. """ def op_on_upcast_args(x, y): """ Return %s(self, to_affine_scalar(y)) if y can be upcast through to_affine_scalar. Otherwise returns NotImplemented. """ % func.__name__ try: y_with_uncert = to_affine_scalar(y) except NotUpcast: # This module does not know how to handle the comparison: # (example: y is a NumPy array, in which case the NumPy # array will decide that func() should be applied # element-wise between x and all the elements of y): return NotImplemented else: return func(x, y_with_uncert) return op_on_upcast_args ### Operators: operators applied to AffineScalarFunc and/or ### float-like objects only are supported. This is why methods ### from float are used for implementing these operators. # Operators with no reflection: ######################################## # __nonzero__() is supposed to return a boolean value (it is used # by bool()). 
It is for instance used for converting the result # of comparison operators to a boolean, in sorted(). If we want # to be able to sort AffineScalarFunc objects, __nonzero__ cannot # return a AffineScalarFunc object. Since boolean results (such # as the result of bool()) don't have a very meaningful # uncertainty unless it is zero, this behavior is fine. def __bool__(self): """ Equivalent to self != 0. """ #! This might not be relevant for AffineScalarFunc objects # that contain values in a linear space which does not convert # the float 0 into the null vector (see the __eq__ function: # __nonzero__ works fine if subtracting the 0 float from a # vector of the linear space works as if 0 were the null # vector of that space): return self != 0.0 # Uses the AffineScalarFunc.__ne__ function cls.__bool__ = __bool__ ######################################## ## Logical operators: warning: the resulting value cannot always ## be differentiated. # The boolean operations are not differentiable everywhere, but # almost... # (1) I can rely on the assumption that the user only has "small" # errors on variables, as this is used in the calculation of the # standard deviation (which performs linear approximations): # (2) However, this assumption is not relevant for some # operations, and does not have to hold, in some cases. This # comes from the fact that logical operations (e.g. __eq__(x,y)) # are not differentiable for many usual cases. For instance, it # is desirable to have x == x for x = n+/-e, whatever the size of e. # Furthermore, n+/-e != n+/-e', if e != e', whatever the size of e or # e'. # (3) The result of logical operators does not have to be a # function with derivatives, as these derivatives are either 0 or # don't exist (i.e., the user should probably not rely on # derivatives for his code). # !! In Python 2.7+, it may be possible to use functools.total_ordering. # __eq__ is used in "if data in [None, ()]", for instance. 
It is # therefore important to be able to handle this case too, which is # taken care of when force_aff_func_args(eq_on_aff_funcs) # returns NotImplemented. cls.__eq__ = force_aff_func_args(eq_on_aff_funcs) cls.__ne__ = force_aff_func_args(ne_on_aff_funcs) cls.__gt__ = force_aff_func_args(gt_on_aff_funcs) # __ge__ is not the opposite of __lt__ because these operators do # not always yield a boolean (for instance, 0 <= numpy.arange(10) # yields an array). cls.__ge__ = force_aff_func_args(ge_on_aff_funcs) cls.__lt__ = force_aff_func_args(lt_on_aff_funcs) cls.__le__ = force_aff_func_args(le_on_aff_funcs) # Mathematical operations with local approximations (affine scalar # functions) class NotUpcast(Exception): "Raised when an object cannot be converted to a number with uncertainty" uncertainties-3.2.3/uncertainties/parsing.py000066400000000000000000000166651500152063300212770ustar00rootroot00000000000000import re from uncertainties.formatting import nrmlze_superscript ############################################################################### # Parsing of values with uncertainties: # Parsing of (part of) numbers. The reason why the decimal part is # parsed (if any), instead of using the parsing built in float(), is # that the presence (or not) of a decimal point does matter, in the # semantics of some representations (e.g. .1(2.) = .1+/-2, whereas # .1(2) = .1+/-0.2), so just getting the numerical value of the part # in parentheses would not be sufficient. POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE = r"((\d*)(\.\d*)?|nan|NAN|inf|INF)" # Regexp for a number with uncertainty (e.g., "-1.234(2)e-6"), where # the uncertainty is optional (in which case the uncertainty is # implicit). The uncertainty can also be nan or NAN: # # !! WARNING: in Python 2, the code relies on "… % " returning # a Unicode string (even if the template is not Unicode): NUMBER_WITH_UNCERT_RE_STR = """ ([+-])? # Sign %s # Main number (?:\\(%s\\))? 
# Optional uncertainty (?: (?:[eE]|\\s*×\\s*10) (.*) )? # Optional exponent """ % ( POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE, POSITIVE_DECIMAL_UNSIGNED_OR_NON_FINITE, ) NUMBER_WITH_UNCERT_RE_MATCH = re.compile( "%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE ).match # Number with uncertainty with a factored exponent (e.g., of the form # (... +/- ...)e10): this is a loose matching, so as to accommodate # for multiple formats: NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH = re.compile( """ \\( (?P.*) \\) (?:[eE]|\\s*×\\s*10) (?P.*) $""", re.VERBOSE, ).match class NotParenUncert(ValueError): """ Raised when a string representing an exact number or a number with an uncertainty indicated between parentheses was expected but not found. """ def parse_error_in_parentheses(representation): # !!!! The code seems to handle superscript exponents, but the # docstring doesn't reflect this!? """ Return (value, error) from a string representing a number with uncertainty like 12.34(5), 12.34(142), 12.5(3.4), 12.3(4.2)e3, or 13.4(nan)e10. If no parenthesis is given, an uncertainty of one on the last digit is assumed. The digits between parentheses correspond to the same number of digits at the end of the nominal value (the decimal point in the uncertainty is optional). Example: 12.34(142) = 12.34±1.42. Raises ValueError if the string cannot be parsed. """ match = NUMBER_WITH_UNCERT_RE_MATCH(representation) if match: # The 'main' part is the nominal value, with 'int'eger part, and # 'dec'imal part. The 'uncert'ainty is similarly broken into its # integer and decimal parts. (sign, main, _, main_dec, uncert, uncert_int, uncert_dec, exponent) = ( match.groups() ) else: raise NotParenUncert( "Unparsable number representation: '%s'." " See the documentation of ufloat_fromstr()." 
% representation ) # Global exponent: if exponent: factor = 10.0 ** nrmlze_superscript(exponent) else: factor = 1 # Nominal value: value = float((sign or "") + main) * factor if uncert is None: # No uncertainty was found: an uncertainty of 1 on the last # digit is assumed: uncert_int = "1" # The other parts of the uncertainty are None # Do we have a fully explicit uncertainty? if uncert_dec is not None or uncert in {"nan", "NAN", "inf", "INF"}: uncert_value = float(uncert) else: # uncert_int represents an uncertainty on the last digits: # The number of digits after the period defines the power of # 10 that must be applied to the provided uncertainty: if main_dec is None: num_digits_after_period = 0 else: num_digits_after_period = len(main_dec) - 1 uncert_value = int(uncert_int) / 10.0**num_digits_after_period # We apply the exponent to the uncertainty as well: uncert_value *= factor return (value, uncert_value) # Regexp for catching the two variable parts of -1.2×10⁻¹²: PRETTY_PRINT_MATCH = re.compile("(.*?)\\s*×\\s*10(.*)").match def to_float(value_str): """ Converts a string representing a float to a float. The usual valid Python float() representations are correctly parsed. In addition, the pretty-print notation -1.2×10⁻¹² is also converted. ValueError is raised if no float can be obtained. 
""" try: return float(value_str) except ValueError: pass # The pretty-print notation is tried: match = PRETTY_PRINT_MATCH(value_str) if match: try: return float(match.group(1)) * 10.0 ** nrmlze_superscript(match.group(2)) except ValueError: raise ValueError( "Mantissa or exponent incorrect in pretty-print" " form %s" % value_str ) else: raise ValueError( "No valid Python float or pretty-print form" " recognized in %s" % value_str ) cannot_parse_ufloat_msg_pat = ( "Cannot parse %s: see the documentation for ufloat_fromstr() for a" " list of accepted formats" ) # The following function is not exposed because it can in effect be # obtained by doing x = ufloat_fromstr(representation) and reading # x.nominal_value and x.std_dev: def str_to_number_with_uncert(representation): """ Given a string that represents a number with uncertainty, returns the nominal value and the uncertainty. See the documentation for ufloat_fromstr() for a list of accepted formats. When no numerical error is given, an uncertainty of 1 on the last digit is implied. Raises ValueError if the string cannot be parsed. representation -- string with no leading or trailing spaces. """ # The "p" format can add parentheses around the whole printed result: we # remove them: if representation.startswith("(") and representation.endswith(")"): representation = representation[1:-1] match = NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH(representation) # The representation is simplified, but the global factor is # calculated: if match: # We have a form with a factored exponent: (1.23 +/- 0.01)e10, # etc. 
exp_value_str = match.group("exp_value") try: exponent = nrmlze_superscript(exp_value_str) except ValueError: raise ValueError(cannot_parse_ufloat_msg_pat % representation) factor = 10.0**exponent representation = match.group("simple_num_with_uncert") else: factor = 1 # No global exponential factor match = re.match("(.*)(?:\\+/-|±)(.*)", representation) if match: (nom_value, uncert) = match.groups() try: # Simple form 1234.45+/-1.2 or 1234.45±1.2, or 1.23e-10+/-1e-23 # or -1.2×10⁻¹²±1e23: parsed_value = (to_float(nom_value) * factor, to_float(uncert) * factor) except ValueError: raise ValueError(cannot_parse_ufloat_msg_pat % representation) else: # Form with error parentheses or no uncertainty: try: parsed_value = parse_error_in_parentheses(representation) except NotParenUncert: raise ValueError(cannot_parse_ufloat_msg_pat % representation) return parsed_value uncertainties-3.2.3/uncertainties/umath.py000066400000000000000000000023621500152063300207370ustar00rootroot00000000000000""" Mathematical operations that generalize many operations from the standard math module so that they also work on numbers with uncertainties. Examples: from umath import sin # Manipulation of numbers with uncertainties: x = uncertainties.ufloat(3, 0.1) print sin(x) # prints 0.141120008...+/-0.098999... # The umath functions also work on regular Python floats: print sin(3) # prints 0.141120008... This is a Python float. Importing all the functions from this module into the global namespace is possible. This is encouraged when using a Python shell as a calculator. Example: import uncertainties from uncertainties.umath import * # Imports tan(), etc. x = uncertainties.ufloat(3, 0.1) print tan(x) # tan() is the uncertainties.umath.tan function The numbers with uncertainties handled by this module are objects from the uncertainties module, from either the Variable or the AffineScalarFunc class. (c) 2009-2016 by Eric O. LEBIGOT (EOL) . 
Please send feature requests, bug reports, or feedback to this address. This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original author.""" from .umath_core import * # noqa uncertainties-3.2.3/uncertainties/umath_core.py000066400000000000000000000360331500152063300217510ustar00rootroot00000000000000# !!!!!!!!!!! Add a header to the documentation, that starts with something # like "uncertainties.UFloat-compatible version of...", for all functions. """ Implementation of umath.py, with internals. """ # This module exists so as to define __all__, which in turn defines # which functions are visible to the user in umath.py through from # umath import * and Python shell completion. from __future__ import division # Many analytical derivatives depend on this # Standard modules from builtins import map import math import sys import itertools # Local modules import uncertainties.core as uncert_core from uncertainties.core import to_affine_scalar, AffineScalarFunc, LinearCombination ############################################################################### # We wrap the functions from the math module so that they keep track of # uncertainties by returning a AffineScalarFunc object. # Some functions from the math module cannot be adapted in a standard # way so to work with AffineScalarFunc objects (either as their result # or as their arguments): # (1) Some functions return a result of a type whose value and # variations (uncertainties) cannot be represented by AffineScalarFunc # (e.g., math.frexp, which returns a tuple). The exception raised # when not wrapping them with wrap() is more obvious than the # one obtained when wrapping them (in fact, the wrapped functions # attempts operations that are not supported, such as calculation a # subtraction on a result of type tuple). 
# (2) Some functions don't take continuous scalar arguments (which can # be varied during differentiation): math.fsum, math.factorial... # Such functions can either be: # - wrapped in a special way. # - excluded from standard wrapping by adding their name to # no_std_wrapping # Math functions that have a standard interface: they take # one or more float arguments, and return a scalar: many_scalars_to_scalar_funcs = [] # Some functions require a specific treatment and must therefore be # excluded from standard wrapping. Functions # no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial'] # Functions with numerical derivatives: # # !! Python2.7+: {..., ...} num_deriv_funcs = set(["fmod", "gamma", "lgamma"]) # Functions are by definition locally constant (on real # numbers): their value does not depend on the uncertainty (because # this uncertainty is supposed to lead to a good linear approximation # of the function in the uncertainty region). The type of their output # for floats is preserved, as users should not care about deviations # in their value: their value is locally constant due to the nature of # the function (0 derivative). This situation is similar to that of # comparisons (==, >, etc.). # # !! Python 2.7+: {..., ...} locally_cst_funcs = set(["ceil", "floor", "isinf", "isnan", "trunc"]) # Functions that do not belong in many_scalars_to_scalar_funcs, but # that have a version that handles uncertainties. These functions are # also not in numpy (see unumpy/core.py). non_std_wrapped_funcs = [] # Function that copies the relevant attributes from generalized # functions from the math module: # This is a copy&paste job from the functools module, changing # the default arugment for assigned def wraps(wrapper, wrapped, assigned=("__doc__",), updated=("__dict__",)): """Update a wrapper function to look like the wrapped function. 
wrapper -- function to be updated wrapped -- original function assigned -- tuple naming the attributes assigned directly from the wrapped function to the wrapper function updated -- tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function. """ for attr in assigned: setattr(wrapper, attr, getattr(wrapped, attr)) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Return the wrapper so this can be used as a decorator via partial() return wrapper ######################################## # Wrapping of math functions: # Fixed formulas for the derivatives of some functions from the math # module (some functions might not be present in all version of # Python). Singular points are not taken into account. The user # should never give "large" uncertainties: problems could only appear # if this assumption does not hold. # Functions not mentioned in _fixed_derivatives have their derivatives # calculated numerically. # Functions that have singularities (possibly at infinity) benefit # from analytical calculations (instead of the default numerical # calculation) because their derivatives generally change very fast. # Even slowly varying functions (e.g., abs()) yield more precise # results when differentiated analytically, because of the loss of # precision in numerical calculations. # def log_1arg_der(x): # """ # Derivative of log(x) (1-argument form). # """ # return 1/x def log_der0(*args): """ Derivative of math.log() with respect to its first argument. Works whether 1 or 2 arguments are given. 
""" if len(args) == 1: return 1 / args[0] else: return 1 / args[0] / math.log(args[1]) # 2-argument form # The following version goes about as fast: ## A 'try' is used for the most common case because it is fast when no ## exception is raised: # try: # return log_1arg_der(*args) # Argument number check # except TypeError: # return 1/args[0]/math.log(args[1]) # 2-argument form def _deriv_copysign(x, y): if x >= 0: return math.copysign(1, y) else: return -math.copysign(1, y) def _deriv_fabs(x): if x >= 0: return 1 else: return -1 def _deriv_pow_0(x, y): if y == 0: return 0.0 elif x != 0 or y % 1 == 0: return y * math.pow(x, y - 1) else: return float("nan") def _deriv_pow_1(x, y): if x == 0 and y > 0: return 0.0 else: return math.log(x) * math.pow(x, y) erf_coef = 2 / math.sqrt(math.pi) # Optimization for erf() fixed_derivatives = { # In alphabetical order, here: "acos": [lambda x: -1 / math.sqrt(1 - x**2)], "acosh": [lambda x: 1 / math.sqrt(x**2 - 1)], "asin": [lambda x: 1 / math.sqrt(1 - x**2)], "asinh": [lambda x: 1 / math.sqrt(1 + x**2)], "atan": [lambda x: 1 / (1 + x**2)], "atan2": [ lambda y, x: x / (x**2 + y**2), # Correct for x == 0 lambda y, x: -y / (x**2 + y**2), ], # Correct for x == 0 "atanh": [lambda x: 1 / (1 - x**2)], "copysign": [_deriv_copysign, lambda x, y: 0], "cos": [lambda x: -math.sin(x)], "cosh": [math.sinh], "degrees": [lambda x: math.degrees(1)], "erf": [lambda x: math.exp(-(x**2)) * erf_coef], "erfc": [lambda x: -math.exp(-(x**2)) * erf_coef], "exp": [math.exp], "expm1": [math.exp], "fabs": [_deriv_fabs], "hypot": [lambda x, y: x / math.hypot(x, y), lambda x, y: y / math.hypot(x, y)], "log": [log_der0, lambda x, y: -math.log(x, y) / y / math.log(y)], "log10": [lambda x: 1 / x / math.log(10)], "log1p": [lambda x: 1 / (1 + x)], "pow": [_deriv_pow_0, _deriv_pow_1], "radians": [lambda x: math.radians(1)], "sin": [math.cos], "sinh": [math.cosh], "sqrt": [lambda x: 0.5 / math.sqrt(x)], "tan": [lambda x: 1 + math.tan(x) ** 2], "tanh": [lambda x: 1 - 
math.tanh(x) ** 2], } # Many built-in functions in the math module are wrapped with a # version which is uncertainty aware: this_module = sys.modules[__name__] def wrap_locally_cst_func(func): """ Return a function that returns the same arguments as func, but after converting any AffineScalarFunc object to its nominal value. This function is useful for wrapping functions that are locally constant: the uncertainties should have no role in the result (since they are supposed to keep the function linear and hence, here, constant). """ def wrapped_func(*args, **kwargs): args_float = map(uncert_core.nominal_value, args) # !! In Python 2.7+, dictionary comprehension: {argname:...} kwargs_float = dict( (arg_name, uncert_core.nominal_value(value)) for (arg_name, value) in kwargs.items() ) return func(*args_float, **kwargs_float) return wrapped_func # for (name, attr) in vars(math).items(): for name in dir(math): if name in fixed_derivatives: # Priority to functions in fixed_derivatives derivatives = fixed_derivatives[name] elif name in num_deriv_funcs: # Functions whose derivatives are calculated numerically by # this module fall here (isinf, fmod,...): derivatives = [] # Means: numerical calculation required elif name not in locally_cst_funcs: continue # 'name' not wrapped by this module (__doc__, e, etc.) func = getattr(math, name) if name in locally_cst_funcs: wrapped_func = wrap_locally_cst_func(func) else: # Function with analytical or numerical derivatives: # Errors during the calculation of the derivatives are converted # to a NaN result: it is assumed that a mathematical calculation # that cannot be calculated indicates a non-defined derivative # (the derivatives in fixed_derivatives must be written this way): wrapped_func = uncert_core.wrap( func, map(uncert_core.nan_if_exception, derivatives) ) # !! The same effect could be achieved with globals()[...] = ... 
setattr(this_module, name, wraps(wrapped_func, func)) many_scalars_to_scalar_funcs.append(name) ############################################################################### ######################################## # Special cases: some of the functions from no_std_wrapping: ########## # The math.factorial function is not converted to an uncertainty-aware # function, because it does not handle non-integer arguments: it does # not make sense to give it an argument with a numerical error # (whereas this would be relevant for the gamma function). ########## # fsum takes a single argument, which cannot be differentiated. # However, each of the arguments inside this single list can # be a variable. We handle this in a specific way: # Only for Python 2.6+: # For drop-in compatibility with the math module: factorial = math.factorial non_std_wrapped_funcs.append("factorial") # We wrap math.fsum original_func = math.fsum # For optimization purposes # The function below exists so that temporary variables do not # pollute the module namespace: def wrapped_fsum(): """ Return an uncertainty-aware version of math.fsum, which must be contained in _original_func. """ # The fsum function is flattened, in order to use the # wrap() wrapper: flat_fsum = lambda *args: original_func(args) # noqa flat_fsum_wrap = uncert_core.wrap(flat_fsum, itertools.repeat(lambda *args: 1)) return wraps(lambda arg_list: flat_fsum_wrap(*arg_list), original_func) # !!!!!!!! Documented? fsum = wrapped_fsum() non_std_wrapped_funcs.append("fsum") ########## # Some functions that either return multiple arguments (modf, frexp) # or take some non-float arguments (which should not be converted to # numbers with uncertainty). # ! The arguments have the same names as in the math module # documentation, so that the docstrings are consistent with them. @uncert_core.set_doc(math.modf.__doc__) def modf(x): """ Version of modf that works for numbers with uncertainty, and also for regular numbers. 
""" # The code below is inspired by uncert_core.wrap(). It is # simpler because only 1 argument is given, and there is no # delegation to other functions involved (as for __mul__, etc.). aff_func = to_affine_scalar(x) # Uniform treatment of all numbers (frac_part, int_part) = math.modf(aff_func.nominal_value) if aff_func._linear_part: # If not a constant # The derivative of the fractional part is simply 1: the # linear part of modf(x)[0] is the linear part of x: return (AffineScalarFunc(frac_part, aff_func._linear_part), int_part) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return (frac_part, int_part) many_scalars_to_scalar_funcs.append("modf") @uncert_core.set_doc(math.ldexp.__doc__) def ldexp(x, i): # Another approach would be to add an additional argument to # uncert_core.wrap() so that some arguments are automatically # considered as constants. aff_func = to_affine_scalar(x) # y must be an integer, for math.ldexp if aff_func._linear_part: return AffineScalarFunc( math.ldexp(aff_func.nominal_value, i), LinearCombination([(2**i, aff_func._linear_part)]), ) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: # aff_func.nominal_value is not passed instead of x, because # we do not have to care about the type of the return value of # math.ldexp, this way (aff_func.nominal_value might be the # value of x coerced to a difference type [int->float, for # instance]): return math.ldexp(x, i) many_scalars_to_scalar_funcs.append("ldexp") @uncert_core.set_doc(math.frexp.__doc__) def frexp(x): """ Version of frexp that works for numbers with uncertainty, and also for regular numbers. """ # The code below is inspired by uncert_core.wrap(). It is # simpler because only 1 argument is given, and there is no # delegation to other functions involved (as for __mul__, etc.). 
aff_func = to_affine_scalar(x) if aff_func._linear_part: (mantissa, exponent) = math.frexp(aff_func.nominal_value) return ( AffineScalarFunc( mantissa, # With frexp(x) = (m, e), x = m*2**e, so m = x*2**-e # and therefore dm/dx = 2**-e (as e in an integer that # does not vary when x changes): LinearCombination([2**-exponent, aff_func._linear_part]), ), # The exponent is an integer and is supposed to be # continuous (errors must be small): exponent, ) else: # This function was not called with an AffineScalarFunc # argument: there is no need to return numbers with uncertainties: return math.frexp(x) non_std_wrapped_funcs.append("frexp") # Deprecated functions deprecated_functions = [ "ceil", "copysign", "fabs", "factorial", "floor", "fmod", "frexp", "ldexp", "modf", "trunc", ] for function_name in deprecated_functions: message = ( f"umath.{function_name}() is deprecated. It will be removed in a future " f"release." ) setattr( this_module, function_name, uncert_core.deprecation_wrapper( getattr(this_module, function_name), message, ), ) ############################################################################### # Exported functions: __all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs uncertainties-3.2.3/uncertainties/unumpy/000077500000000000000000000000001500152063300206015ustar00rootroot00000000000000uncertainties-3.2.3/uncertainties/unumpy/__init__.py000066400000000000000000000054561500152063300227240ustar00rootroot00000000000000""" Utilities for NumPy arrays and matrices that contain numbers with uncertainties. This package contains: 1) utilities that help with the creation and manipulation of NumPy arrays and matrices of numbers with uncertainties; 2) generalizations of multiple NumPy functions so that they also work with arrays that contain numbers with uncertainties. 
- Arrays of numbers with uncertainties can be built as follows: arr = unumpy.uarray([1, 2], [0.01, 0.002]) # (values, uncertainties) NumPy arrays of numbers with uncertainties can also be built directly through NumPy, thanks to NumPy's support of arrays of arbitrary objects: arr = numpy.array([uncertainties.ufloat(1, 0.1),...]) - Matrices of numbers with uncertainties are best created in one of two ways: mat = unumpy.umatrix(([1, 2], [0.01, 0.002])) # (values, uncertainties) Matrices can also be built by converting arrays of numbers with uncertainties, through the unumpy.matrix class: mat = unumpy.matrix(arr) unumpy.matrix objects behave like numpy.matrix objects of numbers with uncertainties, but with better support for some operations (such as matrix inversion): # The inverse or pseudo-inverse of a unumpy.matrix can be calculated: print mat.I # Would not work with numpy.matrix([[ufloat(...),...]]).I - Nominal values and uncertainties of arrays can be directly accessed: print unumpy.nominal_values(arr) # [ 1. 2.] print unumpy.std_devs(mat) # [ 0.01 0.002] - This module defines uncertainty-aware mathematical functions that generalize those from uncertainties.umath so that they work on NumPy arrays of numbers with uncertainties instead of just scalars: print unumpy.cos(arr) # Array with the cosine of each element NumPy's function names are used, and not those of the math module (for instance, unumpy.arccos is defined, like in NumPy, and is not named acos like in the standard math module). The definitions of the mathematical quantities calculated by these functions are available in the documentation of uncertainties.umath. - The unumpy.ulinalg module contains more uncertainty-aware functions for arrays that contain numbers with uncertainties (see the documentation for this module). This module requires the NumPy package. (c) 2009-2016 by Eric O. LEBIGOT (EOL) . Please send feature requests, bug reports, or feedback to this address. 
This software is released under a dual license. (1) The BSD license. (2) Any other license, as long as it is obtained from the original author.""" # Local modules: from .core import * # noqa from . import ulinalg # noqa Local sub-module # __all__ is set so that pydoc shows all important functions: __all__ = core.__all__ # noqa # "import numpy" makes numpy.linalg available. This behavior is # copied here, for maximum compatibility: __all__.append("ulinalg") uncertainties-3.2.3/uncertainties/unumpy/core.py000066400000000000000000000652331500152063300221140ustar00rootroot00000000000000""" Core functions used by unumpy and some of its submodules. (c) 2010-2016 by Eric O. LEBIGOT (EOL). """ # The functions found in this module cannot be defined in unumpy or # its submodule: this creates import loops, when unumpy explicitly # imports one of the submodules in order to make it available to the # user. # Standard modules: from builtins import next from builtins import zip from builtins import range import sys import inspect # 3rd-party modules: import numpy # Local modules: import uncertainties.umath_core as umath_core import uncertainties.core as uncert_core __all__ = [ # Factory functions: "uarray", "umatrix", # Utilities: "nominal_values", "std_devs", # Classes: "matrix", ] ############################################################################### # Utilities: # nominal_values() and std_devs() are defined as functions (instead of # as additional methods of the unumpy.matrix class) because the user # might well directly build arrays of numbers with uncertainties # without going through the factory functions found in this module # (uarray() and umatrix()). Thus, # numpy.array([uncert_core.ufloat((1, 0.1))]) would not # have a nominal_values() method. Adding such a method to, say, # unumpy.matrix, would break the symmetry between NumPy arrays and # matrices (no nominal_values() method), and objects defined in this # module. # ! 
Warning: the __doc__ is set, but help(nominal_values) does not # display it, but instead displays the documentation for the type of # nominal_values (i.e. the documentation of its class): to_nominal_values = numpy.vectorize( uncert_core.nominal_value, otypes=[float], # Because vectorize() has side effects (dtype setting) doc=( "Return the nominal value of the numbers with uncertainties contained" " in a NumPy (or unumpy) array (this includes matrices)." ), ) to_std_devs = numpy.vectorize( uncert_core.std_dev, otypes=[float], # Because vectorize() has side effects (dtype setting) doc=( "Return the standard deviation of the numbers with uncertainties" " contained in a NumPy array, or zero for other objects." ), ) def unumpy_to_numpy_matrix(arr): """ If arr in a unumpy.matrix, it is converted to a numpy.matrix. Otherwise, it is returned unchanged. """ if isinstance(arr, matrix): return arr.view(numpy.matrix) else: return arr def nominal_values(arr): """ Return the nominal values of the numbers in NumPy array arr. Elements that are not numbers with uncertainties (derived from a class from this module) are passed through untouched (because a numpy.array can contain numbers with uncertainties and pure floats simultaneously). If arr is of type unumpy.matrix, the returned array is a numpy.matrix, because the resulting matrix does not contain numbers with uncertainties. """ return unumpy_to_numpy_matrix(to_nominal_values(arr)) def std_devs(arr): """ Return the standard deviations of the numbers in NumPy array arr. Elements that are not numbers with uncertainties (derived from a class from this module) are passed through untouched (because a numpy.array can contain numbers with uncertainties and pure floats simultaneously). If arr is of type unumpy.matrix, the returned array is a numpy.matrix, because the resulting matrix does not contain numbers with uncertainties. 
""" return unumpy_to_numpy_matrix(to_std_devs(arr)) ############################################################################### def derivative(u, var): """ Return the derivative of u along var, if u is an uncert_core.AffineScalarFunc instance, and if var is one of the variables on which it depends. Otherwise, return 0. """ if isinstance(u, uncert_core.AffineScalarFunc): try: return u.derivatives[var] except KeyError: return 0.0 else: return 0.0 def wrap_array_func(func): # !!! This function is not used in the code, except in the tests. # # !!! The implementation seems superficially similar to # uncertainties.core.wrap(): is there code/logic duplication # (which should be removed)? """ Return a version of the function func() that works even when func() is given a NumPy array that contains numbers with uncertainties, as first argument. This wrapper is similar to uncertainties.core.wrap(), except that it handles an array argument instead of float arguments, and that the result can be an array. However, the returned function is more restricted: the array argument cannot be given as a keyword argument with the name in the original function (it is not a drop-in replacement). func -- function whose first argument is a single NumPy array, and which returns a NumPy array. """ @uncert_core.set_doc( """\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. Warning: elements of the first argument array that are not AffineScalarFunc objects must not depend on uncert_core.Variable objects in any way. Otherwise, the dependence of the result in uncert_core.Variable objects will be incorrect. 
Original documentation: %s""" % (func.__name__, func.__doc__) ) def wrapped_func(arr, *args, **kwargs): # Nominal value: arr_nominal_value = nominal_values(arr) func_nominal_value = func(arr_nominal_value, *args, **kwargs) # The algorithm consists in numerically calculating the derivatives # of func: # Variables on which the array depends are collected: variables = set() for element in arr.flat: # floats, etc. might be present if isinstance(element, uncert_core.AffineScalarFunc): # !!!! The following forces an evaluation of the # derivatives!? Isn't this very slow, when # working with a large number of arrays? # # !! set() is only needed for Python 2 compatibility: variables |= set(element.derivatives.keys()) # If the matrix has no variables, then the function value can be # directly returned: if not variables: return func_nominal_value # Calculation of the derivatives of each element with respect # to the variables. Each element must be independent of the # others. The derivatives have the same shape as the output # array (which might differ from the shape of the input array, # in the case of the pseudo-inverse). derivatives = numpy.vectorize(lambda _: {})(func_nominal_value) for var in variables: # A basic assumption of this package is that the user # guarantees that uncertainties cover a zone where # evaluated functions are linear enough. Thus, numerical # estimates of the derivative should be good over the # standard deviation interval. This is true for the # common case of a non-zero standard deviation of var. If # the standard deviation of var is zero, then var has no # impact on the uncertainty of the function func being # calculated: an incorrect derivative has no impact. 
One # scenario can give incorrect results, however, but it # should be extremely uncommon: the user defines a # variable x with 0 standard deviation, sets y = func(x) # through this routine, changes the standard deviation of # x, and prints y; in this case, the uncertainty on y # might be incorrect, because this program had no idea of # the scale on which func() is linear, when it calculated # the numerical derivative. # The standard deviation might be numerically too small # for the evaluation of the derivative, though: we set the # minimum variable shift. shift_var = max(var._std_dev / 1e5, 1e-8 * abs(var._nominal_value)) # An exceptional case is that of var being exactly zero. # In this case, an arbitrary shift is used for the # numerical calculation of the derivative. The resulting # derivative value might be quite incorrect, but this does # not matter as long as the uncertainty of var remains 0, # since it is, in this case, a constant. if not shift_var: shift_var = 1e-8 # Shift of all the elements of arr when var changes by shift_var: shift_arr = array_derivative(arr, var) * shift_var # Origin value of array arr when var is shifted by shift_var: shifted_arr_values = arr_nominal_value + shift_arr func_shifted = func(shifted_arr_values, *args, **kwargs) numerical_deriv = (func_shifted - func_nominal_value) / shift_var # Update of the list of variables and associated # derivatives, for each element: for derivative_dict, derivative_value in zip( derivatives.flat, numerical_deriv.flat ): if derivative_value: derivative_dict[var] = derivative_value # numbers with uncertainties are built from the result: return numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, numpy.vectorize(uncert_core.LinearCombination)(derivatives), ) wrapped_func = uncert_core.set_doc( """\ Version of %s(...) that works even when its first argument is a NumPy array that contains numbers with uncertainties. 
Warning: elements of the first argument array that are not AffineScalarFunc objects must not depend on uncert_core.Variable objects in any way. Otherwise, the dependence of the result in uncert_core.Variable objects will be incorrect. Original documentation: %s""" % (func.__name__, func.__doc__) )(wrapped_func) # It is easier to work with wrapped_func, which represents a # wrapped version of 'func', when it bears the same name as # 'func' (the name is used by repr(wrapped_func)). wrapped_func.__name__ = func.__name__ return wrapped_func ############################################################################### # Arrays def uarray(nominal_values, std_devs=None): """ Return a NumPy array of numbers with uncertainties initialized with the given nominal values and standard deviations. nominal_values, std_devs -- valid arguments for numpy.array, with identical shapes (list of numbers, list of lists, numpy.ndarray, etc.). std_devs=None is only used for supporting legacy code, where nominal_values can be the tuple of nominal values and standard deviations. """ if std_devs is None: # Obsolete, single tuple argument call raise TypeError("uarray() should be called with two arguments.") return numpy.vectorize( # ! Looking up uncert_core.Variable beforehand through # '_Variable = uncert_core.Variable' does not result in a # significant speed up: lambda v, s: uncert_core.Variable(v, s), otypes=[object], )(nominal_values, std_devs) ############################################################################### def array_derivative(array_like, var): """ Return the derivative of the given array with respect to the given variable. The returned derivative is a NumPy ndarray of the same shape as array_like, that contains floats. array_like -- array-like object (list, etc.) that contains scalars or numbers with uncertainties. var -- Variable object. 
""" return numpy.vectorize( lambda u: derivative(u, var), # The type is set because an # integer derivative should not # set the output type of the # array: otypes=[float], )(array_like) def func_with_deriv_to_uncert_func(func_with_derivatives): # This function is used for instance for the calculation of the # inverse and pseudo-inverse of a matrix with uncertainties. """ Return a function that can be applied to array-like objects that contain numbers with uncertainties (lists, lists of lists, NumPy arrays, etc.). func_with_derivatives -- defines a function that takes an array-like object containing scalars and returns an array. Both the value and the derivatives of this function with respect to multiple scalar parameters are calculated by this func_with_derivatives() argument. func_with_derivatives(arr, input_type, derivatives, *args, **kwargs) must return an iterator. The first element returned by this iterator is the value of the function at the n-dimensional array-like 'arr' (with the correct type). The following elements are arrays that represent the derivative of the function for each derivative array from the iterator 'derivatives'. func_with_derivatives() takes the following arguments: arr -- NumPy ndarray of scalars where the function must be evaluated. input_type -- data type of the input array-like object. This type is used for determining the type that the function should return. derivatives -- iterator that returns the derivatives of the argument of the function with respect to multiple scalar variables. func_with_derivatives() returns the derivatives of the defined function with respect to these variables. args -- additional arguments that define the result (example: for the pseudo-inverse numpy.linalg.pinv: numerical cutoff). Examples of func_with_derivatives: inv_with_derivatives(). 
""" def wrapped_func(array_like, *args, **kwargs): """ array_like -- n-dimensional array-like object that contains numbers with uncertainties (list, NumPy ndarray or matrix, etc.). args -- additional arguments that are passed directly to func_with_derivatives. """ # The calculation below is not lazy, contrary to the linear # error propagation done in AffineScalarFunc. Making it lazy # in the same way would be quite a specific task: basically # this would amount to generalizing scalar coefficients in # core.LinearCombination to more general matrix # multiplications, and to replace Variable differentials by # full matrices of coefficients. This does not look very # efficient, as matrices are quite big, and since caching the # result of a few matrix functions that are not typically # stringed one after the other (unlike a big sum of numbers) # should not be needed. # So that .flat works even if array_like is a list: array_version = numpy.asanyarray(array_like) # Variables on which the array depends are collected: variables = set() for element in array_version.flat: # floats, etc. might be present if isinstance(element, uncert_core.AffineScalarFunc): # !!! set() is only needed for Python 2 compatibility: variables |= set(element.derivatives.keys()) array_nominal = nominal_values(array_version) # Function value, then derivatives at array_nominal (the # derivatives are with respect to the variables contained in # array_like): func_then_derivs = func_with_derivatives( array_nominal, type(array_like), (array_derivative(array_version, var) for var in variables), *args, **kwargs, ) func_nominal_value = next(func_then_derivs) if not variables: return func_nominal_value # The result is built progressively, with the contribution of # each variable added in turn: # Calculation of the derivatives of the result with respect to # the variables. 
derivatives = numpy.array( [{} for _ in range(func_nominal_value.size)], dtype=object ).reshape(func_nominal_value.shape) # Memory-efficient approach. A memory-hungry approach would # be to calculate the matrix derivatives will respect to all # variables and then combine them into a matrix of # AffineScalarFunc objects. The approach followed here is to # progressively build the matrix of derivatives, by # progressively adding the derivatives with respect to # successive variables. for var, deriv_wrt_var in zip(variables, func_then_derivs): # Update of the list of variables and associated # derivatives, for each element: for derivative_dict, derivative_value in zip( derivatives.flat, deriv_wrt_var.flat ): if derivative_value: derivative_dict[var] = derivative_value # An array of numbers with uncertainties is built from the # result: result = numpy.vectorize(uncert_core.AffineScalarFunc)( func_nominal_value, numpy.vectorize(uncert_core.LinearCombination)(derivatives), ) # NumPy matrices that contain numbers with uncertainties are # better as unumpy matrices: if isinstance(result, numpy.matrix): result = result.view(matrix) return result return wrapped_func ########## Matrix inverse def inv_with_derivatives(arr, input_type, derivatives): """ Defines the matrix inverse and its derivatives. See the definition of func_with_deriv_to_uncert_func() for its detailed semantics. """ inverse = numpy.linalg.inv(arr) # The inverse of a numpy.matrix is a numpy.matrix. 
It is assumed # that numpy.linalg.inv is such that other types yield # numpy.ndarrays: if issubclass(input_type, numpy.matrix): inverse = inverse.view(numpy.matrix) yield inverse # It is mathematically convenient to work with matrices: inverse_mat = numpy.asmatrix(inverse) # Successive derivatives of the inverse: for derivative in derivatives: derivative_mat = numpy.asmatrix(derivative) yield -inverse_mat * derivative_mat * inverse_mat inv = func_with_deriv_to_uncert_func(inv_with_derivatives) inv.__doc__ = """ Version of numpy.linalg.inv that works with array-like objects that contain numbers with uncertainties. The result is a unumpy.matrix if numpy.linalg.pinv would return a matrix for the array of nominal values. Analytical formulas are used. """ ########## Matrix pseudo-inverse def pinv_with_derivatives(arr, input_type, derivatives, rcond): """ Defines the matrix pseudo-inverse and its derivatives. Works with real or complex matrices. See the definition of func_with_deriv_to_uncert_func() for its detailed semantics. """ inverse = numpy.linalg.pinv(arr, rcond) # The pseudo-inverse of a numpy.matrix is a numpy.matrix. It is # assumed that numpy.linalg.pinv is such that other types yield # numpy.ndarrays: if issubclass(input_type, numpy.matrix): inverse = inverse.view(numpy.matrix) yield inverse # It is mathematically convenient to work with matrices: inverse_mat = numpy.asmatrix(inverse) # Formula (4.12) from The Differentiation of Pseudo-Inverses and # Nonlinear Least Squares Problems Whose Variables # Separate. Author(s): G. H. Golub and V. Pereyra. Source: SIAM # Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), # pp. 413-432 # See also # http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse # Shortcuts. 
All the following factors should be numpy.matrix objects: PA = arr * inverse_mat AP = inverse_mat * arr factor21 = inverse_mat * inverse_mat.H factor22 = numpy.eye(arr.shape[0]) - PA factor31 = numpy.eye(arr.shape[1]) - AP factor32 = inverse_mat.H * inverse_mat # Successive derivatives of the inverse: for derivative in derivatives: derivative_mat = numpy.asmatrix(derivative) term1 = -inverse_mat * derivative_mat * inverse_mat derivative_mat_H = derivative_mat.H term2 = factor21 * derivative_mat_H * factor22 term3 = factor31 * derivative_mat_H * factor32 yield term1 + term2 + term3 # Default rcond argument for the generalization of numpy.linalg.pinv: # # Most common modern case first: try: pinv_default = inspect.signature(numpy.linalg.pinv).parameters["rcond"].default except AttributeError: # No inspect.signature() before Python 3.3 try: # In numpy 1.17+, pinv is wrapped using a decorator which unfortunately # results in the metadata (argument defaults) being lost. However, we # can still get at the original function using the __wrapped__ # attribute (which is what inspect.signature() does). pinv_default = numpy.linalg.pinv.__wrapped__.__defaults__[0] except AttributeError: # Function not wrapped in NumPy < 1.17 pinv_default = numpy.linalg.pinv.__defaults__[0] # Python 1, 2.6+: pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives) def pinv(array_like, rcond=pinv_default): return pinv_with_uncert(array_like, rcond) pinv = uncert_core.set_doc( """ Version of numpy.linalg.pinv that works with array-like objects that contain numbers with uncertainties. The result is a unumpy.matrix if numpy.linalg.pinv would return a matrix for the array of nominal values. Analytical formulas are used. """ )(pinv) ########## Matrix class class matrix(numpy.matrix): # The name of this class is the same as NumPy's, which is why it # does not follow PEP 8. """ Class equivalent to numpy.matrix, but that behaves better when the matrix contains numbers with uncertainties. 
""" def __rmul__(self, other): # ! NumPy's matrix __rmul__ uses an apparently restrictive # dot() function that cannot handle the multiplication of a # scalar and of a matrix containing objects (when the # arguments are given in this order). We go around this # limitation: if numpy.isscalar(other): return numpy.dot(self, other) else: return numpy.dot(other, self) # The order is important def getI(self): """Matrix inverse or pseudo-inverse.""" m, n = self.shape return (inv if m == n else pinv)(self) I = numpy.matrix.I.getter(getI) # noqa # !!! The following function is not in the official documentation # of the module. Maybe this is because arrays with uncertainties # do not have any equivalent in this module, and they should be # the first ones to have such methods? @property def nominal_values(self): """ Nominal value of all the elements of the matrix. """ return nominal_values(self) # !!! The following function is not in the official documentation # of the module. Maybe this is because arrays with uncertainties # do not have any equivalent in this module, and they should be # the first ones to have such methods? @property def std_devs(self): return numpy.matrix(std_devs(self)) def umatrix(nominal_values, std_devs=None): """ Constructs a matrix that contains numbers with uncertainties. The arguments are the same as for uarray(...): nominal values, and standard deviations. The returned matrix can be inverted, thanks to the fact that it is a unumpy.matrix object instead of a numpy.matrix one. """ if std_devs is None: # Obsolete, single tuple argument call raise TypeError("umatrix() should be called with two arguments.") return uarray(nominal_values, std_devs).view(matrix) ############################################################################### def define_vectorized_funcs(): """ Defines vectorized versions of functions from uncertainties.umath_core. 
Some functions have their name translated, so as to follow NumPy's convention (example: math.acos -> numpy.arccos). """ this_module = sys.modules[__name__] # NumPy does not always use the same function names as the math # module: func_name_translations = dict( [ (f_name, "arc" + f_name[1:]) for f_name in ["acos", "acosh", "asin", "atan", "atan2", "atanh"] ] ) new_func_names = [ func_name_translations.get(function_name, function_name) # The functions from umath_core.non_std_wrapped_funcs # (available from umath) are normally not in # NumPy, so they are not included here: for function_name in umath_core.many_scalars_to_scalar_funcs ] for function_name, unumpy_name in zip( umath_core.many_scalars_to_scalar_funcs, new_func_names ): # ! The newly defined functions (uncertainties.unumpy.cos, etc.) # do not behave exactly like their NumPy equivalent (numpy.cos, # etc.): cos(0) gives an array() and not a # numpy.float... (equality tests succeed, though). func = getattr(umath_core, function_name) # Data type of the result of the unumpy function: otypes = ( # It is much more convenient to preserve the type of # functions that return a number without # uncertainty. Thus, for example, unumpy.isnan() can # return an array with a boolean data type (instead of # object), which allows the result to be used with NumPy's # boolean indexing. {} if function_name in umath_core.locally_cst_funcs # If by any chance a function returns, in a particular # case, an integer instead of a number with uncertainty, # side-effects in vectorize() would fix the resulting # dtype to integer, which is not what is wanted (as # vectorize(), at least in NumPy around 2010 maybe, # decided about the output data type by looking at the # type of first element only). else {"otypes": [object]} ) setattr( this_module, unumpy_name, #!!!! For umath_core.locally_cst_funcs, would it make sense # to optimize this by using instead the equivalent (? see # above) vectorized NumPy function on the nominal values? 
numpy.vectorize( func, doc="""\ Vectorized version of umath.%s. Original documentation: %s""" % (function_name, func.__doc__), **otypes, ), ) __all__.append(unumpy_name) define_vectorized_funcs() uncertainties-3.2.3/uncertainties/unumpy/ulinalg.py000066400000000000000000000005151500152063300226070ustar00rootroot00000000000000""" This module provides uncertainty-aware functions that generalize some of the functions from numpy.linalg. (c) 2010-2016 by Eric O. LEBIGOT (EOL) . """ from uncertainties.unumpy.core import inv, pinv # This module cannot import unumpy because unumpy imports this module. __all__ = ["inv", "pinv"]