(tar metadata: pax_global_header, comment=5d37d813a6e7ff931d8c88fcd6a6fa9f7ec7ba99)

aiocoap-0.4.12/.github/ISSUE_TEMPLATE/new_issue.md:
---
name: New issue
about: Template for any newly opened issue
---

aiocoap-0.4.12/.gitignore:
__pycache__
build
*.egg-info
dist
# managed by doc/aiocoap_index.py
doc/module
# generated by coverage tests
.coverage*
htmlcov
# generated by `setup.py tests`
.eggs
# generated by tox
.tox

aiocoap-0.4.12/.readthedocs.yaml:
version: 2
build:
  # from the limited choice there is; preferred would be a Python image
  os: ubuntu-22.04
  tools:
    python: "3.11"
sphinx:
  builder: html
python:
  install:
    - method: pip
      path: .
      extra_requirements:
        - docs
        # all except dtls (because DTLSSocket doesn't have wheels, and
        # readthedocs shouldn't be bothered with compiling C code, but then,
        # that module can be imported even when DTLSSocket is not available)
        #
        # also synced with .woodpecker.yml
        - oscore
        - ws
        - prettyprint

aiocoap-0.4.12/.reuse/dep5:
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: aiocoap
Upstream-Contact: Christian Amsüss
Source: https://github.com/chrysn/aiocoap/

# We *could* just go with `*` here, but I rather prefer being explicit about
# what's in here to guard against accidentally added files.
#
# This list should include all checked in files that are not Python files. For
# those, having the headers in is at least how we've been doing it a long time,
# is more aligned with what reuse.software recommends. But for all those config
# files, JSON examples etc, having comments in there in each config file's
# specific comment format (if any) is cumbersome.
Files: .flake8 .github/ISSUE_TEMPLATE/new_issue.md .gitignore .gitlab-ci.yml .readthedocs.yaml .woodpecker.yml MANIFEST.in NEWS.rst README.rst aiocoap/credentials.cddl contrib/aiocoap.ipynb contrib/aiocoap-proxy.ipynb contrib/aiocoap-server.ipynb contrib/android/.gitignore contrib/android/README.rst contrib/edhoc-demo-server.ipynb contrib/deterministic-demo/README.rst contrib/oscore-plugtest/.gitignore contrib/oscore-plugtest/common-context/a/secret.json contrib/oscore-plugtest/common-context/a/settings.json contrib/oscore-plugtest/common-context/b/secret.json contrib/oscore-plugtest/common-context/b/settings.json contrib/oscore-plugtest/common-context/c/secret.json contrib/oscore-plugtest/common-context/c/settings.json contrib/oscore-plugtest/common-context/d/secret.json contrib/oscore-plugtest/common-context/d/settings.json doc/README.doc doc/api.rst doc/design.rst doc/examples.rst doc/faq.rst doc/guidedtour.rst doc/index.rst doc/installation.rst doc/logo-square.svg doc/logo.svg doc/news.rst doc/pyodide.rst doc/release-checklist doc/stateofedhoc.rst doc/stateofoscore.rst doc/tools.rst pyproject.toml tests/test_credentials_oscore_context/sequence.json tests/test_credentials_oscore_context/settings.json tox.ini Copyright: 2012-2014 Maciej Wasilak , 2013-2014 Christian Amsüss License: MIT aiocoap-0.4.12/.woodpecker.yml000066400000000000000000000147711472205122200161630ustar00rootroot00000000000000when: - event: push # While we're still on the GitHub issue tracker, the pull_request event stays off, and we evaluate every single branch. # branch: [main, woodpecker] # - event: pull_request # running tests twice, so if something breaks when optional dependencies are # missing, it still shows up. (full coverage would mean running each # combination, but let's not blow the test matrix out of proportion). # --skip-env is often used to mask out Python versions that are installed on # the system but are not what we want to test here (for example, the Python # 3.13 image is based on a Debian that ships its own older Python). # Generally, all can run in parallel; setting depends_on to one hero test so # that we don't waste resources if one already fails. steps: test:tox-bookworm: image: debian:bookworm depends_on: [test:3.13] environment: FORCE_COLOR: "1" commands: - apt-get update - apt-get -y install tox build-essential python3.11-dev libssl-dev autoconf python3-setuptools python3-pip iproute2 libffi-dev libgirepository1.0-dev libcairo2-dev # Separate run so I don't waste time telling errors in setup apart from errors at runtime - tox --notest - "AIOCOAP_TEST_MCIF=\"$(ip -j -6 route list default | python3 -c 'import sys, json; print(json.load(sys.stdin)[0][\"dev\"])')\" tox" - mkdir collected-coverage/tox-bookworm/ -p - mv .coverage* collected-coverage/tox-bookworm/ test:pypy: image: docker.io/pypy:3 depends_on: [test:3.13] environment: FORCE_COLOR: "1" commands: - apt-get update - apt-get -y install build-essential libssl-dev autoconf iproute2 libffi-dev # lakers-python is not yet built for pypy; tracked at - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh /dev/stdin -y - . 
~/.cargo/env - pip install tox - tox --notest --skip-env '^py[^p]' - "AIOCOAP_TEST_MCIF=\"$(ip -j -6 route list default | python3 -c 'import sys, json; print(json.load(sys.stdin)[0][\"dev\"])')\" tox --skip-env '^py[^p]'" - mkdir collected-coverage/pypy/ -p - mv .coverage* collected-coverage/pypy/ test:py312: image: docker.io/python:3.12 depends_on: [test:3.13] environment: FORCE_COLOR: "1" commands: - apt-get update # cmake, libgirepository1.0-dev: required for building pygobject - apt-get -y install iproute2 cmake libgirepository1.0-dev - pip install tox # Separate run so I don't waste time telling errors in setup apart from errors at runtime - tox --notest --skip-env '^py31[^2]' - "AIOCOAP_TEST_MCIF=\"$(ip -j -6 route list default | python3 -c 'import sys, json; print(json.load(sys.stdin)[0][\"dev\"])')\" tox --skip-env '^py31[^2]'" - mkdir collected-coverage/tox-3.12/ -p - mv .coverage* collected-coverage/tox-3.12/ test:3.13: image: docker.io/python:3.13 depends_on: [] environment: # Possibly necessary because the image uses some Debian as a base might have another Python installed TOXENV: "py313-noextras,py313-allextras" FORCE_COLOR: "1" commands: - apt-get update - apt-get -y install iproute2 # The image's Debian has Rust 1.63, which is too old for the `half` module # required by cbor-diag; having Rust is required because neither cbor-diag # nor lakers (https://github.com/openwsn-berkeley/lakers/pull/291) build # 3.13 wheels yet - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh /dev/stdin -y - . ~/.cargo/env - pip install tox # Workaround for https://github.com/python-cffi/cffi/issues/71 - tox -e py313-allextras exec --skip-pkg-install -- pip install 'cffi >=1.17.0rc1' # Until something > 0.3.3 is out (https://github.com/openwsn-berkeley/lakers/pull/293 is already merged) # The version is pinned to where 293 was merged but before https://github.com/openwsn-berkeley/lakers/issues/303 happened. - tox -e py313-allextras exec --skip-pkg-install -- pip install 'git+https://github.com/openwsn-berkeley/lakers/@26eab7d5cf08b67f6cabe4debcbd9cf0dcaca81c#egg=lakers-python&subdirectory=lakers-python' # Separate run so I don't waste time telling errors in setup apart from errors at runtime - tox --notest --skip-env '^py31[^3]' - "AIOCOAP_TEST_MCIF=\"$(ip -j -6 route list default | python3 -c 'import sys, json; print(json.load(sys.stdin)[0][\"dev\"])')\" tox --skip-env '^py31[^3]'" - mkdir collected-coverage/tox-3.13/ -p - mv .coverage* collected-coverage/tox-3.13/ mypy: image: docker.io/python:3.12 depends_on: [] environment: FORCE_COLOR: "1" commands: - pip install mypy - pip install '.[all]' - mypy --install-types --non-interactive aiocoap ruff: image: python:3 depends_on: [] environment: FORCE_COLOR: "1" commands: - pip install ruff - ruff format --check # Excluding the client scripts as their heavy reliance on `from aiocoap # import *` would decrease their usefullness as easy quick-start script - ruff check aiocoap reuse: image: python:3 depends_on: [] environment: FORCE_COLOR: "1" commands: - pip install reuse - reuse lint doc: # FIXME: Use python:3 once lakers and cbor-diag have 3.13 wheels again. 
image: python:3.12 depends_on: [] environment: FORCE_COLOR: "1" commands: # synced with .readthedocs.yaml and pyproject.toml - pip install '.[docs,oscore,prettyprint]' - python3 -m sphinx doc public/doc/ - python3 -m docutils README.rst --strict > /dev/null build-pages: image: docker.io/python:3 depends_on: - test:tox-bookworm - test:py312 - test:3.13 - test:pypy environment: FORCE_COLOR: "1" commands: - python3 -m pip install coverage - mv collected-coverage/*/.coverage* . - python3 -m coverage combine - python3 -m coverage report --include=aiocoap/\* - python3 -m coverage html --include=aiocoap/\* - mkdir -p public - mv htmlcov public/coverage/ - echo 'aiocoap build artifacts

aiocoap build artifacts
  • Coverage report
  • Documentation' > public/index.html publish-pages: image: codeberg.org/xfix/plugin-codeberg-pages-deploy:1 depends_on: - build-pages settings: folder: public ssh_key: from_secret: ssh_key aiocoap-0.4.12/LICENSES/000077500000000000000000000000001472205122200144135ustar00rootroot00000000000000aiocoap-0.4.12/LICENSES/BSD-3-Clause.txt000066400000000000000000000027031472205122200171400ustar00rootroot00000000000000Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. aiocoap-0.4.12/LICENSES/MIT.txt000066400000000000000000000017771472205122200156210ustar00rootroot00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. aiocoap-0.4.12/MANIFEST.in000066400000000000000000000001411472205122200147400ustar00rootroot00000000000000include *.py include contrib/* include contrib/*/* include doc/* include LICENSES/* include NEWS aiocoap-0.4.12/NEWS.rst000066400000000000000000000722721472205122200145260ustar00rootroot00000000000000Version 0.4.12 -------------- Enhancements ~~~~~~~~~~~~ * Better errors are shown when using malformed (esp. incomplete) URIs, eg. on ``aiocoap-client hostname:port``. * Support for ephemeral identities (``{"unauthenticated: true}``) is extended to the local side. 
* OSCORE groupcomm is updated to the latest draft version. Breaking changes ~~~~~~~~~~~~~~~~ * By updating to lakers 0.4.1, the EDHOC implementation now complies with the specification regarding credentials-by-value; due to bugs in prior versions, this breaks EDHOC establishment with credentials-by-value with later versions. The common case of credentials by KID is unaffected. Bug fixes ~~~~~~~~~ * Cases of invalid URIs are reported more reliably, rather than producing incorrect requests. Internal refactoring ~~~~~~~~~~~~~~~~~~~~ * CI and main source hosting now run on codeberg. * Tests are made resilient to high system load. Version 0.4.11 -------------- New features ~~~~~~~~~~~~ * Group OSCORE is updated to draft version -21. * max_regular_block_size_exp can now be set on remotes by the client. This allows influencing both the Block1 and the Block2 size. * EDHOC: Allow private keys to be generated in RAM, and specified directly in the credentials file. Examples ~~~~~~~~ * Add EDHOC demo for Jupyter. Minor fixes ~~~~~~~~~~~ * IP addresses are subjected to URI syntax normalization. * Avoid mixups between TLS and non-TLS contexts. * Send Uri-Host and Uri-Scheme in manually constructed EDHOC message. Version 0.4.10 -------------- New features ~~~~~~~~~~~~ * Initial experimental support for EDHOC key establishment was added. * CLI: New aiocoap-keygen command was added. * Credentials can be processed as CBOR Diagnostic Notation (EDN). * aiocoap.cli.defaults can be run as a module. Deprecations ~~~~~~~~~~~~ * OSCORE: The context argument "contextfile" was renamed to "basedir". Minor fixes ~~~~~~~~~~~ * Many indenting and quoting changes due to the switch to enforced ruff lints and formatting. * Various broken, missing and duplicate references fixed in the documentation. * Doctest failure in 0.4.9 _repr_html_ was fixed. Version 0.4.9 ------------- This is a bugfix release to restore functionality when used through Jupyter and in Python's optimized mode. Bug fixes ~~~~~~~~~ * enum: Fix visibility of _repr_html_ on Python versions < 3.13. * numbers: Don't export _code, which is only present with __debug__. Version 0.4.8 ------------- Compatibility ~~~~~~~~~~~~~ * Block-wise requests now send Size1. Error handling ~~~~~~~~~~~~~~ * Errors raised through the udp6 interface now report name and description in addition to their error number. * Many errors now have an ``.extra_help()`` method, which is shown in aiocoap-client to guide the user's debugging. * Some non-aiocoap errors being raised as a result of network errors were turned into error.NetworkError. * All CoAP error response codes now have a corresponding ``ConstructionRenderableError`` and can thus be raised easily from handlers. Platform support ~~~~~~~~~~~~~~~~ * Support for Python versions below 3.10 was dropped. * Inconsistent platform implementations of AI_V4MAPPED and AI_ADDRCONFIG are now worked around by custom implementations of the lookup process. * Android is now supported. * Python 3.13 is now supported. * Kivy examples were updated to current mainline Kivy. * gbulb support is being phased out in favor of pygobject's upcoming native async support. Infrastructure ~~~~~~~~~~~~~~ * Build system was modernized and migrated to pyproject.toml. Tests are now run using tox or ``python3 -m unittest``. * Type annotations are now tested using mypy. * The ``ExtensibleIntEnum`` type underlying ``ContentFormat`` and ``OptionNumber`` was altered to now use ``enum.IntEnum`` as its base.
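As a quick illustration of what the new ``enum.IntEnum`` base means in practice (a sketch added for clarity, not part of the original changelog entry; the values are the registered CoAP numbers)::

    >>> from aiocoap.numbers import ContentFormat, OptionNumber
    >>> int(ContentFormat.TEXT)  # still usable anywhere an int is expected
    0
    >>> isinstance(OptionNumber.BLOCK2, int)
    True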
Deprecations ~~~~~~~~~~~~ * The request.observation.register_callback / register_errback interface is deprecated in favor of the asynchronous iteration interface (aiter). * Setting media type and encoding on a ContentFormat is deprecated, use ``.define(...)`` instead. * ``OptionNumber.OBJECT_SECURITY`` is deprecated; it is an alias for ``.OSCORE``. (Same goes for the ``message.opt.object_security`` attribute). Minor fixes ~~~~~~~~~~~ * aiocoap-client can now use the iPATCH method. * aiocoap-client output colors were improved. * cbor-diag is recognized as a prerequisite for pretty printing. * Corner cases for SSL configuration for WebSockets were fixed. * Documentation updates, including references to pyodide. * Corner cases of implicit observation cancellation were fixed. * Access to cryptography internals now uses the proper public interfaces. Version 0.4.7 ------------- Compatibility ~~~~~~~~~~~~~ * Group OSCORE updated to -17. The setup of group contexts requires altered parameters, as the descriptions of these contexts changed in the underlying specification. Minor fixes ~~~~~~~~~~~ * Several minor documentation fixes. Version 0.4.6-alpha3 -------------------- Bug fixes ~~~~~~~~~ * Include vendored modules in sdist and wheels. Version 0.4.6-alpha2 -------------------- Bug fixes ~~~~~~~~~ * ``request.get_request_uri()`` in a server handler now reports the URI with the correct path. * Broken links fixed in documentation. Meta ~~~~ * Updated copyright statements, now complying with reuse.software specs. * LinkHeader dependency moved from unmaintained PyPI package into vendored copy to avoid trouble with missing .whl (wheel) files. Version 0.4.6-alpha1 -------------------- CLI changes ~~~~~~~~~~~ * aiocoap-client now uses CBOR Diagnostic Notation both for pretty-printed output and when adjusting a ``--payload`` argument to a CBOR ``--content-format``. This should be a compatible change for users who previously used JSON for input, but needs adjustments for users who used Python literals. * CBOR sequences are now recognized for pretty-printing, and accepted (wrapped in an array) for ``--payload`` format adjustment. New features ~~~~~~~~~~~~ * Initial support for pyodide (eg. in Jupyter): * The websockets client transport is made available through the browser's APIs. * Messages and other elements are available for HTML pretty-printing. * Messages now have a ``.transport_tuning`` property, which may be overwritten to influence transmission characteristics. Bug fixes ~~~~~~~~~ * BERT blocks are now extracted correctly. * oscore: Constant with typo renamed (``COSE_COUNTERSI(NG→GN)ATURE0``). Deprecations ~~~~~~~~~~~~ * numbers.constants: Transport related parameters are deprecated, use ``.transport_tuning`` (see above). Version 0.4.5 ------------- Behavioral changes ~~~~~~~~~~~~~~~~~~ * RSTs are not sent on unrecognized responses any more unless the received message was a CON; the previous behavior was violating the specification. Deprecations ~~~~~~~~~~~~ * UNSUPPORTED_MEDIA_TYPE is now formally deprecated, use UNSUPPORTED_CONTENT_FORMAT instead. Minor enhancements ~~~~~~~~~~~~~~~~~~ * Fix tests for Python 3.11. * Lower log level of "but could not match it to a running exchange" from warning to info. * Shorten the string representation of message types (to "CON", "ACK" etc.) Version 0.4.4 ------------- New features ~~~~~~~~~~~~ * Content-Format / Accept option now use a dedicated ContentFormat type. Applications should be unaffected as the type is still derived from int. 
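For example, code that used plain integers keeps working, while the enum adds readable accessors (an illustrative sketch assuming the well-known registered values, not an excerpt from the release)::

    >>> from aiocoap import Message
    >>> from aiocoap.numbers import ContentFormat
    >>> m = Message()
    >>> m.opt.content_format = 50  # plain int, as before
    >>> m.opt.content_format == ContentFormat.JSON
    True
    >>> m.opt.content_format.media_type
    'application/json'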
* Non-traditional responses are now experimentally supported by implementing ``.render_to_pipe()`` on a resource. Deprecations ~~~~~~~~~~~~ * Building custom resources by inheriting from ``interfaces.Resource`` / ``interfaces.ObservableResource`` and implementing ``.render()`` etc. is deprecated. Instead, inherit from ``resource.Resource`` (recommended), or implement ``.render_to_pipe()`` (eg. when implementing a proxy). * numbers.media_type and media_type_rev: Use the ContentFormat type's constructor and accessors instead. Tools ~~~~~ * aiocoap-fileserver now has optional write support, and ETag and If-* option handling. * aiocoap-client now assembles and displays the Location-* options of responses. * aiocoap-rd now has dedicated logging independent of aiocoap's. * Various small fixes to aiocoap-rd. * Help and error texts were improved. Minor enhancements ~~~~~~~~~~~~~~~~~~ * Documentation now uses ``await`` idiom, as it is available even inside the asyncio REPL. * The default cut-off for block-wise fragmentation was increased from 1024 to 1124 bytes. This allows OSCORE to use the full inner block-wise size without inadvertently causing outer fragmentation, while still fitting within the IPv6 minimum MTU. * Connection shutdown for TCP and WebSockets has been implemented; they now send Release messages and wait for the peer to close the connection. * Type annotations are now used more widely. * Library shutdown works more cleanly by not relying on the presence of the async loop. * OSCORE contexts now only access the disk when necessary. * OSCORE now supports inner block-wise transfer and observations. * WebSocket servers can now pick an ephemeral port (when binding to port 0). * Tasks created by the library are now named for easier debugging. * Bugs fixed around handling of IP literals in proxies. Internal refactoring ~~~~~~~~~~~~~~~~~~~~ * Pipes (channels for asynchronously producing responses, previously called PlumbingResponse) are now used also for resource rendering. Block-wise and observation handling could thus be moved away from the core protocol and into the resource implementations. * Exception chaining was started to be reworked into explicit re-raises. Version 0.4.3 ------------- Compatibility ~~~~~~~~~~~~~ * Fix compatibility with websockets 10.1. Minor enhancements ~~~~~~~~~~~~~~~~~~ * Failure path fixes. Version 0.4.2 ------------- New features ~~~~~~~~~~~~ * Experimental support for DTLS server operation (PSK only). Tools ~~~~~ * aiocoap-client reports responder address if different from requested. * aiocoap-rd is aligned with draft version -27 (e.g. using .well-known/rd). * aiocoap-proxy can be registered to an RD. Compatibility ~~~~~~~~~~~~~ * Group OSCORE updated to -11. * Fixes to support Python 3.10, including removal of some deprecated idioms and inconsistent loop handling. Examples / contrib ~~~~~~~~~~~~~~~~~~ * Demo for Deterministic OSCORE added. Deprecations ~~~~~~~~~~~~ * util.quote_nonascii * error.{RequestTimedOut,WaitingForClientTimedOut} * Direct use of AsyncCLIDaemon from asynchronous contexts (replacement not available yet). Minor enhancements ~~~~~~~~~~~~~~~~~~ * Resources can hide themselves from the listing in /.well-known/core. * RD's built-in proxy handles block-wise better. * Added __repr__ to TokenManager and MessageManager. * Pretty printer errs gracefully. * Failure path fixes. * Documentation updates. * Removed distutils dependency. Internal refactoring ~~~~~~~~~~~~~~~~~~~~ * CI testing now uses pytest. * dispatch_error now passes on exceptions.
* DTLS client cleaned up. * Build process now uses the build module. Version 0.4.1 ------------- * Fix Python version reference to clearly indicate the 3.7 requirement everywhere. A Python requirement of ">= 3.6.9" was left over in the previous release's metadata from earlier intermediate steps that accommodated PyPy's pre-3.7 version. Version 0.4 ----------- Multicast improvements ~~~~~~~~~~~~~~~~~~~~~~ * Multicast groups are not joined by default any more. Instead, groups and interfaces on which to join need to be specified explicitly. The previous mechanism was unreliable, and only joined on one (more or less random) interface. * Network interfaces can now be specified in remotes of larger than link-local scope. * In udp6, network interfaces are selected via PKTINFO now. They used to be selected using the socket address tuple, but that was limited to link-local addresses, whereas PKTINFO works just as well for link-local addresses. * Remote addresses in udp6 now have a ``netif`` property. New features ~~~~~~~~~~~~ * The simple6 transport can now indicate the local address when supported by the platform. This makes it a viable candidate for LwM2M clients as they often operate using role reversal. * Servers (including the shipped examples) can now offer OSCORE through the OSCORE sitewrapper. Access control is only rudimentary in that the authorization information is not available in a convenient form yet. * CoAP over WebSockets is now supported (in client and server role, with and without TLS). Please note that the default port bound to is not the HTTP default port but 8683. * OSCORE group communication is now minimally supported (based on draft version 10). No automated ways of setting up a context are provided yet. This includes highly experimental support for deterministic requests. * DTLS: Terminating connections are now handled correctly, and shut down when unused. The associated refactoring also reduces the resource usage of DTLS connections. Tools updates ~~~~~~~~~~~~~ * aiocoap-client: New options to * set initial Block1 size (``--payload-initial-szx``), and to * elide the Uri-Host option from requests to named hosts. * aiocoap-client: CBOR input now accepts Python literals or JSON automatically, and can thus produce numeric keys and byte strings. * aiocoap-client: Preprocessed CBOR output now works for any CBOR-based content format. * resource-directory: Updated to draft -25. * resource-directory: Compatibility mode for LwM2M added. * resource-directory: Proxying extension implemented. With this, an RD can be configured to allow access to endpoints behind a firewall or NAT. * Example server: Add /whoami resource. Dependencies ~~~~~~~~~~~~ * The minimum required Python version is now 3.7. * The cbor library dependency was replaced with the cbor2 library. * The dependency on the hkdf library was removed. * The ge25519 library dependency was added to perform key conversion steps necessary for Group OSCORE. Portability ~~~~~~~~~~~ * Several small adjustments were made to accommodate execution on Windows. * FreeBSD was added to the list of supported systems (without any need for changes). Fixes possibly breaking applications ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Some cases of OSError were previously raised in responses. Those are now all expressed as an aiocoap.error.NetworkError, so that an application only needs to catch aiocoap.error.Error for anything that's expected to go wrong. The original error cause is available in a chained exception. An example of catching these uniformly follows below.
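A client that wants to handle everything that is expected to go wrong can thus do so in one place (an illustrative sketch, with ``ctx`` obtained from ``aiocoap.Context.create_client_context()`` and ``request`` being any request message; not an excerpt from the release)::

    import aiocoap.error

    try:
        response = await ctx.request(request).response
    except aiocoap.error.NetworkError as e:
        # The transport-specific original error is chained in
        print("Network trouble:", e, "-- caused by:", e.__cause__)
    except aiocoap.error.Error as e:
        print("Other expected CoAP error:", e)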
* Responses are not deduplicated any more; as a result, less state is kept in the library. Consequently, separate responses whose ACKs get lost produce an RST the second time the CON comes. This changes nothing about the client-side handling (which is complete either way with the first response), but may upset servers that do not anticipate this allowed behavior. Minor fixes ~~~~~~~~~~~ * The repr of udp6 addresses now shows all address components. * Debug information output was increased in several spots. * The ``loop=`` parameter was removed where it is deprecated by Python 3.8. * asyncio Futures are created using create_future in some places. * Binding to port 0 works again. * The file server's registration at an RD was fixed. * File server directories can now use block-wise transfer. * Server errors from rendering exceptions to messages are now caught. * Notifications now respect the block size limit. * Several improvements to the test infrastructure. * Refactoring around request processing internals (PlumbingRequest) alleviated potential memory leaks. * Update option numbers from draft-ietf-echo-request-tag-10. * Various proxying fixes and enhancements. * TLS: Use SNI (Python >= 3.8), set correct hostinfo based on it. * Internally used NoResponse options on responses are not leaked any more. * Timeouts from one remote are now correctly propagated to all pending requests. * Various logging improvements and changes. * udp6: Show warnings when operating system fails to deliver pktinfo (happens with very old Linux kernels). * Reduce installation clobber by excluding tests. * Enhanced error reporting for erroneous ``coap://2001:db8::1/`` style URIs. * Improve OSCORE's shutdown robustness. * Sending to IPv4 literals now does not send the Uri-Host automatically any more. Version 0.4b3 ------------- Behavioral changes ~~~~~~~~~~~~~~~~~~ * Responses to NON requests are now sent as NON. Portability ~~~~~~~~~~~ * All uses of SO_REUSEPORT were changed to SO_REUSEADDR, as REUSEPORT is considered dangerous by some and removed from newer Python versions. On platforms without support for that option, it is not set. Automatic load-balancing by running parallel servers is not supported there. * The udp6 module is now usable on platforms without MSG_ERRQUEUE (ie. anything but Linux). This comes with caveats, so it is still only enabled by default on Linux. The required constants are now shipped with aiocoap for macOS for the benefit of Python versions less than 3.9. Minor fixes ~~~~~~~~~~~ * More effort is made to sync OSCORE persistence files to disk. * Memory leakage fixes on server and client side. * Option numbers for Echo and Request-Tag were updated according to the latest draft version. Other ~~~~~ * FAQ section started in the documentation. * With ``./setup.py test`` being phased out, tests are now run via tox. Version 0.4b2 ------------- New features ~~~~~~~~~~~~ * OSCORE: Implement Appendix B.1 recovery. This allows the aiocoap program to run OSCORE without writing sequence numbers and replay windows to disk all the time. Instead, they write pessimistic values to disk that are rarely updated, and write the last values on shutdown. In the event of an unclean shutdown, the sender sequence number is advanced by some amount, and the first request from a client is sent back for another roundtrip using the Echo option. An aiocoap client now also contains the code required to transparently resubmit requests if a server is in such a recovery situation.
* OSCORE: Security contexts are now protected against simultaneous use by multiple aiocoap processes. This incurs an additional dependency on the ``filelock`` package. Breaking changes ~~~~~~~~~~~~~~~~ * OSCORE: The file format of security context descriptions is changed. Instead of the previous roles concept, they now carry explicit sender and recipient IDs, and consequently do not take a role parameter in the credentials file any more. The sequence number format has changed incompatibly. No automatic conversion is available. It is recommended to replace old security contexts with new keys. Minor fixes ~~~~~~~~~~~ * b4540f9: Fix workaround for missing definitions, restoring Python 3.5 support on non-amd64 platforms. * b4b886d: Fix regression in the display of zone identifiers in IPv6 addresses. * 5055bd5: The server now does not send RSTs in response to multicasts any more. * OSCORE: The replay window used is now the prescribed 32bit large DTLS-like window. Version 0.4b1 ------------- Tools ~~~~~ * aiocoap-client can now re-format binary output (hex-dumping binary files, showing CBOR files in JSON-like notation) and apply syntax highlighting. By default, this is enabled if the output is a terminal. If output redirection is used, data is passed on as-is. * aiocoap-fileserver is now provided as a standalone tool. It provides directory listings in link format, guesses the content format of provided files, and allows observation. * aiocoap-rd is now provided as a standalone tool and offers a simple CoRE Resource Directory server. Breaking changes ~~~~~~~~~~~~~~~~ * Client observations that have been requested by sending the Observe option must now be taken up by the client. The warning that was previously shown when an observation was shut down due to garbage collection can not be produced easily in this version, and will result in a useless persisting observation in the background. (See ) * Server resources that expect the library to handle blockwise by returning true to ``needs_blockwise_assembly`` do not allow random initial access any more; this is especially problematic with clients that use a different source port for every package. The old behavior was prone to triggering an action twice on non-safe methods, and generating wrong results in block1+block2 scenarios when a later ``FETCH block2:2/x/x`` request would be treated as a new operation and return the result of an empty request body rather than being aligned with an earlier ``FETCH block1:x/x/x`` operation. * fdc8b024: Support for Python 3.4 is dropped; minimum supported version is now 3.5.2. * 0124ad0e: The network dumping feature was removed, as it would have been overly onerous to support it with the new more flexible transports. * 092cf49f, 89c2a2e0: The content type mapped to the content format 0 was changed from "text/plain" (which was incorrect as it was just the bare media type) to the actual content of the IANA registry, 'text/plain;charset="utf8"'. For looking up the content format, text/plain is still supported but deprecated. * 17d1de5a: Handling of the various components of a remote was unified into the .remote property of messages. If you were previously setting unresolved addresses or even a tuple-based remote manually, please set them using the ``uri`` pseudo-option now. * 47863a29: Re-raise transport specific errors as aiocoap errors such as aiocoap.error.ResolutionError or NetworkError. This allows API users to catch them independently of the underlying transport.
* f9824eb2: Plain strings as paths in add_resource are rejected. Applications that did this are very unlikely to have produced the intended behavior, and if so can be easily fixed by passing in ``tuple(s)`` rather than ``s``. New features ~~~~~~~~~~~~ * 88f44a5d: TCP and TLS support added; TLS is currently limited to PKI certificates. This includes support for preserving the URI scheme in exchanges (0b0214db). * a50da1a8: The credentials module was added to dispatch DTLS and OSCORE credentials * f302da07: On the client side, OSCORE can now be used as a transport without any manual protection steps. It is automatically used for URIs for which a security context has been registered with the context's client credentials. * 5e5388ae: Support for PyPy * 0d09b2eb: NoResponse is now handled automatically. Handlers can override the default handling by setting a No-Response option on their response messages, whose value will them be examined by the library to decide whether the message is actually sent; the No-Response option is stripped from the outgoing message in the course of that (as it's actually not a response option). * b048a50a: Some improvements on multicast handling. There is still no good support for sending a request to multicast and receiving the individual responses, but requests to multicast addresses are now unconditionally handled under the rules of multicast CoAP, even if they're used over the regular request interface (ie. sending to multicast but processing only the first response). * c7ca0286: The software version used to run the server (by default, aiocoap's version) is now shown in .well-known/core using the impl-info relation. Deprecations ~~~~~~~~~~~~ * 0d09b2eb: Returning a NoResponse sentinel value is now deprecated. Assorted changes ~~~~~~~~~~~~~~~~ * Additions to the contrib/ collection of aiocoap based tools: - widgets, kivy-widgets - rd-relay * 95c681a5 and others: Internal interfaces were introduced for the various CoAP sublayers. This should largely not affect operation (though it does change the choice of tokens or message IDs); where it does, it's noted above in the breaking changes. * 5e5388ae, 9e17180e, 60137bd8: Various fixes to the OSCORE implementation, which is not considered experimental any more. * Various additions to the test suite * 61843d41: Asynchronous ``recvmsg`` calling (as used by the udp6 backend) was reworked from monkey-patching into using asyncio's ``add_reader`` method, and should thus now be usable on all asyncio implementations, including uvloop and gbulb. * 3ab14c49: .well-known/core filtering will now properly filter by content format (ct=) in the presence of multiple supported content types. * 9bd612de: Fix encoding of block size 16. * 029a8f0e: Don't enforce V4MAPPED addresses in the simple6 backend. This makes the backend effectively a simple-any backend, as the address family can be picked arbitrarily by the operating system. * 8e93eeb9: The simple6 backend now reuses the most recently used 64 sockets. * cb8743b6: Resolve the name given as binding server name. This enables creating servers bound exclusively to a link-local address. * d6aa5f8c: TinyDTLS now pulls in a more recent version of DTLSSocket that has its version negotiation fixed, and can thus interoperate with recent versions of libcoap and RIOT's the pending support for DTLS on Gcoap. 
* 3d9613ab: Errors in URI encoding were fixed. Version 0.4a1 ------------- Security fixes ~~~~~~~~~~~~~~ * 18ddf8c: Proxy now only creates log files when explicitly requested * Support for secured protocols added (see Experimental Features) Experimental features ~~~~~~~~~~~~~~~~~~~~~ * Support for OSCORE (formerly OSCOAP) and CoAP over DTLS was included. These features both lack proper key management so far, which will be available in a 0.4 release. * Added implementations of Resource Directory (RD) server and endpoint * Support for different transports was added. The transport backends to enable are chosen heuristically depending on operating system and installed modules. * Transports for platforms not supporting all POSIX operations to run CoAP correctly were added (simple6, simplesocketserver). This should allow running aiocoap on Windows and macOS, and using uvloop, but with some disadvantages (see the respective transport documentation). Breaking changes ~~~~~~~~~~~~~~~~ * 8641b5c: Blockwise handling is now available as stand-alone responder. Applications that previously created a Request object rather than using Protocol.request now need to create a BlockwiseRequest object. * 8641b5c: The ``.observation`` property can now always be present in responses, and applications that previously checked for its presence should now check whether it is None. * cdfeaeb: The multicast interface using queuewithend was replaced with asynchronous iterators * d168f44: Handling of sub-sites changed; subsites' root resources now need to reside at path ``("",)`` Deprecations ~~~~~~~~~~~~ * e50e994: Rename UnsupportedMediaType to UnsupportedContentFormat * 9add964 and others: The ``.remote`` message property is not necessarily a tuple any more, and has its own interface * 25cbf54, c67c2c2: Drop support for Python versions < 3.4.4; the required version will be incremented to 3.5 soon.
Assorted changes ~~~~~~~~~~~~~~~~ * 750d88d: Errors from predefined exceptions like BadRequest("...") are now sent with their text message in the diagnostic payload * 3c7635f: Examples modernized * 97fc5f7: Multicast handling changed (but is still not fully supported) * 933f2b1: Added support for the No-Response option (RFC7967) * baa84ee: V4MAPPED addresses are now properly displayed as IPv4 addresses Tests ~~~~~ * Test suite is now run at Gitlab, and coverage reported * b2396bf: Test suite probes for usable hostnames for localhost * b4c5b1d: Allow running tests with a limited set of extras installed * General improvements on coverage Version 0.3 ----------- Features ~~~~~~~~ * 4d07615: ICMP errors are handled * 1b61a29: Accept 'fe80::...%eth0' style addresses * 3c0120a: Observations provide modern ``async for`` interface * 4e4ff7c: New demo: file server * ef2e45e, 991098b, 684ccdd: Messages can be constructed with options, modified copies can be created with the ``.copy`` method, and default codes are provided * 08845f2: Request objects have ``.response_nonraising`` and ``.response_raising`` interfaces for easier error handling * ab5b88a, c49b5c8: Sites can be nested by adding them to an existing site, catch-all resources can be created by subclassing PathCapable Possibly breaking changes ~~~~~~~~~~~~~~~~~~~~~~~~~ * ab5b88a: Site nesting means that server resources do not get their original Uri-Path any more * bc76a7c: Location-{Path,Query} were opaque (bytes) objects instead of strings; disctinction between accidental and intentional opaque options is now clarified Small features ~~~~~~~~~~~~~~ * 2bb645e: set_request_uri allows URI parsing without sending Uri-Host * e6b4839: Take block1.size_exponent as a sizing hint when sending block1 data * 9eafd41: Allow passing in a loop into context creation * 9ae5bdf: ObservableResource: Add update_observation_count * c9f21a6: Stop client-side observations when unused * dd46682: Drop dependency on obscure built-in IN module * a18c067: Add numbers from draft-ietf-core-etch-04 * fabcfd5: .well-known/core supports filtering Internals ~~~~~~~~~ * f968d3a: All low-level networking is now done in aiocoap.transports; it's not really hotpluggable yet and only UDPv6 (with implicit v4 support) is implemented, but an extension point for alternative transports. * bde8c42: recvmsg is used instead of recvfrom, requiring some asyncio hacks Package management ~~~~~~~~~~~~~~~~~~ * 01f7232, 0a9d03c: aiocoap-client and -proxy are entry points * 0e4389c: Establish an extra requirement for LinkHeader aiocoap-0.4.12/README.rst000066400000000000000000000151161472205122200147010ustar00rootroot00000000000000aiocoap -- The Python CoAP library ================================== The aiocoap package is an implementation of CoAP, the `Constrained Application Protocol`_. It is written in Python 3 using its `native asyncio`_ methods to facilitate concurrent operations while maintaining an easy to use interface. .. _`Constrained Application Protocol`: http://coap.space/ .. _`native asyncio`: https://docs.python.org/3/library/asyncio Usage ----- For how to use the aiocoap library, have a look at the guidedtour_, or at the examples_ and tools_ provided. A full reference is available in the `API documentation`_. All examples can be run directly from a source code copy. If you prefer to install it, the usual Python mechanisms apply (see installation_). .. 
_`API documentation`: http://aiocoap.readthedocs.io/en/latest/api.html Features / Standards -------------------- This library supports the following standards in full or partially: * RFC7252_ (CoAP): Supported for clients and servers. Multicast is supported on the server side, and partially for clients. DTLS is supported but experimental, and lacking some security properties. No caching is done inside the library. * RFC7641_ (Observe): Basic support for clients and servers. Reordering, re-registration, and active cancellation are missing. * RFC7959_ (Blockwise): Supported both for atomic and random access. * RFC8323_ (TCP, WebSockets): Supports CoAP over TCP, TLS, and WebSockets (both over HTTP and HTTPS). The TLS parts are server-certificate only; preshared, raw public keys and client certificates are not supported yet. * RFC7967_ (No-Response): Supported. * RFC8132_ (PATCH/FETCH): Types and codes known, FETCH observation supported. * RFC9176_: A standalone resource directory server is provided along with a library function to register at one. They lack support for groups and security considerations, and are generally rather simplistic. * RFC8613_ (OSCORE): Full support client-side; protected servers can be implemented based on it but are not automatic yet. * draft-ietf-core-oscore-groupcomm-23_ (Group OSCORE): Supported for both group and pairwise mode in groups that are fully known. (The lack of an implemented joining or persistence mechanism makes this impractical for anything but experimentation.) * RFC9528_ (EDHOC): Experimental and rudimentary support for configured peers using the lakers_ implementation. If something described by one of the standards but not implemented, it is considered a bug; please file at the `github issue tracker`_. (If it's not on the list or in the excluded items, file a wishlist item at the same location). .. _RFC7252: https://tools.ietf.org/html/rfc7252 .. _RFC7641: https://tools.ietf.org/html/rfc7641 .. _RFC7959: https://tools.ietf.org/html/rfc7959 .. _RFC7967: https://tools.ietf.org/html/rfc7967 .. _RFC8132: https://tools.ietf.org/html/rfc8132 .. _RFC8323: https://tools.ietf.org/html/rfc8323 .. _RFC8613: https://tools.ietf.org/html/rfc8613 .. _RFC9176: https://tools.ietf.org/html/rfc9176 .. _RFC9528: https://tools.ietf.org/html/rfc9528 .. _draft-ietf-core-oscore-groupcomm-23: https://tools.ietf.org/html/draft-ietf-core-oscore-groupcomm-23 .. _lakers: https://pypi.org/project/lakers-python/ Dependencies ------------ Basic aiocoap works out of the box on Python_ 3.10 or newer (also works on PyPy3_). For full support (DTLS, OSCORE and link-format handling) follow the installation_ instructions as these require additional libraries. aiocoap provides different network backends for different platforms. The most featureful backend is available for Linux, but most operations work on BSDs, Windows and macOS as well. See the FAQ_ for more details. If your library depends on aiocoap, it should pick the required extras (as per installation_) and declare a dependency like ``aiocoap[linkheader,oscore] >= 0.4b2``. .. _Python: https://www.python.org/ .. _PyPy3: http://pypy.org/ .. _FAQ: http://aiocoap.readthedocs.io/en/latest/faq.html Development ----------- aiocoap tries to stay close to PEP8_ recommendations and general best practice, and should thus be easy to contribute to. Bugs (ranging from "design goal" and "wishlist" to typos) are currently tracked in the `github issue tracker`_. 
Pull requests are welcome there; if you start working on larger changes, please coordinate on the issue tracker. Documentation is built using sphinx_ with ``python3 -m sphinx doc/ ${TARGET}``; hacks used there are described in ``./doc/README.doc``. Unit tests are implemented in the ``./tests/`` directory and easiest run using tox_ (but also available through ``python3 -m unittest`` to test the local environment); complete test coverage is aimed for, but not yet complete (and might never be, as the error handling for pathological network partners is hard to trigger with a library designed not to misbehave). The tests are regularly run at the `CI suite at codeberg`_, from where `coverage reports`_ are available. .. _PEP8: http://legacy.python.org/dev/peps/pep-0008/ .. _sphinx: http://sphinx-doc.org/ .. _`github issue tracker`: https://github.com/chrysn/aiocoap/issues .. _`CI suite at codeberg`: https://ci.codeberg.org/repos/12879 .. _`coverage reports`: https://aiocoap.codeberg.page/aiocoap/coverage/ .. _tox: https://tox.readthedocs.io/ Relevant URLs ------------- * https://codeberg.org/aiocoap/aiocoap This is where the latest source code can be found. Generally, this serves as the project web site. * http://aiocoap.readthedocs.org/ Online documentation built from the sources. * https://coap.space/ Further general information on CoAP, the standard documents involved, and other implementations and tools available. Licensing --------- aiocoap is published under the MIT License, and follows the best practice of `reuse.software`_. Files in ``aiocoap/util/vendored/`` may have different (but compatible and OSI approved) licenses. When using aiocoap for a publication, please cite it according to the output of ``./setup.py cite [--bibtex]``. Copyright Christian Amsüss and the aiocoap contributors. aiocoap was originally based on txThings_ by Maciej Wasilak. The full list of aiocoap contributors can be obtained from the version control history. .. Any filtering by a mailmap would apply, but no need to state that unless we do get a mailmap. .. _guidedtour: http://aiocoap.readthedocs.io/en/latest/guidedtour.html .. _examples: http://aiocoap.readthedocs.io/en/latest/examples.html .. _tools: http://aiocoap.readthedocs.io/en/latest/tools.html .. _installation: http://aiocoap.readthedocs.io/en/latest/installation.html .. _reuse.software: https://reuse.software/ .. _txThings: https://github.com/siskin/txThings aiocoap-0.4.12/aiocoap-client000077500000000000000000000005551472205122200160300ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This script can be used to access the aiocoap command line client when setup.py is not used to create an entry point for it (eg. when running from the source directory).""" import aiocoap.cli.client aiocoap.cli.client.sync_main() aiocoap-0.4.12/aiocoap-fileserver000077500000000000000000000005771472205122200167240ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This script can be used to access the aiocoap file server when setup.py is not used to create an entry point for it (eg. 
when running from the source directory)."""

import aiocoap.cli.fileserver

aiocoap.cli.fileserver.FileServerProgram.sync_main()

aiocoap-0.4.12/aiocoap-keygen:
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This script can be used to access the aiocoap key generator when setup.py
is not used to create an entry point for it (eg. when running from the source
directory)."""

import aiocoap.cli.keygen

aiocoap.cli.keygen.main()

aiocoap-0.4.12/aiocoap-proxy:
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This script can be used to access the aiocoap command line proxy when
setup.py is not used to create an entry point for it (eg. when running from
the source directory)."""

import aiocoap.cli.proxy

aiocoap.cli.proxy.sync_main()

aiocoap-0.4.12/aiocoap-rd:
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This script can be used to access the aiocoap command line resource
directory when setup.py is not used to create an entry point for it (eg. when
running from the source directory)."""

import aiocoap.cli.rd

aiocoap.cli.rd.sync_main()

aiocoap-0.4.12/aiocoap/__init__.py:
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""
The aiocoap package is a library that implements CoAP, the `Constrained
Application Protocol`_.

If you are reading this through the Python documentation, be aware that there
is additional documentation available online_ and in the source code's
``doc`` directory.

.. _`Constrained Application Protocol`: http://coap.technology/
.. _online: http://aiocoap.readthedocs.io/

Module contents
---------------

This root module re-exports the most commonly used classes in aiocoap:
:class:`.Context`, :class:`.Message` as well as all commonly used numeric
constants from :mod:`.numbers`; see their respective documentation entries.

The presence of :class:`.Message` and :class:`.Context` in the root module is
stable.

Submodules are described in :doc:`the API overview <../api>`.
"""

import numbers

# flake8 doesn't see through the global re-export
from .numbers import *  # noqa: F401, F403
from .message import Message, NoResponse
from .protocol import Context

__all__ = numbers.__all__ + ["Message", "NoResponse", "Context"]
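To complement the module docstring above: a minimal client built from the re-exported names (an illustrative sketch, not part of the package source; ``coap.me`` is a public CoAP test server picked for the example)::

    import asyncio

    import aiocoap


    async def main():
        # One context serves as the local CoAP endpoint for any number of requests
        ctx = await aiocoap.Context.create_client_context()
        request = aiocoap.Message(code=aiocoap.GET, uri="coap://coap.me/test")
        response = await ctx.request(request).response
        print(response.code, response.payload)


    asyncio.run(main())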
aiocoap-0.4.12/aiocoap/blockwise.py:
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Helpers for the implementation of RFC7959 blockwise transfers"""

from typing import Awaitable, Callable

from . import numbers
from .numbers.optionnumbers import OptionNumber
from .numbers import codes
from .error import ConstructionRenderableError
from .message import Message
from .optiontypes import BlockOption
from .util.asyncio.timeoutdict import TimeoutDict


def _extract_block_key(message):
    """Extract a key that hashes equally for all blocks of a blockwise
    operation from a request message.

    See discussion at .
    """
    return (
        message.remote.blockwise_key,
        message.code,
        message.get_cache_key(
            [
                OptionNumber.BLOCK1,
                OptionNumber.BLOCK2,
                OptionNumber.OBSERVE,
            ]
        ),
    )


class ContinueException(ConstructionRenderableError):
    """Not an error in the CoAP sense, but an error in the processing sense,
    indicating that no complete request message is available for processing.

    It reflects back the request's block1 option when rendered.
    """

    def __init__(self, block1):
        self.block1 = block1

    def to_message(self):
        m = super().to_message()
        m.opt.block1 = self.block1
        return m

    code = codes.CONTINUE


class IncompleteException(ConstructionRenderableError):
    code = codes.REQUEST_ENTITY_INCOMPLETE


class Block1Spool:
    def __init__(self):
        # FIXME: introduce an actual parameter here
        self._assemblies = TimeoutDict(numbers.TransportTuning().MAX_TRANSMIT_WAIT)

    def feed_and_take(self, req: Message) -> Message:
        """Assemble the request into the spool. This either produces a
        reassembled request message, or raises either a Continue or a
        Request Entity Incomplete exception.

        Requests without block1 are simply passed through."""
        if req.opt.block1 is None:
            return req

        block_key = _extract_block_key(req)

        if req.opt.block1.block_number == 0:
            # silently discarding any old incomplete operation
            self._assemblies[block_key] = req
        else:
            try:
                self._assemblies[block_key]._append_request_block(req)
            except KeyError:
                # KeyError: Received unmatched blockwise response
                # ValueError: Failed to assemble -- gaps or overlaps in data
                raise IncompleteException from None

        if req.opt.block1.more:
            raise ContinueException(req.opt.block1)
        else:
            return self._assemblies[block_key]
            # which happens to carry the last block's block1 option


class Block2Cache:
    """A cache of responses to a given block key.

    Use this when result rendering is expensive, not idempotent or has
    varying output -- otherwise it's often better to calculate the full
    response again and serve chunks.
    """

    def __init__(self):
        # FIXME: introduce an actual parameter here
        self._completes = TimeoutDict(numbers.TransportTuning().MAX_TRANSMIT_WAIT)

    async def extract_or_insert(
        self, req: Message, response_builder: Callable[[], Awaitable[Message]]
    ):
        """Given a request message,

        * if it is querying a particular block, look it up in the cache or
          raise Request Entity Incomplete.
        * otherwise,

          * await the response builder
          * return the response if it doesn't need chunking, or
          * return the first chunk and store it for later use
        """
        block_key = _extract_block_key(req)

        if req.opt.block2 is None or req.opt.block2.block_number == 0:
            assembled = await response_builder()
        else:
            try:
                assembled = self._completes[block_key]
            except KeyError:
                raise IncompleteException from None

        if (
            len(assembled.payload) > req.remote.maximum_payload_size
            or req.opt.block2 is not None
            and len(assembled.payload) > req.opt.block2.size
        ):
            self._completes[block_key] = assembled

            block2 = req.opt.block2 or BlockOption.BlockwiseTuple(
                0, 0, req.remote.maximum_block_size_exp
            )
            return assembled._extract_block(
                block2.block_number,
                block2.size_exponent,
                req.remote.maximum_payload_size,
            )
        else:
            return assembled

aiocoap-0.4.12/aiocoap/cli/__init__.py:
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Container module for command line utilities bundled with aiocoap.

These modules are not considered to be a part of the aioCoAP API, and are
thus subject to change even when the project reaches a stable version number.
If you want to use any of that infrastructure, please file a feature request
for stabilization in the project's issue tracker.

The tools themselves are documented in :doc:`/tools`.
"""

aiocoap-0.4.12/aiocoap/cli/client.py:
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""aiocoap-client is a simple command-line tool for interacting with CoAP servers"""

import sys
import asyncio
import argparse
import logging
import subprocess
from pathlib import Path
import shlex

# even though not used directly, this has side effects on the input() function
# used in interactive mode
try:
    import readline  # noqa: F401
except ImportError:
    pass  # that's normal on some platforms, and ok since it's just a usability enhancement

import aiocoap
import aiocoap.defaults
import aiocoap.meta
import aiocoap.proxy.client
from aiocoap.util import contenttype
from aiocoap.util.cli import ActionNoYes
from aiocoap.numbers import ContentFormat


def build_parser():
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument(
        "--non",
        help="Send request as non-confirmable (NON) message",
        action="store_true",
    )
    p.add_argument(
        "-m",
        "--method",
        help="Name or number of request method to use (default: %(default)s)",
        default="GET",
    )
    p.add_argument(
        "--observe", help="Register an observation on the resource", action="store_true"
    )
    p.add_argument(
        "--observe-exec",
        help="Run the specified program whenever the observed resource changes, feeding the response data to its stdin",
        metavar="CMD",
    )
    p.add_argument(
        "--accept",
        help="Content format to request",
        metavar="MIME",
    )
    p.add_argument(
        "--proxy", help="Relay the CoAP request to a proxy for execution", metavar="URI"
    )
    p.add_argument(
        "--payload",
        help="Send X as request payload (eg. with a PUT). If X starts with an '@', its remainder is treated as a file name and read from; '@-' reads from the console. Non-file data may be recoded, see --content-format.",
Non-file data may be recoded, see --content-format.", metavar="X", ) p.add_argument( "--payload-initial-szx", help="Size exponent to limit the initial block's size (0 ≙ 16 Byte, 6 ≙ 1024 Byte)", metavar="SZX", type=int, ) p.add_argument( "--content-format", help="Content format of the --payload data. If a known format is given and --payload has a non-file argument, the payload is converted from CBOR Diagnostic Notation.", metavar="MIME", ) p.add_argument( "--no-set-hostname", help="Suppress transmission of Uri-Host even if the host name is not an IP literal", dest="set_hostname", action="store_false", default=True, ) p.add_argument( "-v", "--verbose", help="Increase the debug output", action="count", ) p.add_argument( "-q", "--quiet", help="Decrease the debug output", action="count", ) # careful: picked before parsing p.add_argument( "--interactive", help="Enter interactive mode", action="store_true", ) p.add_argument( "--credentials", help="Load credentials to use from a given file", type=Path, ) p.add_argument( "--version", action="version", version="%(prog)s " + aiocoap.meta.version ) p.add_argument( "--color", help="Color output (default on TTYs if all required modules are installed)", default=None, action=ActionNoYes, ) p.add_argument( "--pretty-print", help="Pretty-print known content formats (default on TTYs if all required modules are installed)", default=None, action=ActionNoYes, ) p.add_argument( "url", help="CoAP address to fetch", ) return p def configure_logging(verbosity): logging.basicConfig() if verbosity <= -2: logging.getLogger("coap").setLevel(logging.CRITICAL + 1) elif verbosity == -1: logging.getLogger("coap").setLevel(logging.ERROR) elif verbosity == 0: logging.getLogger("coap").setLevel(logging.WARNING) elif verbosity == 1: logging.getLogger("coap").setLevel(logging.INFO) elif verbosity >= 2: logging.getLogger("coap").setLevel(logging.DEBUG) def colored(text, options, tokenlambda): """Apply pygments based coloring if options.color is set. 
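
    For example (a sketch: with color enabled, the return value is the text
    wrapped in ANSI escape sequences; without it, just the plain string)::

        colored("4.04 Not Found", options, lambda token: token.Generic.Error)
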
    tokenlambda is a callback to which pygments.token is passed and which
    returns a token type; this makes it easy to not need to conditionally
    react to pygments' possible absence in all color locations."""
    if not options.color:
        return str(text)

    from pygments.formatters import TerminalFormatter
    from pygments import token, format

    return format(
        [(tokenlambda(token), str(text))],
        TerminalFormatter(),
    )


def incoming_observation(options, response):
    if options.observe_exec:
        p = subprocess.Popen(options.observe_exec, shell=True, stdin=subprocess.PIPE)
        # FIXME this blocks
        p.communicate(response.payload)
    else:
        sys.stdout.write(colored("---", options, lambda token: token.Comment.Preproc))
        if response.code.is_successful():
            present(response, options, file=sys.stderr)
        else:
            sys.stdout.flush()
            print(
                colored(
                    response.code, options, lambda token: token.Token.Generic.Error
                ),
                file=sys.stderr,
            )
            if response.payload:
                present(response, options, file=sys.stderr)


def apply_credentials(context, credentials, errfn):
    if credentials.suffix == ".json":
        import json

        context.client_credentials.load_from_dict(json.load(credentials.open("rb")))
    elif credentials.suffix == ".diag":
        try:
            import cbor_diag
            import cbor2
        except ImportError:
            raise errfn(
                "Loading credentials in CBOR diagnostic format requires cbor2 and cbor_diag package"
            )
        context.client_credentials.load_from_dict(
            cbor2.loads(cbor_diag.diag2cbor(credentials.open().read()))
        )
    else:
        raise errfn(
            "Unknown suffix: %s (expected: .json or .diag)" % (credentials.suffix)
        )


def present(message, options, file=sys.stdout):
    """Write a message payload to the output, pretty printing and/or coloring
    it as configured in the options."""
    if not options.quiet and (message.opt.location_path or message.opt.location_query):
        # FIXME: Percent encoding is completely missing; this would be done
        # most easily with a CRI library
        location_ref = "/" + "/".join(message.opt.location_path)
        if message.opt.location_query:
            location_ref += "?" + "&".join(message.opt.location_query)
        print(
            colored(
                f"Location options indicate new resource: {location_ref}",
                options,
                lambda token: token.Token.Generic.Inserted,
            ),
            file=sys.stderr,
        )

    if not message.payload:
        return

    payload = None

    cf = message.opt.content_format or message.request.opt.content_format
    if cf is not None and cf.is_known():
        mime = cf.media_type
    else:
        mime = "application/octet-stream"

    if options.pretty_print:
        from aiocoap.util.prettyprint import pretty_print

        prettyprinted = pretty_print(message)
        if prettyprinted is not None:
            (infos, mime, payload) = prettyprinted
            if not options.quiet:
                for i in infos:
                    print(
                        colored("# " + i, options, lambda token: token.Comment),
                        file=sys.stderr,
                    )

    color = options.color
    if color:
        from aiocoap.util.prettyprint import lexer_for_mime
        import pygments

        try:
            lexer = lexer_for_mime(mime)
        except pygments.util.ClassNotFound:
            color = False

    if color and payload is None:
        # Coloring requires a unicode-string style payload, either from the
        # mime type or from the pretty printer.
        try:
            payload = message.payload.decode("utf8")
        except UnicodeDecodeError:
            color = False

    if color:
        from pygments.formatters import TerminalFormatter
        from pygments import highlight

        highlit = highlight(
            payload,
            lexer,
            TerminalFormatter(),
        )
        # The TerminalFormatter already adds an end-of-line character, not
        # trying to add one for any missing trailing newlines.
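        # (Illustration: for a JSON payload, highlight('{"on": true}', lexer,
        # TerminalFormatter()) already returns text ending in "\n", which is
        # why the print() below passes end="".)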
print(highlit, file=file, end="") file.flush() else: if payload is None: file.buffer.write(message.payload) if file.isatty() and message.payload[-1:] != b"\n": file.write("\n") else: file.write(payload) if file.isatty() and payload[-1] != "\n": file.write("\n") async def single_request(args, context): parser = build_parser() options = parser.parse_args(args) pretty_print_modules = aiocoap.defaults.prettyprint_missing_modules() if pretty_print_modules and (options.color is True or options.pretty_print is True): parser.error( "Color and pretty printing require the following" " additional module(s) to be installed: %s" % ", ".join(pretty_print_modules) ) if options.color is None: options.color = sys.stdout.isatty() and not pretty_print_modules if options.pretty_print is None: options.pretty_print = sys.stdout.isatty() and not pretty_print_modules configure_logging((options.verbose or 0) - (options.quiet or 0)) try: try: code = getattr( aiocoap.numbers.codes.Code, options.method.upper().replace("IPATCH", "iPATCH"), ) except AttributeError: try: code = aiocoap.numbers.codes.Code(int(options.method)) except ValueError: raise parser.error("Unknown method") if options.credentials is not None: apply_credentials(context, options.credentials, parser.error) request = aiocoap.Message( code=code, mtype=aiocoap.NON if options.non else aiocoap.CON ) request.set_request_uri(options.url, set_uri_host=options.set_hostname) if options.accept: try: request.opt.accept = ContentFormat(int(options.accept)) except ValueError: try: request.opt.accept = ContentFormat.by_media_type(options.accept) except KeyError: raise parser.error("Unknown accept type") if options.observe: request.opt.observe = 0 observation_is_over = asyncio.get_event_loop().create_future() if options.content_format: try: request.opt.content_format = ContentFormat(int(options.content_format)) except ValueError: try: request.opt.content_format = ContentFormat.by_media_type( options.content_format ) except KeyError: raise parser.error("Unknown content format") if options.payload: if options.payload.startswith("@"): filename = options.payload[1:] if filename == "-": f = sys.stdin.buffer else: f = open(filename, "rb") try: request.payload = f.read() except OSError as e: raise parser.error("File could not be opened: %s" % e) else: request_classification = contenttype.categorize( request.opt.content_format.media_type if request.opt.content_format is not None and request.opt.content_format.is_known() else "" ) if request_classification in ("cbor", "cbor-seq"): try: import cbor_diag except ImportError as e: raise parser.error(f"CBOR recoding not available ({e})") try: encoded = cbor_diag.diag2cbor(options.payload) except ValueError as e: raise parser.error( f"Parsing CBOR diagnostic notation failed. Make sure quotation marks are escaped from the shell. Error: {e}" ) if request_classification == "cbor-seq": try: import cbor2 except ImportError as e: raise parser.error( f"CBOR sequence recoding not available ({e})" ) decoded = cbor2.loads(encoded) if not isinstance(decoded, list): raise parser.error( "CBOR sequence recoding requires an array as the top-level element." 
) request.payload = b"".join(cbor2.dumps(d) for d in decoded) else: request.payload = encoded else: request.payload = options.payload.encode("utf8") if options.payload_initial_szx is not None: request.remote.maximum_block_size_exp = options.payload_initial_szx if options.proxy is None: interface = context else: interface = aiocoap.proxy.client.ProxyForwarder(options.proxy, context) requested_uri = request.get_request_uri() requester = interface.request(request) if options.observe: requester.observation.register_errback(observation_is_over.set_result) requester.observation.register_callback( lambda data, options=options: incoming_observation(options, data) ) try: response_data = await requester.response finally: if not requester.response.done(): requester.response.cancel() if options.observe and not requester.observation.cancelled: requester.observation.cancel() response_uri = response_data.get_request_uri() if requested_uri != response_uri: print( colored( f"Response arrived from different address; base URI is {response_uri}", options, lambda token: token.Generic.Inserted, ), file=sys.stderr, ) if response_data.code.is_successful(): present(response_data, options) else: print( colored(response_data.code, options, lambda token: token.Generic.Error), file=sys.stderr, ) present(response_data, options, file=sys.stderr) sys.exit(1) if options.observe: exit_reason = await observation_is_over print("Observation is over: %r" % (exit_reason,), file=sys.stderr) except aiocoap.error.HelpfulError as e: print(str(e), file=sys.stderr) extra_help = e.extra_help( hints=dict( original_uri=options.url, request=request, ) ) if extra_help: print("Debugging hint:", extra_help, file=sys.stderr) sys.exit(1) # Fallback while not all backends raise NetworkErrors except OSError as e: text = str(e) if not text: text = repr(e) if not text: # eg ConnectionResetError flying out of a misconfigured SSL server text = type(e) print( "Warning: OS errors should not be raised this way any more.", file=sys.stderr, ) # not telling what to do precisely: the form already tells users to # include `aiocoap.cli.defaults` output, which is exactly what we # need. 
print( f"Even if the cause of the error itself is clear, please file an issue at {aiocoap.meta.bugreport_uri}.", file=sys.stderr, ) print("Error:", text, file=sys.stderr) sys.exit(1) async def single_request_with_context(args): """Wrapper around single_request until sync_main gets made fully async, and async context managers are used to manage contexts.""" context = await aiocoap.Context.create_client_context() try: await single_request(args, context) finally: await context.shutdown() interactive_expecting_keyboard_interrupt = None async def interactive(): global interactive_expecting_keyboard_interrupt interactive_expecting_keyboard_interrupt = asyncio.get_event_loop().create_future() context = await aiocoap.Context.create_client_context() while True: try: # when http://bugs.python.org/issue22412 is resolved, use that instead line = await asyncio.get_event_loop().run_in_executor( None, lambda: input("aiocoap> ") ) except EOFError: line = "exit" line = shlex.split(line) if not line: continue if line in (["help"], ["?"]): line = ["--help"] if line in (["quit"], ["q"], ["exit"]): break current_task = asyncio.create_task( single_request(line, context=context), name="Interactive prompt command %r" % line, ) interactive_expecting_keyboard_interrupt = ( asyncio.get_event_loop().create_future() ) done, pending = await asyncio.wait( [current_task, interactive_expecting_keyboard_interrupt], return_when=asyncio.FIRST_COMPLETED, ) if current_task not in done: current_task.cancel() else: try: await current_task except SystemExit as e: if e.code != 0: print("Exit code: %d" % e.code, file=sys.stderr) continue except Exception as e: print("Unhandled exception raised: %s" % (e,)) await context.shutdown() def sync_main(args=None): # interactive mode is a little messy, that's why this is not using aiocoap.util.cli yet if args is None: args = sys.argv[1:] if "--interactive" not in args: try: asyncio.run(single_request_with_context(args)) except KeyboardInterrupt: sys.exit(3) else: if len(args) != 1: print( "No other arguments must be specified when entering interactive mode", file=sys.stderr, ) sys.exit(1) loop = asyncio.get_event_loop() task = loop.create_task( interactive(), name="Interactive prompt", ) while not task.done(): try: loop.run_until_complete(task) except KeyboardInterrupt: if not interactive_expecting_keyboard_interrupt.done(): interactive_expecting_keyboard_interrupt.set_result(None) except SystemExit: continue # asyncio/tasks.py(242) raises those after setting them as results, but we particularly want them back in the loop if __name__ == "__main__": sync_main() aiocoap-0.4.12/aiocoap/cli/common.py000066400000000000000000000135521472205122200172400ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Common options of aiocoap command line utilities Unlike those in :mod:`aiocoap.util.cli`, these are particular to aiocoap functionality. Typical use is like this:: >>> p = argparse.ArgumentParser() >>> p.add_argument('--foo') # doctest: +ELLIPSIS _... >>> add_server_arguments(p) >>> opts = p.parse_args(['--bind', '[::1]:56830', '--foo=bar']) You can then either pass opts directly to :func:`server_context_from_arguments`, or split up the arguments:: >>> server_opts = extract_server_arguments(opts) >>> opts Namespace(foo='bar') Then, server_opts can be passed to `server_context_from_arguments`. 
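
For illustration, a complete (if minimal) server start-up could then look like
this -- a sketch rather than a doctest, since it needs a running asyncio event
loop and ``mysite`` stands in for an arbitrary resource tree::

    import asyncio

    async def main():
        context = await server_context_from_arguments(mysite, server_opts)
        # serve until cancelled
        await asyncio.get_running_loop().create_future()
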
""" import sys import argparse from pathlib import Path from ..util import hostportsplit from ..protocol import Context from ..credentials import CredentialsMap class _HelpBind(argparse.Action): def __init__(self, *args, **kwargs): kwargs["nargs"] = 0 super().__init__(*args, **kwargs) def __call__(self, parser, namespace, values, option_string=None): print( "The --bind option can take either of the following formats:" "\n :port -- bind to a given port on all available interfaces" "\n host -- bind to default ports on a given host name (can also be an IP address; IPv6 addresses need to be in square brackets)" "\n host:port -- bind only to a specific port on a given host" "\n\nBy default, the server will bind to all available addressess and protocols on the respective default ports." "\nIf a port is specified, and (D)TLS support is available, those protocols will be bound to one port higher (as are the default ports, 5683 for CoAP and 5684 for CoAP over (D)TLS)." "\n", file=sys.stderr, ) parser.exit() def add_server_arguments(parser): """Add the --bind option to an argparse parser""" def hostportsplit_helper(arg): """Wrapper around hostportsplit that gives better error messages than 'invalid hostportsplit value'""" if arg.isnumeric(): raise parser.error( f"Invalid argument to --bind. Did you mean --bind :{arg}?" ) try: return hostportsplit(arg) except ValueError: raise parser.error( f"Invalid argument to --bind. Did you mean --bind '[{arg}]'?" if arg.count(":") >= 2 and "[" not in arg else " See --help-bind for details." ) parser.add_argument( "--bind", help="Host and/or port to bind to (see --help-bind for details)", type=hostportsplit_helper, default=None, ) parser.add_argument( "--credentials", help="JSON file pointing to credentials for the server's identity/ies.", type=Path, ) # These are to be eventually migrated into credentials parser.add_argument( "--tls-server-certificate", help="TLS certificate (chain) to present to connecting clients (in PEM format)", metavar="CRT", ) parser.add_argument( "--tls-server-key", help="TLS key to load that supports the server certificate", metavar="KEY", ) parser.add_argument("--help-bind", help=argparse.SUPPRESS, action=_HelpBind) def extract_server_arguments(namespace): """Given the output of .parse() on a ArgumentParser that had add_server_arguments called with it, remove the resulting option in-place from namespace and return them in a separate namespace.""" server_arguments = type(namespace)() server_arguments.bind = namespace.bind server_arguments.tls_server_certificate = namespace.tls_server_certificate server_arguments.tls_server_key = namespace.tls_server_key server_arguments.credentials = namespace.credentials del namespace.bind del namespace.tls_server_certificate del namespace.tls_server_key del namespace.credentials del namespace.help_bind return server_arguments async def server_context_from_arguments(site, namespace, **kwargs): """Create a bound context like :meth:`.aiocoap.Context.create_server_context`, but take the bind and TLS settings from a namespace returned from an argparse parser that has had :func:`add_server_arguments` run on it. 
""" if namespace.tls_server_certificate: import ssl ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain( certfile=namespace.tls_server_certificate, keyfile=namespace.tls_server_key ) ssl_context.set_alpn_protocols(["coap"]) ssl_context.sni_callback = lambda obj, name, context: setattr( obj, "indicated_server_name", name ) else: ssl_context = None if namespace.credentials: server_credentials = CredentialsMap() try: import cbor2 import cbor_diag server_credentials.load_from_dict( cbor2.loads(cbor_diag.diag2cbor(namespace.credentials.open().read())) ) except ImportError: import json server_credentials.load_from_dict( json.load(namespace.credentials.open("rb")) ) # FIXME: could be non-OSCORE as well -- can we build oscore_sitewrapper # in such a way it only depends on the OSCORE dependencies if there are # actual identities present? from aiocoap.oscore_sitewrapper import OscoreSiteWrapper site = OscoreSiteWrapper(site, server_credentials) else: server_credentials = None return await Context.create_server_context( site, namespace.bind, _ssl_context=ssl_context, server_credentials=server_credentials, **kwargs, ) aiocoap-0.4.12/aiocoap/cli/defaults.py000066400000000000000000000046171472205122200175610ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This helper script can be used to easily inspect aiocoap's environment autodetection (ie. whether all modules required for particular subsystems are available, losely corresponding to the "features" made available through setup.py); run it as `python3 -m aiocoap.cli.defaults`.""" import sys from aiocoap.meta import version from aiocoap.defaults import ( has_reuse_port, get_default_clienttransports, get_default_servertransports, missing_module_functions, ) import argparse import os def main(argv=None): p = argparse.ArgumentParser(description=__doc__) # Allow passing this in as AIOCOAP_DEFAULTS_EXPECT_ALL=1 via the # environment, as that's easier to set in tox p.add_argument( "--expect-all", help="Exit with an error unless all subsystems are available", action="store_true", default=os.environ.get("AIOCOAP_DEFAULTS_EXPECT_ALL") == "1", ) p.add_argument("--version", action="version", version=version) args = p.parse_args(sys.argv[1:] if argv is None else argv) error = 0 print("Python version: %s" % sys.version) print("aiocoap version: %s" % version) print("Modules missing for subsystems:") for name, f in missing_module_functions.items(): missing = f() if missing and args.expect_all: error = 1 print( " %s: %s" % ( name, "everything there" if not missing else "missing " + ", ".join(missing), ) ) print("Python platform: %s" % sys.platform) print( "Default server transports: %s" % ":".join(get_default_servertransports(use_env=False)) ) print("Selected server transports: %s" % ":".join(get_default_servertransports())) print( "Default client transports: %s" % ":".join(get_default_clienttransports(use_env=False)) ) print("Selected client transports: %s" % ":".join(get_default_clienttransports())) print( "SO_REUSEPORT available (default, selected): %s, %s" % (has_reuse_port(use_env=False), has_reuse_port()) ) if error: print( "Exiting unsuccessfully because --expect-all was set and not all extras are available." 
        )

    return error


if __name__ == "__main__":
    sys.exit(main())
aiocoap-0.4.12/aiocoap/cli/fileserver.py000066400000000000000000000347671472205122200201250ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""A simple file server that serves the contents of a given directory in a
read-only fashion via CoAP. It provides directory listings, and guesses the
media type of files it serves.

It follows the conventions set out for the [kitchen-sink fileserver],
optionally with write support, with some caveats:

* There are some time-of-check / time-of-use race conditions around the
  handling of ETags, which could probably only be resolved if heavy file
  system locking were used. Some of these races are a consequence of this
  server implementing atomic writes through renames. As long as no other
  processes access the working area, and aiocoap is run single threaded, the
  races should not be visible to CoAP users.

* ETags are constructed based on information in the file's (or directory's)
  `stat` output -- this avoids reading the whole file on overwrites etc.

  This means that forcing the MTime to stay constant across a change would
  confuse clients.

* While GET requests on files are served block by block (reading only what is
  being requested), PUT operations are spooled in memory rather than on the
  file system.

* Directory creation and deletion is not supported at the moment.

[kitchen-sink fileserver]: https://www.ietf.org/archive/id/draft-amsuess-core-coap-kitchensink-00.html#name-coap
"""

import argparse
import asyncio
from pathlib import Path
import logging
from stat import S_ISREG, S_ISDIR
import mimetypes
import tempfile
import hashlib

import aiocoap
import aiocoap.error as error
import aiocoap.numbers.codes as codes
from aiocoap.resource import Resource
from aiocoap.util.cli import AsyncCLIDaemon
from aiocoap.cli.common import (
    add_server_arguments,
    server_context_from_arguments,
    extract_server_arguments,
)
from aiocoap.resourcedirectory.client.register import Registerer


class InvalidPathError(error.ConstructionRenderableError):
    code = codes.BAD_REQUEST


class TrailingSlashMissingError(error.ConstructionRenderableError):
    code = codes.BAD_REQUEST
    message = "Error: Not a file (add trailing slash)"


class AbundantTrailingSlashError(error.ConstructionRenderableError):
    code = codes.BAD_REQUEST
    message = "Error: Not a directory (strip the trailing slash)"


class NoSuchFile(error.NotFound):  # just for the better error msg
    message = "Error: File not found!"


class PreconditionFailed(error.ConstructionRenderableError):
    code = codes.PRECONDITION_FAILED


class FileServer(Resource, aiocoap.interfaces.ObservableResource):
    # Resource is only used to give the nice render_xxx methods

    def __init__(self, root, log, *, write=False):
        super().__init__()
        self.root = root
        self.log = log
        self.write = write

        self._observations = {}  # path -> [last_stat, [callbacks]]

    # While we don't have a .well-known/core resource that would need this, we
    # still allow registration at an RD and thus need something in here.
    #
    # As we can't possibly register all files in here, we're just registering a
    # single link to the index.
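    #
    # For orientation, the resulting single record (as also served at
    # /.well-known/core) looks like:
    #
    #     </>;ct=40;rt="tag:chrysn@fsfe.org,2022:fileserver"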
    def get_resources_as_linkheader(self):
        # Resource type indicates draft-amsuess-core-coap-kitchensink-00 file
        # service, might use registered name later
        return '</>;ct=40;rt="tag:chrysn@fsfe.org,2022:fileserver"'

    async def check_files_for_refreshes(self):
        while True:
            await asyncio.sleep(10)

            for path, data in list(self._observations.items()):
                last_stat, callbacks = data
                if last_stat is None:
                    continue  # this hit before the original response even triggered
                try:
                    new_stat = path.stat()
                except Exception:
                    new_stat = False

                def relevant(s):
                    # A failed stat (s == False) compares as plain False here,
                    # so a file that disappeared still registers as a change
                    return s and (s.st_ino, s.st_dev, s.st_size, s.st_mtime, s.st_ctime)

                if relevant(new_stat) != relevant(last_stat):
                    self.log.info("New stat for %s", path)
                    data[0] = new_stat
                    for cb in callbacks:
                        cb()

    def request_to_localpath(self, request):
        path = request.opt.uri_path
        if any("/" in p or p in (".", "..") for p in path):
            raise InvalidPathError()

        return self.root / "/".join(path)

    async def needs_blockwise_assembly(self, request):
        if request.code != codes.GET:
            return True
        if (
            not request.opt.uri_path
            or request.opt.uri_path[-1] == ""
            or request.opt.uri_path == (".well-known", "core")
        ):
            return True
        # Only GETs to non-directory access handle it explicitly
        return False

    @staticmethod
    def hash_stat(stat):
        # The subset that the author expects to (possibly) change if the file changes
        data = (stat.st_mtime_ns, stat.st_ctime_ns, stat.st_size)
        return hashlib.sha256(repr(data).encode("ascii")).digest()[:8]

    async def render_get(self, request):
        if request.opt.uri_path == (".well-known", "core"):
            return aiocoap.Message(
                payload=str(self.get_resources_as_linkheader()).encode("utf8"),
                content_format=40,
            )

        path = self.request_to_localpath(request)
        try:
            st = path.stat()
        except FileNotFoundError:
            raise NoSuchFile()

        etag = self.hash_stat(st)

        if etag in request.opt.etags:
            response = aiocoap.Message(code=codes.VALID)
        else:
            if S_ISDIR(st.st_mode):
                response = await self.render_get_dir(request, path)
            elif S_ISREG(st.st_mode):
                response = await self.render_get_file(request, path)

        response.opt.etag = etag
        return response

    async def render_put(self, request):
        if not self.write:
            return aiocoap.Message(code=codes.FORBIDDEN)

        if not request.opt.uri_path or not request.opt.uri_path[-1]:
            # Attempting to write to a directory
            return aiocoap.Message(code=codes.BAD_REQUEST)

        path = self.request_to_localpath(request)

        if request.opt.if_none_match:
            # FIXME: This is locally a race condition; files could be created
            # in the "x" mode, but then how would writes to them be made
            # atomic?
            if path.exists():
                raise PreconditionFailed()
        if request.opt.if_match and b"" not in request.opt.if_match:
            # FIXME: This is locally a race condition; not sure how to prevent
            # that.
            try:
                st = path.stat()
            except FileNotFoundError:
                # Absent file in particular doesn't have the expected ETag
                raise PreconditionFailed()
            if self.hash_stat(st) not in request.opt.if_match:
                raise PreconditionFailed()

        # Is there a way to get both "Use umask for file creation (or the
        # existing file's permissions)" logic *and* atomic file creation on
        # portable UNIX? If not, all we could do would be emulate the logic of
        # just opening the file (by interpreting umask and the existing file's
        # permissions), and that fails horribly if there are ACLs in place that
        # bite rsync in https://bugzilla.samba.org/show_bug.cgi?id=9377.
        #
        # If there is not, secure temporary file creation is as good as
        # anything else.
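        #
        # Sketch of the sequence implemented below: spool the payload into a
        # uniquely named temporary file in the destination directory, then
        # rename it over the destination. As both names are on the same file
        # system, the rename is atomic on POSIX.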
        with tempfile.NamedTemporaryFile(dir=path.parent, delete=False) as spool:
            spool.write(request.payload)
            temppath = Path(spool.name)
        try:
            temppath.rename(path)
        except Exception:
            temppath.unlink()
            raise

        st = path.stat()
        etag = self.hash_stat(st)

        return aiocoap.Message(code=codes.CHANGED, etag=etag)

    async def render_delete(self, request):
        if not self.write:
            return aiocoap.Message(code=codes.FORBIDDEN)

        if not request.opt.uri_path or not request.opt.uri_path[-1]:
            # Deleting directories is not supported as they can't be created
            return aiocoap.Message(code=codes.BAD_REQUEST)

        path = self.request_to_localpath(request)

        if request.opt.if_match and b"" not in request.opt.if_match:
            # FIXME: This is locally a race condition; not sure how to prevent
            # that.
            try:
                st = path.stat()
            except FileNotFoundError:
                # Absent file in particular doesn't have the expected ETag
                raise NoSuchFile()
            if self.hash_stat(st) not in request.opt.if_match:
                raise PreconditionFailed()

        try:
            path.unlink()
        except FileNotFoundError:
            raise NoSuchFile()

        return aiocoap.Message(code=codes.DELETED)

    async def render_get_dir(self, request, path):
        if request.opt.uri_path and request.opt.uri_path[-1] != "":
            raise TrailingSlashMissingError()

        self.log.info("Serving directory %s", path)

        response = ""
        for f in path.iterdir():
            rel = f.relative_to(self.root)
            if f.is_dir():
                response += "<%s/>;ct=40," % rel
            else:
                response += "<%s>," % rel

        return aiocoap.Message(payload=response[:-1].encode("utf8"), content_format=40)

    async def render_get_file(self, request, path):
        if request.opt.uri_path and request.opt.uri_path[-1] == "":
            raise AbundantTrailingSlashError()

        self.log.info("Serving file %s", path)

        block_in = request.opt.block2 or aiocoap.optiontypes.BlockOption.BlockwiseTuple(
            0, 0, 6
        )

        with path.open("rb") as f:
            f.seek(block_in.start)
            data = f.read(block_in.size + 1)

        if path in self._observations and self._observations[path][0] is None:
            # FIXME this is not *completely* precise, as it might mean that in
            # a (Observation 1 established, check loop run, file modified,
            # observation 2 established) situation, observation 2 could receive
            # a needless update on the next check, but it's simple and errs on
            # the side of caution.
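            #
            # Record the baseline stat; check_files_for_refreshes() compares
            # future stat results against it and triggers the registered
            # observation callbacks when it changes.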
self._observations[path][0] = path.stat() guessed_type, _ = mimetypes.guess_type(str(path)) block_out = aiocoap.optiontypes.BlockOption.BlockwiseTuple( block_in.block_number, len(data) > block_in.size, block_in.size_exponent ) content_format = None if guessed_type is not None: try: content_format = aiocoap.numbers.ContentFormat.by_media_type( guessed_type ) except KeyError: if guessed_type and guessed_type.startswith("text/"): content_format = aiocoap.numbers.ContentFormat.TEXT return aiocoap.Message( payload=data[: block_in.size], block2=block_out, content_format=content_format, observe=request.opt.observe, ) async def add_observation(self, request, serverobservation): path = self.request_to_localpath(request) # the actual observable flag will only be set on files anyway, the # library will cancel the file observation accordingly if the requested # thing is not actually a file -- so it can be done unconditionally here last_stat, callbacks = self._observations.setdefault(path, [None, []]) cb = serverobservation.trigger callbacks.append(cb) serverobservation.accept( lambda self=self, path=path, cb=cb: self._observations[path][1].remove(cb) ) class FileServerProgram(AsyncCLIDaemon): async def start(self): logging.basicConfig() self.registerer = None p = self.build_parser() opts = p.parse_args() server_opts = extract_server_arguments(opts) await self.start_with_options(**vars(opts), server_opts=server_opts) @staticmethod def build_parser(): p = argparse.ArgumentParser(description=__doc__) p.add_argument( "-v", "--verbose", help="Be more verbose (repeat to debug)", action="count", dest="verbosity", default=0, ) p.add_argument( "--register", help="Register with a Resource directory", metavar="RD-URI", nargs="?", default=False, ) p.add_argument("--write", help="Allow writes by any user", action="store_true") p.add_argument( "path", help="Root directory of the server", nargs="?", default=".", type=Path, ) add_server_arguments(p) return p async def start_with_options( self, path, verbosity=0, register=False, server_opts=None, write=False ): log = logging.getLogger("fileserver") coaplog = logging.getLogger("coap-server") if verbosity == 1: log.setLevel(logging.INFO) elif verbosity == 2: log.setLevel(logging.DEBUG) coaplog.setLevel(logging.INFO) elif verbosity >= 3: log.setLevel(logging.DEBUG) coaplog.setLevel(logging.DEBUG) server = FileServer(path, log, write=write) if server_opts is None: self.context = await aiocoap.Context.create_server_context(server) else: self.context = await server_context_from_arguments(server, server_opts) self.refreshes = asyncio.create_task( server.check_files_for_refreshes(), name="Refresh on %r" % (path,), ) if register is not False: if register is not None and register.count("/") != 2: log.warn("Resource directory does not look like a host-only CoAP URI") self.registerer = Registerer(self.context, rd=register, lt=60) if verbosity == 2: self.registerer.log.setLevel(logging.INFO) elif verbosity >= 3: self.registerer.log.setLevel(logging.DEBUG) async def shutdown(self): if self.registerer is not None: await self.registerer.shutdown() self.refreshes.cancel() await self.context.shutdown() # used by doc/aiocoap_index.py build_parser = FileServerProgram.build_parser if __name__ == "__main__": FileServerProgram.sync_main() aiocoap-0.4.12/aiocoap/cli/keygen.py000066400000000000000000000042401472205122200172240ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """A tool for creating key pairs 
for use in credentials

Note that this tool operates on secrets stored in unencrypted files, protected
by restrictively set file system permissions. While this is common practice
with many tools in the UNIX world, it might be surprising to users coming from
multi-factor environments."""

import aiocoap.meta
import aiocoap.defaults

import argparse
from pathlib import Path


def build_parser():
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument(
        "--version", action="version", version="%(prog)s " + aiocoap.meta.version
    )

    subparsers = p.add_subparsers(required=True, dest="subcommand")
    generate = subparsers.add_parser(
        "generate",
        help="This generates an"
        " EDHOC key, stores it in a key file that is only readable by the"
        " user, and prints the corresponding public key information in a way"
        " suitable for inclusion in credentials maps.",
    )
    generate.add_argument("keyfile", help="File to store the secret key in", type=Path)
    generate.add_argument(
        "--kid", help="Hexadecimal key identifier", type=bytes.fromhex
    )
    generate.add_argument("--subject", help="Text placed in the CCS", type=str)

    return p


def main():
    p = build_parser()
    args = p.parse_args()

    missmods = aiocoap.defaults.oscore_missing_modules()
    if missmods:
        p.error(
            f"Dependencies missing, consider installing aiocoap as \"aiocoap[oscore]\" or \"aiocoap[all]\". Missing modules: {', '.join(missmods)}"
        )

    from aiocoap import edhoc
    import cbor2
    import cbor_diag

    if args.subcommand == "generate":
        try:
            key = edhoc.CoseKeyForEdhoc.generate(args.keyfile)
        except FileExistsError:
            raise p.error("Output file already exists")

        public = key.as_ccs(args.kid, args.subject)

        print(cbor_diag.cbor2diag(cbor2.dumps(public, canonical=True), pretty=False))
    else:
        raise RuntimeError(f"Unimplemented subcommand {args.subcommand=}")


if __name__ == "__main__":
    main()
aiocoap-0.4.12/aiocoap/cli/proxy.py000066400000000000000000000153631472205122200171310ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""a plain CoAP proxy that can work both as forward and as reverse proxy"""

import sys
import argparse

import aiocoap
from aiocoap.proxy.server import (
    ForwardProxyWithPooledObservations,
    ProxyWithPooledObservations,
    NameBasedVirtualHost,
    SubdomainVirtualHost,
    SubresourceVirtualHost,
    UnconditionalRedirector,
)
from aiocoap.util.cli import AsyncCLIDaemon
from aiocoap.cli.common import add_server_arguments, server_context_from_arguments


def build_parser():
    p = argparse.ArgumentParser(description=__doc__)

    mode = p.add_argument_group(
        "mode", "Required argument for setting the operation mode"
    )
    mode.add_argument("--forward", help="Run as forward proxy", action="store_true")
    mode.add_argument("--reverse", help="Run as reverse proxy", action="store_true")

    details = p.add_argument_group(
        "details", "Options that govern how requests go in and out"
    )
    add_server_arguments(details)
    details.add_argument(
        "--register",
        help="Register with a Resource directory",
        metavar="RD-URI",
        nargs="?",
        default=False,
    )
    details.add_argument(
        "--register-as",
        help="Endpoint name (with possibly a domain after a dot) to register as",
        metavar="EP[.D]",
        default=None,
    )
    details.add_argument(
        "--register-proxy",
        help="Ask the RD to serve as a reverse proxy. Note that this is only practical for --unconditional or --pathbased reverse proxies.",
        action="store_true",
    )

    r = p.add_argument_group(
        "Rules",
        description="Sequence of forwarding rules "
        "that, if matched by a request, specify a forwarding destination. "
        "Destinations can be prefixed to change their behavior: With an "
        "'@' sign, they are treated as forward proxies. With a '!' sign, "
        "the destination is set as Uri-Host.",
    )

    class TypedAppend(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            if getattr(namespace, self.dest) is None:
                setattr(namespace, self.dest, [])
            getattr(namespace, self.dest).append((option_string, values))

    r.add_argument(
        "--namebased",
        help="If Uri-Host matches NAME, route to DEST",
        metavar="NAME:DEST",
        action=TypedAppend,
        dest="r",
    )
    r.add_argument(
        "--subdomainbased",
        help="If Uri-Host is anything.NAME, route to DEST",
        metavar="NAME:DEST",
        action=TypedAppend,
        dest="r",
    )
    r.add_argument(
        "--pathbased",
        help="If a requested path starts with PATH, split that part off and route to DEST",
        metavar="PATH:DEST",
        action=TypedAppend,
        dest="r",
    )
    r.add_argument(
        "--unconditional",
        help="Route all requests not previously matched to DEST",
        metavar="DEST",
        action=TypedAppend,
        dest="r",
    )

    return p


def destsplit(dest):
    use_as_proxy = False
    rewrite_uri_host = False
    if dest.startswith("!"):
        dest = dest[1:]
        rewrite_uri_host = True
    if dest.startswith("@"):
        dest = dest[1:]
        use_as_proxy = True
    return dest, rewrite_uri_host, use_as_proxy


class Main(AsyncCLIDaemon):
    async def start(self, args=None):
        parser = build_parser()
        options = parser.parse_args(args if args is not None else sys.argv[1:])
        self.options = options

        if not options.forward and not options.reverse:
            raise parser.error("At least one of --forward and --reverse must be given.")

        self.outgoing_context = await aiocoap.Context.create_client_context()
        if options.forward:
            proxy = ForwardProxyWithPooledObservations(self.outgoing_context)
        else:
            proxy = ProxyWithPooledObservations(self.outgoing_context)

        for kind, data in options.r or ():
            if kind in ("--namebased", "--subdomainbased"):
                try:
                    name, dest = data.split(":", 1)
                except Exception:
                    raise parser.error("%s needs NAME:DEST as arguments" % kind)
                dest, rewrite_uri_host, use_as_proxy = destsplit(dest)
                if rewrite_uri_host and kind == "--subdomainbased":
                    parser.error(
                        "The flag '!' makes no sense for subdomain based redirection as the subdomain data would be lost"
                    )
                r = (
                    NameBasedVirtualHost
                    if kind == "--namebased"
                    else SubdomainVirtualHost
                )(name, dest, rewrite_uri_host, use_as_proxy)
            elif kind == "--pathbased":
                try:
                    path, dest = data.split(":", 1)
                except Exception:
                    raise parser.error("--pathbased needs PATH:DEST as arguments")
                r = SubresourceVirtualHost(path.split("/"), dest)
            elif kind == "--unconditional":
                dest, rewrite_uri_host, use_as_proxy = destsplit(data)
                if rewrite_uri_host:
                    parser.error(
                        "The flag '!' makes no sense for unconditional redirection as the host name data would be lost"
                    )
                r = UnconditionalRedirector(dest, use_as_proxy)
            else:
                raise AssertionError("Unknown redirector kind")

            proxy.add_redirector(r)

        self.proxy_context = await server_context_from_arguments(proxy, options)

        if options.register is not False:
            from aiocoap.resourcedirectory.client.register import Registerer

            params = {}
            if options.register_as:
                ep, _, d = options.register_as.partition(".")
                params["ep"] = ep
                if d:
                    params["d"] = d
            if options.register_proxy:
                # FIXME: Check this in discovery
                params["proxy"] = "on"
            # FIXME: Construct this from settings (path-based), and forward results
            proxy.get_resources_as_linkheader = lambda: ""

            self.registerer = Registerer(
                self.proxy_context,
                rd=options.register,
                lt=60,
                registration_parameters=params,
            )

    async def shutdown(self):
        if self.options.register is not False:
            await self.registerer.shutdown()
        await self.outgoing_context.shutdown()
        await self.proxy_context.shutdown()


sync_main = Main.sync_main

if __name__ == "__main__":
    # if you want to run this using `python3 -m`, see http://bugs.python.org/issue22480
    sync_main()
aiocoap-0.4.12/aiocoap/cli/rd.py000066400000000000000000000745161472205122200163600ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""A plain CoAP resource directory according to RFC9176_

Known Caveats:

    * It is very permissive. Among other things, no security is implemented.

    * This may and will make exotic choices about discoverable paths wherever
      it can (see StandaloneResourceDirectory documentation)

    * Split-horizon is not implemented correctly

    * Unless enforced by security (ie. not so far), endpoint and sector names
      (ep, d) are not checked for their lengths or other validity.

    * Simple registrations don't cache .well-known/core contents

.. _RFC9176: https://datatracker.ietf.org/doc/html/rfc9176
"""

import string
import sys
import logging
import asyncio
import argparse
from urllib.parse import urljoin
import itertools

import aiocoap
from aiocoap.resource import (
    Site,
    Resource,
    ObservableResource,
    PathCapable,
    WKCResource,
    link_format_to_message,
)
from aiocoap.proxy.server import Proxy
from aiocoap.util.cli import AsyncCLIDaemon
import aiocoap.util.uri
from aiocoap import error
from aiocoap.cli.common import add_server_arguments, server_context_from_arguments
from aiocoap.numbers import codes, ContentFormat
import aiocoap.proxy.server

from aiocoap.util.linkformat import Link, LinkFormat, parse

from ..util.linkformat import link_header

IMMUTABLE_PARAMETERS = ("ep", "d", "proxy")


class NoActiveRegistration(error.ConstructionRenderableError):
    code = codes.PROXYING_NOT_SUPPORTED
    message = "no registration with that name"


def query_split(msg):
    """Split a message's query up into (key, [*value]) pairs from a
    ?key=value&key2=value2 style Uri-Query options.

    Keys without an `=` sign will have a None value, and all values are
    expressed as an (at least 1-element) list of repetitions.

    >>> m = aiocoap.Message(uri="coap://example.com/foo?k1=v1.1&k1=v1.2&obs")
    >>> query_split(m)
    {'k1': ['v1.1', 'v1.2'], 'obs': [None]}
    """
    result = {}
    for q in msg.opt.uri_query:
        if "=" not in q:
            k = q
            # matching the representation in link_header
            v = None
        else:
            k, v = q.split("=", 1)
        result.setdefault(k, []).append(v)
    return result


def pop_single_arg(query, name):
    """Out of query which is the output of query_split, pick the single value
    at the key name, raise a suitable BadRequest on error, or return None if
    nothing is there.
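
    For example:

    >>> q = {'lt': ['3600'], 'rt': ['temp hum']}
    >>> pop_single_arg(q, 'lt')
    '3600'
    >>> pop_single_arg(q, 'missing') is None
    True
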
The value is removed from the query dictionary.""" if name not in query: return None if len(query[name]) > 1: raise error.BadRequest("Multiple values for %r" % name) return query.pop(name)[0] class CommonRD: # "Key" here always means an (ep, d) tuple. entity_prefix = ("reg",) def __init__(self, proxy_domain=None, log=None): super().__init__() self.log = log or logging.getLogger("resource-directory") self._by_key = {} # key -> Registration self._by_path = {} # path -> Registration self._updated_state_cb = [] self.proxy_domain = proxy_domain self.proxy_active = {} # uri_host -> Remote class Registration: # FIXME: split this into soft and hard grace period (where the former # may be 0). the node stays discoverable for the soft grace period, but # the registration stays alive for a (possibly much longer, at least # +lt) hard grace period, in which any action on the reg resource # reactivates it -- preventing premature reuse of the resource URI grace_period = 15 @property def href(self): return "/" + "/".join(self.path) def __init__( self, static_registration_parameters, path, network_remote, delete_cb, update_cb, registration_parameters, proxy_host, setproxyremote_cb, ): # note that this can not modify d and ep any more, since they are # already part of the key and possibly the path self.path = path self.links = LinkFormat([]) self._delete_cb = delete_cb self._update_cb = update_cb self.registration_parameters = static_registration_parameters self.lt = 90000 self.base_is_explicit = False self.proxy_host = proxy_host self._setproxyremote_cb = setproxyremote_cb self.update_params(network_remote, registration_parameters, is_initial=True) def update_params( self, network_remote, registration_parameters, is_initial=False ): """Set the registration_parameters from the parsed query arguments, update any effects of them, and trigger any observation updates if required (the typical ones don't because their registration_parameters are {} and all it does is restart the lifetime counter)""" if any(k in ("ep", "d") for k in registration_parameters.keys()): # The ep and d of initial registrations are already popped out raise error.BadRequest("Parameters 'd' and 'ep' can not be updated") # Not in use class "R" or otherwise conflict with common parameters if any( k in ("page", "count", "rt", "href", "anchor") for k in registration_parameters.keys() ): raise error.BadRequest("Unsuitable parameter for registration") if ( is_initial or not self.base_is_explicit ) and "base" not in registration_parameters: # check early for validity to avoid side effects of requests # answered with 4.xx if self.proxy_host is None: try: network_base = network_remote.uri except error.AnonymousHost: raise error.BadRequest("explicit base required") else: # FIXME: Advertise alternative transports (write alternative-transports) network_base = "coap://" + self.proxy_host if is_initial: # technically might be a re-registration, but we can't catch that at this point actual_change = True else: actual_change = False # Don't act while still checking set_lt = None set_base = None if "lt" in registration_parameters: try: set_lt = int(pop_single_arg(registration_parameters, "lt")) except ValueError: raise error.BadRequest("lt must be numeric") if "base" in registration_parameters: set_base = pop_single_arg(registration_parameters, "base") if set_lt is not None and self.lt != set_lt: actual_change = True self.lt = set_lt if set_base is not None and (is_initial or self.base != set_base): actual_change = True self.base = set_base 
                self.base_is_explicit = True

            if not self.base_is_explicit and (is_initial or self.base != network_base):
                self.base = network_base
                actual_change = True

            if any(
                v != self.registration_parameters.get(k)
                for (k, v) in registration_parameters.items()
            ):
                self.registration_parameters.update(registration_parameters)
                actual_change = True

            if is_initial:
                self._set_timeout()
            else:
                self.refresh_timeout()

            if actual_change:
                self._update_cb()

            if self.proxy_host:
                self._setproxyremote_cb(network_remote)

        def delete(self):
            self.timeout.cancel()
            self._update_cb()
            self._delete_cb()

        def _set_timeout(self):
            delay = self.lt + self.grace_period
            # workaround for python issue20493

            async def longwait(delay, callback):
                await asyncio.sleep(delay)
                callback()

            self.timeout = asyncio.create_task(
                longwait(delay, self.delete),
                name="RD Timeout for %r" % self,
            )

        def refresh_timeout(self):
            self.timeout.cancel()
            self._set_timeout()

        def get_host_link(self):
            attr_pairs = []
            for k, values in self.registration_parameters.items():
                for v in values:
                    attr_pairs.append([k, v])
            return Link(
                href=self.href, attr_pairs=attr_pairs, base=self.base, rt="core.rd-ep"
            )

        def get_based_links(self):
            """Produce a LinkFormat object that represents all statements in
            the registration, resolved to the registration's base (and thus
            suitable for comparing anchors)."""
            result = []
            for link in self.links.links:
                href = urljoin(self.base, link.href)
                if "anchor" in link:
                    absanchor = urljoin(self.base, link.anchor)
                    data = [(k, v) for (k, v) in link.attr_pairs if k != "anchor"] + [
                        ["anchor", absanchor]
                    ]
                else:
                    data = link.attr_pairs + [["anchor", urljoin(href, "/")]]
                result.append(Link(href, data))
            return LinkFormat(result)

    async def shutdown(self):
        pass

    def register_change_callback(self, callback):
        """Ask RD to invoke the callback whenever any of the RD state changed"""
        # This has no unregister equivalent as it's only called by the lookup
        # resources that are expected to be live for the remainder of the
        # program, like the Registry is.
        self._updated_state_cb.append(callback)

    def _updated_state(self):
        for cb in self._updated_state_cb:
            cb()

    def _new_pathtail(self):
        for i in itertools.count(1):
            # In the spirit of making legal but unconventional choices (see
            # StandaloneResourceDirectory documentation): Whoever strips or
            # ignores trailing slashes shall have a hard time keeping
            # registrations alive.
            path = (str(i), "")
            if path not in self._by_path:
                return path

    def initialize_endpoint(self, network_remote, registration_parameters):
        # copying around for later use in static, but not checking again
        # because reading them from the original will already have screamed by
        # the time this is used
        static_registration_parameters = {
            k: v
            for (k, v) in registration_parameters.items()
            if k in IMMUTABLE_PARAMETERS
        }

        ep = pop_single_arg(registration_parameters, "ep")
        if ep is None:
            raise error.BadRequest("ep argument missing")
        d = pop_single_arg(registration_parameters, "d")

        proxy = pop_single_arg(registration_parameters, "proxy")
        if proxy is not None and proxy != "on":
            raise error.BadRequest("Unsupported proxy value")

        key = (ep, d)

        if static_registration_parameters.pop("proxy", None):
            # FIXME: 'ondemand' is done unconditionally
            if not self.proxy_domain:
                raise error.BadRequest("Proxying not enabled")

            def is_usable(s):
                # Host names per RFC1123 (which is stricter than what RFC3986 would allow).
                #
                # Only supporting lowercase names so as to avoid ambiguities due
                # to hostname capitalization normalization (otherwise it'd need
                # to be first-registered-first-served)
                return s and all(
                    x in string.ascii_lowercase + string.digits + "-" for x in s
                )

            if not is_usable(ep) or (d is not None and not is_usable(d)):
                raise error.BadRequest(
                    "Proxying only supported for limited ep and d set (lowercase, digits, dash)"
                )

            proxy_host = ep
            if d is not None:
                proxy_host += "." + d
            proxy_host = proxy_host + "." + self.proxy_domain
        else:
            proxy_host = None

        # No more errors should fly out from below here, as side effects start now

        try:
            oldreg = self._by_key[key]
        except KeyError:
            path = self._new_pathtail()
        else:
            path = oldreg.path[len(self.entity_prefix) :]
            oldreg.delete()

        # this was the brutal way towards idempotency (delete and re-create).
        # if any actions based on that are implemented here, they have yet to
        # decide whether they'll treat idempotent recreations like deletions or
        # just ignore them unless something otherwise unchangeable (ep, d)
        # changes.

        def delete():
            del self._by_path[path]
            del self._by_key[key]
            self.proxy_active.pop(proxy_host, None)

        def setproxyremote(remote):
            self.proxy_active[proxy_host] = remote

        reg = self.Registration(
            static_registration_parameters,
            self.entity_prefix + path,
            network_remote,
            delete,
            self._updated_state,
            registration_parameters,
            proxy_host,
            setproxyremote,
        )

        self._by_key[key] = reg
        self._by_path[path] = reg

        return reg

    def get_endpoints(self):
        return self._by_key.values()


def link_format_from_message(message):
    """Convert a response message into a LinkFormat object

    This expects an explicit media type set on the response (or was
    explicitly requested)
    """
    certain_format = message.opt.content_format
    if certain_format is None and hasattr(message, "request"):
        certain_format = message.request.opt.accept
    try:
        if certain_format == ContentFormat.LINKFORMAT:
            return parse(message.payload.decode("utf8"))
        else:
            raise error.UnsupportedMediaType()
    except (UnicodeDecodeError, link_header.ParseException):
        raise error.BadRequest()


class ThingWithCommonRD:
    def __init__(self, common_rd):
        super().__init__()
        self.common_rd = common_rd

        if isinstance(self, ObservableResource):
            self.common_rd.register_change_callback(self.updated_state)


class DirectoryResource(ThingWithCommonRD, Resource):
    ct = link_format_to_message.supported_ct  # type: ignore
    rt = "core.rd"

    #: Issue a custom warning when registrations come in via this interface
    registration_warning = None

    async def render_post(self, request):
        links = link_format_from_message(request)

        registration_parameters = query_split(request)

        if self.registration_warning:
            # Conveniently placed so it could be changed to something setting
            # additional registration_parameters instead
            self.common_rd.log.warning(
                "Warning from registration: %s", self.registration_warning
            )

        regresource = self.common_rd.initialize_endpoint(
            request.remote, registration_parameters
        )
        regresource.links = links

        return aiocoap.Message(code=aiocoap.CREATED, location_path=regresource.path)


class RegistrationResource(Resource):
    """The resource object wrapping a registration is just a very thin and
    ephemeral object; all those methods could just as well be added to
    Registration with `s/self.reg/self/g`, making RegistrationResource(reg) =
    reg (or handled in a single RegistrationDispatchSite), but this is kept
    here for better separation of model and interface."""

    def __init__(self, registration):
        super().__init__()
        self.reg = registration

    async def render_get(self, request):
        return link_format_to_message(request, self.reg.links)

    def _update_params(self, msg):
        query = query_split(msg)
        self.reg.update_params(msg.remote, query)

    async def render_post(self, request):
        self._update_params(request)

        if request.opt.content_format is not None or request.payload:
            raise error.BadRequest("Registration update with body not specified")

        return aiocoap.Message(code=aiocoap.CHANGED)

    async def render_put(self, request):
        # this is not mentioned in the current spec, but seems to make sense
        links = link_format_from_message(request)

        self._update_params(request)
        self.reg.links = links

        return aiocoap.Message(code=aiocoap.CHANGED)

    async def render_delete(self, request):
        self.reg.delete()

        return aiocoap.Message(code=aiocoap.DELETED)


class RegistrationDispatchSite(ThingWithCommonRD, Resource, PathCapable):
    async def render(self, request):
        try:
            entity = self.common_rd._by_path[request.opt.uri_path]
        except KeyError:
            raise error.NotFound

        entity = RegistrationResource(entity)

        return await entity.render(request.copy(uri_path=()))


def _paginate(candidates, query):
    page = pop_single_arg(query, "page")
    count = pop_single_arg(query, "count")

    try:
        candidates = list(candidates)
        if page is not None:
            candidates = candidates[int(page) * int(count) :]
        if count is not None:
            candidates = candidates[: int(count)]
    except (KeyError, ValueError):
        raise error.BadRequest("page requires count, and both must be ints")

    return candidates


def _link_matches(link, key, condition):
    return any(k == key and condition(v) for (k, v) in link.attr_pairs)


class EndpointLookupInterface(ThingWithCommonRD, ObservableResource):
    ct = link_format_to_message.supported_ct  # type: ignore
    rt = "core.rd-lookup-ep"

    async def render_get(self, request):
        query = query_split(request)

        candidates = self.common_rd.get_endpoints()

        for search_key, search_values in query.items():
            if search_key in ("page", "count"):
                continue  # filtered last

            for search_value in search_values:
                if search_value is not None and search_value.endswith("*"):

                    def matches(x, start=search_value[:-1]):
                        return x.startswith(start)

                else:

                    def matches(x, search_value=search_value):
                        return x == search_value

                if search_key in ("if", "rt"):

                    def matches(x, original_matches=matches):
                        return any(original_matches(v) for v in x.split())

                if search_key == "href":
                    candidates = (
                        c
                        for c in candidates
                        if matches(c.href)
                        or any(matches(r.href) for r in c.get_based_links().links)
                    )
                    continue

                candidates = (
                    c
                    for c in candidates
                    if (
                        search_key in c.registration_parameters
                        and any(
                            matches(x) for x in c.registration_parameters[search_key]
                        )
                    )
                    or any(
                        _link_matches(r, search_key, matches)
                        for r in c.get_based_links().links
                    )
                )

        candidates = _paginate(candidates, query)

        result = [c.get_host_link() for c in candidates]

        return link_format_to_message(request, LinkFormat(result))


class ResourceLookupInterface(ThingWithCommonRD, ObservableResource):
    ct = link_format_to_message.supported_ct  # type: ignore
    rt = "core.rd-lookup-res"

    async def render_get(self, request):
        query = query_split(request)

        eps = self.common_rd.get_endpoints()
        candidates = ((e, c) for e in eps for c in e.get_based_links().links)

        for search_key, search_values in query.items():
            if search_key in ("page", "count"):
                continue  # filtered last

            for search_value in search_values:
                if search_value is not None and search_value.endswith("*"):

                    def matches(x, start=search_value[:-1]):
                        return x.startswith(start)

                else:

                    def matches(x, search_value=search_value):
                        return x == search_value

                if search_key in ("if", "rt"):

                    def matches(x, original_matches=matches):
                        return any(original_matches(v) for v in x.split())

                if search_key == "href":
                    candidates = (
                        (e, c)
                        for (e, c) in candidates
                        if matches(c.href)
                        or matches(
                            e.href
                        )  # FIXME: They SHOULD give this as relative as we do, but don't have to
                    )
                    continue

                candidates = (
                    (e, c)
                    for (e, c) in candidates
                    if _link_matches(c, search_key, matches)
                    or (
                        search_key in e.registration_parameters
                        and any(
                            matches(x) for x in e.registration_parameters[search_key]
                        )
                    )
                )

        # strip endpoint
        candidates = (c for (e, c) in candidates)

        candidates = _paginate(candidates, query)

        # strip needless anchors
        candidates = [
            Link(link.href, [(k, v) for (k, v) in link.attr_pairs if k != "anchor"])
            if dict(link.attr_pairs)["anchor"] == urljoin(link.href, "/")
            else link
            for link in candidates
        ]

        return link_format_to_message(request, LinkFormat(candidates))


class SimpleRegistration(ThingWithCommonRD, Resource):
    #: Issue a custom warning when registrations come in via this interface
    registration_warning = None

    def __init__(self, common_rd, context):
        super().__init__(common_rd)
        self.context = context

    async def render_post(self, request):
        query = query_split(request)
        if "base" in query:
            raise error.BadRequest("base is not allowed in simple registrations")

        await self.process_request(
            network_remote=request.remote,
            registration_parameters=query,
        )

        return aiocoap.Message(code=aiocoap.CHANGED)

    async def process_request(self, network_remote, registration_parameters):
        if "proxy" not in registration_parameters:
            try:
                network_base = network_remote.uri
            except error.AnonymousHost:
                raise error.BadRequest("explicit base required")

            fetch_address = network_base + "/.well-known/core"
            get = aiocoap.Message(uri=fetch_address)
        else:
            # ignoring that there might be a base present; that will err later
            get = aiocoap.Message(uri_path=[".well-known", "core"])
            get.remote = network_remote
        get.code = aiocoap.GET
        get.opt.accept = ContentFormat.LINKFORMAT

        # not trying to catch anything here -- the errors are most likely well renderable into the final response
        response = await self.context.request(get).response_raising
        links = link_format_from_message(response)

        if self.registration_warning:
            # Conveniently placed so it could be changed to something setting
            # additional registration_parameters instead
            self.common_rd.log.warning(
                "Warning from registration: %s", self.registration_warning
            )
        registration = self.common_rd.initialize_endpoint(
            network_remote, registration_parameters
        )
        registration.links = links


class SimpleRegistrationWKC(WKCResource, SimpleRegistration):
    def __init__(self, listgenerator, common_rd, context):
        super().__init__(
            listgenerator=listgenerator, common_rd=common_rd, context=context
        )
        self.registration_warning = "via .well-known/core"


class StandaloneResourceDirectory(Proxy, Site):
    """A site that contains all function sets of the CoAP Resource Directory

    To prevent or show ossification of example paths in the specification, all
    function set paths are configurable and default to values that are
    different from the specification (but still recognizable)."""

    rd_path = ("resourcedirectory", "")
    ep_lookup_path = ("endpoint-lookup", "")
    res_lookup_path = ("resource-lookup", "")

    def __init__(self, context, lwm2m_compat=None, **kwargs):
        if lwm2m_compat is True:
            self.rd_path = ("rd",)

        # Double inheritance: works as everything up of Proxy has the same interface
        super().__init__(outgoing_context=context)

        common_rd = CommonRD(**kwargs)

        self.add_resource(
            [".well-known", "core"],
            SimpleRegistrationWKC(
                self.get_resources_as_linkheader,
                common_rd=common_rd,
context=context ), ) self.add_resource( [".well-known", "rd"], SimpleRegistration(common_rd=common_rd, context=context), ) self.add_resource(self.rd_path, DirectoryResource(common_rd=common_rd)) if list(self.rd_path) != ["rd"] and lwm2m_compat is None: second_dir_resource = DirectoryResource(common_rd=common_rd) second_dir_resource.registration_warning = "via unannounced /rd" # Hide from listing second_dir_resource.get_link_description = lambda *args: None self.add_resource(["rd"], second_dir_resource) self.add_resource( self.ep_lookup_path, EndpointLookupInterface(common_rd=common_rd) ) self.add_resource( self.res_lookup_path, ResourceLookupInterface(common_rd=common_rd) ) self.add_resource( common_rd.entity_prefix, RegistrationDispatchSite(common_rd=common_rd) ) self.common_rd = common_rd def apply_redirection(self, request): # Fully overriding so we don't need to set an add_redirector try: actual_remote = self.common_rd.proxy_active[request.opt.uri_host] except KeyError: self.common_rd.log.info( "Request to proxied host %r rejected: No such registration", request.opt.uri_host, ) raise NoActiveRegistration self.common_rd.log.debug( "Forwarding request to %r to remote %s", request.opt.uri_host, actual_remote ) request.remote = actual_remote request.opt.uri_host = None return request async def shutdown(self): await self.common_rd.shutdown() async def render(self, request): # Full override switching which of the parents' behavior to choose if ( self.common_rd.proxy_domain is not None and request.opt.uri_host is not None and request.opt.uri_host.endswith("." + self.common_rd.proxy_domain) ): # in self.common_rd.proxy_active: return await Proxy.render(self, request) else: return await Site.render(self, request) # See render; necessary on all functions thanks to https://github.com/chrysn/aiocoap/issues/251 async def needs_blockwise_assembly(self, request): if request.opt.uri_host in self.common_rd.proxy_active: return await Proxy.needs_blockwise_assembly(self, request) else: return await Site.needs_blockwise_assembly(self, request) async def add_observation(self, request, serverobservation): if request.opt.uri_host in self.common_rd.proxy_active: return await Proxy.add_observation(self, request, serverobservation) else: return await Site.add_observation(self, request, serverobservation) def build_parser(): p = argparse.ArgumentParser(description=__doc__) add_server_arguments(p) return p class Main(AsyncCLIDaemon): async def start(self, args=None): parser = build_parser() parser.add_argument( "--proxy-domain", help="Enable the RD proxy extension. Example: `proxy.example.net` will produce base URIs like `coap://node1.proxy.example.net/`. 
The names must all resolve to an address the RD is bound to.", type=str, ) parser.add_argument( "--lwm2m-compat", help="Compatibility mode for LwM2M clients that can not perform some discovery steps (moving the registration resource to `/rd`)", action="store_true", default=None, ) parser.add_argument( "--no-lwm2m-compat", help="Disable all compatibility with LwM2M clients that can not perform some discovery steps (not even accepting registrations at `/rd` with warnings)", action="store_false", dest="lwm2m_compat", ) parser.add_argument( "--verbose", help="Increase debug log output (repeat for increased verbosity)", action="count", default=0, ) options = parser.parse_args(args if args is not None else sys.argv[1:]) # Putting in an empty site to construct the site with a context self.context = await server_context_from_arguments(None, options) self.log = logging.getLogger("resource-directory") if options.verbose >= 2: self.log.setLevel(logging.DEBUG) elif options.verbose == 1: self.log.setLevel(logging.INFO) self.site = StandaloneResourceDirectory( context=self.context, proxy_domain=options.proxy_domain, lwm2m_compat=options.lwm2m_compat, log=self.log, ) self.context.serversite = self.site async def shutdown(self): await self.site.shutdown() await self.context.shutdown() sync_main = Main.sync_main if __name__ == "__main__": sync_main() aiocoap-0.4.12/aiocoap/credentials.cddl000066400000000000000000000035031472205122200177470ustar00rootroot00000000000000; While using binary strings on several occasions, this format is designed to ; be usable also from JSON and TOML by allowing alternatives to the bstr, and by ; not using numeric keys. ; ; While CDDL places the compatible-bstr items in particular positions, it ; also takes care to never have any maps where {"ascii" / "hex": tstr} is an ; acceptable value. Consequently, when loading, it is fine to just replace anything that looks like a compatible-bstr with a bstr. credentials-map = { * key => entry } ; key = tstr key = uripattern / credential-reference uripattern = tstr .regexp "[^:].*" entry = credential-reference / credential credential-reference = tstr .regexp ":.*" credential = any-of / all-of / dtls / oscore / edhoc-oscore any-of = { "any" => [+ credential-reference] } all-of = { "all" => [+ credential-reference] } dtls = { "dtls" => { "client-identity" => compatible-bstr, "psk" => compatible-bstr, }} oscore = { "oscore" => { "basedir" => tstr, }} edhoc-oscore = { "edhoc-oscore" => { "suite" => int, "method" => int, ? "own_cred_style" => ("by-key-id" / "by-value"), ? "peer_cred" => any, ? "own_cred" => any, ? "private_key_file" => tstr, ; Direct inclusion is generally not recommended, but when building an ; ephemeral identity, this is convenient. ? "private_key" => any, }} ; This is for serializations like JSON that can't easily express binary ; strings.
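; For illustration (all values invented, not taken from any real deployment),
; a JSON credentials file matching this CDDL could contain:
;
;   {
;     "coap://rd.example.com/*": {"oscore": {"basedir": "contexts/rd/"}},
;     "coaps://device.example.com/*": {"dtls": {
;       "client-identity": {"ascii": "client1"},
;       "psk": {"hex": "73-65-63-72-65-74"}}}
;   }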
compatible-bstr = bstr / encoded-ascii / encoded-hex encoded-ascii = { "ascii": tstr } encoded-hex = { "hex": tstr } ; Currently accepts hex while stripping out any ; whitespace, dash or colon delimiters ; It would be nice to have this actually used during destructuring in python, ; maybe like this: ; ; @parser.construct('encoded-ascii') ; def encoded_ascii(ascii: str): ; return ascii.decode('ascii') aiocoap-0.4.12/aiocoap/credentials.py000066400000000000000000000344771472205122200175070ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module describes how security credentials are expressed in aiocoap, how security protocols (TLS, DTLS, OSCOAP) can store and access their key material, and for which URIs they are used. For consistency, mappings between accessible resources and their credentials are always centered around URIs. This is slightly atypical, because a client will typically use a particular set of credentials for all operations on one server, while a server first loads all available credentials and then filters out whether the client may actually access a resource per-path, but it works with full URIs (or patterns thereof) just as well. That approach allows using more similar structures both on the server and the client, and works smoothly for virtual hosting, firewalling and clients accessing resources with varying credentials. Still, client and server credentials are kept apart, lest a server open up (and potentially reveal) to a PSK set it is only configured to use as a client. While client credentials already have their place in :attr:`aiocoap.protocol.Context.client_credentials`, server credentials are not in use at a standardized location yet because there is only code in the OSCORE plug tests that can use it so far. Library developer notes ~~~~~~~~~~~~~~~~~~~~~~~ This whole module currently relies on a mixture of introspection and manual parsing of the JSON-ish tree. A preferred expression of the same would rely on the credentials.cddl description and build an object tree from that, but the author is unaware of any existing CDDL Python implementation. That might also ease porting to platforms that don't support inspect like micropython does. """ import re import inspect from typing import Optional, List, Tuple """ server: { 'coaps://mysite/*': { 'dtls-psk' (or other granularity): { 'psk': 'abcd' }}, 'coap://mysite/*': { 'oscore': { 'basedir': 'my-basedir/' } }, 'coap://myothersite/firmware': ':myotherkey', 'coap://myothersite/reset': ':myotherkey', 'coap://othersite*': { 'unprotected': true }, ':myotherkey': { 'oscore': { 'basedir': 'my-basedir/' } } } server can of course just say it doesn't want to have the Site handle it and just say '*': { 'unprotected': true }, add some ':foo': {'dtls-psk': ...} entries (so communication can be established in the first place) and let individual resources decide whether they return 4.01 or something else. client can be the same with different implied role, or have something like client: { 'coap://myothersite/*': ':myotherkey', ... } in future also server: { 'coaps://mysite/*': { 'dtls-cert': {'key': '...pem', 'cert': '...crt'} } } client: { '*': { 'dtls-cert': { 'ca': '/etc/ssl/...' 
} } } or more complex ones: server: { 'coaps://myothersite/wellprotected': { 'all': [ ':mydtls', ':myotherkey' ]} 'coaps://myothersite/*': { 'any': [ ':mydtls', ':myotherkey' ]} } """ class CredentialsLoadError(ValueError): """Raised by functions that create a CredentialsMap or its parts from simple data structures""" class CredentialsMissingError(RuntimeError): """Raised when no suitable credentials can be found for a message, or credentials are found but inapplicable to a transport's security mechanisms.""" class CredentialReference: def __init__(self, target, map): if not target.startswith(":"): raise CredentialsLoadError( "Credential references must start with a colon (':')" ) self.target = target self.map = map # FIXME either generalize this with getattr, or introduce a function to # resolve any indirect credentials to a particular instance. def as_dtls_psk(self): return self.map[self.target].as_dtls_psk() class _Listish(list): @classmethod def from_item(cls, v): if not isinstance(v, list): raise CredentialsLoadError("%s goes with a list" % cls.__name__) return cls(v) class AnyOf(_Listish): pass class AllOf(_Listish): pass def _call_from_structureddata(constructor, name, init_data): if not isinstance(init_data, dict): raise CredentialsLoadError("%s goes with an object" % name) init_data = {k.replace("-", "_"): v for (k, v) in init_data.items()} sig = inspect.signature(constructor) checked_items = {} for k, v in init_data.items(): try: annotation = sig.parameters[k].annotation except KeyError: # let this raise later in binding checked_items[k] = object() annotation = "attribute does not exist" if isinstance(v, dict) and "ascii" in v: if len(v) != 1: raise CredentialsLoadError("ASCII objects can only have one element.") try: v = v["ascii"].encode("ascii") except UnicodeEncodeError: raise CredentialsLoadError( "Elements of the ASCII object can not be represented in ASCII, please use binary or hex representation." ) if isinstance(v, dict) and "hex" in v: if len(v) != 1: raise CredentialsLoadError("Hex objects can only have one element.") try: v = bytes.fromhex( v["hex"].replace("-", "").replace(" ", "").replace(":", "") ) except ValueError as e: raise CredentialsLoadError( "Hex object can not be read: %s" % (e.args[0]) ) # Not using isinstance because I found no way to extract the type # information from an Optional/Union again; this whole thing works # only for strings and ints anyway, so why not. # # The second or-branch is for functions from modules with __future__.annotations if annotation not in (type(v), Optional[type(v)]) and annotation not in ( type(v).__name__, "Optional[%s]" % type(v).__name__, ): # explicitly not excluding inspect._empty here: constructors # need to be fully annotated raise CredentialsLoadError( "Type mismatch in attribute %s of %s: expected %s, got %r" % (k, name, annotation, v) ) checked_items[k] = v try: bound = sig.bind(**checked_items) except TypeError as e: raise CredentialsLoadError("%s: %s" % (name, e.args[0])) return constructor(*bound.args, **bound.kwargs) class _Objectish: @classmethod def from_item(cls, init_data): return _call_from_structureddata(cls, cls.__name__, init_data) class DTLS(_Objectish): def __init__(self, psk: bytes, client_identity: bytes): self.psk = psk self.client_identity = client_identity def as_dtls_psk(self): return (self.client_identity, self.psk) class TLSCert(_Objectish): """Indicates that a client can use the given certificate file to authenticate the server.
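For example (values invented), a client credentials map could contain::

    {"coaps+tcp://example.com/*": {"tlscert": {"certfile": "/path/to/ca.crt"}}}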
Can only be used with 'coaps+tcp://HOSTINFO/*' and 'coaps+tcp://*' forms. """ def __init__(self, certfile: str): self.certfile = certfile def as_ssl_params(self): """Generate parameters suitable for passing via ** to ssl.create_default_context when purpose is already set""" return {"cafile": self.certfile} def import_filesystem_security_context(): from .oscore import FilesystemSecurityContext return FilesystemSecurityContext def import_edhoc_credential_pair(): from . import edhoc return edhoc.EdhocCredentials _re_cache = {} class CredentialsMap(dict): """ FIXME: outdated, rewrite when usable A CredentialsMap stores, for any URI template and operation, which security contexts are sufficient to perform the operation on a matching URI. The same context can be used both by the server and the client, where the client uses the information on allowed client credentials to decide which credentials to present, and the information on allowed server credentials to decide whether the server can be trusted. Conversely, the server typically loads all available server credentials at startup, and then uses the client credentials list to decide whether to serve the request.""" def load_from_dict(self, d): """Populate the map from a dictionary, which would typically have been loaded from a JSON/YAML file and needs to match the CDDL in credentials.cddl. Running this multiple times will overwrite individual entries in the map.""" for k, v in d.items(): if v is None: if k in self: del self[k] else: self[k] = self._item_from_dict(v) # FIXME only works that way for OSCORE clients self[k].authenticated_claims = [k] def _item_from_dict(self, v): if isinstance(v, str): return CredentialReference(v, self) elif isinstance(v, dict): try: ((key, value),) = v.items() except ValueError: # this follows how Rust Enums are encoded in serde JSON raise CredentialsLoadError( "Items in a credentials map must have exactly one key" " (found %s)" % (",".join(v.keys()) or "empty") ) try: type_ = self._class_map[key] except KeyError: raise CredentialsLoadError("Unknown credential type: %s" % key) return type_().from_item(value) # Phrased as callbacks so they can import lazily. We make sure that all are # still present so that an entry that is not loadable raises an error # rather than possibly being ignored. _class_map = { "dtls": lambda: DTLS, "oscore": import_filesystem_security_context, "tlscert": lambda: TLSCert, "any-of": lambda: AnyOf, "all-of": lambda: AllOf, "edhoc-oscore": import_edhoc_credential_pair, } @staticmethod def _wildcard_match(searchterm, pattern): if pattern not in _re_cache: _re_cache[pattern] = re.compile(re.escape(pattern).replace("\\*", ".*")) return _re_cache[pattern].fullmatch(searchterm) is not None # used by a client def credentials_from_request(self, msg): """Return the most specific match to a request message. Matching is currently based on wildcards, but not yet very well thought out.""" uri = msg.get_request_uri() for i in range(1000): for k, v in sorted(self.items(), key=lambda x: len(x[0]), reverse=True): if self._wildcard_match(uri, k): if isinstance(v, str): uri = v break return v else: raise CredentialsMissingError("No suitable credentials for %s" % uri) else: raise CredentialsLoadError( "Search for suitable credentials for %s exceeds recursion limit" % uri ) def ssl_client_context(self, scheme, hostinfo): """Return an SSL client context as configured for the given request scheme and hostinfo (no full message is to be processed here, as connections are used across requests to the same origin).
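A minimal usage sketch (hostinfo made up, ``credentials_map`` being a :class:`CredentialsMap`)::

    sslctx = credentials_map.ssl_client_context("coaps+tcp", "example.com")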
If no credentials are configured, this returns None (for which the user may need to fill in ssl.create_default_context() if None is not already a good indicator for the eventual consumer to use the default).""" ssl_params = {} tlscert = self.get("%s://%s/*" % (scheme, hostinfo), None) if tlscert is None: tlscert = self.get("%s://*" % scheme, None) # FIXME: handle Any or All if they include TLSCert, or deprecate them if not isinstance(tlscert, TLSCert): return ssl_params = tlscert.as_ssl_params() if ssl_params: import ssl return ssl.create_default_context(**ssl_params) # used by a server def find_oscore(self, unprotected): # FIXME: this is not constant-time as it should be, but too much in # flux to warrant optimization # FIXME: duplicate contexts for being tried out are not supported yet. for item in self.values(): if not hasattr(item, "get_oscore_context_for"): continue ctx = item.get_oscore_context_for(unprotected) if ctx is not None: return ctx raise KeyError() def find_all_used_contextless_oscore_kid(self) -> set[bytes]: all_kid = set() for item in self.values(): if not hasattr(item, "find_all_used_contextless_oscore_kid"): continue all_kid |= item.find_all_used_contextless_oscore_kid() return all_kid def find_edhoc_by_id_cred_peer(self, id_cred_peer) -> Tuple[bytes, List[str]]: for label, item in self.items(): if not hasattr(item, "find_edhoc_by_id_cred_peer"): continue # typically returning self credential = item.find_edhoc_by_id_cred_peer(id_cred_peer) if credential is not None: return (credential, [label]) from . import edhoc import cbor2 for label, item in self.items(): if ( isinstance(item, edhoc.EdhocCredentials) and item.peer_cred_is_unauthenticated() ): id_cred_peer = cbor2.loads(id_cred_peer) if isinstance(id_cred_peer, dict) and 14 in id_cred_peer: return (cbor2.dumps(id_cred_peer[14], canonical=True), [label]) raise KeyError def find_dtls_psk(self, identity): # FIXME similar to find_oscore for entry, item in self.items(): if not hasattr(item, "as_dtls_psk"): continue psk_id, psk = item.as_dtls_psk() if psk_id != identity: continue # FIXME is returning the entry name a sane value to later put into # authenticated_claims? OSCORE does something different. return (psk, entry) raise KeyError() aiocoap-0.4.12/aiocoap/defaults.py000066400000000000000000000171461472205122200170130ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module contains helpers that inspect available modules and platform specifics to give sane values to aiocoap defaults. All of this should eventually be overridable by other libraries wrapping/using aiocoap and by applications using aiocoap; however, these overrides do not happen in the defaults module but where these values are actually accessed, so this module is considered internal to aiocoap and not part of the API. The ``_missing_modules`` functions are helpers for inspecting what is reasonable to expect to work. They can influence default values, but should not be used in the rest of the code for feature checking (just raise the ImportErrors) unless it's directly user-visible ("You configured OSCORE key material, but OSCORE needs the following unavailable modules") or in the test suite to decide which tests to skip.
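For example (illustrative invocation; the script name is made up), the
transport selection below can be overridden from the environment before an
application starts::

    AIOCOAP_CLIENT_TRANSPORT=tcpclient:udp6 python3 my_client.py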
""" import os import socket import sys import warnings try: import pyodide # noqa: F401 import js # noqa: F401 except ImportError: is_pyodide = False else: is_pyodide = True def get_default_clienttransports(*, loop=None, use_env=True): """Return a list of transports that should be connected when a client context is created. If an explicit ``AIOCOAP_CLIENT_TRANSPORT`` environment variable is set, it is read as a colon separated list of transport names. By default, a DTLS mechanism will be picked if the required modules are available, and a UDP transport will be selected depending on whether the full udp6 transport is known to work. """ if use_env and "AIOCOAP_CLIENT_TRANSPORT" in os.environ: yield from os.environ["AIOCOAP_CLIENT_TRANSPORT"].split(":") return if not oscore_missing_modules(): yield "oscore" if not dtls_missing_modules(): yield "tinydtls" yield "tcpclient" yield "tlsclient" if not ws_missing_modules(): yield "ws" if sys.platform != "linux": # udp6 was never reported to work on anything but linux; would happily # add more platforms. yield "simple6" return # on android it seems that it's only the AI_V4MAPPED that causes trouble, # that should be managable in udp6 too. yield "udp6" return def get_default_servertransports(*, loop=None, use_env=True): """Return a list of transports that should be connected when a server context is created. If an explicit ``AIOCOAP_SERVER_TRANSPORT`` environment variable is set, it is read as a colon separated list of transport names. By default, a DTLS mechanism will be picked if the required modules are available, and a UDP transport will be selected depending on whether the full udp6 transport is known to work. Both a simple6 and a simplesocketserver will be selected when udp6 is not available, and the simple6 will be used for any outgoing requests, which the simplesocketserver could serve but is worse at. """ if use_env and "AIOCOAP_SERVER_TRANSPORT" in os.environ: yield from os.environ["AIOCOAP_SERVER_TRANSPORT"].split(":") return if not oscore_missing_modules(): yield "oscore" if not dtls_missing_modules(): if "AIOCOAP_DTLSSERVER_ENABLED" in os.environ: yield "tinydtls_server" yield "tinydtls" yield "tcpserver" yield "tcpclient" yield "tlsserver" yield "tlsclient" if not ws_missing_modules(): yield "ws" if sys.platform != "linux": # udp6 was never reported to work on anything but linux; would happily # add more platforms. yield "simple6" yield "simplesocketserver" return # on android it seems that it's only the AI_V4MAPPED that causes trouble, # that should be managable in udp6 too. yield "udp6" return def has_reuse_port(*, use_env=True): """Return true if the platform indicates support for SO_REUSEPORT. Can be overridden by explicitly setting ``AIOCOAP_REUSE_PORT`` to 1 or 0.""" if use_env and os.environ.get("AIOCOAP_REUSE_PORT"): return bool(int(os.environ["AIOCOAP_REUSE_PORT"])) return hasattr(socket, "SO_REUSEPORT") def use_ai_v4mapped_emulation(): """This used to indicate when ai_v4mapped emulation was used. Given it is not used at all any more, the function is deprecated.""" warnings.warn( "AI_V4MAPPED emulation is not used any more at all", warnings.DeprecationWarning ) return False # FIXME: If there were a way to check for the extras defined in setup.py, or to link these lists to what is descibed there, that'd be great. 
def dtls_missing_modules(): """Return a list of modules that are missing in order to use the DTLS transport, or a false value if everything is present""" missing = [] try: from DTLSSocket import dtls # noqa: F401 except ImportError: missing.append("DTLSSocket") return missing def oscore_missing_modules(): """Return a list of modules that are missing in order to use OSCORE, or a false value if everything is present""" missing = [] try: import cbor2 # noqa: F401 except ImportError: missing.append("cbor2") try: import cryptography # noqa: F401 except ImportError: missing.append("cryptography") else: try: from cryptography.hazmat.primitives.ciphers.aead import AESCCM AESCCM(b"x" * 16, 8) except (cryptography.exceptions.UnsupportedAlgorithm, ImportError): missing.append("a version of OpenSSL that supports AES-CCM") try: import filelock # noqa: F401 except ImportError: missing.append("filelock") try: import ge25519 # noqa: F401 except ImportError: missing.append("ge25519") try: import lakers # noqa: F401 except ImportError: missing.append("lakers-python") return missing def ws_missing_modules(): """Return a list of modules that are missing in order to use CoAP-over-WS, or a false value if everything is present""" if is_pyodide: return [] missing = [] try: import websockets # noqa: F401 except ImportError: missing.append("websockets") return missing def linkheader_missing_modules(): """Return a list of modules that are missing in order to use link_header functionality (eg. running a resource directory), or a false value if everything is present.""" missing = [] # The link_header module is now provided in-tree return missing def prettyprint_missing_modules(): """Return a list of modules that are missing in order to use pretty printing (ie. full aiocoap-client)""" missing = [] missing.extend(linkheader_missing_modules()) try: import cbor2 # noqa: F401 except ImportError: missing.append("cbor2") try: import pygments # noqa: F401 except ImportError: missing.append("pygments") try: import cbor_diag # noqa: F401 except ImportError: missing.append("cbor-diag") return missing missing_module_functions = { "dtls": dtls_missing_modules, "oscore": oscore_missing_modules, "linkheader": linkheader_missing_modules, "prettyprint": prettyprint_missing_modules, "ws": ws_missing_modules, } __all__ = [ "get_default_clienttransports", "get_default_servertransports", "has_reuse_port", "dtls_missing_modules", "oscore_missing_modules", "ws_missing_modules", "linkheader_missing_modules", "prettyprint_missing_modules", "missing_module_functions", ] aiocoap-0.4.12/aiocoap/edhoc.py000066400000000000000000000457131472205122200162670ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Internal module containing types used inside EDHOC security contexts""" import abc import enum import io from pathlib import Path import random from typing import Optional, Dict, Literal import os import cbor2 import lakers from . import oscore, credentials, error from . import Message from .numbers import POST def load_cbor_or_edn(filename: Path): """Common heuristic for whether something is CBOR or EDN""" import cbor_diag import cbor2 with filename.open("rb") as binary: try: result = cbor2.load(binary) except cbor2.CBORDecodeError: pass else: if binary.read(1) == b"": return result # else it apparently hasn't been CBOR all through...
with filename.open() as textual: try: converted = cbor_diag.diag2cbor(textual.read()) except ValueError: raise credentials.CredentialsLoadError( "Data loaded from %s was recognized neither as CBOR nor CBOR Diagnostic Notation (EDN)" % filename ) # no need to check for completeness: diag2cbor doesn't do diagnostic # sequences, AIU that's not even a thing return cbor2.loads(converted) class CoseKeyForEdhoc: kty: int crv: int d: bytes @classmethod def from_file(cls, filename: Path) -> "CoseKeyForEdhoc": """Load a key from a file (in CBOR or EDN), asserting that the file is not group/world readable""" if filename.stat().st_mode & 0o077 != 0: raise credentials.CredentialsLoadError( "Refusing to load private key that is group or world accessible" ) loaded = load_cbor_or_edn(filename) return cls.from_map(loaded) @classmethod def from_map(cls, key: dict) -> "CoseKeyForEdhoc": if not isinstance(key, dict): raise credentials.CredentialsLoadError( "Data is not shaped like COSE_KEY (expected top-level dictionary)" ) if 1 not in key: raise credentials.CredentialsLoadError( "Data is not shaped like COSE_KEY (expected key 1 (kty) in top-level dictionary)" ) if key[1] != 2: raise credentials.CredentialsLoadError( "Private key type %s is not supported (currently only 2 (EC) is supported)" % (key[1],) ) if key.get(-1) != 1: raise credentials.CredentialsLoadError( "Private key of type EC requires key -1 (crv), currently supported values: 1 (P-256)" ) if not isinstance(key.get(-4), bytes) or len(key[-4]) != 32: raise credentials.CredentialsLoadError( "Private key of type EC P-256 requires key -4 (d) to be a 32-byte long byte string" ) if any(k not in (1, -1, -4) for k in key): raise credentials.CredentialsLoadError( "Extraneous data in key, consider allow-listing the item if acceptable" ) s = cls() s.kty = 2 # EC s.crv = 1 # P-256 s.d = key[-4] return s def secret_to_map(self) -> dict: # kty: EC, crv: P-256, d: ... return {1: self.kty, -1: self.crv, -4: self.d} # Should we deprecate filename, add a generate_in_file method? (It's there # because generate originally depended on a file system) @classmethod def generate(cls, filename: Optional[Path] = None) -> "CoseKeyForEdhoc": """Generate a key inside a file This returns the generated private key. 
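Illustrative use (file name made up; the file is created with owner-only
permissions)::

    key = CoseKeyForEdhoc.generate(Path("edhoc-key.cbor"))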
""" from cryptography.hazmat.primitives.asymmetric import ec key = ec.generate_private_key(curve=ec.SECP256R1()) s = cls() s.kty = 2 # EC s.crv = 1 # P-256 s.d = key.private_numbers().private_value.to_bytes(32, "big") if filename is not None: flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL if hasattr(os, "O_BINARY"): flags |= os.O_BINARY descriptor = os.open(filename, flags, mode=0o600) try: with open(descriptor, "wb") as keyfile: cbor2.dump(s.secret_to_map(), keyfile) except Exception: filename.unlink() raise return s def as_ccs( self, kid: Optional[bytes], subject: Optional[str] ) -> Dict[Literal[14], dict]: """Given a key, generate a corresponding KCCS""" from cryptography.hazmat.primitives.asymmetric import ec private = ec.derive_private_key(int.from_bytes(self.d, "big"), ec.SECP256R1()) public = private.public_key() x = public.public_numbers().x.to_bytes(32, "big") y = public.public_numbers().y.to_bytes(32, "big") # kty: EC2, crv: P-256, x, y cosekey = {1: 2, -1: 1, -2: x, -3: y} if kid is not None: cosekey[2] = kid # cnf: COSE_Key credential_kccs: dict = {8: {1: cosekey}} if subject is not None: credential_kccs[2] = subject # kccs: cnf return {14: credential_kccs} class EdhocCredentials(credentials._Objectish): own_key: Optional[CoseKeyForEdhoc] suite: int method: int own_cred: Optional[dict] peer_cred: Optional[dict] def __init__( self, suite: int, method: int, own_cred_style: Optional[str] = None, peer_cred: Optional[dict] = None, own_cred: Optional[dict] = None, private_key_file: Optional[str] = None, private_key: Optional[dict] = None, ): from . import edhoc self.suite = suite self.method = method self.own_cred = own_cred self.peer_cred = peer_cred if private_key_file is not None and private_key is not None: raise credentials.CredentialsLoadError( "private_key is mutually exclusive with private_key_file" ) if private_key_file is not None: # FIXME: We should carry around a base private_key_path = Path(private_key_file) # FIXME: We left loading the file to the user, and now we're once more # in a position where we guess the file type self.own_key = CoseKeyForEdhoc.from_file(private_key_path) elif private_key is not None: self.own_key = CoseKeyForEdhoc.from_map(private_key) else: self.own_key = None if own_cred_style is None: self.own_cred_style = None else: self.own_cred_style = edhoc.OwnCredStyle(own_cred_style) if self.own_cred == {"unauthenticated": True}: if self.own_key is not None or self.own_cred_style not in ( None, OwnCredStyle.ByValue, ): raise credentials.CredentialsLoadError( "For local unauthenticated use, no key can be give, and own_cred_style needs to be by-value or absent" ) self.own_key = CoseKeyForEdhoc.generate() # FIXME: kid and subj should rather not be sent, but lakers insists on their presence self.own_cred = self.own_key.as_ccs(kid=b"?", subject="") self.own_cred_style = OwnCredStyle.ByValue else: if (own_cred is None) != (own_cred_style is None) or (own_cred is None) != ( self.own_key is None ): raise credentials.CredentialsLoadError( "If own credentials are given, all of own_cred, own_cred_style and private_key(_path) need to be given" ) # FIXME: This is only used on the client side, and expects that all parts (own and peer) are present self._established_context = None def find_edhoc_by_id_cred_peer(self, id_cred_peer): if self.peer_cred is None: return None if 14 not in self.peer_cred: # Only recognizing CCS so far return None if id_cred_peer == cbor2.dumps(self.peer_cred, canonical=True): # credential by value return cbor2.dumps(self.peer_cred[14], 
canonical=True) # cnf / COS_Key / kid, should be present in all CCS kid = self.peer_cred[14][8][1].get(2) if kid is not None and id_cred_peer == cbor2.dumps({4: kid}, canonical=True): # credential by kid return cbor2.dumps(self.peer_cred[14], canonical=True) def peer_cred_is_unauthenticated(self): # FIXME: This is rather weird internal API, and rather weird # format-wise -- but it will suffice until credentials are rewritten. return self.peer_cred is not None and self.peer_cred == { "unauthenticated": True } async def establish_context( self, wire, underlying_address, underlying_proxy_scheme, underlying_uri_host, logger, ): logger.info( "No OSCORE context found for EDHOC context %r, initiating one.", self ) # FIXME: We don't support role reversal yet, but once we # register this context to be available for incoming # requests, we'll have to pick more carefully c_i = bytes([random.randint(0, 23)]) initiator = lakers.EdhocInitiator() message_1 = initiator.prepare_message_1(c_i) msg1 = Message( code=POST, proxy_scheme=underlying_proxy_scheme, uri_host=underlying_uri_host, uri_path=[".well-known", "edhoc"], payload=cbor2.dumps(True) + message_1, ) msg1.remote = underlying_address msg2 = await wire.request(msg1).response_raising (c_r, id_cred_r, ead_2) = initiator.parse_message_2(msg2.payload) assert isinstance(self.own_cred, dict) and list(self.own_cred.keys()) == [14], ( "So far can only process CCS style own credentials a la {14: ...}, own_cred = %r" % self.own_cred ) cred_i = cbor2.dumps(self.own_cred[14], canonical=True) key_i = self.own_key.d logger.debug("EDHOC responder sent message_2 with ID_CRED_R = %r", id_cred_r) if self.peer_cred_is_unauthenticated(): # Not doing further checks (eg. for trailing bytes) or re-raising: This # was already checked by lakers parsed = cbor2.loads(id_cred_r) if 14 not in parsed: raise credentials.CredentialsMissingError( "Peer presented credential-by-reference (or anything else that's not a KCCS) when no credential was pre-agreed" ) # We could also pick the [14] out of the serialized stream and # treat it as an opaque item, but cbor2 doesn't really do serialized items. cred_r = cbor2.dumps(parsed[14], canonical=True) else: # We could look into id_cred_r, which is a CBOR encoded # byte string, and could start comparing ... 
but actually # EDHOC and Lakers protect us from misbinding attacks (is # that what they are called?), so we can just put in our # expected credential here # # FIXME: But looking into it might give us a better error than just # "Mac2 verification failed" # FIXME add assert on the structure or start doing the # generalization that'll fail at startup cred_r = cbor2.dumps(self.peer_cred[14], canonical=True) initiator.verify_message_2( key_i, cred_i, cred_r, ) # odd that we provide that here rather than in the next function logger.debug("Message 2 was verified") return EdhocInitiatorContext(initiator, c_i, c_r, self.own_cred_style, logger) class _EdhocContextBase( oscore.CanProtect, oscore.CanUnprotect, oscore.SecurityContextUtils ): def __init__(self, logger): self.log = logger def post_seqnoincrease(self): # The context is not persisted pass def protect(self, message, request_id=None, *, kid_context=True): outer_message, request_id = super().protect( message, request_id=request_id, kid_context=kid_context ) message_3 = self.message_3_to_include() if message_3 is not None: outer_message.opt.edhoc = True outer_message.payload = message_3 + outer_message.payload return outer_message, request_id def _make_ready(self, edhoc_context, c_ours, c_theirs): # only in lakers >= 0.5 that is present if hasattr(edhoc_context, "completed_without_message_4"): edhoc_context.completed_without_message_4() # FIXME: both should offer this if ( isinstance(edhoc_context, lakers.EdhocResponder) or edhoc_context.selected_cipher_suite() == 2 ): self.alg_aead = oscore.algorithms["AES-CCM-16-64-128"] self.hashfun = oscore.hashfunctions["sha256"] else: raise RuntimeError("Unknown suite") # we did check for critical EADs, there was no out-of-band agreement, so 8 it is oscore_salt_length = 8 # I figure that one would be ageed out-of-band as well (currently no # options to set/change this are known) self.id_context = None self.recipient_replay_window = oscore.ReplayWindow(32, lambda: None) master_secret = edhoc_context.edhoc_exporter(0, [], self.alg_aead.key_bytes) master_salt = edhoc_context.edhoc_exporter(1, [], oscore_salt_length) self.sender_id = c_theirs self.recipient_id = c_ours if self.sender_id == self.recipient_id: raise ValueError("Bad IDs: identical ones were picked") self.derive_keys(master_salt, master_secret) self.sender_sequence_number = 0 self.recipient_replay_window.initialize_empty() self.log.debug("EDHOC context %r ready for OSCORE operation", self) @abc.abstractmethod def message_3_to_include(self) -> Optional[bytes]: """An encoded message_3 to include in outgoing messages This may modify self to only return something once.""" class EdhocInitiatorContext(_EdhocContextBase): """An OSCORE context that is derived from an EDHOC exchange. It does not require that the EDHOC exchange has completed -- it can be set up by an initiator already when message 2 has been received, prepares a message 3 at setup time, and sends it with the first request that is sent through it.""" # FIXME: Should we rather send it with *every* request that is sent before a message 4 is received implicitly? 
def __init__(self, initiator, c_ours, c_theirs, cred_i_mode, logger): super().__init__(logger) # Only this line is role specific self._message_3, _i_prk_out = initiator.prepare_message_3( cred_i_mode.as_lakers(), None ) self._make_ready(initiator, c_ours, c_theirs) def message_3_to_include(self) -> Optional[bytes]: if self._message_3 is not None: result = self._message_3 self._message_3 = None return result return None class EdhocResponderContext(_EdhocContextBase): def __init__(self, responder, c_i, c_r, server_credentials, logger): super().__init__(logger) # storing them where they will later be overwritten with themselves self.recipient_id = c_r self.sender_id = c_i self._responder = responder # Through these we'll look up id_cred_i self._server_credentials = server_credentials self.authenticated_claims = [] # Not sure why mypy even tolerates this -- we're clearly not ready for # a general protect/unprotect, and things only work because all # relevant functions get their checks introduced self._incomplete = True def message_3_to_include(self) -> Optional[bytes]: # as a responder we never send one return None def get_oscore_context_for(self, unprotected): if oscore.COSE_KID_CONTEXT in unprotected: return None if unprotected.get(oscore.COSE_KID) == self.recipient_id: return self def find_all_used_contextless_oscore_kid(self) -> set[bytes]: return set((self.recipient_id,)) def protect(self, *args, **kwargs): if self._incomplete: raise RuntimeError( "EDHOC has not completed yet, waiting for message 3, can not protect own messages yet" ) return super().protect(*args, **kwargs) def unprotect(self, protected_message, request_id=None): if self._incomplete: if not protected_message.opt.edhoc: self.log.error( "OSCORE failed: No EDHOC message 3 received and none present" ) raise error.BadRequest("EDHOC incomplete") payload_stream = io.BytesIO(protected_message.payload) # discarding result -- just need to have a point to split _ = cbor2.load(payload_stream) m3len = payload_stream.tell() message_3 = protected_message.payload[:m3len] id_cred_i, ead_3 = self._responder.parse_message_3(message_3) if ead_3 is not None: self.log.error("Aborting EDHOC: EAD3 present") raise error.BadRequest try: (cred_i, claims) = self._server_credentials.find_edhoc_by_id_cred_peer( id_cred_i ) except KeyError: self.log.error( "Aborting EDHOC: No credentials found for client with id_cred_i=h'%s'", id_cred_i.hex(), ) raise error.BadRequest self.authenticated_claims.extend(claims) self._responder.verify_message_3(cred_i) self._make_ready(self._responder, self.recipient_id, self.sender_id) self._incomplete = False protected_message = protected_message.copy( edhoc=False, payload=protected_message.payload[m3len:] ) return super().unprotect(protected_message, request_id) class OwnCredStyle(enum.Enum): """Guidance for how the own credential should be sent in an EDHOC exchange""" ByKeyId = "by-key-id" ByValue = "by-value" def as_lakers(self): """Convert the enum into Lakers' representation of the same concept.
The types may eventually be unified, but so far, Lakers doesn't make the distinctions we expect to make yet.""" if self == self.ByKeyId: # FIXME: Mismatch to be fixed in lakers -- currently the only way # it allows sending by reference is by Key ID return lakers.CredentialTransfer.ByReference if self == self.ByValue: return lakers.CredentialTransfer.ByValue else: raise RuntimeError("enum variant not covered") aiocoap-0.4.12/aiocoap/error.py000066400000000000000000000326711472205122200163330ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """ Common errors for the aiocoap library """ import warnings import abc import errno from typing import Optional from .numbers import codes from . import util class Error(Exception): """ Base exception for all exceptions that indicate a failed request """ class HelpfulError(Error): def __str__(self): """User presentable string. This should start with "Error:", or with "Something Error:", because the context will not show that this was an error.""" return type(self).__name__ def extra_help(self, hints={}) -> Optional[str]: """Information printed at aiocoap-client or similar occasions when the error message itself may be insufficient to point the user in the right direction The `hints` dictionary may be populated with context that the caller has; the implementation must tolerate their absence. Currently established keys: * original_uri (str): URI that was attempted to access * request (Message): Request that was assembled to be sent """ return None class RenderableError(Error, metaclass=abc.ABCMeta): """ Exception that can meaningfully be represented in a CoAP response """ @abc.abstractmethod def to_message(self): """Create a CoAP message that should be sent when this exception is rendered""" class ResponseWrappingError(Error): """ An exception that is raised due to an unsuccessful but received response. A better relationship with :mod:`.numbers.codes` should be worked out to do ``except UnsupportedMediaType`` (similar to the various ``OSError`` subclasses). """ def __init__(self, coapmessage): self.coapmessage = coapmessage def to_message(self): return self.coapmessage def __repr__(self): return "<%s: %s %r>" % ( type(self).__name__, self.coapmessage.code, self.coapmessage.payload, ) class ConstructionRenderableError(RenderableError): """ RenderableError that is constructed from class attributes :attr:`code` and :attr:`message` (where they can be overridden in the constructor). """ def __init__(self, message=None): if message is not None: self.message = message def to_message(self): from .message import Message return Message(code=self.code, payload=self.message.encode("utf8")) code = codes.INTERNAL_SERVER_ERROR #: Code assigned to messages built from it message = "" #: Text sent in the built message's payload # This block is code-generated to make the types available to static checkers. # The __debug__ check below ensures that it stays up to date.
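# Illustrative sketch (not original aiocoap code): a server-side render
# method can abort with any of the generated classes below to produce the
# matching CoAP error response, e.g.
#
#     if len(request.payload) > 1024:
#         raise RequestEntityTooLarge("Payload too long")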
class BadRequest(ConstructionRenderableError): code = codes.BAD_REQUEST class Unauthorized(ConstructionRenderableError): code = codes.UNAUTHORIZED class BadOption(ConstructionRenderableError): code = codes.BAD_OPTION class Forbidden(ConstructionRenderableError): code = codes.FORBIDDEN class NotFound(ConstructionRenderableError): code = codes.NOT_FOUND class MethodNotAllowed(ConstructionRenderableError): code = codes.METHOD_NOT_ALLOWED class NotAcceptable(ConstructionRenderableError): code = codes.NOT_ACCEPTABLE class RequestEntityIncomplete(ConstructionRenderableError): code = codes.REQUEST_ENTITY_INCOMPLETE class Conflict(ConstructionRenderableError): code = codes.CONFLICT class PreconditionFailed(ConstructionRenderableError): code = codes.PRECONDITION_FAILED class RequestEntityTooLarge(ConstructionRenderableError): code = codes.REQUEST_ENTITY_TOO_LARGE class UnsupportedContentFormat(ConstructionRenderableError): code = codes.UNSUPPORTED_CONTENT_FORMAT class UnprocessableEntity(ConstructionRenderableError): code = codes.UNPROCESSABLE_ENTITY class TooManyRequests(ConstructionRenderableError): code = codes.TOO_MANY_REQUESTS class InternalServerError(ConstructionRenderableError): code = codes.INTERNAL_SERVER_ERROR class NotImplemented(ConstructionRenderableError): code = codes.NOT_IMPLEMENTED class BadGateway(ConstructionRenderableError): code = codes.BAD_GATEWAY class ServiceUnavailable(ConstructionRenderableError): code = codes.SERVICE_UNAVAILABLE class GatewayTimeout(ConstructionRenderableError): code = codes.GATEWAY_TIMEOUT class ProxyingNotSupported(ConstructionRenderableError): code = codes.PROXYING_NOT_SUPPORTED class HopLimitReached(ConstructionRenderableError): code = codes.HOP_LIMIT_REACHED if __debug__: _missing_codes = False _full_code = "" for code in codes.Code: if code.is_successful() or not code.is_response(): continue classname = "".join(w.title() for w in code.name.split("_")) _full_code += f""" class {classname}(ConstructionRenderableError): code = codes.{code.name}""" if classname not in locals(): warnings.warn(f"Missing exception type: {classname}") _missing_codes = True continue if locals()[classname].code != code: warnings.warn( f"Mismatched code for {classname}: Should be {code}, is {locals()[classname].code}" ) _missing_codes = True continue if _missing_codes: warnings.warn( "Generated exception list is out of sync, should be:\n" + _full_code ) # More detailed versions of code based errors class NoResource(NotFound): """ Raised when resource is not found. """ message = "Error: Resource not found!" def __init__(self): warnings.warn( "NoResource is deprecated in favor of NotFound", DeprecationWarning, stacklevel=2, ) class UnallowedMethod(MethodNotAllowed): """ Raised by a resource when request method is understood by the server but not allowed for that particular resource. """ message = "Error: Method not allowed!" class UnsupportedMethod(MethodNotAllowed): """ Raised when request method is not understood by the server at all. """ message = "Error: Method not recognized!" class NetworkError(HelpfulError): """Base class for all "something went wrong with name resolution, sending or receiving packets". Errors of these kinds are raised towards client callers when things went wrong network-side, or at context creation.
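A hedged client-side sketch (``context`` and ``message`` as used elsewhere in
aiocoap)::

    try:
        response = await context.request(message).response
    except NetworkError as e:
        print("Network trouble:", e)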
They are often raised from socket.gaierror or similar classes, but these are wrapped in order to make catching them possible independently of the underlying transport.""" def __str__(self): return f"Network error: {type(self).__name__}" def extra_help(self, hints={}): if isinstance(self.__cause__, OSError): if self.__cause__.errno == errno.ECONNREFUSED: # seen trying to reach any used address with the port closed return "The remote host could be reached, but reported that the requested port is not open. Check whether a CoAP server is running at the address, or whether it is running on a different port." if self.__cause__.errno == errno.EHOSTUNREACH: # seen trying to reach any unused local address return "No way of contacting the remote host could be found. This could be because a host on the local network is offline or firewalled. Tools for debugging in the next step could be ping or traceroute." if self.__cause__.errno == errno.ENETUNREACH: # seen trying to reach an IPv6 host through an IP literal from a v4-only system, or trying to reach 2001:db8::1 return "No way of contacting the remote network could be found. This may be due to lack of IPv6 connectivity, lack of a concrete route (eg. trying to reach a private use network which there is no route to). Tools for debugging in the next step could be ping or traceroute." if self.__cause__.errno == errno.EACCES: # seen trying to reach the broadcast address of a local network return "The operating system refused to send the request. For example, this can occur when attempting to send broadcast requests instead of multicast requests." class ResolutionError(NetworkError): """Resolving the host component of a URI to a usable transport address was not possible""" def __str__(self): return f"Name resolution error: {self.args[0]}" class MessageError(NetworkError): """Received an error from the remote on the CoAP message level (typically a RST)""" class RemoteServerShutdown(NetworkError): """The peer a request was sent to in a stateful connection closed the connection around the time the request was sent""" class TimeoutError(NetworkError): """Base for all timeout-ish errors. Like NetworkError, receiving this alone does not indicate whether the request may have reached the server or not. """ def extra_help(self, hints={}): return "Neither a response nor an error was received. This can have a wide range of causes, from the address being wrong to the server being stuck." class ConRetransmitsExceeded(TimeoutError): """A transport that retransmits CON messages has failed to obtain a response within its retransmission timeout. When this is raised in a transport, requests failing with it may or may not have been received by the server. """ class RequestTimedOut(TimeoutError): """ Raised when request is timed out. This error is currently not produced by aiocoap; it is deprecated. Users can now catch error.TimeoutError, or newer more detailed subtypes introduced later. """ class WaitingForClientTimedOut(TimeoutError): """ Raised when server expects some client action: - sending next PUT/POST request with block1 or block2 option - sending next GET request with block2 option but client does nothing. This error is currently not produced by aiocoap; it is deprecated. Users can now catch error.TimeoutError, or newer more detailed subtypes introduced later. """ class ResourceChanged(Error): """ The requested resource was modified during the request and could therefore not be received in a consistent state.
""" class UnexpectedBlock1Option(Error): """ Raised when a server responds with block1 options that just don't match. """ class UnexpectedBlock2(Error): """ Raised when a server responds with another block2 than expected. """ class MissingBlock2Option(Error): """ Raised when response with Block2 option is expected (previous response had Block2 option with More flag set), but response without Block2 option is received. """ class NotObservable(Error): """ The server did not accept the request to observe the resource. """ class ObservationCancelled(Error): """ The server claimed that it will no longer sustain the observation. """ class UnparsableMessage(Error): """ An incoming message does not look like CoAP. Note that this happens rarely -- the requirements are just two bit at the beginning of the message, and a minimum length. """ class LibraryShutdown(Error): """The library or a transport registered with it was requested to shut down; this error is raised in all outstanding requests.""" class AnonymousHost(Error): """This is raised when it is attempted to express as a reference a (base) URI of a host or a resource that can not be reached by any process other than this. Typically, this happens when trying to serialize a link to a resource that is hosted on a CoAP-over-TCP or -WebSockets client: Such resources can be accessed for as long as the connection is active, but can not be used any more once it is closed or even by another system.""" class MalformedUrlError(ValueError, HelpfulError): def __str__(self): if self.args: return f"Malformed URL: {self.args[0]}" else: return f"Malformed URL: {self.__cause__}" class IncompleteUrlError(ValueError, HelpfulError): def __str__(self): return "URL incomplete: Must start with a scheme." def extra_help(self, hints={}): return "Most URLs in aiocoap need to be given with a scheme, eg. the 'coap' in 'coap://example.com/path'." class MissingRemoteError(HelpfulError): """A request is sent without a .remote attribute""" def __str__(self): return "Error: No remote endpoint set for request." def extra_help(self, hints={}): original_uri = hints.get("original_uri", None) requested_message = hints.get("request", None) if requested_message and ( requested_message.opt.proxy_uri or requested_message.opt.proxy_scheme ): if original_uri: return f"The message is set up for use with a proxy (because the scheme of {original_uri!r} is not supported), but no proxy was set." else: return ( "The message is set up for use with a proxy, but no proxy was set." ) # Nothing helpful otherwise: The message was probably constructed in a # rather manual way. __getattr__ = util.deprecation_getattr( { "UnsupportedMediaType": "UnsupportedContentFormat", "RequestTimedOut": "TimeoutError", "WaitingForClientTimedOut": "TimeoutError", }, globals(), ) aiocoap-0.4.12/aiocoap/interfaces.py000066400000000000000000000522571472205122200173310ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module provides interface base classes to various aiocoap software components, especially with respect to request and response handling. It describes `abstract base classes`_ for messages, endpoints etc. It is *completely unrelated* to the concept of "network interfaces". .. 
_`abstract base classes`: https://docs.python.org/3/library/abc""" from __future__ import annotations import abc import asyncio import warnings from aiocoap.pipe import Pipe from aiocoap.numbers.constants import MAX_REGULAR_BLOCK_SIZE_EXP from typing import Optional, Callable class MessageInterface(metaclass=abc.ABCMeta): """A MessageInterface is an object that can exchange addressed messages over unreliable transports. Implementations send and receive messages with message type and message ID, and are driven by a Context that deals with retransmission. Usually, a MessageInterface refers to something like a local socket, and sends messages to different remote endpoints depending on the message's addresses. Just as well, a MessageInterface can be useful for one single address only, or use various local addresses depending on the remote address. """ @abc.abstractmethod async def shutdown(self): """Deactivate the complete transport, usually irreversibly. When the coroutine returns, the object must have made sure that it can be destructed by means of ref-counting or a garbage collector run.""" @abc.abstractmethod def send(self, message): """Send a given :class:`Message` object""" @abc.abstractmethod async def determine_remote(self, message): """Return a value suitable for the message's remote property based on its .opt.uri_host or .unresolved_remote. May return None, which indicates that the MessageInterface can not transport the message (typically because it is of the wrong scheme).""" class EndpointAddress(metaclass=abc.ABCMeta): """An address that is suitable for routing through the application to a remote endpoint. Depending on the MessageInterface implementation used, an EndpointAddress property of a message can mean the message is exchanged "with [2001:db8::2:1]:5683, while my local address was [2001:db8:1::1]:5683" (typical of UDP6), "over the connected , wherever that's connected to" (simple6 or TCP) or "with participant 0x01 of the OSCORE key 0x..., routed over ". EndpointAddresses are only constructed by MessageInterface objects, either for incoming messages or when populating a message's .remote in :meth:`MessageInterface.determine_remote`. There is no requirement that those addresses are always identical for a given address. However, incoming addresses must be hashable and hash-compare identically to requests from the same context. The "same context", for the purpose of EndpointAddresses, means that the message must be eligible for request/response, blockwise (de)composition and observations. (For example, in a DTLS context, the hash must change between epochs due to RFC7252 Section 9.1.2). So far, it is required that hash-identical objects also compare the same. That requirement might go away in future to allow equality to reflect finer details that are not hashed. (The only property that is currently known not to be hashed is the local address in UDP6, because that is *unknown* in initially sent packets, and thus disregarded for comparison but needed to round-trip through responses.) """ @property @abc.abstractmethod def hostinfo(self): """The authority component of URIs that this endpoint represents when requests are sent to it Note that the presence of a hostinfo does not necessarily mean that a globally meaningful or even syntactically valid URI can be constructed out of it; use the :attr:`.uri` property for this.""" @property @abc.abstractmethod def hostinfo_local(self): """The authority component of URIs that this endpoint represents when requests are sent from it.
As with :attr:`.hostinfo`, this does not necessarily produce sufficient input for a URI; use :attr:`.uri_local` instead.""" @property def uri(self): """Deprecated alias for uri_base""" return self.uri_base @property @abc.abstractmethod def uri_base(self): """The base URI for the peer (typically scheme plus .hostinfo). This raises :class:`.error.AnonymousHost` when executed on an address whose peer coordinates can not be expressed meaningfully in a URI.""" @property @abc.abstractmethod def uri_base_local(self): """The base URI for the local side of this remote. This raises :class:`.error.AnonymousHost` when executed on an address whose local coordinates can not be expressed meaningfully in a URI.""" @property @abc.abstractmethod def is_multicast(self): """True if the remote address is a multicast address, otherwise false.""" @property @abc.abstractmethod def is_multicast_locally(self): """True if the local address is a multicast address, otherwise false.""" @property @abc.abstractmethod def scheme(self): """The scheme that is used with addresses of this kind This is usually a class property. It is applicable to both sides of the communication. (Should there ever be a scheme that addresses the participants differently, a scheme_local will be added.)""" @property def maximum_block_size_exp(self) -> int: """The maximum negotiated block size that can be sent to this remote.""" return MAX_REGULAR_BLOCK_SIZE_EXP # Giving some slack so that barely-larger messages (like OSCORE typically # are) don't get fragmented -- but still for migration to maximum message # size so we don't have to guess any more how much may be option and how # much payload @property def maximum_payload_size(self) -> int: """The maximum payload size that can be sent to this remote. Only relevant if maximum_block_size_exp is 7. This will be removed in favor of a maximum message size when the block handlers can get serialization length predictions from the remote.""" return 1124 def as_response_address(self): """Address to be assigned to a response to messages that arrived with this message This can (and does, by default) return self, but gives the protocol the opportunity to react to create a modified copy to deal with variations from multicast. """ return self @property def authenticated_claims(self): """Iterable of objects representing any claims (e.g. an identity, or generally objects that can be used to authorize particular accesses) that were authenticated for this remote. This is experimental and may be changed without notice. Its primary use is on the server side; there, a request handler (or resource decorator) can use the claims to decide whether the client is authorized for a particular request. Use on the client side is planned as a requirement on a request, although (especially on side-effect free non-confidential requests) it can also be used in response processing.""" # "no claims" is a good default return () @property @abc.abstractmethod def blockwise_key(self): """A hashable (ideally, immutable) value that is only the same for remotes from which blocks may be combined. (With all current transports that means that the network addresses need to be in there, and the identity of the security context). It does *not* just hinge on the identity of the address object, as a first block may come in an OSCORE group request and follow-ups may come in pairwise requests.
        (And there might be allowed relaxations on the transport under OSCORE,
        but those would need further discussion.)"""

    # FIXME: should this behave like something that keeps the address
    # alive? Conversely, if the address gets deleted, can this reach the
    # block keys and make their stuff vanish from the caches?
    #
    # FIXME: what do security mechanisms best put here? Currently it's a
    # wild mix of keys (OSCORE -- only thing guaranteed to never be reused;
    # DTLS client because it's available) and claims (DTLS server, because
    # it's available, and if the claims set matches it can't be that wrong
    # either, can it?)


class MessageManager(metaclass=abc.ABCMeta):
    """The interface an entity that drives a MessageInterface provides towards
    the MessageInterface for callbacks and object acquisition."""

    @abc.abstractmethod
    def dispatch_message(self, message):
        """Callback to be invoked with an incoming message"""

    @abc.abstractmethod
    def dispatch_error(self, error: Exception, remote):
        """Callback to be invoked when the operating system indicated an error
        condition from a particular remote."""

    @property
    @abc.abstractmethod
    def client_credentials(self):
        """A CredentialsMap that transports should consult when trying to
        establish a security context"""


class TokenInterface(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def send_message(
        self, message, messageerror_monitor
    ) -> Optional[Callable[[], None]]:
        """Send a message.

        If this returns a callable, the caller is asked to call it in case it
        no longer needs the message to be sent, and to dispose of the message
        if it doesn't intend to send it any more.

        messageerror_monitor is a function that will be called at most once by
        the token interface: when the underlying layer indicates that this
        concrete message could not be processed. This is typically the case
        for RSTs from the message layer, and is used to cancel observations.
        Errors that are not likely to be specific to a message (like
        retransmission timeouts, or ICMP errors) are reported through
        dispatch_error instead. (While the information which concrete message
        triggered that might be available, it is not likely to be relevant.)

        Currently, it is up to the TokenInterface to unset the no_response
        option in response messages, and to possibly not send them."""

    @abc.abstractmethod
    async def fill_or_recognize_remote(self, message):
        """Return True if the message is recognized to already have a .remote
        managed by this TokenInterface, or return True and set a .remote on
        message if it should (by its unresolved remote or Uri-* options) be
        routed through this TokenInterface, or return False otherwise."""


class TokenManager(metaclass=abc.ABCMeta):
    # to be described in full; at least there is a dispatch_error in analogy
    # to MessageManager's
    pass


class RequestInterface(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    async def fill_or_recognize_remote(self, message):
        pass

    @abc.abstractmethod
    def request(self, request: Pipe):
        pass


class RequestProvider(metaclass=abc.ABCMeta):
    """
    .. automethod:: request

    .. (which we have to list here manually because the private override in
       the method is needed for the repeated signature in Context)
    """

    @abc.abstractmethod
    def request(self, request_message, handle_blockwise=True):
        """Create and act on a :class:`Request` object that will be handled
        according to the provider's implementation.

        Note that the request is not necessarily sent on the wire immediately;
        it may (but, depending on the transport, does not necessarily) rely on
        the response being waited for.

        If handle_blockwise is True (the default), the request provider will
        split the request and/or collect the response parts automatically. The
        block size indicated by the remote is used, and can be decreased by
        setting the message's :attr:`.remote.maximum_block_size_exp
        <aiocoap.interfaces.EndpointAddress.maximum_block_size_exp>` property.
        Note that by being a property of the remote, this may affect other
        block-wise operations on the same remote -- this should be desirable
        behavior.

        :meta private:

        (not actually private, just hiding from automodule due to being
        grouped with the important functions)
        """


class Request(metaclass=abc.ABCMeta):
    """A CoAP request, initiated by sending a message. Typically, this is not
    instantiated directly, but generated by a :meth:`RequestProvider.request`
    method."""

    response = """A future that is present from the creation of the object and \
fulfilled with the response message.

    When legitimate errors occur, this becomes an aiocoap.Error. (E.g. on any
    kind of network failure, encryption trouble, or protocol violations.) Any
    other kind of exception raised from this is a bug in aiocoap, and should
    better stop the whole application.
    """
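In application code, this interface is consumed through aiocoap's Context (which implements RequestProvider); a typical client exchange, assuming any reachable CoAP server at the given URI:

```python
import asyncio
import aiocoap

async def main():
    # create_client_context() gives a Context, aiocoap's RequestProvider
    ctx = await aiocoap.Context.create_client_context()

    msg = aiocoap.Message(code=aiocoap.GET, uri="coap://coap.me/hello")
    request = ctx.request(msg)         # a Request object as described above
    response = await request.response  # the future documented on Request

    print(response.code, response.payload)
    await ctx.shutdown()

asyncio.run(main())
```

Blockwise splitting and reassembly happen transparently here because handle_blockwise defaults to True.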
class Resource(metaclass=abc.ABCMeta):
    """Interface that is expected by a :class:`.protocol.Context` to be
    present on the server side, which renders all requests to that context."""

    def __init__(self):
        super().__init__()

        # FIXME: These keep addresses alive, and thus possibly transports.
        # Going through the shutdown dance per resource seems extraneous.
        # Options are to accept addresses staying around (making sure they
        # don't keep their transports alive, if that's a good idea), to hash
        # them, or to make them weak.
        from .blockwise import Block1Spool, Block2Cache

        self._block1 = Block1Spool()
        self._block2 = Block2Cache()

    @abc.abstractmethod
    async def render(self, request):
        """Return a message that can be sent back to the requester.

        This does not need to set any low-level message options like remote,
        token or message type; it does however need to set a response code.

        A response returned may carry a no_response option (which is actually
        specified to apply to requests only); the underlying transports will
        decide based on that and its code whether to actually transmit the
        response."""

    @abc.abstractmethod
    async def needs_blockwise_assembly(self, request):
        """Indicator whether aiocoap should assemble request blocks to a
        single request and extract the requested blocks from a
        complete-resource answer (True), or whether the resource will do that
        by itself (False)."""

    async def _render_to_pipe(self, pipe: Pipe) -> None:
        if not hasattr(self, "_block1"):
            warnings.warn(
                "No attribute _block1 found on instance of "
                f"{type(self).__name__}, make sure its __init__ code "
                "properly calls super()!",
                DeprecationWarning,
            )
            from .blockwise import Block1Spool, Block2Cache

            self._block1 = Block1Spool()
            self._block2 = Block2Cache()

        req = pipe.request
        if await self.needs_blockwise_assembly(req):
            req = self._block1.feed_and_take(req)

            # Note that unless the lambda gets called, we're not fully
            # accessing req any more -- we're just looking at its block2
            # option, and the blockwise key extracted earlier.
            res = await self._block2.extract_or_insert(req, lambda: self.render(req))

            res.opt.block1 = req.opt.block1
        else:
            res = await self.render(req)

        pipe.add_response(res, is_last=True)

    async def render_to_pipe(self, pipe: Pipe) -> None:
        """Create any number of responses (as indicated by the request) into
        the request stream.

        This method is provided by the base Resource classes; if it is
        overridden, then :meth:`~.interfaces.Resource.render`,
        :meth:`needs_blockwise_assembly` and
        :meth:`~.interfaces.ObservableResource.add_observation` are not used
        any more. (They still need to be implemented to comply with the
        interface definition, which is yet to be updated.)"""
        warnings.warn(
            "Request interface is changing: Resources should "
            "implement render_to_pipe or inherit from "
            "resource.Resource which implements that based on any "
            "provided render methods",
            DeprecationWarning,
        )
        if isinstance(self, ObservableResource):
            # While the above deprecation is used, a resource previously
            # inheriting from (X, ObservableResource) with X inheriting from
            # Resource might find itself using this method. When migrating
            # over to inheriting from resource.Resource, this error will
            # become apparent and this can die with the rest of this
            # workaround.
            return await ObservableResource._render_to_pipe(self, pipe)
        await self._render_to_pipe(pipe)
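Concrete resources are normally built on aiocoap.resource.Resource, which provides render_to_pipe on top of per-method handlers such as render_get; a minimal server sketch:

```python
import asyncio
import time

import aiocoap
import aiocoap.resource as resource

class TimeResource(resource.Resource):
    """Answers GET with the current wall-clock time."""

    async def render_get(self, request):
        # The library fills in remote, token, mtype; we only supply payload
        # (and could set a code or options explicitly).
        return aiocoap.Message(payload=time.ctime().encode("ascii"))

async def main():
    site = resource.Site()
    site.add_resource(["time"], TimeResource())
    await aiocoap.Context.create_server_context(site)
    await asyncio.get_running_loop().create_future()  # serve forever

asyncio.run(main())
```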
class ObservableResource(Resource, metaclass=abc.ABCMeta):
    """Interface the :class:`.protocol.ServerObservation` uses to negotiate
    whether an observation can be established based on a request.

    This adds only functionality for registering and unregistering
    observations; the notification contents will be retrieved from the
    resource using the regular :meth:`~.Resource.render` method from crafted
    (fake) requests.
    """

    @abc.abstractmethod
    async def add_observation(self, request, serverobservation):
        """Before the incoming request is sent to :meth:`~.Resource.render`,
        the :meth:`.add_observation` method is called. If the resource chooses
        to accept the observation, it has to call the
        `serverobservation.accept(cb)` with a callback that will be called
        when the observation ends. After accepting, the ObservableResource
        should call `serverobservation.trigger()` whenever it changes its
        state; the ServerObservation will then initiate notifications by
        having the request rendered again."""

    async def _render_to_pipe(self, pipe: Pipe) -> None:
        from .protocol import ServerObservation

        # If block2:>0 comes along, we'd just ignore the observe
        if pipe.request.opt.observe != 0:
            return await Resource._render_to_pipe(self, pipe)

        # If block1 happens here, we can probably just not support it for the
        # time being. (Given that block1 + observe is untested and thus does
        # not work so far anyway.)

        servobs = ServerObservation()
        await self.add_observation(pipe.request, servobs)

        try:
            first_response = await self.render(pipe.request)

            if (
                not servobs._accepted
                or servobs._early_deregister
                or not first_response.code.is_successful()
            ):
                pipe.add_response(first_response, is_last=True)
                return

            # FIXME: observation numbers should actually not be per
            # asyncio.task, but per (remote, token). if a client renews an
            # observation (possibly with a new ETag or whatever is deemed
            # legal), the new observation events should still carry larger
            # numbers. (if they did not, the client might be tempted to
            # discard them).
            first_response.opt.observe = next_observation_number = 0
            # If block2 were to happen here, we'd store the full response
            # here, and pick out block2:0.
            pipe.add_response(first_response, is_last=False)

            while True:
                await servobs._trigger
                # if you wonder why the lines around this are not just
                # `response = await servobs._trigger`, have a look at the
                # 'double' tests in test_observe.py: A later triggering could
                # have replaced servobs._trigger in the meantime.
                response = servobs._trigger.result()
                servobs._trigger = asyncio.get_running_loop().create_future()

                if response is None:
                    response = await self.render(pipe.request)

                # If block2 were to happen here, we'd store the full response
                # here, and pick out block2:0.

                is_last = servobs._late_deregister or not response.code.is_successful()

                if not is_last:
                    next_observation_number += 1
                    response.opt.observe = next_observation_number

                pipe.add_response(response, is_last=is_last)

                if is_last:
                    return
        finally:
            servobs._cancellation_callback()

    async def render_to_pipe(self, request: Pipe) -> None:
        warnings.warn(
            "Request interface is changing: Resources should "
            "implement render_to_pipe or inherit from "
            "resource.Resource which implements that based on any "
            "provided render methods",
            DeprecationWarning,
        )
        await self._render_to_pipe(request)
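For the observable case, aiocoap.resource.ObservableResource performs the add_observation/trigger dance described above; a subclass only has to report its state changes through updated_state():

```python
import aiocoap
import aiocoap.resource as resource

class Counter(resource.ObservableResource):
    """Observable resource: every increment pushes a notification."""

    def __init__(self):
        super().__init__()
        self.count = 0

    def increment(self):
        # e.g. scheduled periodically via loop.call_later in a real server
        self.count += 1
        self.updated_state()  # re-renders for all registered observers

    async def render_get(self, request):
        return aiocoap.Message(payload=str(self.count).encode("ascii"))
```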
aiocoap-0.4.12/aiocoap/message.py000066400000000000000000000734161472205122200166300ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

from __future__ import annotations

import ipaddress
import urllib.parse
import struct
import copy
import string
from collections import namedtuple

from . import error, optiontypes
from .numbers.codes import Code, CHANGED
from .numbers.types import Type
from .numbers.constants import TransportTuning, MAX_REGULAR_BLOCK_SIZE_EXP
from .options import Options
from .util import hostportjoin, hostportsplit, Sentinel, quote_nonascii
from .util.uri import quote_factory, unreserved, sub_delims
from . import interfaces

__all__ = ["Message", "NoResponse"]

# FIXME there should be a proper interface for this that does all the urllib
# patching possibly required and works with pluggable transports. URLs qualify
# if they can be parsed into the Proxy-Scheme / Uri-* structure.
coap_schemes = ["coap", "coaps", "coap+tcp", "coaps+tcp", "coap+ws", "coaps+ws"]

# Monkey patch urllib to make URL joining available in CoAP
# This is a workaround for a urllib limitation.
urllib.parse.uses_relative.extend(coap_schemes)
urllib.parse.uses_netloc.extend(coap_schemes)


class Message(object):
    """CoAP Message with some handling metadata

    This object's attributes provide access to the fields in a CoAP message
    and can be directly manipulated.

    * Some attributes are additional data that do not round-trip through
      serialization and deserialization. They are marked as
      "non-roundtrippable".
    * Some attributes that need to be filled for submission of the message can
      be left empty by most applications, and will be taken care of by the
      library. Those are marked as "managed".

    The attributes are:

    * :attr:`payload`: The payload (body) of the message as bytes.
    * :attr:`mtype`: Message type (CON, ACK etc, see :mod:`.numbers.types`).
      Managed unless set by the application.
    * :attr:`code`: The code (either request or response code), see
      :mod:`.numbers.codes`.
    * :attr:`opt`: A container for the options, see :class:`.options.Options`.
    * :attr:`mid`: The message ID. Managed by the :class:`.Context`.
    * :attr:`token`: The message's token as bytes. Managed by the
      :class:`.Context`.
    * :attr:`remote`: The socket address of the other side, managed by the
      :class:`.protocol.Request` by resolving the ``.opt.uri_host`` or
      ``unresolved_remote``, or by the stack by echoing the incoming
      request's. Follows the :class:`.interfaces.EndpointAddress` interface.
      Non-roundtrippable.

      While a message has not been transmitted, the property is managed by the
      :class:`.Message` itself using the :meth:`.set_request_uri()` or the
      constructor `uri` argument.
    * :attr:`transport_tuning`: Parameters used by one or more transports to
      guide transmission. These are purely advisory hints; unknown properties
      of that object are ignored, and transports consider them over built-in
      constants on a best-effort basis.

      Note that many attributes are mandatory if this is not None; it is
      recommended that any objects passed in here are based on the
      :class:`aiocoap.numbers.constants.TransportTuning` class.
    * :attr:`request`: The request to which an incoming response message
      belongs; only available at the client. Managed by the
      :class:`.interfaces.RequestProvider` (typically a :class:`.Context`).

    These properties are still available but deprecated:

    * requested_*: Managed by the :class:`.protocol.Request` a response
      results from, and filled with the request's URL data.
      Non-roundtrippable.
    * unresolved_remote: ``host[:port]`` (strictly speaking; hostinfo as in a
      URI) formatted string. If this attribute is set, it overrides
      ``.opt.uri_host`` (and ``.opt.uri_port``) when it comes to filling the
      ``remote`` in an outgoing request.

      Use this when you want to send a request with a host name that would not
      normally resolve to the destination address. (Typically, this is used
      for proxying.)

    Options can be given as further keyword arguments at message construction
    time. This feature is experimental, as future message parameters could
    collide with options.

    The four messages involved in an exchange
    ------------------------------------------

    ::

        Requester                                  Responder

        +-------------+                         +-------------+
        | request msg |  ---- send request -->  | request msg |
        +-------------+                         +-------------+
                                                       |
                                                processed into
                                                       |
                                                       v
        +-------------+                         +-------------+
        | response m. |  <--- send response --  | response m. |
        +-------------+                         +-------------+

    The above shows the four message instances involved in communication
    between an aiocoap client and server process. Boxes represent instances of
    Message, and the messages on the same line represent a single CoAP message
    as passed around on the network. Still, they differ in some aspects:

    * The requested URI will look different between requester and responder if
      the requester uses a host name and does not send it in the message.
    * If the request was sent via multicast, the response's requested URI
      differs from the request URI because it has the responder's address
      filled in. That address is not known at the responder's side yet, as it
      is typically filled out by the network stack.
    * It is yet unclear whether the response's URI should contain an IP
      literal or a host name in the unicast case if the Uri-Host option was
      not sent.
    * Properties like Message ID and token will differ if a proxy was
      involved.
    * Some options or even the payload may differ if a proxy was involved.
    """

    def __init__(
        self,
        *,
        mtype=None,
        mid=None,
        code=None,
        payload=b"",
        token=b"",
        uri=None,
        transport_tuning=None,
        **kwargs,
    ):
        self.version = 1
        if mtype is None:
            # leave it unspecified for convenience, sending functions will
            # know what to do
            self.mtype = None
        else:
            self.mtype = Type(mtype)
        self.mid = mid
        if code is None:
            # as above with mtype
            self.code = None
        else:
            self.code = Code(code)
        self.token = token
        self.payload = payload
        self.opt = Options()

        self.remote = None

        self.transport_tuning = transport_tuning or TransportTuning()

        # deprecation error, should go away roughly after 0.2 release
        if self.payload is None:
            raise TypeError("Payload must not be None. Use empty string instead.")

        if uri:
            self.set_request_uri(uri)

        for k, v in kwargs.items():
            setattr(self.opt, k, v)
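A short construction example for the interface documented above; the printed shapes are illustrative (exact reprs may differ):

```python
import aiocoap
from aiocoap.numbers import GET

# Options can be set via the URI and via keyword arguments alike:
req = aiocoap.Message(code=GET, uri="coap://example.com/sensors/temp")

print(req.opt.uri_path)  # e.g. ('sensors', 'temp')
print(req.remote)        # e.g. UndecidedRemote(scheme='coap', hostinfo='example.com')
```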
    def __repr__(self):
        return "<aiocoap.Message at %#x: %s %s (%s, %s) remote %s%s%s>" % (
            id(self),
            self.mtype if self.mtype is not None else "no mtype,",
            self.code,
            "MID %s" % self.mid if self.mid is not None else "no MID",
            "token %s" % self.token.hex() if self.token else "empty token",
            self.remote,
            ", %s option(s)" % len(self.opt._options) if self.opt._options else "",
            ", %s byte(s) payload" % len(self.payload) if self.payload else "",
        )

    def _repr_html_(self):
        """An HTML representation for Jupyter and similar environments

        While the precise format is not guaranteed, it will be usable through
        tooltips and possibly fold-outs:

        >>> from aiocoap import *
        >>> msg = Message(code=GET, uri="coap://localhost/other/separate")
        >>> html = msg._repr_html_()
        >>> 'GET' in html
        True
        >>> '3 options' in html
        True
        """
        import html

        if not self.payload:
            payload_rendered = "<p>No payload</p>"
        else:
            from . import defaults

            if defaults.prettyprint_missing_modules():
                payload_rendered = f"<code>{html.escape(repr(self.payload))}</code>"
            else:
                from .util.prettyprint import pretty_print, lexer_for_mime

                (notes, mediatype, text) = pretty_print(self)

                import pygments
                from pygments.formatters import HtmlFormatter

                try:
                    lexer = lexer_for_mime(mediatype)
                    text = pygments.highlight(text, lexer, HtmlFormatter())
                except pygments.util.ClassNotFound:
                    text = html.escape(text)

                payload_rendered = (
                    "<div>"
                    + "".join(
                        f'<p style="color:gray;font-size:small">{html.escape(n)}</p>'
                        for n in notes
                    )
                    + f"<pre>{text}</pre>"
                    + "</div>"
                )
        return f"""<details><summary>Message with code \
{self.code._repr_html_() if self.code is not None else 'None'}, \
remote {html.escape(str(self.remote))}</summary>\
{self.opt._repr_html_()}{payload_rendered}</details>"""

    def copy(self, **kwargs):
        """Create a copy of the Message.

        kwargs are treated like the named arguments in the constructor, and
        update the copy."""
        # This is part of moving messages in an "immutable" direction; not
        # necessarily hard immutable. Let's see where this goes.

        new = type(self)(
            mtype=kwargs.pop("mtype", self.mtype),
            mid=kwargs.pop("mid", self.mid),
            code=kwargs.pop("code", self.code),
            payload=kwargs.pop("payload", self.payload),
            token=kwargs.pop("token", self.token),
            # Assuming these are not readily mutated, but rather passed
            # around in a class-like fashion
            transport_tuning=kwargs.pop("transport_tuning", self.transport_tuning),
        )
        new.remote = kwargs.pop("remote", self.remote)
        new.opt = copy.deepcopy(self.opt)

        if "uri" in kwargs:
            new.set_request_uri(kwargs.pop("uri"))

        for k, v in kwargs.items():
            setattr(new.opt, k, v)

        return new

    @classmethod
    def decode(cls, rawdata, remote=None):
        """Create Message object from binary representation of message."""
        try:
            (vttkl, code, mid) = struct.unpack("!BBH", rawdata[:4])
        except struct.error:
            raise error.UnparsableMessage("Incoming message too short for CoAP")
        version = (vttkl & 0xC0) >> 6
        if version != 1:
            raise error.UnparsableMessage("Fatal Error: Protocol Version must be 1")
        mtype = (vttkl & 0x30) >> 4
        token_length = vttkl & 0x0F
        msg = Message(mtype=mtype, mid=mid, code=code)
        msg.token = rawdata[4 : 4 + token_length]
        msg.payload = msg.opt.decode(rawdata[4 + token_length :])
        msg.remote = remote
        return msg

    def encode(self):
        """Create binary representation of message from Message object."""
        if self.code is None or self.mtype is None or self.mid is None:
            raise TypeError(
                "Fatal Error: Code, Message Type and Message ID must not be None."
            )
        rawdata = bytes(
            [
                (self.version << 6)
                + ((self.mtype & 0x03) << 4)
                + (len(self.token) & 0x0F)
            ]
        )
        rawdata += struct.pack("!BH", self.code, self.mid)
        rawdata += self.token
        rawdata += self.opt.encode()
        if len(self.payload) > 0:
            rawdata += bytes([0xFF])
            rawdata += self.payload
        return rawdata

    def get_cache_key(self, ignore_options=()):
        """Generate a hashable and comparable object (currently a tuple) from
        the message's code and all option values that are part of the cache
        key and not in the optional list of ignore_options (which is the list
        of option numbers that are not technically NoCacheKey but handled by
        the application using this method).
>>> from aiocoap.numbers import GET >>> m1 = Message(code=GET) >>> m2 = Message(code=GET) >>> m1.opt.uri_path = ('s', '1') >>> m2.opt.uri_path = ('s', '1') >>> m1.opt.size1 = 10 # the only no-cache-key option in the base spec >>> m2.opt.size1 = 20 >>> m1.get_cache_key() == m2.get_cache_key() True >>> m2.opt.etag = b'000' >>> m1.get_cache_key() == m2.get_cache_key() False >>> from aiocoap.numbers.optionnumbers import OptionNumber >>> ignore = [OptionNumber.ETAG] >>> m1.get_cache_key(ignore) == m2.get_cache_key(ignore) True """ options = [] for option in self.opt.option_list(): if option.number in ignore_options or ( option.number.is_safetoforward() and option.number.is_nocachekey() ): continue options.append((option.number, option.value)) return (self.code, tuple(options)) # # splitting and merging messages into and from message blocks # def _extract_block(self, number, size_exp, max_bert_size): """Extract block from current message.""" if size_exp == 7: start = number * 1024 size = 1024 * (max_bert_size // 1024) else: size = 2 ** (size_exp + 4) start = number * size if start >= len(self.payload): raise error.BadRequest("Block request out of bounds") end = start + size if start + size < len(self.payload) else len(self.payload) more = True if end < len(self.payload) else False payload = self.payload[start:end] blockopt = (number, more, size_exp) if self.code.is_request(): return self.copy(payload=payload, mid=None, block1=blockopt) else: return self.copy(payload=payload, mid=None, block2=blockopt) def _append_request_block(self, next_block): """Modify message by appending another block""" if not self.code.is_request(): raise ValueError("_append_request_block only works on requests.") block1 = next_block.opt.block1 if block1.more: if len(next_block.payload) == block1.size: pass elif ( block1.size_exponent == 7 and len(next_block.payload) % block1.size == 0 ): pass else: raise error.BadRequest("Payload size does not match Block1") if block1.start == len(self.payload): self.payload += next_block.payload self.opt.block1 = block1 self.token = next_block.token self.mid = next_block.mid if not block1.more and next_block.opt.block2 is not None: self.opt.block2 = next_block.opt.block2 else: # possible extension point: allow messages with "gaps"; then # ValueError would only be raised when trying to overwrite an # existing part; it is doubtful though that the blockwise # specification even condones such behavior. raise ValueError() def _append_response_block(self, next_block): """Append next block to current response message. Used when assembling incoming blockwise responses.""" if not self.code.is_response(): raise ValueError("_append_response_block only works on responses.") block2 = next_block.opt.block2 if not block2.is_valid_for_payload_size(len(next_block.payload)): raise error.UnexpectedBlock2("Payload size does not match Block2") if block2.start != len(self.payload): # Does not need to be implemented as long as the requesting code # sequentially clocks out data raise error.NotImplemented() if next_block.opt.etag != self.opt.etag: raise error.ResourceChanged() self.payload += next_block.payload self.opt.block2 = block2 self.token = next_block.token self.mid = next_block.mid def _generate_next_block2_request(self, response): """Generate a sub-request for next response block. 
        This method is used by the client after receiving a blockwise response
        from the server with the "more" flag set."""
        # Note: response here is the assembled response, but (due to
        # _append_response_block's workings) it carries the Block2 option of
        # the last received block.
        next_after_received = len(response.payload) // response.opt.block2.size
        blockopt = optiontypes.BlockOption.BlockwiseTuple(
            next_after_received, False, response.opt.block2.size_exponent
        )
        # has been checked in assembly, just making sure
        assert blockopt.start == len(
            response.payload
        ), "Unexpected state of preassembled message"

        blockopt = blockopt.reduced_to(response.remote.maximum_block_size_exp)

        return self.copy(
            payload=b"",
            mid=None,
            token=None,
            block2=blockopt,
            block1=None,
            observe=None,
        )

    def _generate_next_block1_response(self):
        """Generate a response to acknowledge an incoming request block.

        This method is used by the server after receiving a blockwise request
        from the client with the "more" flag set."""
        response = Message(code=CHANGED, token=self.token)
        response.remote = self.remote
        if (
            self.opt.block1.block_number == 0
            and self.opt.block1.size_exponent
            > self.transport_tuning.DEFAULT_BLOCK_SIZE_EXP
        ):
            new_size_exponent = self.transport_tuning.DEFAULT_BLOCK_SIZE_EXP
            response.opt.block1 = (0, True, new_size_exponent)
        else:
            response.opt.block1 = (
                self.opt.block1.block_number,
                True,
                self.opt.block1.size_exponent,
            )
        return response

    #
    # the message in the context of network and addresses
    #

    def get_request_uri(self, *, local_is_server=False):
        """The absolute URI this message belongs to.

        For requests, this is composed from the options (falling back to the
        remote). For responses, this is largely taken from the original
        request message (so far, that could have been tracked by the
        requesting application as well), but -- in case of a multicast request
        -- with the host replaced by the responder's endpoint details.

        This implements Section 6.5 of RFC7252.

        By default, these values are only valid on the client. To determine a
        message's request URI on the server, set the local_is_server argument
        to True. Note that determining the request URI on the server is
        brittle when behind a reverse proxy, may not be possible on all
        platforms, and can only be applied to a request message in a renderer
        (for the response message created by the renderer will only be
        populated when it gets transmitted; simple manual copying of the
        request's remote to the response will not magically make this work,
        for in the very case where the request and response's URIs differ,
        that would not catch the difference and still report the multicast
        address, while the actual sending address will only be populated by
        the operating system later).
        """
        # maybe this function does not belong exactly *here*, but it belongs
        # to the results of .request(message), which is currently a message
        # itself.

        if hasattr(self, "_original_request_uri"):
            # During server-side processing, a message's options may be
            # altered to the point where its options don't accurately reflect
            # its URI any more. In that case, this is stored.
            return self._original_request_uri

        if self.code.is_response():
            refmsg = self.request

            if refmsg.remote.is_multicast:
                if local_is_server:
                    multicast_netloc_override = self.remote.hostinfo_local
                else:
                    multicast_netloc_override = self.remote.hostinfo
            else:
                multicast_netloc_override = None
        else:
            refmsg = self
            multicast_netloc_override = None

        proxyuri = refmsg.opt.proxy_uri
        if proxyuri is not None:
            return proxyuri

        scheme = refmsg.opt.proxy_scheme or refmsg.remote.scheme
        query = refmsg.opt.uri_query or ()
        path = refmsg.opt.uri_path

        if multicast_netloc_override is not None:
            netloc = multicast_netloc_override
        else:
            if local_is_server:
                netloc = refmsg.remote.hostinfo_local
            else:
                netloc = refmsg.remote.hostinfo

            if refmsg.opt.uri_host is not None or refmsg.opt.uri_port is not None:
                host, port = hostportsplit(netloc)

                host = refmsg.opt.uri_host or host
                port = refmsg.opt.uri_port or port

                # FIXME: This sounds like it should be part of
                # hostportjoin/-split
                escaped_host = quote_nonascii(host)

                # FIXME: "If host is not valid reg-name / IP-literal /
                # IPv4address, fail"

                netloc = hostportjoin(escaped_host, port)

        # FIXME this should follow coap section 6.5 more closely
        query = "&".join(_quote_for_query(q) for q in query)
        path = "".join("/" + _quote_for_path(p) for p in path) or "/"

        fragment = None
        params = ""  # are they not there at all?

        # Eases debugging, for when they raise from urlunparse you won't know
        # which of them it was
        assert scheme is not None, "Remote has no scheme set"
        assert netloc is not None, "Remote has no netloc set"
        return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))

    def set_request_uri(self, uri, *, set_uri_host=True):
        """Parse a given URI into the uri_* fields of the options.

        The remote does not get set automatically; instead, the remote data is
        stored in the uri_host and uri_port options. That is because name
        resolution is coupled with network specifics the protocol will know
        better by the time the message is sent. Whatever sends the message, be
        it the protocol itself, a proxy wrapper or an alternative transport,
        will know how to handle the information correctly.

        When ``set_uri_host=False`` is passed, the host/port is stored in the
        ``unresolved_remote`` message property instead of the uri_host option;
        as a result, the unresolved host name is not sent on the wire, which
        breaks virtual hosts but makes message sizes smaller.

        This implements Section 6.4 of RFC7252.

        This raises IncompleteUrlError if URI references are passed in
        (instead of a full URI), and MalformedUrlError if the URI
        specification or the library's expectations of URI shapes (eg.
        'coap+tcp:no-slashes') are violated.
        """
        try:
            parsed = urllib.parse.urlparse(uri)
        except ValueError as e:
            raise error.MalformedUrlError from e

        if parsed.fragment:
            raise error.MalformedUrlError(
                "Fragment identifiers can not be set on a request URI"
            )

        if not parsed.scheme:
            raise error.IncompleteUrlError()

        if parsed.scheme not in coap_schemes:
            self.opt.proxy_uri = uri
            return

        if not parsed.hostname:
            raise error.MalformedUrlError("CoAP URIs need a hostname")

        if parsed.username or parsed.password:
            raise error.MalformedUrlError("User name and password not supported.")

        try:
            if parsed.path not in ("", "/"):
                # FIXME: This tolerates incomplete % sequences.
                self.opt.uri_path = [
                    urllib.parse.unquote(x, errors="strict")
                    for x in parsed.path.split("/")[1:]
                ]
            else:
                self.opt.uri_path = []
            if parsed.query:
                # FIXME: This tolerates incomplete % sequences.
                self.opt.uri_query = [
                    urllib.parse.unquote(x, errors="strict")
                    for x in parsed.query.split("&")
                ]
            else:
                self.opt.uri_query = []
        except UnicodeError as e:
            raise error.MalformedUrlError(
                "Percent encoded strings in CoAP URIs need to be UTF-8 encoded"
            ) from e

        self.remote = UndecidedRemote(parsed.scheme, parsed.netloc)

        try:
            _ = parsed.port
        except ValueError as e:
            raise error.MalformedUrlError("Port must be numeric") from e

        is_ip_literal = parsed.netloc.startswith("[") or (
            parsed.hostname.count(".") == 3
            and all(c in "0123456789." for c in parsed.hostname)
            and all(int(x) <= 255 for x in parsed.hostname.split("."))
        )

        if set_uri_host and not is_ip_literal:
            try:
                self.opt.uri_host = urllib.parse.unquote(
                    parsed.hostname, errors="strict"
                ).translate(_ascii_lowercase)
            except UnicodeError as e:
                raise error.MalformedUrlError(
                    "Percent encoded strings in CoAP URI hosts need to be "
                    "UTF-8 encoded"
                ) from e

    # Deprecated accessors to moved functionality

    @property
    def unresolved_remote(self):
        return self.remote.hostinfo

    @unresolved_remote.setter
    def unresolved_remote(self, value):
        # should get a big fat deprecation warning
        if value is None:
            self.remote = UndecidedRemote("coap", None)
        else:
            self.remote = UndecidedRemote("coap", value)

    @property
    def requested_scheme(self):
        if self.code.is_request():
            return self.remote.scheme
        else:
            return self.request.requested_scheme

    @requested_scheme.setter
    def requested_scheme(self, value):
        self.remote = UndecidedRemote(value, self.remote.hostinfo)

    @property
    def requested_proxy_uri(self):
        return self.request.opt.proxy_uri

    @property
    def requested_hostinfo(self):
        return self.request.opt.uri_host or self.request.unresolved_remote

    @property
    def requested_path(self):
        return self.request.opt.uri_path

    @property
    def requested_query(self):
        return self.request.opt.uri_query


class UndecidedRemote(
    namedtuple("_UndecidedRemote", ("scheme", "hostinfo")), interfaces.EndpointAddress
):
    """Remote that is set on messages that have not been sent through any
    transport.

    It describes scheme, hostname and port that were set in
    :meth:`.set_request_uri()` or when setting a URI per Message constructor.

    * :attr:`scheme`: The scheme string
    * :attr:`hostinfo`: The authority component of the URI, as it would occur
      in the URI.

    In order to produce URIs identical to those received in responses, and
    because the underlying types should really be binary anyway, IP addresses
    in the hostinfo are normalized:

    >>> UndecidedRemote("coap+tcp", "[::0001]:1234")
    UndecidedRemote(scheme='coap+tcp', hostinfo='[::1]:1234')
    """

    # This is settable per instance, for other transports to pick it up.
    maximum_block_size_exp = MAX_REGULAR_BLOCK_SIZE_EXP

    def __new__(cls, scheme, hostinfo):
        if "[" in hostinfo:
            (host, port) = hostportsplit(hostinfo)
            ip = ipaddress.ip_address(host)
            host = str(ip)
            hostinfo = hostportjoin(host, port)

        return super().__new__(cls, scheme, hostinfo)

    @classmethod
    def from_pathless_uri(cls, uri: str) -> UndecidedRemote:
        """Create an UndecidedRemote for a given URI that has no query, path,
        fragment or other components not expressed in an UndecidedRemote

        >>> from aiocoap.message import UndecidedRemote
        >>> UndecidedRemote.from_pathless_uri("coap://localhost")
        UndecidedRemote(scheme='coap', hostinfo='localhost')
        """
        parsed = urllib.parse.urlparse(uri)

        if parsed.username or parsed.password:
            raise ValueError("User name and password not supported.")

        if parsed.path not in ("", "/") or parsed.query or parsed.fragment:
            raise ValueError(
                "Paths and query and fragment can not be set on an "
                "UndecidedRemote"
            )

        return cls(parsed.scheme, parsed.netloc)


_ascii_lowercase = str.maketrans(string.ascii_uppercase, string.ascii_lowercase)

_quote_for_path = quote_factory(unreserved + sub_delims + ":@")
_quote_for_query = quote_factory(
    unreserved + "".join(c for c in sub_delims if c != "&") + ":@/?"
)

#: Result that can be returned from a render method instead of a Message when
#: due to defaults (eg. multicast link-format queries) or explicit
#: configuration (eg. the No-Response option), no response should be sent at
#: all. Note that per RFC7967 section 2, an ACK is still sent to a CON
#: request.
#:
#: Deprecated; set the no_response option on a regular response instead (see
#: :meth:`.interfaces.Resource.render` for details).
NoResponse = Sentinel("NoResponse")
aiocoap-0.4.12/aiocoap/messagemanager.py000066400000000000000000000534421472205122200201600ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This module contains all internals needed to manage messages on unreliable
transports, ie. everything that deals in message types or Message IDs.

Currently, it also provides the mechanisms for managing tokens, but those will
be split into dedicated classes.
"""

import asyncio
import functools
import random
from typing import Callable, Dict, List, Tuple, Optional

from . import error
from . import interfaces
from .interfaces import EndpointAddress
from .message import Message
from .numbers.types import CON, ACK, RST, NON
from .numbers.codes import EMPTY


class MessageManager(interfaces.TokenInterface, interfaces.MessageManager):
    """This MessageManager drives a message interface following the rules of
    RFC7252 CoAP over UDP.

    It takes care of picking message IDs (mid) for outgoing messages,
    retransmitting CON messages, and of reacting appropriately to incoming
    messages' type, sending ACKs either immediately or later.

    It creates piggy-backed responses by keeping an eye on the tokens the
    messages are sent with, but otherwise ignores the tokens. (It inspects
    tokens *only* where required by its sub-layer.)
    """

    def __init__(self, token_manager) -> None:
        self.token_manager = token_manager

        self.message_id = random.randint(0, 65535)
        #: Tracker of recently received messages (by remote and message ID).
        #: Maps them to a response message when one is already known.
        self._recent_messages: Dict[Tuple[EndpointAddress, int], Optional[Message]] = {}
        #: Active exchanges, i.e. sent CON messages; maps (remote, message-id)
        #: to (messageerror_monitor, cancellable timeout)
        self._active_exchanges: Dict[
            Tuple[EndpointAddress, int], Tuple[Callable[[], None], asyncio.Handle]
        ] = {}
        #: Per-remote list of (backlogged message, messageerror_monitor)
        #: tuples (keys exist iff there is an active_exchange with that node)
        self._backlogs: Dict[
            EndpointAddress, List[Tuple[Message, Callable[[], None]]]
        ] = {}

        #: Maps pending remote/token combinations to the MID a response can be
        #: piggybacked on, and the timeout that should be cancelled if it is.
        self._piggyback_opportunities: Dict[
            Tuple[EndpointAddress, bytes], Tuple[int, asyncio.TimerHandle]
        ] = {}

        self.log = token_manager.log
        self.loop = token_manager.loop

        # self.message_interface = … -- needs to be set post-construction,
        # because the message_interface in its constructor already needs to
        # get its manager
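The post-construction wiring mentioned in the comment above can be sketched as follows; make_manager and make_udp_interface are hypothetical names invented for this illustration (the actual setup lives in aiocoap's context and transport creation code):

```python
# Hypothetical wiring sketch -- not aiocoap's real factory code:
async def make_manager(token_manager):
    manager = MessageManager(token_manager)
    # The interface needs its manager for dispatch_message/dispatch_error
    # callbacks, so it can only be attached after construction:
    manager.message_interface = await make_udp_interface(manager)
    return manager
```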
    def __repr__(self):
        return "<%s for %s>" % (
            type(self).__name__,
            getattr(self, "message_interface", "(unbound)"),
        )

    @property
    def client_credentials(self):
        return self.token_manager.client_credentials

    async def shutdown(self):
        for messageerror_monitor, cancellable in self._active_exchanges.values():
            # Not calling messageerror_monitor: This is not message specific,
            # and its shutdown will take care of these things
            cancellable.cancel()
        self._active_exchanges = None

        await self.message_interface.shutdown()

    #
    # implementing the MessageManager interface
    #

    def dispatch_message(self, message):
        """Feed a message through the message-id, message-type and
        message-code sublayers of CoAP"""

        self.log.debug("Incoming message %r", message)
        if message.code.is_request():
            # Responses don't get deduplication because they "are idempotent
            # or can be handled in an idempotent fashion" (RFC 7252 Section
            # 4.5). This means that a separate response may get a RST when it
            # arrives at the aiocoap client twice. Note that this does not
            # impede the operation of observations: Their token is still
            # active so they are ACK'd, and deduplication based on observation
            # numbers filters out the rest.
            #
            # This saves memory, and allows stateful transports to be shut
            # down expeditiously unless kept alive by something else
            # (otherwise, they'd linger for EXCHANGE_LIFETIME with no good
            # reason).
            if self._deduplicate_message(message) is True:
                return

        if message.mtype in (ACK, RST):
            self._remove_exchange(message)

        if message.code is EMPTY and message.mtype is CON:
            self._process_ping(message)
        elif message.code is EMPTY and message.mtype in (ACK, RST):
            pass  # empty ack has already been handled above
        elif message.code.is_request() and message.mtype in (CON, NON):
            # the request handler will have to deal with sending ACK itself,
            # as it might be timeout-related
            self._process_request(message)
        elif message.code.is_response() and message.mtype in (CON, NON, ACK):
            success = self._process_response(message)
            if success:
                if message.mtype is CON:
                    self._send_empty_ack(
                        message.remote,
                        message.mid,
                        reason="acknowledging incoming response",
                    )
            else:
                # A peer mustn't send a CON to multicast, but if a malicious
                # peer does, we better not answer
                if message.mtype == CON and not message.remote.is_multicast_locally:
                    self.log.info("Response not recognized - sending RST.")
                    rst = Message(mtype=RST, mid=message.mid, code=EMPTY, payload=b"")
                    rst.remote = message.remote.as_response_address()
                    self._send_initially(rst)
                else:
                    self.log.info(
                        "Ignoring unknown response (which is not a unicast CON)"
                    )
        else:
            self.log.warning(
                "Received a message with code %s and type %s (those don't "
                "fit) from %s, ignoring it.",
                message.code,
                message.mtype,
                message.remote,
            )

    def dispatch_error(self, error, remote):
        if self._active_exchanges is None:
            # Not entirely sure where it is so far; better just raise a
            # warning than an exception later, nothing terminally bad should
            # come of this error.
            self.log.warning(
                "Internal shutdown sequence mismatch: error dispatched "
                "through messagemanager after shutdown"
            )
            return

        self.log.debug("Incoming error %s from %r", error, remote)

        # cancel requests first, and then exchanges: cancelling the pending
        # exchange would trigger enqueued requests to be transmitted
        self.token_manager.dispatch_error(error, remote)

        keys_for_removal = []
        for key, (
            messageerror_monitor,
            cancellable_timeout,
        ) in self._active_exchanges.items():
            (exchange_remote, message_id) = key
            if remote == exchange_remote:
                keys_for_removal.append(key)
        for k in keys_for_removal:
            (messageerror_monitor, cancellable_timeout) = self._active_exchanges.pop(k)
            cancellable_timeout.cancel()
            # not triggering the messageerror_monitor: that already got the
            # clue from the token manager

        self._backlogs.pop(remote, ())
        # while that's an iterable over messages and messageerror monitors,
        # we're not triggering them either, for the same reason as above
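On the wire, the ping handling in the dispatch above is a four-byte exchange; the header arithmetic here is fixed by RFC 7252 (version and type in the first byte, then code and Message ID):

```python
# A "CoAP ping" is an empty CON message; the manager answers with an RST
# carrying the same Message ID. Header-only messages, no token/options/payload:
ping = bytes([0x40, 0x00, 0x12, 0x34])  # ver=1, CON (0), code 0.00 EMPTY, MID 0x1234
rst  = bytes([0x70, 0x00, 0x12, 0x34])  # ver=1, RST (3), code 0.00 EMPTY, same MID

# First byte layout: version << 6 | type << 4 | token length
assert ping[0] == (1 << 6) | (0 << 4) | 0
assert rst[0] == (1 << 6) | (3 << 4) | 0
```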
    #
    # coap dispatch, message-id sublayer: duplicate handling
    #

    def _deduplicate_message(self, message):
        """Return True if a message is a duplicate, and re-send the stored
        response if available.

        A duplicate is a message with the same Message ID (mid) and sender
        (remote) as a message received within the last EXCHANGE_LIFETIME
        seconds (usually 247 seconds)."""
        key = (message.remote, message.mid)
        if key in self._recent_messages:
            if message.mtype is CON:
                if self._recent_messages[key] is not None:
                    self.log.info("Duplicate CON received, sending old response again")
                    # not going via send_message because that would strip the
                    # mid and might do all other sorts of checks
                    self._send_initially(self._recent_messages[key])
                else:
                    self.log.info("Duplicate CON received, no response to send yet")
            else:
                self.log.info("Duplicate NON, ACK or RST received")
            return True
        else:
            self.log.debug("New unique message received")
            self.loop.call_later(
                message.transport_tuning.EXCHANGE_LIFETIME,
                functools.partial(self._recent_messages.pop, key),
            )
            self._recent_messages[key] = None
            return False

    def _store_response_for_duplicates(self, message):
        """If the message is a response that can be used to satisfy a future
        duplicate message, store it."""

        key = (message.remote, message.mid)
        if key in self._recent_messages:
            self._recent_messages[key] = message

    #
    # coap dispatch, message-type sublayer: retransmission handling
    #

    def _add_exchange(self, message, messageerror_monitor):
        """Add an "exchange" for an outgoing CON message.

        CON (Confirmable) messages are automatically retransmitted by the
        protocol until an ACK or RST message with the same Message ID is
        received from the target host."""
        key = (message.remote, message.mid)

        if message.remote not in self._backlogs:
            self._backlogs[message.remote] = []

        timeout = random.uniform(
            message.transport_tuning.ACK_TIMEOUT,
            message.transport_tuning.ACK_TIMEOUT
            * message.transport_tuning.ACK_RANDOM_FACTOR,
        )

        next_retransmission = self._schedule_retransmit(message, timeout, 0)
        self._active_exchanges[key] = (messageerror_monitor, next_retransmission)

        self.log.debug("Exchange added, message ID: %d.", message.mid)

    def _remove_exchange(self, message):
        """Remove an exchange from the active exchanges and cancel the timeout
        to the next retransmission."""
        key = (message.remote, message.mid)

        if key not in self._active_exchanges:
            # Before turning this up to a warning, consider
            # https://github.com/chrysn/aiocoap/issues/288
            self.log.info(
                "Received %s from %s, but could not match it to a running "
                "exchange.",
                message.mtype,
                message.remote,
            )
            return

        messageerror_monitor, next_retransmission = self._active_exchanges.pop(key)
        next_retransmission.cancel()
        if message.mtype is RST:
            messageerror_monitor()
        self.log.debug("Exchange removed, message ID: %d.", message.mid)

        self._continue_backlog(message.remote)

    def _continue_backlog(self, remote):
        """After an exchange has been removed, start working off the backlog
        or clear it completely."""

        if remote not in self._backlogs:
            # if active exchanges were something we could do a
            # .register_finally() on, we could chain them like that; if we
            # implemented anything but NSTART=1, we'll need a more elaborate
            # system anyway
            raise AssertionError(
                "backlogs/active_exchange relation violated (implementation "
                "error)"
            )

        # first iteration is sure to happen, others happen only if the
        # enqueued messages were NONs
        while not any(r == remote for r, mid in self._active_exchanges.keys()):
            if self._backlogs[remote] != []:
                next_message, messageerror_monitor = self._backlogs[remote].pop(0)
                self._send_initially(next_message, messageerror_monitor)
            else:
                del self._backlogs[remote]
                break

    def _schedule_retransmit(self, message, timeout, retransmission_counter):
        """Create and return a call_later for first or subsequent
        retransmissions."""

        # while this could just as well be done in a lambda or with the
        # arguments passed to call_later, doing it in this form makes the test
        # cases easier to debug (it's about finding where references to a
        # Context are kept around; contexts should be able to shut down in an
        # orderly way without littering references in the loop)

        def retr(
            self=self,
            message=message,
            timeout=timeout,
            retransmission_counter=retransmission_counter,
            doc="If you read this, have a look at _schedule_retransmit",
            id=object(),
        ):
            self._retransmit(message, timeout, retransmission_counter)

        return self.loop.call_later(timeout, retr)

    def _retransmit(self, message, timeout, retransmission_counter):
        """Retransmit a CON message that has not been ACKed or RSTed."""
        key = (message.remote, message.mid)

        messageerror_monitor, next_retransmission = self._active_exchanges.pop(key)
        # this should be a no-op, but let's be sure
        next_retransmission.cancel()

        if retransmission_counter < message.transport_tuning.MAX_RETRANSMIT:
            self.log.info("Retransmission, Message ID: %d.", message.mid)
            self._send_via_transport(message)
            retransmission_counter += 1
            timeout *= 2

            next_retransmission = self._schedule_retransmit(
                message, timeout, retransmission_counter
            )
            self._active_exchanges[key] = (messageerror_monitor, next_retransmission)
        else:
            self.log.info("Exchange timed out trying to transmit %s", message)
            del self._backlogs[message.remote]
            self.token_manager.dispatch_error(
                error.ConRetransmitsExceeded("Retransmissions exceeded"),
                message.remote,
            )

    #
    # coap dispatch, message-code sublayer: triggering custom actions based on
    # incoming messages
    #

    def _process_ping(self, message):
        self.log.info("Received CoAP Ping from %s, replying with RST.", message.remote)
        rst = Message(mtype=RST, mid=message.mid, code=EMPTY, payload=b"")
        rst.remote = message.remote.as_response_address()
        # not going via send_message because that would strip the mid, and we
        # already know that it can go straight to the wire
        self._send_initially(rst)

    def _process_request(self, request):
        """Spawn a responder for an incoming request, or feed a long-running
        responder if one exists."""
        if request.mtype == CON:

            def on_timeout(self, remote, token):
                mid, own_timeout = self._piggyback_opportunities.pop((remote, token))
                self._send_empty_ack(
                    request.remote, mid, "Response took too long to prepare"
                )

            handle = self.loop.call_later(
                request.transport_tuning.EMPTY_ACK_DELAY,
                on_timeout,
                self,
                request.remote,
                request.token,
            )
            key = (request.remote, request.token)

            if key in self._piggyback_opportunities:
                self.log.warning(
                    "New request came in while old request not"
                    " ACKed yet. Possible mismatch between EMPTY_ACK_DELAY"
                    " and EXCHANGE_LIFETIME. Cancelling ACK to ward off any"
                    " further confusion."
                )
                mid, old_handle = self._piggyback_opportunities.pop(key)
                old_handle.cancel()

            self._piggyback_opportunities[key] = (request.mid, handle)

        self.token_manager.process_request(request)
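Unrolling the retransmission logic above with RFC 7252's default tuning values (ACK_TIMEOUT = 2 s, ACK_RANDOM_FACTOR = 1.5, MAX_RETRANSMIT = 4) gives the familiar CoAP backoff schedule:

```python
import random

# Initial timeout is drawn from [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR]:
timeout = random.uniform(2.0, 2.0 * 1.5)
for attempt in range(4):  # MAX_RETRANSMIT
    print(f"retransmit #{attempt + 1} after {timeout:.1f}s")
    timeout *= 2  # exponential backoff, as in _retransmit above

# With the maximum initial timeout of 3s, the last retransmission happens
# 3 + 6 + 12 + 24 = 45s after the first send (MAX_TRANSMIT_SPAN).
```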
    def _process_response(self, response):
        """Feed a response back to whatever might expect it.

        Returns True if the response was expected (and should be ACK'd
        depending on mtype), and False if it was not expected (and should be
        RST'd)."""
        self.log.debug("Received Response: %r", response)

        return self.token_manager.process_response(response)

    #
    # outgoing messages
    #

    async def fill_or_recognize_remote(self, message):
        if message.remote is not None:
            if await self.message_interface.recognize_remote(message.remote):
                return True
        remote = await self.message_interface.determine_remote(message)
        if remote is not None:
            message.remote = remote
            return True
        return False

    def send_message(self, message, messageerror_monitor):
        """Encode and send message. This takes care of retransmissions (if
        CON), message IDs and rate limiting, but does not hook any events to
        responses. (Use the :class:`Request` class or responding resources
        instead; those are the typical callers of this function.)

        Notification about a failed exchange is provided through the
        messageerror_monitor callback (see
        :meth:`.interfaces.TokenInterface.send_message`)."""

        if message.mid is not None:
            # if you can give any reason why the application should provide a
            # fixed mid, lower the log level on demand and provide the reason
            # in a comment.
            self.log.warning(
                "Message ID set on to-be-sent message, this is"
                " probably unintended; clearing it."
            )
            message.mid = None

        if message.code.is_response():
            no_response = (message.opt.no_response or 0) & (
                1 << message.code.class_ - 1
            ) != 0

            piggyback_key = (message.remote, message.token)
            if piggyback_key in self._piggyback_opportunities:
                mid, handle = self._piggyback_opportunities.pop(piggyback_key)
                handle.cancel()

                if no_response:
                    new_message = Message(code=EMPTY, mid=mid, mtype=ACK)
                    new_message.remote = message.remote.as_response_address()
                    message = new_message

                    self.log.debug(
                        "Turning to-be-sent message into an empty ACK due to "
                        "no_response option."
                    )
                else:
                    message.mtype = ACK
                    message.mid = mid
            else:
                if no_response:
                    self.log.debug(
                        "Stopping message in message manager as it is "
                        "no_response and no ACK is pending."
                    )
                    return

            message.opt.no_response = None

        if message.mtype is None:
            if self._active_exchanges is None:
                # during shutdown, this is all we can do
                message.mtype = NON
            else:
                if message.remote.is_multicast:
                    message.mtype = NON
                else:
                    # FIXME: on responses, this should take the request into
                    # consideration (cf. RFC7252 Section 5.2.3: the answer to
                    # a NON SHOULD be a NON)
                    message.mtype = CON
        else:
            if self._active_exchanges is None:
                self.log.warning(
                    "Forcing message to be sent as NON even though a "
                    "different type was specified, because the transport is "
                    "shutting down"
                )
                message.mtype = NON

        if message.mtype == CON and message.remote.is_multicast:
            raise ValueError("Refusing to send CON message to multicast address")

        if message.mid is None:
            message.mid = self._next_message_id()

        if message.mtype == CON and message.remote in self._backlogs:
            assert any(
                remote == message.remote for (remote, _) in self._active_exchanges
            )
            self.log.debug("Message to %s put into backlog", message.remote)
            self._backlogs[message.remote].append((message, messageerror_monitor))
        else:
            self._send_initially(message, messageerror_monitor)

    def _send_initially(self, message, messageerror_monitor=None):
        """Put the message on the wire for the first time, starting
        retransmission timeouts"""

        self.log.debug("Sending message %r", message)

        if message.mtype is CON:
            assert (
                messageerror_monitor is not None
            ), "messageerror_monitor needs to be set for CONs"
            self._add_exchange(message, messageerror_monitor)

        self._store_response_for_duplicates(message)

        self._send_via_transport(message)

    def _send_via_transport(self, message):
        """Put the message on the wire"""

        self.message_interface.send(message)

    def _next_message_id(self):
        """Reserve and return a new message ID."""
        message_id = self.message_id
        self.message_id = 0xFFFF & (1 + self.message_id)
        return message_id

    def _send_empty_ack(self, remote, mid, reason):
        """Send a separate empty ACK for any reason.

        Currently, this can happen only once per responder, that is, when the
        last block1 has been transferred and the first block2 is not ready
        yet."""

        self.log.debug("Sending empty ACK: %s", reason)
        ack = Message(
            mtype=ACK,
            code=EMPTY,
            payload=b"",
        )
        ack.remote = remote.as_response_address()
        ack.mid = mid
        # not going via send_message because that would strip the mid, and we
        # already know that it can go straight to the wire
        self._send_initially(ack)
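The 16-bit Message-ID counter used by _next_message_id above starts at a random value and simply wraps around; the masking arithmetic, spelled out:

```python
# Same arithmetic as _next_message_id, near the top of the ID space:
mid = 0xFFFE
for _ in range(3):
    print(mid)                 # 65534, 65535, 0
    mid = 0xFFFF & (1 + mid)   # wrap-around instead of overflow
```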
aiocoap-0.4.12/aiocoap/meta.py000066400000000000000000000014321472205122200161210ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

#: Make the library version internally available
#:
#: This is not supposed to be used in any decision-making process (use package
#: dependencies for that) or workarounds, but used by command-line tools or
#: the impl-info link to provide debugging information.
version = "0.4.12"

#: URI used to describe the current version of the library
#:
#: This is used the same way as `version` but when a URI is required, for
#: example as a default value for .well-known/core's rel=impl-info link.
library_uri = "https://christian.amsuess.com/tools/aiocoap/#version-" + version

#: URI used in error messages that ask the user to file a bug report
bugreport_uri = "https://github.com/chrysn/aiocoap/issues"
aiocoap-0.4.12/aiocoap/numbers/000077500000000000000000000000001472205122200162745ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/numbers/__init__.py000066400000000000000000000071671472205122200204160ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Module in which all meaningful numbers are collected. Most of the
submodules correspond to IANA registries.

The contents of the :mod:`.constants`, :mod:`.types` and :mod:`.codes` modules
are accessible through this module directly; :mod:`.contentformat`'s and
:mod:`.optionnumbers`' sole :class:`.contentformat.ContentFormat` and
:class:`.optionnumbers.OptionNumber` classes are accessible in the same way.
"""

import warnings
import string

from . import constants, types, codes
from .contentformat import ContentFormat, _MediaTypes, _MediaTypesRev
from .optionnumbers import OptionNumber

# These lists are hard-coded in because static checkers need them explicit.
# The code below helps keep it up to date.
# fmt: off
from .constants import COAPS_PORT, COAP_PORT, MAX_REGULAR_BLOCK_SIZE_EXP, MCAST_ALL, MCAST_IPV4_ALLCOAPNODES, MCAST_IPV6_LINKLOCAL_ALLCOAPNODES, MCAST_IPV6_LINKLOCAL_ALLNODES, MCAST_IPV6_SITELOCAL_ALLCOAPNODES, MCAST_IPV6_SITELOCAL_ALLNODES, SHUTDOWN_TIMEOUT, TransportTuning
from .types import Type, CON, NON, ACK, RST
from .codes import Code, EMPTY, GET, POST, PUT, DELETE, FETCH, PATCH, iPATCH, CREATED, DELETED, VALID, CHANGED, CONTENT, CONTINUE, BAD_REQUEST, UNAUTHORIZED, BAD_OPTION, FORBIDDEN, NOT_FOUND, METHOD_NOT_ALLOWED, NOT_ACCEPTABLE, REQUEST_ENTITY_INCOMPLETE, CONFLICT, PRECONDITION_FAILED, REQUEST_ENTITY_TOO_LARGE, UNSUPPORTED_CONTENT_FORMAT, UNPROCESSABLE_ENTITY, TOO_MANY_REQUESTS, INTERNAL_SERVER_ERROR, NOT_IMPLEMENTED, BAD_GATEWAY, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT, PROXYING_NOT_SUPPORTED, HOP_LIMIT_REACHED, CSM, PING, PONG, RELEASE, ABORT
__all__ = ['COAPS_PORT', 'COAP_PORT', 'MAX_REGULAR_BLOCK_SIZE_EXP', 'MCAST_ALL', 'MCAST_IPV4_ALLCOAPNODES', 'MCAST_IPV6_LINKLOCAL_ALLCOAPNODES', 'MCAST_IPV6_LINKLOCAL_ALLNODES', 'MCAST_IPV6_SITELOCAL_ALLCOAPNODES', 'MCAST_IPV6_SITELOCAL_ALLNODES', 'SHUTDOWN_TIMEOUT', 'TransportTuning', 'Type', 'CON', 'NON', 'ACK', 'RST', 'Code', 'EMPTY', 'GET', 'POST', 'PUT', 'DELETE', 'FETCH', 'PATCH', 'iPATCH', 'CREATED', 'DELETED', 'VALID', 'CHANGED', 'CONTENT', 'CONTINUE', 'BAD_REQUEST', 'UNAUTHORIZED', 'BAD_OPTION', 'FORBIDDEN', 'NOT_FOUND', 'METHOD_NOT_ALLOWED', 'NOT_ACCEPTABLE', 'REQUEST_ENTITY_INCOMPLETE', 'CONFLICT', 'PRECONDITION_FAILED', 'REQUEST_ENTITY_TOO_LARGE', 'UNSUPPORTED_CONTENT_FORMAT', 'UNPROCESSABLE_ENTITY', 'TOO_MANY_REQUESTS', 'INTERNAL_SERVER_ERROR', 'NOT_IMPLEMENTED', 'BAD_GATEWAY', 'SERVICE_UNAVAILABLE', 'GATEWAY_TIMEOUT', 'PROXYING_NOT_SUPPORTED', 'HOP_LIMIT_REACHED', 'CSM', 'PING', 'PONG', 'RELEASE', 'ABORT', 'OptionNumber', 'ContentFormat']
# fmt: on

if __debug__:
    _generated_all = (
        constants.__all__
        + types.__all__
        + codes.__all__
        + ["OptionNumber", "ContentFormat"]
    )
    if _generated_all != __all__:
        warnings.warn(f"""Hardcoded __all__ is out of sync (as are imports, \
probably), please update to

from .constants import {", ".join(constants.__all__)}
from .types import {", ".join(types.__all__)}
from .codes import {", ".join(codes.__all__)}

__all__ = {_generated_all}""")

media_types = _MediaTypes()
media_types_rev = _MediaTypesRev()


def __getattr__(name):
    if name[0] in string.ascii_uppercase and hasattr(
        constants._default_transport_tuning, name
    ):
        warnings.warn(
            f"{name} is deprecated, use through the message's transport_tuning "
            "instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(constants._default_transport_tuning, name)
    raise AttributeError(f"module {__name__} has no attribute {name}")
aiocoap-0.4.12/aiocoap/numbers/codes.py000066400000000000000000000156321472205122200177500ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""List of known
values for the CoAP "Code" field. The values in this module correspond to the IANA registry "`CoRE Parameters`_", subregistries "CoAP Method Codes" and "CoAP Response Codes". The codes come with methods that can be used to get their rough meaning, see the :class:`Code` class for details. .. _`CoRE Parameters`: https://www.iana.org/assignments/core-parameters/core-parameters.xhtml """ import warnings from ..util import ExtensibleIntEnum class Code(ExtensibleIntEnum): """Value for the CoAP "Code" field. As the number range for the code values is separated, the rough meaning of a code can be determined using the :meth:`is_request`, :meth:`is_response` and :meth:`is_successful` methods.""" EMPTY = 0 GET = 1 POST = 2 PUT = 3 DELETE = 4 FETCH = 5 PATCH = 6 iPATCH = 7 CREATED = 65 DELETED = 66 VALID = 67 CHANGED = 68 CONTENT = 69 CONTINUE = 95 BAD_REQUEST = 128 UNAUTHORIZED = 129 BAD_OPTION = 130 FORBIDDEN = 131 NOT_FOUND = 132 METHOD_NOT_ALLOWED = 133 NOT_ACCEPTABLE = 134 REQUEST_ENTITY_INCOMPLETE = 136 CONFLICT = (4 << 5) + 9 PRECONDITION_FAILED = 140 REQUEST_ENTITY_TOO_LARGE = 141 UNSUPPORTED_CONTENT_FORMAT = 143 @property def UNSUPPORTED_MEDIA_TYPE(self): warnings.warn( "UNSUPPORTED_MEDIA_TYPE is a deprecated alias for UNSUPPORTED_CONTENT_FORMAT" ) return self.UNSUPPORTED_CONTENT_FORMAT UNPROCESSABLE_ENTITY = (4 << 5) + 22 TOO_MANY_REQUESTS = (4 << 5) + 29 INTERNAL_SERVER_ERROR = 160 NOT_IMPLEMENTED = 161 BAD_GATEWAY = 162 SERVICE_UNAVAILABLE = 163 GATEWAY_TIMEOUT = 164 PROXYING_NOT_SUPPORTED = 165 HOP_LIMIT_REACHED = (5 << 5) + 8 CSM = 225 PING = 226 PONG = 227 RELEASE = 228 ABORT = 229 def is_request(self): """True if the code is in the request code range""" return True if (self >= 1 and self < 32) else False def is_response(self): """True if the code is in the response code range""" return True if (self >= 64 and self < 192) else False def is_signalling(self): return True if self >= 224 else False def is_successful(self): """True if the code is in the successful subrange of the response code range""" return True if (self >= 64 and self < 96) else False def can_have_payload(self): """True if a message with that code can carry a payload. This is not checked for strictly, but used as an indicator.""" return self.is_response() or self in ( self.POST, self.PUT, self.FETCH, self.PATCH, self.iPATCH, ) @property def class_(self): """The class of a code (distinguishing whether it's successful, a request or a response error or more). 
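The class is the top three bits of the numeric value, i.e. ``code >> 5``;
for example, 2.05 Content is 69 = 2 * 32 + 5, so its class is 2.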
>>> Code.CONTENT >>> Code.CONTENT.class_ 2 >>> Code.BAD_GATEWAY >>> Code.BAD_GATEWAY.class_ 5 """ return self >> 5 @property def dotted(self): """The numeric value three-decimal-digits (c.dd) form""" return "%d.%02d" % divmod(self, 32) @property def name_printable(self): """The name of the code in human-readable form""" return self.name.replace("_", " ").title() def __str__(self): """ >>> print(Code.GET) GET >>> print(Code.CONTENT) 2.05 Content >>> print(Code.BAD_GATEWAY) 5.02 Bad Gateway >>> print(Code(32)) 32 """ if self.is_request() or self is self.EMPTY: return self.name elif self.is_response() or self.is_signalling(): return "%s %s" % (self.dotted, self.name_printable) else: return "%d" % self def _classification(self): return ("Successful " if self.is_successful() else "") + ( "Request " if self.is_request() else "Response " if self.is_response() else "" ) def __repr__(self): """ >>> Code.GET >>> Code.CONTENT >>> Code.BAD_GATEWAY >>> Code(32) """ return '<%sCode %d "%s">' % (self._classification(), self, self) def _repr_html_(self): """ >>> Code.GET._repr_html_() 'GET' >>> Code(31)._repr_html_() '0.31' """ import html if self.name == "(unknown)": return f'{self.dotted}' else: return f'{html.escape(self.name)}' @classmethod def _missing_(cls, value): constructed = super()._missing_(value) constructed._name_ = "(unknown)" return constructed # List is copied down to help mypy and other typing dependent tools to # understand the types. EMPTY = Code.EMPTY GET = Code.GET POST = Code.POST PUT = Code.PUT DELETE = Code.DELETE FETCH = Code.FETCH PATCH = Code.PATCH iPATCH = Code.iPATCH CREATED = Code.CREATED DELETED = Code.DELETED VALID = Code.VALID CHANGED = Code.CHANGED CONTENT = Code.CONTENT CONTINUE = Code.CONTINUE BAD_REQUEST = Code.BAD_REQUEST UNAUTHORIZED = Code.UNAUTHORIZED BAD_OPTION = Code.BAD_OPTION FORBIDDEN = Code.FORBIDDEN NOT_FOUND = Code.NOT_FOUND METHOD_NOT_ALLOWED = Code.METHOD_NOT_ALLOWED NOT_ACCEPTABLE = Code.NOT_ACCEPTABLE REQUEST_ENTITY_INCOMPLETE = Code.REQUEST_ENTITY_INCOMPLETE CONFLICT = Code.CONFLICT PRECONDITION_FAILED = Code.PRECONDITION_FAILED REQUEST_ENTITY_TOO_LARGE = Code.REQUEST_ENTITY_TOO_LARGE UNSUPPORTED_CONTENT_FORMAT = Code.UNSUPPORTED_CONTENT_FORMAT UNPROCESSABLE_ENTITY = Code.UNPROCESSABLE_ENTITY TOO_MANY_REQUESTS = Code.TOO_MANY_REQUESTS INTERNAL_SERVER_ERROR = Code.INTERNAL_SERVER_ERROR NOT_IMPLEMENTED = Code.NOT_IMPLEMENTED BAD_GATEWAY = Code.BAD_GATEWAY SERVICE_UNAVAILABLE = Code.SERVICE_UNAVAILABLE GATEWAY_TIMEOUT = Code.GATEWAY_TIMEOUT PROXYING_NOT_SUPPORTED = Code.PROXYING_NOT_SUPPORTED HOP_LIMIT_REACHED = Code.HOP_LIMIT_REACHED CSM = Code.CSM PING = Code.PING PONG = Code.PONG RELEASE = Code.RELEASE ABORT = Code.ABORT if __debug__: for _code in Code: if locals().get(_code.name) is not _code: warnings.warn( f"Locals list is out of sync; entry `{_code.name} = Code.{_code.name}` is missing" ) __all__ = ["Code"] + [ k for (k, v) in locals().items() if isinstance(v, Code) and not k.startswith("_") ] aiocoap-0.4.12/aiocoap/numbers/constants.py000066400000000000000000000133731472205122200206710ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Constants either defined in the CoAP protocol (often default values for lack of ways to determine eg. the estimated round trip time). Some parameters are invented here for practical purposes of the implementation (eg. 
DEFAULT_BLOCK_SIZE_EXP, EMPTY_ACK_DELAY).""" import warnings import string COAP_PORT = 5683 """The IANA-assigned standard port for COAP services.""" COAPS_PORT = 5684 MCAST_IPV4_ALLCOAPNODES = "224.0.1.187" MCAST_IPV6_LINKLOCAL_ALLNODES = "ff02::1" MCAST_IPV6_LINKLOCAL_ALLCOAPNODES = "ff02::fd" MCAST_IPV6_SITELOCAL_ALLNODES = "ff05::1" MCAST_IPV6_SITELOCAL_ALLCOAPNODES = "ff05::fd" MCAST_ALL = ( MCAST_IPV4_ALLCOAPNODES, MCAST_IPV6_LINKLOCAL_ALLNODES, MCAST_IPV6_LINKLOCAL_ALLCOAPNODES, MCAST_IPV6_SITELOCAL_ALLNODES, MCAST_IPV6_SITELOCAL_ALLCOAPNODES, ) MAX_REGULAR_BLOCK_SIZE_EXP = 6 class TransportTuning: """Base parameters that guide CoAP transport behaviors The values in here are recommended values, often defaults from RFCs. They can be tuned in subclasses (and then passed into a message as ``transport_tuning``), although users should be aware that alteing some of these can cause the library to behave in ways violating the specification, especially with respect to congestion control. """ # +-------------------+---------------+ # | name | default value | # +-------------------+---------------+ # | ACK_TIMEOUT | 2 seconds | # | ACK_RANDOM_FACTOR | 1.5 | # | MAX_RETRANSMIT | 4 | # | NSTART | 1 | # | DEFAULT_LEISURE | 5 seconds | # | PROBING_RATE | 1 Byte/second | # +-------------------+---------------+ ACK_TIMEOUT = 2.0 """The time, in seconds, to wait for an acknowledgement of a confirmable message. The inter-transmission time doubles for each retransmission.""" ACK_RANDOM_FACTOR = 1.5 """Timeout multiplier for anti-synchronization.""" MAX_RETRANSMIT = 4 """The number of retransmissions of confirmable messages to non-multicast endpoints before the infrastructure assumes no acknowledgement will be received.""" NSTART = 1 """Maximum number of simultaneous outstanding interactions that endpoint maintains to a given server (including proxies)""" # +-------------------+---------------+ # | name | default value | # +-------------------+---------------+ # | MAX_TRANSMIT_SPAN | 45 s | # | MAX_TRANSMIT_WAIT | 93 s | # | MAX_LATENCY | 100 s | # | PROCESSING_DELAY | 2 s | # | MAX_RTT | 202 s | # | EXCHANGE_LIFETIME | 247 s | # | NON_LIFETIME | 145 s | # +-------------------+---------------+ @property def MAX_TRANSMIT_SPAN(self): """Maximum time from the first transmission of a confirmable message to its last retransmission.""" return self.ACK_TIMEOUT * (2**self.MAX_RETRANSMIT - 1) * self.ACK_RANDOM_FACTOR @property def MAX_TRANSMIT_WAIT(self): """Maximum time from the first transmission of a confirmable message to the time when the sender gives up on receiving an acknowledgement or reset.""" return ( self.ACK_TIMEOUT * (2 ** (self.MAX_RETRANSMIT + 1) - 1) * self.ACK_RANDOM_FACTOR ) MAX_LATENCY = 100.0 """Maximum time a datagram is expected to take from the start of its transmission to the completion of its reception.""" @property def PROCESSING_DELAY(self): """ "Time a node takes to turn around a confirmable message into an acknowledgement.""" return self.ACK_TIMEOUT @property def MAX_RTT(self): """Maximum round-trip time.""" return 2 * self.MAX_LATENCY + self.PROCESSING_DELAY @property def EXCHANGE_LIFETIME(self): """time from starting to send a confirmable message to the time when an acknowledgement is no longer expected, i.e. 
message layer information about the message exchange can be purged""" return self.MAX_TRANSMIT_SPAN + self.MAX_RTT DEFAULT_BLOCK_SIZE_EXP = MAX_REGULAR_BLOCK_SIZE_EXP """Default size exponent for blockwise transfers.""" EMPTY_ACK_DELAY = 0.1 """After this time protocol sends empty ACK, and separate response""" REQUEST_TIMEOUT = MAX_TRANSMIT_WAIT """Time after which server assumes it won't receive any answer. It is not defined by IETF documents. For human-operated devices it might be preferable to set some small value (for example 10 seconds) For M2M it's application dependent.""" DEFAULT_LEISURE = 5 @property def MULTICAST_REQUEST_TIMEOUT(self): return self.REQUEST_TIMEOUT + self.DEFAULT_LEISURE OBSERVATION_RESET_TIME = 128 """Time in seconds after which the value of the observe field are ignored. This number is not explicitly named in RFC7641. """ _default_transport_tuning = TransportTuning() def __getattr__(name): if name[0] in string.ascii_uppercase and hasattr(_default_transport_tuning, name): warnings.warn( f"{name} is deprecated, use through the message's transport_tuning instead", DeprecationWarning, stacklevel=2, ) return getattr(_default_transport_tuning, name) raise AttributeError(f"module {__name__} has no attribute {name}") SHUTDOWN_TIMEOUT = 3 """Maximum time, in seconds, for which the process is kept around during shutdown""" __all__ = [ k for k in dir() if not k.startswith("_") and k not in ("warnings", "string") ] aiocoap-0.4.12/aiocoap/numbers/contentformat.py000066400000000000000000000343311472205122200215350ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Module containing the CoRE parameters / CoAP Content-Formats registry""" from __future__ import annotations from typing import Dict, Tuple from ..util import ExtensibleIntEnum, ExtensibleEnumMeta import warnings # _raw can be updated from: `curl https://www.iana.org/assignments/core-parameters/content-formats.csv | python3 -c 'import csv, sys; print(list(csv.reader(sys.stdin))[1:])'` # fmt: off _raw = [ ['text/plain; charset=utf-8', '', '0', '[RFC2046][RFC3676][RFC5147]'], ['Unassigned', '', '1-15', ''], ['application/cose; cose-type="cose-encrypt0"', '', '16', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/cose; cose-type="cose-mac0"', '', '17', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/cose; cose-type="cose-sign1"', '', '18', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/ace+cbor', '', '19', '[RFC-ietf-ace-oauth-authz-46]'], ['Unassigned', '', '20', ''], ['image/gif', '', '21', '[https://www.w3.org/Graphics/GIF/spec-gif89a.txt]'], ['image/jpeg', '', '22', '[ISO/IEC 10918-5]'], ['image/png', '', '23', '[RFC2083]'], ['Unassigned', '', '24-39', ''], ['application/link-format', '', '40', '[RFC6690]'], ['application/xml', '', '41', '[RFC3023]'], ['application/octet-stream', '', '42', '[RFC2045][RFC2046]'], ['Unassigned', '', '43-46', ''], ['application/exi', '', '47', '["Efficient XML Interchange (EXI) Format 1.0 (Second Edition)", February 2014]'], ['Unassigned', '', '48-49', ''], ['application/json', '', '50', '[RFC8259]'], ['application/json-patch+json', '', '51', '[RFC6902]'], ['application/merge-patch+json', '', '52', '[RFC7396]'], ['Unassigned', '', '53-59', ''], ['application/cbor', '', '60', '[RFC8949]'], ['application/cwt', '', '61', '[RFC8392]'], ['application/multipart-core', '', '62', '[RFC8710]'], ['application/cbor-seq', '', '63', '[RFC8742]'], ['Unassigned', '', '64-95', ''], 
['application/cose; cose-type="cose-encrypt"', '', '96', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/cose; cose-type="cose-mac"', '', '97', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/cose; cose-type="cose-sign"', '', '98', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['Unassigned', '', '99-100', ''], ['application/cose-key', '', '101', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['application/cose-key-set', '', '102', '[RFC-ietf-cose-rfc8152bis-struct-15]'], ['Unassigned', '', '103-109', ''], ['application/senml+json', '', '110', '[RFC8428]'], ['application/sensml+json', '', '111', '[RFC8428]'], ['application/senml+cbor', '', '112', '[RFC8428]'], ['application/sensml+cbor', '', '113', '[RFC8428]'], ['application/senml-exi', '', '114', '[RFC8428]'], ['application/sensml-exi', '', '115', '[RFC8428]'], ['Unassigned', '', '116-139', ''], ['application/yang-data+cbor; id=sid', '', '140', '[RFC9254]'], ['Unassigned', '', '141-255', ''], ['application/coap-group+json', '', '256', '[RFC7390]'], ['application/concise-problem-details+cbor', '', '257', '[RFC-ietf-core-problem-details-08]'], ['application/swid+cbor', '', '258', '[RFC-ietf-sacm-coswid-22]'], ['Unassigned', '', '259-270', ''], ['application/dots+cbor', '', '271', '[RFC9132]'], ['application/missing-blocks+cbor-seq', '', '272', '[RFC9177]'], ['Unassigned', '', '273-279', ''], ['application/pkcs7-mime; smime-type=server-generated-key', '', '280', '[RFC7030][RFC8551][RFC9148]'], ['application/pkcs7-mime; smime-type=certs-only', '', '281', '[RFC8551][RFC9148]'], ['Unassigned', '', '282-283', ''], ['application/pkcs8', '', '284', '[RFC5958][RFC8551][RFC9148]'], ['application/csrattrs', '', '285', '[RFC7030][RFC9148]'], ['application/pkcs10', '', '286', '[RFC5967][RFC8551][RFC9148]'], ['application/pkix-cert', '', '287', '[RFC2585][RFC9148]'], ['Unassigned', '', '288-289', ''], ['application/aif+cbor', '', '290', '[RFC-ietf-ace-aif-07]'], ['application/aif+json', '', '291', '[RFC-ietf-ace-aif-07]'], ['Unassigned', '', '292-309', ''], ['application/senml+xml', '', '310', '[RFC8428]'], ['application/sensml+xml', '', '311', '[RFC8428]'], ['Unassigned', '', '312-319', ''], ['application/senml-etch+json', '', '320', '[RFC8790]'], ['Unassigned', '', '321', ''], ['application/senml-etch+cbor', '', '322', '[RFC8790]'], ['Unassigned', '', '323-339', ''], ['application/yang-data+cbor', '', '340', '[RFC9254]'], ['application/yang-data+cbor; id=name', '', '341', '[RFC9254]'], ['Unassigned', '', '342-431', ''], ['application/td+json', '', '432', '["Web of Things (WoT) Thing Description", May 2019]'], ['Unassigned', '', '433-835', ''], ['application/voucher-cose+cbor (TEMPORARY - registered 2022-04-12, expires 2023-04-12)', '', '836', '[draft-ietf-anima-constrained-voucher-17]'], ['Unassigned', '', '837-1541', ''], ['Reserved, do not use', '', '1542-1543', '[OMA-TS-LightweightM2M-V1_0]'], ['Unassigned', '', '1544-9999', ''], ['application/vnd.ocf+cbor', '', '10000', '[Michael_Koster]'], ['application/oscore', '', '10001', '[RFC8613]'], ['application/javascript', '', '10002', '[RFC4329]'], ['Unassigned', '', '10003-11049', ''], ['application/json', 'deflate', '11050', '[RFC8259]'], ['Unassigned', '', '11051-11059', ''], ['application/cbor', 'deflate', '11060', '[RFC8949]'], ['Unassigned', '', '11061-11541', ''], ['application/vnd.oma.lwm2m+tlv', '', '11542', '[OMA-TS-LightweightM2M-V1_0]'], ['application/vnd.oma.lwm2m+json', '', '11543', '[OMA-TS-LightweightM2M-V1_0]'], ['application/vnd.oma.lwm2m+cbor', '', '11544', 
'[OMA-TS-LightweightM2M-V1_2]'], ['Unassigned', '', '11545-19999', ''], ['text/css', '', '20000', '[RFC2318]'], ['Unassigned', '', '20001-29999', ''], ['image/svg+xml', '', '30000', '[https://www.w3.org/TR/SVG/mimereg.html]'], ['Unassigned', '', '30001-64999', ''], ['Reserved for Experimental Use', '', '65000-65535', '[RFC7252]'], ] # fmt: on def _normalize_media_type(s): """Strip out the white space between parameters; doesn't need to fully parse the types because it's applied to values of _raw (or to input that'll eventually be compared to them and fail)""" return s.replace("; ", ";") class ContentFormatMeta(ExtensibleEnumMeta): def __init__(self, name, bases, dict) -> None: super().__init__(name, bases, dict) # If this were part of the class definition, it would be taken up as an # enum instance; hoisting it to the metaclass avoids that special # treatment. self._by_mt_encoding: Dict[Tuple[str, str], "ContentFormat"] = {} class ContentFormat(ExtensibleIntEnum, metaclass=ContentFormatMeta): """Entry in the `CoAP Content-Formats registry`__ of the IANA Constrained RESTful Environments (Core) Parameters group .. __: https://www.iana.org/assignments/core-parameters/core-parameters.xhtml#content-formats Known entries have ``.media_type`` and ``.encoding`` attributes: >>> ContentFormat(0).media_type 'text/plain; charset=utf-8' >>> int(ContentFormat.by_media_type('text/plain;charset=utf-8')) 0 >>> ContentFormat(60) >>> ContentFormat(11060).encoding 'deflate' Unknown entries do not have these properties: >>> ContentFormat(12345).is_known() False >>> ContentFormat(12345).media_type # doctest: +ELLIPSIS Traceback (most recent call last): ... AttributeError: ... Only a few formats are available as attributes for easy access. Their selection and naming are arbitrary and biased. The remaining known types are available through the :meth:`by_media_type` class method. 
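For example (a lookup sketch using a registered entry from above):

>>> ContentFormat.by_media_type('application/json')
<ContentFormat 50, media_type='application/json', encoding='identity'>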
>>> ContentFormat.TEXT A convenient property of ContentFormat is that any content format is true in a boolean context, and thus when used in alternation with None, can be assigned defaults easily: >>> requested_by_client = ContentFormat.TEXT >>> int(requested_by_client) # Usually, this would always pick the default 0 >>> used = requested_by_client or ContentFormat.LINKFORMAT >>> assert used == ContentFormat.TEXT """ @classmethod def define(cls, number, media_type: str, encoding: str = "identity"): s = cls(number) if hasattr(s, "media_type"): warnings.warn( "Redefining media type is a compatibility hazard, but allowed for experimental purposes" ) s._media_type = media_type s._encoding = encoding cls._by_mt_encoding[(_normalize_media_type(media_type), encoding)] = s @classmethod def by_media_type( cls, media_type: str, encoding: str = "identity" ) -> ContentFormat: """Produce known entry for a known media type (and encoding, though 'identity' is default due to its prevalence), or raise KeyError.""" return cls._by_mt_encoding[(_normalize_media_type(media_type), encoding)] def is_known(self): return hasattr(self, "media_type") @property def media_type(self) -> str: return self._media_type @media_type.setter def media_type(self, media_type: str) -> None: warnings.warn( "Setting media_type or encoding is deprecated, use ContentFormat.define(media_type, encoding) instead.", DeprecationWarning, stacklevel=1, ) self._media_type = media_type @property def encoding(self) -> str: return self._encoding @encoding.setter def encoding(self, encoding: str) -> None: warnings.warn( "Setting media_type or encoding is deprecated, use ContentFormat(number, media_type, encoding) instead.", DeprecationWarning, stacklevel=1, ) self._encoding = encoding @classmethod def _rehash(cls): """Update the class's cache of known media types Run this after having created entries with media type and encoding that should be found later on.""" # showing as a deprecation even though it is a private function because # altering media_type/encoding required users to call this. 
warnings.warn( "This function is not needed when defining a content type through `.define()` rather than by setting media_type and encoding.", DeprecationWarning, stacklevel=1, ) cls._by_mt_encoding = { (_normalize_media_type(c.media_type), c.encoding): c for c in cls._value2member_map_.values() } def __repr__(self): return "<%s %d%s>" % ( type(self).__name__, self, ", media_type=%r, encoding=%r" % (self.media_type, self.encoding) if self.is_known() else "", ) def __bool__(self): return True def _repr_html_(self): # The name with title thing isn't too pretty for these ones if self.is_known(): import html return f"""{html.escape(self.media_type)}{'@' + self.encoding if self.encoding != 'identity' else ''}""" else: return f"""{int(self)}""" TEXT = 0 LINKFORMAT = 40 OCTETSTREAM = 42 JSON = 50 CBOR = 60 SENML = 112 for _mt, _enc, _i, _source in _raw: if _mt in ["Reserved for Experimental Use", "Reserved, do not use", "Unassigned"]: continue _mt, _, _ = _mt.partition(" (TEMPORARY") ContentFormat.define(int(_i), _mt, _enc or "identity") class _MediaTypes: """Wrapper to provide a media_types indexable object as was present up to 0.4.2""" def __getitem__(self, content_format): warnings.warn( "media_types is deprecated, please use aiocoap.numbers.ContentFormat", DeprecationWarning, stacklevel=2, ) if content_format is None: # That was a convenient idiom to short-circuit through, but would # fail through the constructor raise KeyError(None) cf = ContentFormat(content_format) if cf.is_known(): return _normalize_media_type(cf.media_type) else: raise KeyError(content_format) def get(self, content_format, default=None): warnings.warn( "media_types is deprecated, please use aiocoap.numbers.ContentFormat", DeprecationWarning, stacklevel=2, ) try: return self[content_format] except KeyError: return default class _MediaTypesRev: """Wrapper to provide a media_types_rev indexable object as was present up to 0.4.2""" def __getitem__(self, name): warnings.warn( "media_types_rev is deprecated, please use aiocoap.numbers.ContentFormat", DeprecationWarning, stacklevel=2, ) if name == "text/plain": # deprecated alias. Kept alive for scripts like # https://gitlab.f-interop.eu/f-interop-contributors/ioppytest/blob/develop/automation/coap_client_aiocoap/automated_iut.py # that run aiocoap-client with text/plain as an argument. name = "text/plain;charset=utf-8" return int(ContentFormat.by_media_type(name)) def get(self, name, default=None): warnings.warn( "media_types_rev is deprecated, please use aiocoap.numbers.ContentFormat", DeprecationWarning, stacklevel=2, ) try: return self[name] except KeyError: return default aiocoap-0.4.12/aiocoap/numbers/optionnumbers.py000066400000000000000000000146701472205122200215620ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Known values for CoAP option numbers The values defined in `OptionNumber` correspond to the IANA registry "CoRE Parameters", subregistries "CoAP Method Codes" and "CoAP Response Codes". The option numbers come with methods that can be used to evaluate their properties, see the `OptionNumber` class for details. """ import warnings from ..util import ExtensibleIntEnum from .. import optiontypes class OptionNumber(ExtensibleIntEnum): """A CoAP option number. As the option number contains information on whether the option is critical, and whether it is safe-to-forward, those properties can be queried using the `is_*` group of methods. 
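For example:

>>> OptionNumber.URI_PATH.is_critical()
True
>>> OptionNumber.ETAG.is_elective()
True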
Note that whether an option may be repeated or not does not only depend on the option, but also on the context, and is thus handled in the `Options` object instead.""" IF_MATCH = 1 URI_HOST = 3 ETAG = 4 IF_NONE_MATCH = 5 OBSERVE = 6 URI_PORT = 7 LOCATION_PATH = 8 OSCORE = 9 URI_PATH = 11 CONTENT_FORMAT = 12 MAX_AGE = 14 URI_QUERY = 15 HOP_LIMIT = 16 ACCEPT = 17 Q_BLOCK1 = 19 LOCATION_QUERY = 20 EDHOC = 21 BLOCK2 = 23 BLOCK1 = 27 SIZE2 = 28 Q_BLOCK2 = 31 PROXY_URI = 35 PROXY_SCHEME = 39 SIZE1 = 60 ECHO = 252 NO_RESPONSE = 258 REQUEST_TAG = 292 # experimental for draft-amsuess-core-cachable-oscore # # Using the number suggested there (rather than a high one) as this is # going to be used in overhead comparisons. REQUEST_HASH = 548 @property def OBJECT_SECURITY(self): warnings.warn("OBJECT_SECURITY is a deprecated alias for OSCORE") return self.OSCORE def __add__(self, delta): """Addition makes sense on these due to the delta encoding in CoAP serialization""" return type(self)(int(self) + delta) def is_critical(self): return self & 0x01 == 0x01 def is_elective(self): return not self.is_critical() def is_unsafe(self): return self & 0x02 == 0x02 def is_safetoforward(self): return not self.is_unsafe() def is_nocachekey(self): if self.is_unsafe(): raise ValueError("NoCacheKey is only meaningful for safe options") return self & 0x1E == 0x1C def is_cachekey(self): return not self.is_nocachekey() def _get_format(self): if hasattr(self, "_format"): return self._format else: return optiontypes.OpaqueOption def set_format(self, value): """Set the serialization format. This affects any use of the option throughout the program; existing options should not be altered incompatibly. Use this on custom or experimental options. This is available as a setter function in addition to write access through the `format` property to satisfy requirements imposed by mypy's special handling of enums. """ if hasattr(self, "_format"): if self._format != value: warnings.warn( "Altering the serialization format of {self}. This is a severe interoperability hazard with other modules, and should only be used during experimentation. This warning may be converted into an error at any time." ) self._format = value format = property( _get_format, set_format, doc="Serialization format; see :func:`~aiocoap.numbers.optionnumbers.OptionNumber.set_format`", ) def create_option(self, decode=None, value=None): """Return an Option element of the appropriate class from this option number. An initial value may be set using the decode or value options, and will be fed to the resulting object's decode method or value property, respectively.""" option = self.format(self) if decode is not None: option.decode(decode) if value is not None: option.value = value return option def _repr_html_(self): import html properties = f"{'critical' if self.is_critical() else 'elective'}, {'safe-to-forward' if self.is_safetoforward() else 'proxy unsafe'}" if self.is_safetoforward(): properties += ( ", part of the cache key" if self.is_cachekey() else ", not part of the cache key" ) if hasattr(self, "name"): return f'{html.escape(self.name)}' else: return f'Option {int(self)}' # OpaqueOption is set on formats where it is known to be used even though it is # the default. This allows developers to rely on those interfaces to be stable # (or at least to be notified visibly in the release notes). 
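# As an illustrative sketch (65001 is merely an example number from the
# RFC 7252 experimental-use range and is not assigned to anything), a custom
# option can be declared and used like the built-in ones below:
#
#     MY_EXPERIMENTAL = OptionNumber(65001)
#     MY_EXPERIMENTAL.set_format(optiontypes.UintOption)
#     opt = MY_EXPERIMENTAL.create_option(value=42)
#     assert opt.encode() == b"\x2a"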
# RFC 7252 OptionNumber.IF_MATCH.set_format(optiontypes.OpaqueOption) OptionNumber.URI_HOST.set_format(optiontypes.StringOption) OptionNumber.ETAG.set_format(optiontypes.OpaqueOption) OptionNumber.URI_PORT.set_format(optiontypes.UintOption) OptionNumber.LOCATION_PATH.set_format(optiontypes.StringOption) OptionNumber.URI_PATH.set_format(optiontypes.StringOption) OptionNumber.CONTENT_FORMAT.set_format(optiontypes.ContentFormatOption) OptionNumber.MAX_AGE.set_format(optiontypes.UintOption) OptionNumber.URI_QUERY.set_format(optiontypes.StringOption) OptionNumber.ACCEPT.set_format(optiontypes.ContentFormatOption) OptionNumber.LOCATION_QUERY.set_format(optiontypes.StringOption) OptionNumber.PROXY_URI.set_format(optiontypes.StringOption) OptionNumber.PROXY_SCHEME.set_format(optiontypes.StringOption) OptionNumber.SIZE1.set_format(optiontypes.UintOption) # RFC 7959 OptionNumber.BLOCK2.set_format(optiontypes.BlockOption) OptionNumber.BLOCK1.set_format(optiontypes.BlockOption) OptionNumber.SIZE2.set_format(optiontypes.UintOption) # RFC 7641 OptionNumber.OBSERVE.set_format(optiontypes.UintOption) # RFC 7967 OptionNumber.NO_RESPONSE.set_format(optiontypes.UintOption) # RFC 8613 OptionNumber.OSCORE.set_format(optiontypes.OpaqueOption) # RFC 9175 OptionNumber.ECHO.set_format(optiontypes.OpaqueOption) OptionNumber.REQUEST_TAG.set_format(optiontypes.OpaqueOption) # RFC 8768 OptionNumber.HOP_LIMIT.set_format(optiontypes.UintOption) # experimental for draft-amsuess-core-cachable-oscore OptionNumber.REQUEST_HASH.set_format(optiontypes.OpaqueOption) aiocoap-0.4.12/aiocoap/numbers/types.py000066400000000000000000000011051472205122200200070ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """List of known values for the CoAP "Type" field. As this field is only 2 bits, its valid values are comprehensively enumerated in the `Type` object. """ from enum import IntEnum class Type(IntEnum): CON = 0 # Confirmable NON = 1 # Non-confirmable ACK = 2 # Acknowledgement RST = 3 # Reset def __str__(self): return self.name CON, NON, ACK, RST = Type.CON, Type.NON, Type.ACK, Type.RST __all__ = ["Type", "CON", "NON", "ACK", "RST"] aiocoap-0.4.12/aiocoap/options.py000066400000000000000000000235141472205122200166730ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT from itertools import chain from warnings import warn from .numbers.optionnumbers import OptionNumber from .error import UnparsableMessage def _read_extended_field_value(value, rawdata): """Used to decode large values of option delta and option length from raw binary form.""" if value >= 0 and value < 13: return (value, rawdata) elif value == 13: if len(rawdata) < 1: raise UnparsableMessage("Option ended prematurely") return (rawdata[0] + 13, rawdata[1:]) elif value == 14: if len(rawdata) < 2: raise UnparsableMessage("Option ended prematurely") return (int.from_bytes(rawdata[:2], "big") + 269, rawdata[2:]) else: raise UnparsableMessage("Option contained partial payload marker.") def _write_extended_field_value(value): """Used to encode large values of option delta and option length into raw binary form. 
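For example (the nibble value and the extension bytes are returned
separately):

>>> _write_extended_field_value(5)
(5, b'')
>>> _write_extended_field_value(20)
(13, b'\x07')
>>> _write_extended_field_value(1000)
(14, b'\x02\xdb')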
In CoAP option delta and length can be represented by a variable number of bytes depending on the value.""" if value >= 0 and value < 13: return (value, b"") elif value >= 13 and value < 269: return (13, (value - 13).to_bytes(1, "big")) elif value >= 269 and value < 65804: return (14, (value - 269).to_bytes(2, "big")) else: raise ValueError("Value out of range.") def _single_value_view(option_number, doc=None, deprecated=None): """Generate a property for a given option number, where the option is not repeatable. For getting, it will return the value of the first option object with matching number. For setting, it will remove all options with that number and create one with the given value. The property can be deleted, resulting in removal of the option from the header. For consistency, setting the value to None also clears the option. (Note that with the currently implemented optiontypes, None is not a valid value for any of them).""" def _getter(self, option_number=option_number): if deprecated is not None: warn(deprecated, stacklevel=2) options = self.get_option(option_number) if not options: return None else: return options[0].value def _setter(self, value, option_number=option_number): if deprecated is not None: # Stack level 3 is what it takes to get out of the typical # `Message(opt=val)` construction, and probably most useful. warn(deprecated, stacklevel=3) self.delete_option(option_number) if value is not None: self.add_option(option_number.create_option(value=value)) def _deleter(self, option_number=option_number): if deprecated is not None: warn(deprecated, stacklevel=2) self.delete_option(option_number) return property( _getter, _setter, _deleter, doc or "Single-value view on the %s option." % option_number, ) def _items_view(option_number, doc=None): """Generate a property for a given option number, where the option is repeatable. For getting, it will return a tuple of the values of the option objects with matching number. For setting, it will remove all options with that number and create new ones from the given iterable.""" def _getter(self, option_number=option_number): return tuple(o.value for o in self.get_option(option_number)) def _setter(self, value, option_number=option_number): self.delete_option(option_number) for v in value: self.add_option(option_number.create_option(value=v)) def _deleter(self, option_number=option_number): self.delete_option(option_number) return property( _getter, _setter, _deleter, doc=doc or "Iterable view on the %s option." % option_number, ) def _empty_presence_view(option_number, doc=None): """Generate a property for a given option number, where the option is not repeatable and (usually) empty. The values True and False are mapped to presence and absence of the option.""" def _getter(self, option_number=option_number): return bool(self.get_option(option_number)) def _setter(self, value, option_number=option_number): self.delete_option(option_number) if value: self.add_option(option_number.create_option()) return property( _getter, _setter, doc=doc or "Presence of the %s option." 
% option_number
    )


class Options(object):
    """Represent CoAP Header Options."""

    # this is not so much an optimization as a safeguard -- if custom
    # attributes were placed here, they could be accessed but would not be
    # serialized
    __slots__ = ["_options"]

    def __init__(self):
        self._options = {}

    def __eq__(self, other):
        if not isinstance(other, Options):
            return NotImplemented

        # this implementation is much easier than implementing equality on
        # StringOption etc
        return self.encode() == other.encode()

    def __repr__(self):
        text = ", ".join(
            "%s: %s" % (OptionNumber(k), " / ".join(map(str, v)))
            for (k, v) in self._options.items()
        )
        return "<aiocoap.options.Options at %#x: %s>" % (id(self), text or "empty")

    def _repr_html_(self):
        if self._options:
            n_opt = sum(len(o) for o in self._options.values())
            items = (
                f'<li>{OptionNumber(k)._repr_html_()}: {", ".join(vi._repr_html_() for vi in v)}</li>'
                for (k, v) in sorted(self._options.items())
            )
            return f"""<details><summary>{n_opt} option{'s' if n_opt != 1 else ''}</summary><ul>{''.join(items)}</ul></details>"""
        else:
            return "<em>No options</em>
    " def decode(self, rawdata): """Passed a CoAP message body after the token as rawdata, fill self with the options starting at the beginning of rawdata, an return the rest of the message (the body).""" option_number = OptionNumber(0) while rawdata: if rawdata[0] == 0xFF: return rawdata[1:] dllen = rawdata[0] delta = (dllen & 0xF0) >> 4 length = dllen & 0x0F rawdata = rawdata[1:] (delta, rawdata) = _read_extended_field_value(delta, rawdata) (length, rawdata) = _read_extended_field_value(length, rawdata) option_number += delta if len(rawdata) < length: raise UnparsableMessage("Option announced but absent") option = option_number.create_option(decode=rawdata[:length]) self.add_option(option) rawdata = rawdata[length:] return b"" def encode(self): """Encode all options in option header into string of bytes.""" data = [] current_opt_num = 0 for option in self.option_list(): optiondata = option.encode() delta, extended_delta = _write_extended_field_value( option.number - current_opt_num ) length, extended_length = _write_extended_field_value(len(optiondata)) data.append(bytes([((delta & 0x0F) << 4) + (length & 0x0F)])) data.append(extended_delta) data.append(extended_length) data.append(optiondata) current_opt_num = option.number return b"".join(data) def add_option(self, option): """Add option into option header.""" self._options.setdefault(option.number, []).append(option) def delete_option(self, number): """Delete option from option header.""" if number in self._options: self._options.pop(number) def get_option(self, number): """Get option with specified number.""" return self._options.get(number, ()) def option_list(self): return chain.from_iterable( sorted(self._options.values(), key=lambda x: x[0].number) ) uri_path = _items_view(OptionNumber.URI_PATH) uri_query = _items_view(OptionNumber.URI_QUERY) location_path = _items_view(OptionNumber.LOCATION_PATH) location_query = _items_view(OptionNumber.LOCATION_QUERY) block2 = _single_value_view(OptionNumber.BLOCK2) block1 = _single_value_view(OptionNumber.BLOCK1) content_format = _single_value_view(OptionNumber.CONTENT_FORMAT) etag = _single_value_view(OptionNumber.ETAG, "Single ETag as used in responses") etags = _items_view(OptionNumber.ETAG, "List of ETags as used in requests") if_none_match = _empty_presence_view(OptionNumber.IF_NONE_MATCH) observe = _single_value_view(OptionNumber.OBSERVE) accept = _single_value_view(OptionNumber.ACCEPT) uri_host = _single_value_view(OptionNumber.URI_HOST) uri_port = _single_value_view(OptionNumber.URI_PORT) proxy_uri = _single_value_view(OptionNumber.PROXY_URI) proxy_scheme = _single_value_view(OptionNumber.PROXY_SCHEME) size1 = _single_value_view(OptionNumber.SIZE1) oscore = _single_value_view(OptionNumber.OSCORE) object_security = _single_value_view( OptionNumber.OSCORE, deprecated="Use `oscore` instead of `object_security`" ) max_age = _single_value_view(OptionNumber.MAX_AGE) if_match = _items_view(OptionNumber.IF_MATCH) no_response = _single_value_view(OptionNumber.NO_RESPONSE) echo = _single_value_view(OptionNumber.ECHO) request_tag = _items_view(OptionNumber.REQUEST_TAG) hop_limit = _single_value_view(OptionNumber.HOP_LIMIT) request_hash = _single_value_view( OptionNumber.REQUEST_HASH, "Experimental property for draft-amsuess-core-cachable-oscore", ) edhoc = _empty_presence_view(OptionNumber.EDHOC) size2 = _single_value_view(OptionNumber.SIZE2) aiocoap-0.4.12/aiocoap/optiontypes.py000066400000000000000000000203411472205122200175700ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian 
Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import abc import collections from .numbers.contentformat import ContentFormat def _to_minimum_bytes(value): return value.to_bytes((value.bit_length() + 7) // 8, "big") class OptionType(metaclass=abc.ABCMeta): """Interface for decoding and encoding option values Instances of :class:`OptionType` are collected in a list in a :attr:`.Message.opt` :class:`.Options` object, and provide a translation between the CoAP octet-stream (accessed using the :meth:`encode()`/:meth:`decode()` method pair) and the interpreted value (accessed via the :attr:`value` attribute). Note that OptionType objects usually don't need to be handled by library users; the recommended way to read and set options is via the Options object'sproperties (eg. ``message.opt.uri_path = ('.well-known', 'core')``).""" @abc.abstractmethod def __init__(self, number, value): """Set the `self.name` and `self.value` attributes""" @abc.abstractmethod def encode(self): """Return the option's value in serialzied form""" @abc.abstractmethod def decode(self, rawdata): """Set the option's value from the bytes in rawdata""" def _repr_html_(self): import html return f"{html.escape(repr(self))}" class StringOption(OptionType): """String CoAP option - used to represent string options. Always encoded in UTF8 per CoAP specification.""" def __init__(self, number, value=""): self.value = value self.number = number def encode(self): # FIXME: actually, this should be utf8 of the net-unicode form (maybe it is) rawdata = self.value.encode("utf-8") return rawdata def decode(self, rawdata): self.value = rawdata.decode("utf-8") def __str__(self): return self.value def _repr_html_(self): import html return f"{html.escape(repr(self.value))}" class OpaqueOption(OptionType): """Opaque CoAP option - used to represent options that just have their uninterpreted bytes as value.""" def __init__(self, number, value=b""): self.value = value self.number = number def encode(self): rawdata = self.value return rawdata def decode(self, rawdata): self.value = rawdata def __str__(self): return repr(self.value) def _repr_html_(self): return f"{self.value.hex()}" class UintOption(OptionType): """Uint CoAP option - used to represent integer options.""" def __init__(self, number, value=0): self.value = value self.number = number def encode(self): return _to_minimum_bytes(int(self.value)) def decode(self, rawdata): self.value = int.from_bytes(rawdata, "big") def __str__(self): return str(self.value) def _repr_html_(self): import html return f"{html.escape(repr(self.value))}" class TypedOption(OptionType, metaclass=abc.ABCMeta): @property @abc.abstractmethod def type(self) -> type: """Checked type of the option""" def __init__(self, number, value=None): self.number = number # FIXME when is this ever initialized without value? if value is not None: self.value = value value = property( lambda self: self._value, lambda self, value: self._set_from_opt_value(value) ) def _set_from_opt_value(self, value: object): """Convert a value set as ``message.opt.option_name = value`` into the stored value. 
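(Subclasses may adapt the value first; :class:`BlockOption` below, for
instance, accepts a plain ``(block_number, more, size_exponent)`` tuple and
casts it to its :class:`BlockwiseTuple`.)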
By default, this does an eager isinstance check on the value (anticipating that encoding an unsuitable value would otherwise fail at a hard-to-debug location).""" if not isinstance(value, self.type): raise ValueError( "Setting values of type %s is not supported on this option" % type(value) ) self._value = value def __str__(self): return str(self.value) def _repr_html_(self): if hasattr(self.value, "_repr_html_"): return self.value._repr_html_() else: import html return f"{html.escape(repr(self.value))}" class BlockOption(TypedOption): """Block CoAP option - special option used only for Block1 and Block2 options. Currently it is the only type of CoAP options that has internal structure. That structure (BlockwiseTuple) covers not only the block options of RFC7959, but also the BERT extension of RFC8323. If the reserved size exponent 7 is used for purposes incompatible with BERT, the implementor might want to look at the context dependent option number interpretations which will hopefully be in place for Signaling (7.xx) messages by then.""" class BlockwiseTuple( collections.namedtuple( "_BlockwiseTuple", ["block_number", "more", "size_exponent"] ) ): @property def size(self): return 2 ** (min(self.size_exponent, 6) + 4) @property def start(self): """The byte offset in the body indicated by block number and size. Note that this calculation is only valid for descriptive use and Block2 control use. The semantics of block_number and size in Block1 control use are unrelated (indicating the acknowledged block number in the request Block1 size and the server's preferred block size), and must not be calculated using this property in that case.""" return self.block_number * self.size @property def is_bert(self): """True if the exponent is recognized to signal a BERT message.""" return self.size_exponent == 7 def is_valid_for_payload_size(self, payloadsize): if self.is_bert: if self.more: return payloadsize % 1024 == 0 return True else: if self.more: return payloadsize == self.size else: return payloadsize <= self.size def reduced_to(self, maximum_exponent): """Return a BlockwiseTuple whose exponent is capped to the given maximum_exponent >>> initial = BlockOption.BlockwiseTuple(10, 0, 5) >>> initial == initial.reduced_to(6) True >>> initial.reduced_to(3) BlockwiseTuple(block_number=40, more=0, size_exponent=3) """ if maximum_exponent >= self.size_exponent: return self if maximum_exponent == 6 and self.size_exponent == 7: return (self.block_number, self.more, 6) increasednumber = self.block_number << ( min(self.size_exponent, 6) - maximum_exponent ) return type(self)(increasednumber, self.more, maximum_exponent) type = BlockwiseTuple def encode(self): as_integer = ( (self.value.block_number << 4) + (self.value.more * 0x08) + self.value.size_exponent ) return _to_minimum_bytes(as_integer) def decode(self, rawdata): as_integer = int.from_bytes(rawdata, "big") self.value = self.BlockwiseTuple( block_number=(as_integer >> 4), more=bool(as_integer & 0x08), size_exponent=(as_integer & 0x07), ) def _set_from_opt_value(self, value): # Casting it through the constructor makes it easy to set the option as # `(num, more, sz)` without having to pick the type out of a very long # module name super()._set_from_opt_value(self.type(*value)) class ContentFormatOption(TypedOption): """Type of numeric options whose number has :class:`ContentFormat` semantics""" type = ContentFormat def encode(self): return _to_minimum_bytes(int(self.value)) def decode(self, rawdata): as_integer = int.from_bytes(rawdata, "big") 
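        # Assigning to _value directly: ContentFormat(as_integer) already is
        # the checked type, so going through the property setter would only
        # repeat the conversion.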
self._value = ContentFormat(as_integer) def _set_from_opt_value(self, value): super()._set_from_opt_value(ContentFormat(value)) aiocoap-0.4.12/aiocoap/oscore.py000066400000000000000000002631101472205122200164700ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module contains the tools to send OSCORE secured messages. It only deals with the algorithmic parts, the security context and protection and unprotection of messages. It does not touch on the integration of OSCORE in the larger aiocoap stack of having a context or requests; that's what :mod:`aiocoap.transports.osore` is for.`""" from __future__ import annotations from collections import namedtuple import io import json import binascii import os import os.path import tempfile import abc from typing import Optional, List, Any, Tuple import secrets import warnings from aiocoap.message import Message from aiocoap.util import cryptography_additions, deprecation_getattr, Sentinel from aiocoap.numbers import GET, POST, FETCH, CHANGED, UNAUTHORIZED, CONTENT from aiocoap import error from . import credentials from cryptography.hazmat.primitives.ciphers import aead from cryptography.hazmat.primitives.kdf.hkdf import HKDF from cryptography.hazmat.primitives import ciphers, hashes import cryptography.hazmat.backends import cryptography.exceptions from cryptography.hazmat.primitives import asymmetric, serialization from cryptography.hazmat.primitives.asymmetric.utils import ( decode_dss_signature, encode_dss_signature, ) import cbor2 as cbor import filelock MAX_SEQNO = 2**40 - 1 # Relevant values from the IANA registry "CBOR Object Signing and Encryption (COSE)" COSE_KID = 4 COSE_PIV = 6 COSE_KID_CONTEXT = 10 # from RFC9338 COSE_COUNTERSIGNATURE0 = 12 # from RFC9528 COSE_KCCS = 14 COMPRESSION_BITS_N = 0b111 COMPRESSION_BIT_K = 0b1000 COMPRESSION_BIT_H = 0b10000 COMPRESSION_BIT_GROUP = 0b100000 # Group Flag from draft-ietf-core-oscore-groupcomm-21 COMPRESSION_BITS_RESERVED = 0b11000000 CWT_CLAIM_CNF = 8 CWT_CNF_COSE_KEY = 1 COSE_KEY_COMMON_KTY = 1 COSE_KTY_OKP = 1 COSE_KTY_EC2 = 2 COSE_KEY_COMMON_ALG = 3 COSE_KEY_OKP_CRV = -1 COSE_KEY_OKP_X = -2 COSE_KEY_EC2_X = -2 COSE_KEY_EC2_Y = -3 # While the original values were simple enough to be used in literals, starting # with oscore-groupcomm we're using more compact values INFO_TYPE_KEYSTREAM_REQUEST = True INFO_TYPE_KEYSTREAM_RESPONSE = False PRESENT_BUT_NO_VALUE_YET = Sentinel("Value will be populated later") class CodeStyle(namedtuple("_CodeStyle", ("request", "response"))): FETCH_CONTENT: CodeStyle POST_CHANGED: CodeStyle @classmethod def from_request(cls, request) -> CodeStyle: if request == FETCH: return cls.FETCH_CONTENT elif request == POST: return cls.POST_CHANGED else: raise ValueError("Invalid request code %r" % request) CodeStyle.FETCH_CONTENT = CodeStyle(FETCH, CONTENT) CodeStyle.POST_CHANGED = CodeStyle(POST, CHANGED) class DeterministicKey: """Singleton to indicate that for this key member no public or private key is available because it is the Deterministic Client (see ) This is highly experimental not only from an implementation but also from a specification point of view. The specification has not received adaequate review that would justify using it in any non-experimental scenario. 
""" DETERMINISTIC_KEY = DeterministicKey() del DeterministicKey class NotAProtectedMessage(error.Error, ValueError): """Raised when verification is attempted on a non-OSCORE message""" def __init__(self, message, plain_message): super().__init__(message) self.plain_message = plain_message class ProtectionInvalid(error.Error, ValueError): """Raised when verification of an OSCORE message fails""" class DecodeError(ProtectionInvalid): """Raised when verification of an OSCORE message fails because CBOR or compressed data were erroneous""" class ReplayError(ProtectionInvalid): """Raised when verification of an OSCORE message fails because the sequence numbers was already used""" class ReplayErrorWithEcho(ProtectionInvalid, error.RenderableError): """Raised when verification of an OSCORE message fails because the recipient replay window is uninitialized, but a 4.01 Echo can be constructed with the data in the exception that can lead to the client assisting in replay window recovery""" def __init__(self, secctx, request_id, echo): self.secctx = secctx self.request_id = request_id self.echo = echo def to_message(self): inner = Message( code=UNAUTHORIZED, echo=self.echo, ) outer, _ = self.secctx.protect(inner, request_id=self.request_id) return outer class ContextUnavailable(error.Error, ValueError): """Raised when a context is (currently or permanently) unavailable for protecting or unprotecting a message""" class RequestIdentifiers: """A container for details that need to be passed along from the (un)protection of a request to the (un)protection of the response; these data ensure that the request-response binding process works by passing around the request's partial IV. Users of this module should never create or interact with instances, but just pass them around. """ def __init__(self, kid, partial_iv, nonce, can_reuse_nonce, request_code): self.kid = kid self.partial_iv = partial_iv self.nonce = nonce self.can_reuse_nonce = can_reuse_nonce self.code_style = CodeStyle.from_request(request_code) self.request_hash = None def get_reusable_nonce_and_piv(self): """Return the nonce and the partial IV if can_reuse_nonce is True, and set can_reuse_nonce to False.""" if self.can_reuse_nonce: self.can_reuse_nonce = False return (self.nonce, self.partial_iv) else: return (None, None) def _xor_bytes(a, b): assert len(a) == len(b), "XOR needs consistent lengths" # FIXME is this an efficient thing to do, or should we store everything # that possibly needs xor'ing as long integers with an associated length? return bytes(_a ^ _b for (_a, _b) in zip(a, b)) class SymmetricEncryptionAlgorithm(metaclass=abc.ABCMeta): """A symmetric algorithm The algorithm's API is the AEAD API with addtional authenticated data: The algorihm may or may not verify that data. Algorithms that actually do verify the data are recognized by also being AeadAlgorithm. """ value: int key_bytes: int tag_bytes: int iv_bytes: int @abc.abstractmethod def encrypt(cls, plaintext, aad, key, iv): """Return ciphertext + tag for given input data""" @abc.abstractmethod def decrypt(cls, ciphertext_and_tag, aad, key, iv): """Reverse encryption. 
Must raise ProtectionInvalid on any error stemming from untrusted data.""" @staticmethod def _build_encrypt0_structure(protected, external_aad): assert protected == {}, "Unexpected data in protected bucket" protected_serialized = b"" # were it into an empty dict, it'd be the cbor dump enc_structure = ["Encrypt0", protected_serialized, external_aad] return cbor.dumps(enc_structure) class AeadAlgorithm(SymmetricEncryptionAlgorithm, metaclass=abc.ABCMeta): """A symmetric algorithm that provides authentication, including authentication of additional data.""" class AES_CBC(SymmetricEncryptionAlgorithm, metaclass=abc.ABCMeta): """AES in CBC mode using tthe Python cryptography library""" tag_bytes = 0 iv_bytes = 0 # This introduces padding -- this library doesn't need to care because # Python does allocation for us, but others may need to rethink their # buffer allocation strategies. @classmethod def _cipher(cls, key, iv): return ciphers.base.Cipher( ciphers.algorithms.AES(key), ciphers.modes.CBC(iv), ) @classmethod def encrypt(cls, plaintext, _aad, key, iv): # FIXME: Ignoring aad violates https://www.rfc-editor.org/rfc/rfc9459.html#name-implementation-consideratio but is required for Group OSCORE # Padding according to https://www.rfc-editor.org/rfc/rfc5652#section-6.3 k = cls.key_bytes assert ( k < 256 ), "Algorithm with this key size should not have been created in the first plae" pad_byte = k - (len(plaintext) % k) pad_bytes = bytes((pad_byte,)) * pad_byte plaintext += pad_bytes encryptor = cls._cipher(key, iv).encryptor() result = encryptor.update(plaintext) result += encryptor.finalize() return result @classmethod def decrypt(cls, ciphertext_and_tag, _aad, key, iv): # FIXME: Ignoring aad violates https://www.rfc-editor.org/rfc/rfc9459.html#name-implementation-consideratio but is required for Group OSCORE k = cls.key_bytes if ciphertext_and_tag == b"" or len(ciphertext_and_tag) % k != 0: raise ProtectionInvalid("Message length does not match padding") decryptor = cls._cipher(key, iv).decryptor() result = decryptor.update(ciphertext_and_tag) result += decryptor.finalize() # Padding according to https://www.rfc-editor.org/rfc/rfc5652#section-6.3 claimed_padding = result[-1] if claimed_padding == 0 or claimed_padding > k: raise ProtectionInvalid("Padding does not match key") if result[-claimed_padding:] != bytes((claimed_padding,)) * claimed_padding: raise ProtectionInvalid("Padding is inconsistent") return result[:-claimed_padding] class A128CBC(AES_CBC): # from RFC9459 value = -65531 key_bytes = 16 # 128-bit key iv_bytes = 16 # 16-octet nonce class AES_CCM(AeadAlgorithm, metaclass=abc.ABCMeta): """AES-CCM implemented using the Python cryptography library""" @classmethod def encrypt(cls, plaintext, aad, key, iv): return aead.AESCCM(key, cls.tag_bytes).encrypt(iv, plaintext, aad) @classmethod def decrypt(cls, ciphertext_and_tag, aad, key, iv): try: return aead.AESCCM(key, cls.tag_bytes).decrypt(iv, ciphertext_and_tag, aad) except cryptography.exceptions.InvalidTag: raise ProtectionInvalid("Tag invalid") class AES_CCM_16_64_128(AES_CCM): # from RFC8152 and draft-ietf-core-object-security-0[012] 3.2.1 value = 10 key_bytes = 16 # 128-bit key tag_bytes = 8 # 64-bit tag iv_bytes = 13 # 13-byte nonce class AES_CCM_16_64_256(AES_CCM): # from RFC8152 value = 11 key_bytes = 32 # 256-bit key tag_bytes = 8 # 64-bit tag iv_bytes = 13 # 13-byte nonce class AES_CCM_64_64_128(AES_CCM): # from RFC8152 value = 12 key_bytes = 16 # 128-bit key tag_bytes = 8 # 64-bit tag iv_bytes = 7 # 7-byte nonce class 
AES_CCM_64_64_256(AES_CCM): # from RFC8152 value = 13 key_bytes = 32 # 256-bit key tag_bytes = 8 # 64-bit tag iv_bytes = 7 # 7-byte nonce class AES_CCM_16_128_128(AES_CCM): # from RFC8152 value = 30 key_bytes = 16 # 128-bit key tag_bytes = 16 # 128-bit tag iv_bytes = 13 # 13-byte nonce class AES_CCM_16_128_256(AES_CCM): # from RFC8152 value = 31 key_bytes = 32 # 256-bit key tag_bytes = 16 # 128-bit tag iv_bytes = 13 # 13-byte nonce class AES_CCM_64_128_128(AES_CCM): # from RFC8152 value = 32 key_bytes = 16 # 128-bit key tag_bytes = 16 # 128-bit tag iv_bytes = 7 # 7-byte nonce class AES_CCM_64_128_256(AES_CCM): # from RFC8152 value = 33 key_bytes = 32 # 256-bit key tag_bytes = 16 # 128-bit tag iv_bytes = 7 # 7-byte nonce class AES_GCM(AeadAlgorithm, metaclass=abc.ABCMeta): """AES-GCM implemented using the Python cryptography library""" iv_bytes = 12 # 96 bits fixed size of the nonce @classmethod def encrypt(cls, plaintext, aad, key, iv): return aead.AESGCM(key).encrypt(iv, plaintext, aad) @classmethod def decrypt(cls, ciphertext_and_tag, aad, key, iv): try: return aead.AESGCM(key).decrypt(iv, ciphertext_and_tag, aad) except cryptography.exceptions.InvalidTag: raise ProtectionInvalid("Tag invalid") class A128GCM(AES_GCM): # from RFC8152 value = 1 key_bytes = 16 # 128-bit key tag_bytes = 16 # 128-bit tag class A192GCM(AES_GCM): # from RFC8152 value = 2 key_bytes = 24 # 192-bit key tag_bytes = 16 # 128-bit tag class A256GCM(AES_GCM): # from RFC8152 value = 3 key_bytes = 32 # 256-bit key tag_bytes = 16 # 128-bit tag class ChaCha20Poly1305(AeadAlgorithm): # from RFC8152 value = 24 key_bytes = 32 # 256-bit key tag_bytes = 16 # 128-bit tag iv_bytes = 12 # 96-bit nonce @classmethod def encrypt(cls, plaintext, aad, key, iv): return aead.ChaCha20Poly1305(key).encrypt(iv, plaintext, aad) @classmethod def decrypt(cls, ciphertext_and_tag, aad, key, iv): try: return aead.ChaCha20Poly1305(key).decrypt(iv, ciphertext_and_tag, aad) except cryptography.exceptions.InvalidTag: raise ProtectionInvalid("Tag invalid") class AlgorithmCountersign(metaclass=abc.ABCMeta): """A fully parameterized COSE countersign algorithm An instance is able to provide all the alg_signature, par_countersign and par_countersign_key parameters taht go into the Group OSCORE algorithms field. """ value: int | str @abc.abstractmethod def sign(self, body, external_aad, private_key): """Return the signature produced by the key when using CounterSignature0 as describe in draft-ietf-cose-countersign-01""" @abc.abstractmethod def verify(self, signature, body, external_aad, public_key): """Verify a signature in analogy to sign""" @abc.abstractmethod def generate_with_ccs(self) -> Tuple[Any, bytes]: """Return a usable private key along with a CCS describing it""" @abc.abstractmethod def public_from_private(self, private_key): """Given a private key, derive the publishable key""" @abc.abstractmethod def from_kccs(self, ccs: bytes) -> Any: """Given a CCS, extract the public key, or raise a ValueError if the credential format does not align with the type. 
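A matching CCS decodes to a CBOR map carrying the COSE key under the
``cnf`` claim (8) in its ``COSE_Key`` entry (1), i.e. a map of the shape
``{8: {1: {...}}}``.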
The type is not exactly Any, but whichever type is used by this algorithm class.""" @staticmethod def _build_countersign_structure(body, external_aad): countersign_structure = [ "CounterSignature0", b"", b"", external_aad, body, ] tobesigned = cbor.dumps(countersign_structure) return tobesigned @abc.abstractproperty def signature_length(self) -> int: """The length of a signature using this algorithm""" @abc.abstractproperty def curve_number(self) -> int: """Registered curve number used with this algorithm. Only used for verification of credentials' details""" class AlgorithmStaticStatic(metaclass=abc.ABCMeta): @abc.abstractmethod def staticstatic(self, private_key, public_key): """Derive a shared static-static secret from a private and a public key""" def _from_kccs_common(ccs: bytes) -> dict: """Check that the CCS contains a CNF claim that is a COSE Key, and return that key""" try: parsed = cbor.loads(ccs) except cbor.CBORDecodeError as e: raise ValueError("CCS not in CBOR format") from e if ( not isinstance(parsed, dict) or CWT_CLAIM_CNF not in parsed or not isinstance(parsed[CWT_CLAIM_CNF], dict) or CWT_CNF_COSE_KEY not in parsed[CWT_CLAIM_CNF] or not isinstance(parsed[CWT_CLAIM_CNF][CWT_CNF_COSE_KEY], dict) ): raise ValueError("CCS must contain a COSE Key dict in a CNF") return parsed[CWT_CLAIM_CNF][CWT_CNF_COSE_KEY] class Ed25519(AlgorithmCountersign): def sign(self, body, aad, private_key): private_key = asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes( private_key ) return private_key.sign(self._build_countersign_structure(body, aad)) def verify(self, signature, body, aad, public_key): public_key = asymmetric.ed25519.Ed25519PublicKey.from_public_bytes(public_key) try: public_key.verify(signature, self._build_countersign_structure(body, aad)) except cryptography.exceptions.InvalidSignature: raise ProtectionInvalid("Signature mismatch") def _generate(self): key = asymmetric.ed25519.Ed25519PrivateKey.generate() # FIXME: We could avoid handing the easy-to-misuse bytes around if the # current algorithm interfaces did not insist on passing the # exchangeable representations -- and generally that should be more # efficient. return key.private_bytes( encoding=serialization.Encoding.Raw, format=serialization.PrivateFormat.Raw, encryption_algorithm=serialization.NoEncryption(), ) def generate_with_ccs(self) -> Tuple[Any, bytes]: private = self._generate() public = self.public_from_private(private) ccs = cbor.dumps( { CWT_CLAIM_CNF: { CWT_CNF_COSE_KEY: { COSE_KEY_COMMON_KTY: COSE_KTY_OKP, COSE_KEY_COMMON_ALG: self.value, COSE_KEY_OKP_CRV: self.curve_number, COSE_KEY_OKP_X: public, } } } ) return (private, ccs) def public_from_private(self, private_key): private_key = asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes( private_key ) public_key = private_key.public_key() return public_key.public_bytes( encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw, ) def from_kccs(self, ccs: bytes) -> Any: # eg. {1: 1, 3: -8, -1: 6, -2: h'77 ...
88'} cose_key = _from_kccs_common(ccs) if ( cose_key.get(COSE_KEY_COMMON_KTY) == COSE_KTY_OKP and cose_key.get(COSE_KEY_COMMON_ALG) == self.value and cose_key.get(COSE_KEY_OKP_CRV) == self.curve_number and COSE_KEY_OKP_X in cose_key ): return cose_key[COSE_KEY_OKP_X] else: raise ValueError("Key type not recognized from CCS key %r" % cose_key) value = -8 curve_number = 6 signature_length = 64 class EcdhSsHkdf256(AlgorithmStaticStatic): # FIXME: This class uses the Edwards keys as private and public keys, and # not the converted ones. This will be problematic if pairwise-only # contexts are to be set up. value = -27 # FIXME these two will be different when using the Montgomery keys directly # This one will only be used when establishing and distributing pairwise-only keys public_from_private = Ed25519.public_from_private def staticstatic(self, private_key, public_key): private_key = asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes( private_key ) private_key = cryptography_additions.sk_to_curve25519(private_key) public_key = asymmetric.ed25519.Ed25519PublicKey.from_public_bytes(public_key) public_key = cryptography_additions.pk_to_curve25519(public_key) return private_key.exchange(public_key) class ECDSA_SHA256_P256(AlgorithmCountersign, AlgorithmStaticStatic): # Trying a new construction approach -- should work just as well given # we're just passing Python objects around def from_public_parts(self, x: bytes, y: bytes): """Create a public key from its COSE values""" return asymmetric.ec.EllipticCurvePublicNumbers( int.from_bytes(x, "big"), int.from_bytes(y, "big"), asymmetric.ec.SECP256R1(), ).public_key() def from_kccs(self, ccs: bytes) -> Any: cose_key = _from_kccs_common(ccs) if ( cose_key.get(COSE_KEY_COMMON_KTY) == COSE_KTY_EC2 and cose_key.get(COSE_KEY_COMMON_ALG) == self.value and COSE_KEY_EC2_X in cose_key and COSE_KEY_EC2_Y in cose_key ): return self.from_public_parts( x=cose_key[COSE_KEY_EC2_X], y=cose_key[COSE_KEY_EC2_Y], ) else: raise ValueError("Key type not recognized from CCS key %r" % cose_key) def from_private_parts(self, x: bytes, y: bytes, d: bytes): public_numbers = self.from_public_parts(x, y).public_numbers() private_numbers = asymmetric.ec.EllipticCurvePrivateNumbers( int.from_bytes(d, "big"), public_numbers ) return private_numbers.private_key() def sign(self, body, aad, private_key): der_signature = private_key.sign( self._build_countersign_structure(body, aad), asymmetric.ec.ECDSA(hashes.SHA256()), ) (r, s) = decode_dss_signature(der_signature) return r.to_bytes(32, "big") + s.to_bytes(32, "big") def verify(self, signature, body, aad, public_key): r = signature[:32] s = signature[32:] r = int.from_bytes(r, "big") s = int.from_bytes(s, "big") der_signature = encode_dss_signature(r, s) try: public_key.verify( der_signature, self._build_countersign_structure(body, aad), asymmetric.ec.ECDSA(hashes.SHA256()), ) except cryptography.exceptions.InvalidSignature: raise ProtectionInvalid("Signature mismatch") def _generate(self): return asymmetric.ec.generate_private_key(asymmetric.ec.SECP256R1()) def generate_with_ccs(self) -> Tuple[Any, bytes]: private = self._generate() public = self.public_from_private(private) # FIXME: Deduplicate with edhoc.py x = public.public_numbers().x.to_bytes(32, "big") y = public.public_numbers().y.to_bytes(32, "big") ccs = cbor.dumps( { CWT_CLAIM_CNF: { CWT_CNF_COSE_KEY: { COSE_KEY_COMMON_KTY: COSE_KTY_EC2, COSE_KEY_COMMON_ALG: self.value, COSE_KEY_EC2_X: x, COSE_KEY_EC2_Y: y, } } } ) return (private, ccs) def public_from_private(self, 
private_key): return private_key.public_key() def staticstatic(self, private_key, public_key): return private_key.exchange(asymmetric.ec.ECDH(), public_key) value = -7 # FIXME: when used as a static-static algorithm, does this become -27? see shepherd review. curve_number = 1 signature_length = 64 algorithms = { "AES-CCM-16-64-128": AES_CCM_16_64_128(), "AES-CCM-16-64-256": AES_CCM_16_64_256(), "AES-CCM-64-64-128": AES_CCM_64_64_128(), "AES-CCM-64-64-256": AES_CCM_64_64_256(), "AES-CCM-16-128-128": AES_CCM_16_128_128(), "AES-CCM-16-128-256": AES_CCM_16_128_256(), "AES-CCM-64-128-128": AES_CCM_64_128_128(), "AES-CCM-64-128-256": AES_CCM_64_128_256(), "ChaCha20/Poly1305": ChaCha20Poly1305(), "A128GCM": A128GCM(), "A192GCM": A192GCM(), "A256GCM": A256GCM(), } # algorithms with full parameter set algorithms_countersign = { # maybe needs a different name... "EdDSA on Ed25519": Ed25519(), "ECDSA w/ SHA-256 on P-256": ECDSA_SHA256_P256(), } algorithms_staticstatic = { "ECDH-SS + HKDF-256": EcdhSsHkdf256(), } DEFAULT_ALGORITHM = "AES-CCM-16-64-128" _hash_backend = cryptography.hazmat.backends.default_backend() hashfunctions = { "sha256": hashes.SHA256(), "sha384": hashes.SHA384(), "sha512": hashes.SHA512(), } DEFAULT_HASHFUNCTION = "sha256" DEFAULT_WINDOWSIZE = 32 class BaseSecurityContext: # Deprecated marker for whether the class uses the # ContextWhereExternalAadIsGroup mixin; see documentation there. external_aad_is_group = False # Authentication information carried with this security context; managed # externally by whatever creates the security context. authenticated_claims: List[str] = [] #: AEAD algorithm. This may be None if it is not set in an OSCORE group context. alg_aead: Optional[AeadAlgorithm] @property def algorithm(self): warnings.warn( "Property was renamed to 'alg_aead'", DeprecationWarning, stacklevel=2 ) return self.alg_aead @algorithm.setter def algorithm(self, value): warnings.warn( "Property was renamed to 'alg_aead'", DeprecationWarning, stacklevel=2 ) self.alg_aead = value hashfun: hashes.HashAlgorithm def _construct_nonce(self, partial_iv_short, piv_generator_id): pad_piv = b"\0" * (5 - len(partial_iv_short)) s = bytes([len(piv_generator_id)]) pad_id = b"\0" * (self.alg_aead.iv_bytes - 6 - len(piv_generator_id)) components = s + pad_id + piv_generator_id + pad_piv + partial_iv_short used_common_iv = self.common_iv[: len(components)] nonce = _xor_bytes(used_common_iv, components) return nonce def _extract_external_aad( self, message, request_id, local_is_sender: bool ) -> bytes: """Build the serialized external AAD from information in the message and the request_id. Information about whether the local context is the sender of the message is only relevant to group contexts, where it influences whose authentication credentials are placed in the AAD. 
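For a plain OSCORE exchange, the result is the serialized CBOR array ``[oscore_version, [alg_aead], request_kid, request_piv, class_i_options]`` of RFC 8613 Section 5.4; group contexts (see below) extend the algorithms array and additionally append ID context, OSCORE option, authentication credential and group manager credential.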
""" # If any option were actually Class I, it would be something like # # the_options = pick some of(message) # class_i_options = Message(the_options).opt.encode() oscore_version = 1 class_i_options = b"" if request_id.request_hash is not None: class_i_options = Message(request_hash=request_id.request_hash).opt.encode() algorithms: List[int | str | None] = [ None if self.alg_aead is None else self.alg_aead.value ] if isinstance(self, ContextWhereExternalAadIsGroup): algorithms.append( None if self.alg_group_enc is None else self.alg_group_enc.value ) algorithms.append( None if self.alg_signature is None else self.alg_signature.value ) algorithms.append( None if self.alg_pairwise_key_agreement is None else self.alg_pairwise_key_agreement.value ) external_aad = [ oscore_version, algorithms, request_id.kid, request_id.partial_iv, class_i_options, ] if isinstance(self, ContextWhereExternalAadIsGroup): # FIXME: We may need to carry this over in the request_id when # observation span group rekeyings external_aad.append(self.id_context) assert message.opt.oscore is not None, "Double OSCORE" external_aad.append(message.opt.oscore) if local_is_sender: external_aad.append(self.sender_auth_cred) else: external_aad.append(self.recipient_auth_cred) external_aad.append(self.group_manager_cred) return cbor.dumps(external_aad) class ContextWhereExternalAadIsGroup(BaseSecurityContext): """The protection and unprotection functions will use the Group OSCORE AADs rather than the regular OSCORE AADs iff a context uses this mixin. (Ie. alg_group_enc etc are added to the algorithms, and request_kid_context, OSCORE_option, sender_auth_cred and gm_cred are added). This does not necessarily match the is_signing property (as pairwise contexts use this but don't sign), and is distinct from the added OSCORE option in the AAD (as that's only applicable for the external AAD as extracted for signing and signature verification purposes).""" id_context: bytes external_aad_is_group = True alg_group_enc: Optional[AeadAlgorithm] alg_signature: Optional[AlgorithmCountersign] # This is also of type AlgorithmCountersign because the staticstatic # function is sitting on the same type. alg_pairwise_key_agreement: Optional[AlgorithmCountersign] sender_auth_cred: bytes recipient_auth_cred: bytes group_manager_cred: bytes # FIXME pull interface components from SecurityContext up here class CanProtect(BaseSecurityContext, metaclass=abc.ABCMeta): # The protection function will add a signature acccording to the context's # alg_signature attribute if this is true is_signing = False # Send the KID when protecting responses # # Once group pairwise mode is implemented, this will need to become a # parameter to protect(), which is stored at the point where the incoming # context is turned into an outgoing context. (Currently, such a mechanism # isn't there yet, and oscore_wrapper protects responses with the very same # context they came in on). responses_send_kid = False @staticmethod def _compress(protected, unprotected, ciphertext): """Pack the untagged COSE_Encrypt0 object described by the *args into two bytestrings suitable for the Object-Security option and the message body""" if protected: raise RuntimeError( "Protection produced a message that has uncompressable fields." 
) piv = unprotected.pop(COSE_PIV, b"") if len(piv) > COMPRESSION_BITS_N: raise ValueError("Can't encode overly long partial IV") firstbyte = len(piv) if COSE_KID in unprotected: firstbyte |= COMPRESSION_BIT_K kid_data = unprotected.pop(COSE_KID) else: kid_data = b"" if COSE_KID_CONTEXT in unprotected: firstbyte |= COMPRESSION_BIT_H kid_context = unprotected.pop(COSE_KID_CONTEXT) s = len(kid_context) if s > 255: raise ValueError("KID Context too long") s_kid_context = bytes((s,)) + kid_context else: s_kid_context = b"" if COSE_COUNTERSIGNATURE0 in unprotected: firstbyte |= COMPRESSION_BIT_GROUP unprotected.pop(COSE_COUNTERSIGNATURE0) # ciphertext will eventually also get the countersignature, but # that happens later when the option is already processed. if unprotected: raise RuntimeError( "Protection produced a message that has uncompressable fields." ) if firstbyte: option = bytes([firstbyte]) + piv + s_kid_context + kid_data else: option = b"" return (option, ciphertext) def protect(self, message, request_id=None, *, kid_context=True): """Given a plain CoAP message, create a protected message that contains message's options in the inner or outer CoAP message as described in OSCOAP. If the message is a response to a previous message, the additional data from unprotecting the request are passed in as request_id. When request data is present, its partial IV is reused if possible. The security context's ID context is encoded in the resulting message unless kid_context is explicitly set to a False; other values for the kid_context can be passed in as byte string in the same parameter. """ assert ( (request_id is None) == message.code.is_request() ), "Requestishness of code to protect does not match presence of request ID" outer_message, plaintext = self._split_message(message, request_id) protected = {} nonce = None unprotected = {} if request_id is not None: nonce, partial_iv_short = request_id.get_reusable_nonce_and_piv() if nonce is not None: partial_iv_generated_by = request_id.kid if nonce is None: nonce, partial_iv_short = self._build_new_nonce() partial_iv_generated_by = self.sender_id unprotected[COSE_PIV] = partial_iv_short if message.code.is_request(): unprotected[COSE_KID] = self.sender_id request_id = RequestIdentifiers( self.sender_id, partial_iv_short, nonce, can_reuse_nonce=None, request_code=outer_message.code, ) if kid_context is True: if self.id_context is not None: unprotected[COSE_KID_CONTEXT] = self.id_context elif kid_context is not False: unprotected[COSE_KID_CONTEXT] = kid_context else: if self.responses_send_kid: unprotected[COSE_KID] = self.sender_id # Putting in a dummy value as the signature calculation will already need some of the compression result if self.is_signing: unprotected[COSE_COUNTERSIGNATURE0] = b"" # FIXME: Running this twice quite needlessly (just to get the oscore option for sending) option_data, _ = self._compress(protected, unprotected, b"") outer_message.opt.oscore = option_data external_aad = self._extract_external_aad( outer_message, request_id, local_is_sender=True ) aad = self.alg_aead._build_encrypt0_structure(protected, external_aad) key = self._get_sender_key(outer_message, external_aad, plaintext, request_id) ciphertext = self.alg_aead.encrypt(plaintext, aad, key, nonce) _, payload = self._compress(protected, unprotected, ciphertext) if self.is_signing: signature = self.alg_signature.sign(payload, external_aad, self.private_key) keystream = self._kdf_for_keystreams( partial_iv_generated_by, partial_iv_short, self.signature_encryption_key, 
self.sender_id, INFO_TYPE_KEYSTREAM_REQUEST if message.code.is_request() else INFO_TYPE_KEYSTREAM_RESPONSE, ) encrypted_signature = _xor_bytes(signature, keystream) payload += encrypted_signature outer_message.payload = payload # FIXME go through options section # the request_id in the second argument should be discarded by the # caller when protecting a response -- is that reason enough for an # `if` and returning None? return outer_message, request_id def _get_sender_key(self, outer_message, aad, plaintext, request_id): """Customization hook of the protect function While most security contexts have a fixed sender key, deterministic requests need to shake up a few things. They need to modify the outer message, as well as the request_id as it will later be used to unprotect the response.""" return self.sender_key def _split_message(self, message, request_id): """Given a message to be protected, return the outer message that contains all Class I and Class U options (but without payload or Object-Security option), and the encoded inner message that contains all Class E options and the payload. This leaves the messages' remotes unset.""" if message.code.is_request(): outer_host = message.opt.uri_host proxy_uri = message.opt.proxy_uri inner_message = message.copy( uri_host=None, uri_port=None, proxy_uri=None, proxy_scheme=None, ) inner_message.remote = None if proxy_uri is not None: # Use set_request_uri to split up the proxy URI into its # components; extract, preserve and clear them. inner_message.set_request_uri(proxy_uri, set_uri_host=False) if inner_message.opt.proxy_uri is not None: raise ValueError("Can not split Proxy-URI into options") outer_uri = inner_message.remote.uri_base inner_message.remote = None inner_message.opt.proxy_scheme = None if message.opt.observe is None: outer_code = POST else: outer_code = FETCH else: outer_host = None proxy_uri = None inner_message = message.copy() outer_code = request_id.code_style.response # no max-age because these are always successful responses outer_message = Message( code=outer_code, uri_host=outer_host, observe=None if message.code.is_response() else message.opt.observe, ) if proxy_uri is not None: outer_message.set_request_uri(outer_uri) plaintext = bytes([inner_message.code]) + inner_message.opt.encode() if inner_message.payload: plaintext += bytes([0xFF]) plaintext += inner_message.payload return outer_message, plaintext def _build_new_nonce(self): """This implements generation of a new nonce, assembled as per Figure 5 of draft-ietf-core-object-security-06. Returns the shortened partial IV as well.""" seqno = self.new_sequence_number() partial_iv = seqno.to_bytes(5, "big") return ( self._construct_nonce(partial_iv, self.sender_id), partial_iv.lstrip(b"\0") or b"\0", ) # sequence number handling def new_sequence_number(self): """Return a new sequence number; the implementation is responsible for never returning the same value twice in a given security context.
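For illustration, the partial IV shortening used in _build_new_nonce above (a doctest added for exposition; the 5-byte big-endian encoding is stripped of leading zeros, keeping at least one byte):

>>> (0).to_bytes(5, "big").lstrip(b"\0") or b"\0"
b'\x00'
>>> (256).to_bytes(5, "big").lstrip(b"\0") or b"\0"
b'\x01\x00'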
May raise ContextUnavailable.""" retval = self.sender_sequence_number if retval >= MAX_SEQNO: raise ContextUnavailable("Sequence number too large, context is exhausted.") self.sender_sequence_number += 1 self.post_seqnoincrease() return retval # implementation defined @abc.abstractmethod def post_seqnoincrease(self): """Ensure that sender_sequence_number is stored""" raise def context_from_response(self, unprotected_bag) -> CanUnprotect: """When receiving a response to a request protected with this security context, pick the security context with which to unprotect the response given the unprotected information from the Object-Security option. This allows picking the right security context in a group response, and helps getting a new short-lived context for B.2 mode. The default behavior is returning self. """ # FIXME justify by moving into a mixin for CanProtectAndUnprotect return self # type: ignore class CanUnprotect(BaseSecurityContext): def unprotect(self, protected_message, request_id=None): assert ( (request_id is not None) == protected_message.code.is_response() ), "Requestishness of code to unprotect does not match presence of request ID" is_response = protected_message.code.is_response() # Set to a raisable exception on replay check failures; it will be # raised, but the message may still be processed in the course of Echo handling. replay_error = None protected_serialized, protected, unprotected, ciphertext = ( self._extract_encrypted0(protected_message) ) if protected: raise ProtectionInvalid("The protected field is not empty") # FIXME check for duplicate keys in protected if unprotected.pop(COSE_KID_CONTEXT, self.id_context) != self.id_context: # FIXME is this necessary? raise ProtectionInvalid("Sender ID context does not match") if unprotected.pop(COSE_KID, self.recipient_id) != self.recipient_id: # for most cases, this is caught by the session ID dispatch, but in # responses (where explicit sender IDs are atypical), this is a # valid check raise ProtectionInvalid("Sender ID does not match") if COSE_PIV not in unprotected: if not is_response: raise ProtectionInvalid("No sequence number provided in request") nonce = request_id.nonce seqno = None # sentinel for not striking out anything partial_iv_short = request_id.partial_iv partial_iv_generated_by = request_id.kid else: partial_iv_short = unprotected.pop(COSE_PIV) partial_iv_generated_by = self.recipient_id nonce = self._construct_nonce(partial_iv_short, self.recipient_id) seqno = int.from_bytes(partial_iv_short, "big") if not is_response: if not self.recipient_replay_window.is_initialized(): replay_error = ReplayError("Sequence number check unavailable") elif not self.recipient_replay_window.is_valid(seqno): replay_error = ReplayError("Sequence number was re-used") if replay_error is not None and self.echo_recovery is None: # Don't even try decoding if there is no reason to raise replay_error request_id = RequestIdentifiers( self.recipient_id, partial_iv_short, nonce, can_reuse_nonce=replay_error is None, request_code=protected_message.code, ) if unprotected.pop(COSE_COUNTERSIGNATURE0, None) is not None: try: alg_signature = self.alg_signature except NameError: raise DecodeError( "Group messages can not be decoded with this non-group context" ) siglen = alg_signature.signature_length if len(ciphertext) < siglen: raise DecodeError("Message too short for signature") encrypted_signature = ciphertext[-siglen:] keystream = self._kdf_for_keystreams( partial_iv_generated_by, partial_iv_short, self.signature_encryption_key,
self.recipient_id, INFO_TYPE_KEYSTREAM_REQUEST if protected_message.code.is_request() else INFO_TYPE_KEYSTREAM_RESPONSE, ) signature = _xor_bytes(encrypted_signature, keystream) ciphertext = ciphertext[:-siglen] else: signature = None if unprotected: raise DecodeError("Unsupported unprotected option") if ( len(ciphertext) < self.alg_aead.tag_bytes + 1 ): # +1 assures access to plaintext[0] (the code) raise ProtectionInvalid("Ciphertext too short") external_aad = self._extract_external_aad( protected_message, request_id, local_is_sender=False ) enc_structure = ["Encrypt0", protected_serialized, external_aad] aad = cbor.dumps(enc_structure) key = self._get_recipient_key(protected_message) plaintext = self.alg_aead.decrypt(ciphertext, aad, key, nonce) self._post_decrypt_checks( external_aad, plaintext, protected_message, request_id ) if not is_response and seqno is not None and replay_error is None: self.recipient_replay_window.strike_out(seqno) if signature is not None: # Only doing the expensive signature validation once the cheaper decryption passed alg_signature.verify( signature, ciphertext, external_aad, self.recipient_public_key ) # FIXME add options from unprotected unprotected_message = Message(code=plaintext[0]) unprotected_message.payload = unprotected_message.opt.decode(plaintext[1:]) try_initialize = ( not self.recipient_replay_window.is_initialized() and self.echo_recovery is not None ) if try_initialize: if protected_message.code.is_request(): # Either accept into replay window and clear replay error, or raise # something that can turn into a 4.01 Echo response if unprotected_message.opt.echo == self.echo_recovery: self.recipient_replay_window.initialize_from_freshlyseen(seqno) replay_error = None else: raise ReplayErrorWithEcho( secctx=self, request_id=request_id, echo=self.echo_recovery ) else: # We can initialize the replay window from a response as well. # The response is guaranteed fresh as it was AEAD-decoded to # match a request sent by this process. # # This is rare, as it only works when the server uses its own # sequence number, eg. when sending a notification or when # acting again on a retransmitted safe request whose response # it did not cache. # # Nothing bad happens if we can't make progress -- we just # don't initialize the replay window that wouldn't have been # checked for a response anyway. if seqno is not None: self.recipient_replay_window.initialize_from_freshlyseen(seqno) if replay_error is not None: raise replay_error if unprotected_message.code.is_request(): if protected_message.opt.observe != 0: unprotected_message.opt.observe = None else: if protected_message.opt.observe is not None: # -1 ensures that they sort correctly in later reordering # detection. Note that neither -1 nor high (>3 byte) sequence # numbers can be serialized in the Observe option, but they are # in this implementation accepted for passing around.
unprotected_message.opt.observe = -1 if seqno is None else seqno return unprotected_message, request_id def _get_recipient_key(self, protected_message): """Customization hook of the unprotect function While most security contexts have a fixed recipient key, deterministic requests build it on demand.""" return self.recipient_key def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id): """Customization hook of the unprotect function after decryption While most security contexts are good with the default checks, deterministic requests need to perform additional checks while AAD and plaintext information is still available, and modify the request_id for the later protection step of the response.""" @staticmethod def _uncompress(option_data, payload): if option_data == b"": firstbyte = 0 else: firstbyte = option_data[0] tail = option_data[1:] unprotected = {} if firstbyte & COMPRESSION_BITS_RESERVED: raise DecodeError("Protected data uses reserved fields") pivsz = firstbyte & COMPRESSION_BITS_N if pivsz: if len(tail) < pivsz: raise DecodeError("Partial IV announced but not present") unprotected[COSE_PIV] = tail[:pivsz] tail = tail[pivsz:] if firstbyte & COMPRESSION_BIT_H: # kid context hint s = tail[0] if len(tail) - 1 < s: raise DecodeError("Context hint announced but not present") tail = tail[1:] unprotected[COSE_KID_CONTEXT] = tail[:s] tail = tail[s:] if firstbyte & COMPRESSION_BIT_K: kid = tail unprotected[COSE_KID] = kid if firstbyte & COMPRESSION_BIT_GROUP: # Not really; As this is (also) used early on (before the KID # context is even known, because it's just getting extracted), this # is returning an incomplete value here and leaves it to the later # processing to strip the right number of bytes from the ciphertext unprotected[COSE_COUNTERSIGNATURE0] = PRESENT_BUT_NO_VALUE_YET return b"", {}, unprotected, payload @classmethod def _extract_encrypted0(cls, message): if message.opt.oscore is None: raise NotAProtectedMessage("No Object-Security option present", message) protected_serialized, protected, unprotected, ciphertext = cls._uncompress( message.opt.oscore, message.payload ) return protected_serialized, protected, unprotected, ciphertext # implementation defined def context_for_response(self) -> CanProtect: """After processing a request with this context, with which security context should an outgoing response be protected? By default, it's the same context.""" # FIXME: Is there any way in which the handler may want to influence # the decision taken here? Or would, then, the handler just call a more # elaborate but similar function when setting the response's remote # already? # FIXME justify by moving into a mixin for CanProtectAndUnprotect return self # type: ignore class SecurityContextUtils(BaseSecurityContext): def _kdf(self, salt, ikm, role_id, out_type): """The HKDF as used to derive sender and recipient key and IV in RFC8613 Section 3.2.1, and analogously the Group Encryption Key of oscore-groupcomm. """ # The field in info is called `alg_aead` defined in RFC8613, but in # group OSCORE something that's very clearly *not* alg_aead is put in # there. 
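# For a plain OSCORE sender key, the values assembled below amount to
# (illustrative): info = [sender_id, id_context, alg_aead.value, "Key",
# alg_aead.key_bytes], CBOR-encoded and fed through HKDF as per RFC 8613
# Section 3.2.1.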
the_field_called_alg_aead = self.alg_aead.value if out_type == "Key": out_bytes = self.alg_aead.key_bytes elif out_type == "IV": out_bytes = max( ( a.iv_bytes for a in [self.alg_aead, getattr(self, "alg_group_enc", None)] if a is not None ) ) elif out_type == "SEKey": # "While the obtained Signature Encryption Key is never used with # the Group Encryption Algorithm, its length was chosen to obtain a # matching level of security." out_bytes = self.alg_group_enc.key_bytes the_field_called_alg_aead = self.alg_group_enc.value else: raise ValueError("Output type not recognized") info = [ role_id, self.id_context, the_field_called_alg_aead, out_type, out_bytes, ] return self._kdf_lowlevel(salt, ikm, info, out_bytes) def _kdf_for_keystreams(self, piv_generated_by, salt, ikm, role_id, out_type): """The HKDF as used to derive the keystreams of oscore-groupcomm.""" out_bytes = self.alg_signature.signature_length assert out_type in ( INFO_TYPE_KEYSTREAM_REQUEST, INFO_TYPE_KEYSTREAM_RESPONSE, ), "Output type not recognized" info = [ piv_generated_by, self.id_context, out_type, out_bytes, ] return self._kdf_lowlevel(salt, ikm, info, out_bytes) def _kdf_lowlevel(self, salt: bytes, ikm: bytes, info: list, l: int) -> bytes: # noqa: E741 (signature follows RFC definition) """The HKDF function as used in RFC8613 and oscore-groupcomm (notated there as ``something = HKDF(...)``). Note that `info` typically contains `L` at some point. When `info` takes the conventional structure of [pid, id_context, alg_aead, type, L], it may make sense to extend the `_kdf` function to support that case, or `_kdf_for_keystreams` for a different structure, as they are the more high-level tools.""" hkdf = HKDF( algorithm=self.hashfun, length=l, salt=salt, info=cbor.dumps(info), backend=_hash_backend, ) expanded = hkdf.derive(ikm) return expanded def derive_keys(self, master_salt, master_secret): """Populate sender_key, recipient_key and common_iv from the algorithm, hash function and id_context already configured beforehand, and from the passed salt and secret.""" self.sender_key = self._kdf(master_salt, master_secret, self.sender_id, "Key") self.recipient_key = self._kdf( master_salt, master_secret, self.recipient_id, "Key" ) self.common_iv = self._kdf(master_salt, master_secret, b"", "IV") # really more of the Credentials interface def get_oscore_context_for(self, unprotected): """Return a suitable context (most easily self) for an incoming request if its unprotected data (COSE_KID, COSE_KID_CONTEXT) fit its description. If it doesn't match, it returns None. The default implementation just strictly checks for whether kid and any kid context match (not matching if a local KID context is set but none is given in the request); modes like Group OSCORE can spin up aspect objects here. """ if ( unprotected.get(COSE_KID, None) == self.recipient_id and unprotected.get(COSE_KID_CONTEXT, None) == self.id_context ): return self class ReplayWindow: """A regular replay window of a fixed size. It is implemented as an index and a bitfield (represented by an integer) whose least significant bit represents the sequence number of the index, and a 1 indicates that a number was seen. No shenanigans around implicit leading ones (think floating point normalization) happen.
>>> w = ReplayWindow(32, lambda: None) >>> w.initialize_empty() >>> w.strike_out(5) >>> w.is_valid(3) True >>> w.is_valid(5) False >>> w.strike_out(0) >>> w.strike_out(1) >>> w.strike_out(2) >>> w.is_valid(1) False Jumping ahead by the window size invalidates older numbers: >>> w.is_valid(4) True >>> w.strike_out(35) >>> w.is_valid(4) True >>> w.strike_out(36) >>> w.is_valid(4) False Usage safety ------------ For every key, the replay window can only be initialized empty once. On later uses, it needs to be persisted by storing the output of self.persist() somewhere and loaded from that persisted data. It is acceptable to store persistence data in the strike_out_callback, but that must then ensure that the data is written (flushed to a file or committed to a database), which is usually inefficient. Stability --------- This class is not considered for stabilization yet and is an implementation detail of the SecurityContext implementation(s). """ _index = None """Sequence number represented by the least significant bit of _bitfield""" _bitfield = None """Integer interpreted as a bitfield, self._size wide. A digit 1 at any bit indicates that the bit's index (its power of 2) plus self._index was already seen.""" def __init__(self, size, strike_out_callback): self._size = size self.strike_out_callback = strike_out_callback def is_initialized(self): return self._index is not None def initialize_empty(self): self._index = 0 self._bitfield = 0 def initialize_from_persisted(self, persisted): self._index = persisted["index"] self._bitfield = persisted["bitfield"] def initialize_from_freshlyseen(self, seen): """Initialize the replay window with a particular value that is just being observed in a fresh (ie. generated by the peer later than any messages processed before state was lost here) message. This marks the seen sequence number and all preceding it as invalid, and all later ones as valid.""" self._index = seen self._bitfield = 1 def is_valid(self, number): if number < self._index: return False if number >= self._index + self._size: return True return (self._bitfield >> (number - self._index)) & 1 == 0 def strike_out(self, number): if not self.is_valid(number): raise ValueError( "Sequence number is not valid any more and " "thus can't be removed from the window" ) overshoot = number - (self._index + self._size - 1) if overshoot > 0: self._index += overshoot self._bitfield >>= overshoot assert self.is_valid(number), "Sequence number was not valid before strike-out" self._bitfield |= 1 << (number - self._index) self.strike_out_callback() def persist(self): """Return a dict containing internal state which can be passed to init to recreate the replay window.""" return {"index": self._index, "bitfield": self._bitfield} class FilesystemSecurityContext( CanProtect, CanUnprotect, SecurityContextUtils, credentials._Objectish ): """Security context stored in a directory as distinct files containing * Master secret, master salt, sender and recipient ID, optionally algorithm, the KDF hash function, and replay window size (settings.json and secret.json, where the latter is typically readable only for the user) * sequence numbers and replay windows (sequence.json, the only file the process needs write access to) The static parameters can all either be placed in settings.json or secret.json, but must not be present in both; the presence of either file is sufficient. .. warning:: Security contexts must never be copied around and used after another copy was used.
They should only ever be moved, and if they are copied (eg. as a part of a system backup), restored contexts must not be used again; they need to be replaced with freshly created ones. An additional file named `lock` is created to prevent the accidental use of a context by two concurrent programs. Note that the sequence number file is updated in an atomic fashion which requires file creation privileges in the directory. If privilege separation between settings/key changes and sequence number changes is desired, one way to achieve that on Linux is giving the aiocoap process's user group write permissions on the directory and setting the sticky bit on the directory, thus forbidding the user from removing the settings/secret files it does not own. Writes due to sent sequence numbers are reduced by applying a variation on the mechanism of RFC8613 Appendix B.1.1 (incrementing the persisted sender sequence number in steps of `k`). That value is automatically grown from sequence_number_chunksize_start up to sequence_number_chunksize_limit. At runtime, the receive window is not stored but kept indeterminate. In case of an abnormal shutdown, the server uses the mechanism described in Appendix B.1.2 to recover. """ # possibly overridden in constructor alg_aead = algorithms[DEFAULT_ALGORITHM] class LoadError(ValueError): """Exception raised with a descriptive message when trying to load a faulty security context""" def __init__( self, basedir: str, sequence_number_chunksize_start=10, sequence_number_chunksize_limit=10000, ): self.basedir = basedir self.lockfile: Optional[filelock.FileLock] = filelock.FileLock( os.path.join(basedir, "lock") ) # 0.001: Just fail if it can't be acquired # See https://github.com/benediktschmitt/py-filelock/issues/57 try: self.lockfile.acquire(timeout=0.001) # see https://github.com/PyCQA/pycodestyle/issues/703 except: # noqa: E722 # No lock, no loading, no need to fail in __del__ self.lockfile = None raise # Always enabled as committing to a file for every received request # would be a terrible burden. self.echo_recovery = secrets.token_bytes(8) try: self._load() except KeyError as k: raise self.LoadError("Configuration key missing: %s" % (k.args[0],)) self.sequence_number_chunksize_start = sequence_number_chunksize_start self.sequence_number_chunksize_limit = sequence_number_chunksize_limit self.sequence_number_chunksize = sequence_number_chunksize_start self.sequence_number_persisted = self.sender_sequence_number def _load(self): # doesn't check for KeyError on every occasion, relies on __init__ to # catch that data = {} for readfile in ("secret.json", "settings.json"): try: with open(os.path.join(self.basedir, readfile)) as f: filedata = json.load(f) except FileNotFoundError: continue for key, value in filedata.items(): if key.endswith("_hex"): key = key[:-4] value = binascii.unhexlify(value) elif key.endswith("_ascii"): key = key[:-6] value = value.encode("ascii") if key in data:
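# Both files share one flat JSON namespace; a minimal illustrative layout
# (values are examples only) could be
#   settings.json: {"sender-id_hex": "01", "recipient-id_hex": "02",
#                   "algorithm": "AES-CCM-16-64-128", "kdf-hashfun": "sha256"}
#   secret.json:   {"secret_ascii": "correct horse battery staple"}
# which is why any key may be present in at most one of the files:
raise self.LoadError(
"Datum %r present in multiple input files at %r."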
% (key, self.basedir) ) data[key] = value self.alg_aead = algorithms[data.get("algorithm", DEFAULT_ALGORITHM)] self.hashfun = hashfunctions[data.get("kdf-hashfun", DEFAULT_HASHFUNCTION)] windowsize = data.get("window", DEFAULT_WINDOWSIZE) if not isinstance(windowsize, int): raise self.LoadError("Non-integer replay window") self.sender_id = data["sender-id"] self.recipient_id = data["recipient-id"] if ( max(len(self.sender_id), len(self.recipient_id)) > self.alg_aead.iv_bytes - 6 ): raise self.LoadError( "Sender or Recipient ID too long (maximum length %s for this algorithm)" % (self.alg_aead.iv_bytes - 6) ) master_secret = data["secret"] master_salt = data.get("salt", b"") self.id_context = data.get("id-context", None) self.derive_keys(master_salt, master_secret) self.recipient_replay_window = ReplayWindow( windowsize, self._replay_window_changed ) try: with open(os.path.join(self.basedir, "sequence.json")) as f: sequence = json.load(f) except FileNotFoundError: self.sender_sequence_number = 0 self.recipient_replay_window.initialize_empty() self.replay_window_persisted = True else: self.sender_sequence_number = int(sequence["next-to-send"]) received = sequence["received"] if received == "unknown": # The replay window will stay uninitialized, which triggers # Echo recovery self.replay_window_persisted = False else: try: self.recipient_replay_window.initialize_from_persisted(received) except (ValueError, TypeError, KeyError): # Not being particularly careful about what could go wrong: If # someone tampers with the replay data, we're already in *big* # trouble, of which I fail to see how it would become worse # than a crash inside the application around "failure to # right-shift a string" or that like; at worst it'd result in # nonce reuse which tampering with the replay window file # already does. raise self.LoadError( "Persisted replay window state was not understood" ) self.replay_window_persisted = True # This is called internally whenever a new sequence number is taken or # crossed out from the window, and blocks a lot; B.1 mode mitigates that. # # Making it async and block in a threadpool would mitigate the blocking of # other messages, but the more visible effect of this will be that no # matter if sync or async, a reply will need to wait for a file sync # operation to conclude. def _store(self): tmphand, tmpnam = tempfile.mkstemp( dir=self.basedir, prefix=".sequence-", suffix=".json", text=True ) data = {"next-to-send": self.sequence_number_persisted} if not self.replay_window_persisted: data["received"] = "unknown" else: data["received"] = self.recipient_replay_window.persist() # Using io.open (instead os.fdopen) and binary / write with encode # rather than dumps as that works even while the interpreter is # shutting down. # # This can be relaxed when there is a defined shutdown sequence for # security contexts that's triggered from the general context shutdown # -- but right now, there isn't. 
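# For orientation, the file written here takes one of two shapes
# (illustrative values): {"next-to-send": 110, "received": {"index": 3,
# "bitfield": 5}} in regular operation, or {"next-to-send": 110,
# "received": "unknown"} while the replay window is unpersisted and is to
# be recovered through the Appendix B.1.2 Echo mechanism.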
with io.open(tmphand, "wb") as tmpfile: tmpfile.write(json.dumps(data).encode("utf8")) tmpfile.flush() os.fsync(tmpfile.fileno()) os.replace(tmpnam, os.path.join(self.basedir, "sequence.json")) def _replay_window_changed(self): if self.replay_window_persisted: # Just remove the sequence numbers once from the file self.replay_window_persisted = False self._store() def post_seqnoincrease(self): if self.sender_sequence_number > self.sequence_number_persisted: self.sequence_number_persisted += self.sequence_number_chunksize self.sequence_number_chunksize = min( self.sequence_number_chunksize * 2, self.sequence_number_chunksize_limit ) # FIXME: this blocks -- see https://github.com/chrysn/aiocoap/issues/178 self._store() # The = case would only happen if someone deliberately sets all # numbers to 1 to force persisting on every step assert ( self.sender_sequence_number <= self.sequence_number_persisted ), "Using a sequence number that has been persisted already" def _destroy(self): """Release the lock file, and ensure that the object has become unusable. If there is unpersisted state from B.1 operation, the actually used number and replay window get written back to the file to allow resumption without wasting digits or round-trips. """ # FIXME: Arrange for a more controlled shutdown through the credentials self.replay_window_persisted = True self.sequence_number_persisted = self.sender_sequence_number self._store() del self.sender_key del self.recipient_key os.unlink(self.lockfile.lock_file) self.lockfile.release() self.lockfile = None def __del__(self): if self.lockfile is not None: self._destroy() @classmethod def from_item(cls, init_data): """Overriding _Objectish's from_item because the parameter name for basedir is contextfile for historical reasons""" def constructor( basedir: Optional[str] = None, contextfile: Optional[str] = None ): if basedir is not None and contextfile is not None: raise credentials.CredentialsLoadError( "Conflicting arguments basedir and contextfile; use just basedir instead" ) if basedir is None and contextfile is None: raise credentials.CredentialsLoadError("Missing item 'basedir'") if contextfile is not None: warnings.warn( "Property contextfile was renamed to basedir in OSCORE credentials entries", DeprecationWarning, stacklevel=2, ) basedir = contextfile assert ( basedir is not None ) # This helps mypy which would otherwise not see that the above ensures this already return cls(basedir) return credentials._call_from_structureddata( constructor, cls.__name__, init_data ) def find_all_used_contextless_oscore_kid(self) -> set[bytes]: return set((self.recipient_id,)) class GroupContext(ContextWhereExternalAadIsGroup, BaseSecurityContext): is_signing = True responses_send_kid = True @abc.abstractproperty def private_key(self): """Private key used to sign outgoing messages. Contexts not designed to send messages may raise a RuntimeError here; that necessity may later go away if some more accurate class modelling is found.""" @abc.abstractproperty def recipient_public_key(self): """Public key used to verify incoming messages.
Contexts not designed to receive messages (because they'd have aspects for that) may raise a RuntimeError here; that necessity may later go away if some more accurate class modelling is found.""" class SimpleGroupContext(GroupContext, CanProtect, CanUnprotect, SecurityContextUtils): """A context for an OSCORE group This is a non-persistable version of a group context that does not support any group manager or rekeying; it is set up statically at startup. It is intended for experimentation and demos, but aims to be correct enough to be usable securely. """ # set during initialization (making all those attributes rather than # possibly properties as they might be in super) sender_id = None id_context = None # type: ignore private_key = None alg_aead = None hashfun = None # type: ignore alg_signature = None alg_group_enc = None alg_pairwise_key_agreement = None sender_auth_cred = None # type: ignore group_manager_cred = None # type: ignore cred_fmt = None # This is currently not evaluated, but any GM interaction will need to have this information available. group_manager_cred_fmt = None def __init__( self, alg_aead, hashfun, alg_signature, alg_group_enc, alg_pairwise_key_agreement, group_id, master_secret, master_salt, sender_id, private_key, sender_auth_cred, peers, group_manager_cred, cred_fmt=COSE_KCCS, group_manager_cred_fmt=COSE_KCCS, ): self.sender_id = sender_id self.id_context = group_id self.private_key = private_key self.alg_aead = alg_aead self.hashfun = hashfun self.alg_signature = alg_signature self.alg_group_enc = alg_group_enc self.alg_pairwise_key_agreement = alg_pairwise_key_agreement self.sender_auth_cred = sender_auth_cred self.group_manager_cred = group_manager_cred self.cred_fmt = cred_fmt self.group_manager_cred_fmt = group_manager_cred_fmt self.peers = peers.keys() self.recipient_public_keys = { k: self._parse_credential(v) for (k, v) in peers.items() } self.recipient_auth_creds = peers self.recipient_replay_windows = {} for k in self.peers: # no need to persist, the whole group is ephemeral w = ReplayWindow(32, lambda: None) w.initialize_empty() self.recipient_replay_windows[k] = w self.derive_keys(master_salt, master_secret) self.sender_sequence_number = 0 sender_public_key = self._parse_credential(sender_auth_cred) if ( self.alg_signature.public_from_private(self.private_key) != sender_public_key ): raise ValueError( "The key in the provided sender credential does not match the private key" ) def _parse_credential(self, credential: bytes): """Extract the public key (in the public_key format the respective AlgorithmCountersign needs) from credentials. This raises a ValueError if the credentials do not match the group's cred_fmt, or if the parameters do not match those configured in the group. This currently discards any information that is present in the credential that exceeds the key. (In a future version, this could return both the key and extracted other data, where that other data would be stored with the peer this is parsed from). 
""" if self.cred_fmt != COSE_KCCS: raise ValueError( "Credential parsing is currently only implemented for CCSs" ) assert self.alg_signature is not None return self.alg_signature.from_kccs(credential) def __repr__(self): return "<%s with group %r sender_id %r and %d peers>" % ( type(self).__name__, self.id_context.hex(), self.sender_id.hex(), len(self.peers), ) @property def recipient_public_key(self): raise RuntimeError( "Group context without key indication was used for verification" ) def derive_keys(self, master_salt, master_secret): # FIXME unify with parent? self.sender_key = self._kdf(master_salt, master_secret, self.sender_id, "Key") self.recipient_keys = { recipient_id: self._kdf(master_salt, master_secret, recipient_id, "Key") for recipient_id in self.peers } self.common_iv = self._kdf(master_salt, master_secret, b"", "IV") # but this one is new self.signature_encryption_key = self._kdf( master_salt, master_secret, b"", "SEKey" ) def post_seqnoincrease(self): """No-op because it's ephemeral""" def context_from_response(self, unprotected_bag) -> CanUnprotect: # sender ID *needs to be* here -- if this were a pairwise request, it # would not run through here try: sender_kid = unprotected_bag[COSE_KID] except KeyError: raise DecodeError("Group server failed to send own sender KID") if COSE_COUNTERSIGNATURE0 in unprotected_bag: return _GroupContextAspect(self, sender_kid) else: return _PairwiseContextAspect(self, sender_kid) def get_oscore_context_for(self, unprotected): if unprotected.get(COSE_KID_CONTEXT, None) != self.id_context: return None kid = unprotected.get(COSE_KID, None) if kid in self.peers: if COSE_COUNTERSIGNATURE0 in unprotected: return _GroupContextAspect(self, kid) elif self.recipient_public_keys[kid] is DETERMINISTIC_KEY: return _DeterministicUnprotectProtoAspect(self, kid) else: return _PairwiseContextAspect(self, kid) def find_all_used_contextless_oscore_kid(self) -> set[bytes]: # not conflicting: groups always send KID Context return set() # yet to stabilize... def pairwise_for(self, recipient_id): return _PairwiseContextAspect(self, recipient_id) def for_sending_deterministic_requests( self, deterministic_id, target_server: Optional[bytes] ): return _DeterministicProtectProtoAspect(self, deterministic_id, target_server) class _GroupContextAspect(GroupContext, CanUnprotect, SecurityContextUtils): """The concrete context this host has with a particular peer As all actual data is stored in the underlying groupcontext, this acts as an accessor to that object (which picks the right recipient key). This accessor is for receiving messages in group mode from a particular peer; it does not send (and turns into a pairwise context through context_for_response before it comes to that). 
""" def __init__(self, groupcontext: GroupContext, recipient_id: bytes) -> None: self.groupcontext = groupcontext self.recipient_id = recipient_id def __repr__(self): return "<%s inside %r with the peer %r>" % ( type(self).__name__, self.groupcontext, self.recipient_id.hex(), ) private_key = None # not inline because the equivalent lambda would not be recognized by mypy # (workaround for ) @property def id_context(self): return self.groupcontext.id_context @property def alg_aead(self): return self.groupcontext.alg_aead @property def alg_signature(self): return self.groupcontext.alg_signature @property def alg_group_enc(self): return self.groupcontext.alg_group_enc @property def alg_pairwise_key_agreement(self): return self.groupcontext.alg_pairwise_key_agreement @property def group_manager_cred(self): return self.groupcontext.group_manager_cred @property def common_iv(self): return self.groupcontext.common_iv @property def hashfun(self): return self.groupcontext.hashfun @property def signature_encryption_key(self): return self.groupcontext.signature_encryption_key @property def recipient_key(self): return self.groupcontext.recipient_keys[self.recipient_id] @property def recipient_public_key(self): return self.groupcontext.recipient_public_keys[self.recipient_id] @property def recipient_auth_cred(self): return self.groupcontext.recipient_auth_creds[self.recipient_id] @property def recipient_replay_window(self): return self.groupcontext.recipient_replay_windows[self.recipient_id] def context_for_response(self): return self.groupcontext.pairwise_for(self.recipient_id) @property def sender_auth_cred(self): raise RuntimeError( "Could relay the sender auth credential from the group context, but it shouldn't matter here" ) class _PairwiseContextAspect( GroupContext, CanProtect, CanUnprotect, SecurityContextUtils ): is_signing = False def __init__(self, groupcontext, recipient_id): self.groupcontext = groupcontext self.recipient_id = recipient_id shared_secret = self.alg_pairwise_key_agreement.staticstatic( self.groupcontext.private_key, self.groupcontext.recipient_public_keys[recipient_id], ) self.sender_key = self._kdf( self.groupcontext.sender_key, ( self.groupcontext.sender_auth_cred + self.groupcontext.recipient_auth_creds[recipient_id] + shared_secret ), self.groupcontext.sender_id, "Key", ) self.recipient_key = self._kdf( self.groupcontext.recipient_keys[recipient_id], ( self.groupcontext.recipient_auth_creds[recipient_id] + self.groupcontext.sender_auth_cred + shared_secret ), self.recipient_id, "Key", ) def __repr__(self): return "<%s based on %r with the peer %r>" % ( type(self).__name__, self.groupcontext, self.recipient_id.hex(), ) # FIXME: actually, only to be sent in requests # not inline because the equivalent lambda would not be recognized by mypy # (workaround for ) @property def id_context(self): return self.groupcontext.id_context @property def alg_aead(self): return self.groupcontext.alg_aead @property def hashfun(self): return self.groupcontext.hashfun @property def alg_signature(self): return self.groupcontext.alg_signature @property def alg_group_enc(self): return self.groupcontext.alg_group_enc @property def alg_pairwise_key_agreement(self): return self.groupcontext.alg_pairwise_key_agreement @property def group_manager_cred(self): return self.groupcontext.group_manager_cred @property def common_iv(self): return self.groupcontext.common_iv @property def sender_id(self): return self.groupcontext.sender_id @property def recipient_auth_cred(self): return 
self.groupcontext.recipient_auth_creds[self.recipient_id] @property def sender_auth_cred(self): return self.groupcontext.sender_auth_cred @property def recipient_replay_window(self): return self.groupcontext.recipient_replay_windows[self.recipient_id] # Set at initialization recipient_key = None sender_key = None @property def sender_sequence_number(self): return self.groupcontext.sender_sequence_number @sender_sequence_number.setter def sender_sequence_number(self, new): self.groupcontext.sender_sequence_number = new def post_seqnoincrease(self): self.groupcontext.post_seqnoincrease() # same here -- not needed because not signing private_key = property(post_seqnoincrease) recipient_public_key = property(post_seqnoincrease) def context_from_response(self, unprotected_bag) -> CanUnprotect: if unprotected_bag.get(COSE_KID, self.recipient_id) != self.recipient_id: raise DecodeError( "Response coming from a different server than requested, not attempting to decrypt" ) if COSE_COUNTERSIGNATURE0 in unprotected_bag: # It'd be an odd thing to do, but it's source verified, so the # server hopefully has reasons to make this readable to other group # members. return _GroupContextAspect(self.groupcontext, self.recipient_id) else: return self class _DeterministicProtectProtoAspect( ContextWhereExternalAadIsGroup, CanProtect, SecurityContextUtils ): """This implements the sending side of Deterministic Requests. While similar to a _PairwiseContextAspect, it only derives the key at protection time, as the plain text is hashed into the key.""" deterministic_hashfun = hashes.SHA256() def __init__(self, groupcontext, sender_id, target_server: Optional[bytes]): self.groupcontext = groupcontext self.sender_id = sender_id self.target_server = target_server def __repr__(self): return "<%s based on %r with the sender ID %r%s>" % ( type(self).__name__, self.groupcontext, self.sender_id.hex(), " limited to responses from %s" % self.target_server if self.target_server is not None else "", ) def new_sequence_number(self): return 0 def post_seqnoincrease(self): pass def context_from_response(self, unprotected_bag): if self.target_server is None: if COSE_KID not in unprotected_bag: raise DecodeError( "Server did not send a KID and no particular one was addressed" ) else: if unprotected_bag.get(COSE_KID, self.target_server) != self.target_server: raise DecodeError( "Response coming from a different server than requested, not attempting to decrypt" ) if COSE_COUNTERSIGNATURE0 not in unprotected_bag: # Could just as well pass and later barf when the group context doesn't find a signature raise DecodeError( "Response to deterministic request came from unsecure pairwise context" ) return _GroupContextAspect( self.groupcontext, unprotected_bag.get(COSE_KID, self.target_server) ) def _get_sender_key(self, outer_message, aad, plaintext, request_id): if outer_message.code.is_response(): raise RuntimeError("Deterministic contexts shouldn't protect responses") basekey = self.groupcontext.recipient_keys[self.sender_id] h = hashes.Hash(self.deterministic_hashfun) h.update(basekey) h.update(aad) h.update(plaintext) request_hash = h.finalize() outer_message.opt.request_hash = request_hash outer_message.code = FETCH # By this time, the AADs have all been calculated already; setting this # for the benefit of the response parsing later request_id.request_hash = request_hash # FIXME I don't think this ever comes to bear but want to be sure # before removing this line (this should only be client-side) request_id.can_reuse_nonce = False
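# Recapping the steps above for orientation (no additional behaviour):
# the per-request deterministic sender key is derived as
#   request_hash = SHA-256(basekey || external_aad || plaintext)
#   key = HKDF(salt=basekey, ikm=request_hash, info=[sender_id, ..., "Key", ...])
# so any group member holding the deterministic client's recipient key
# can recompute (and thus also decrypt) the request.
#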
# FIXME: we're still sending a h'00' PIV. Not wrong, just a wasted byte.
        return self._kdf(basekey, request_hash, self.sender_id, "Key")

    # details needed for various operations, especially eAAD generation
    # not inline because the equivalent lambda would not be recognized by mypy
    # (workaround for )
    @property
    def alg_aead(self):
        return self.groupcontext.alg_aead

    @property
    def hashfun(self):
        return self.groupcontext.hashfun

    @property
    def common_iv(self):
        return self.groupcontext.common_iv

    @property
    def id_context(self):
        return self.groupcontext.id_context

    @property
    def alg_signature(self):
        return self.groupcontext.alg_signature


class _DeterministicUnprotectProtoAspect(
    ContextWhereExternalAadIsGroup, CanUnprotect, SecurityContextUtils
):
    """This implements the receiving side of Deterministic Requests.

    While similar to a _PairwiseContextAspect, it only derives the key at
    unprotection time, based on information given as Request-Hash."""

    # Unless None, this is the value by which the running process recognizes
    # that the second phase of a B.1.2 replay window recovery Echo option comes
    # from the current process, and thus its sequence number is fresh
    echo_recovery = None

    deterministic_hashfun = hashes.SHA256()

    class ZeroIsAlwaysValid:
        """Special-purpose replay window that accepts 0 indefinitely"""

        def is_initialized(self):
            return True

        def is_valid(self, number):
            # No particular reason to be lax here
            return number == 0

        def strike_out(self, number):
            # FIXME: I'd rather indicate here that it's a potential replay, have the
            # request_id.can_reuse_nonce = False
            # set here rather than in _post_decrypt_checks, and thus also get
            # the check for whether it's a safe method
            pass

        def persist(self):
            pass

    def __init__(self, groupcontext, recipient_id):
        self.groupcontext = groupcontext
        self.recipient_id = recipient_id

        self.recipient_replay_window = self.ZeroIsAlwaysValid()

    def __repr__(self):
        return "<%s based on %r with the recipient ID %r>" % (
            type(self).__name__,
            self.groupcontext,
            self.recipient_id.hex(),
        )

    def context_for_response(self):
        return self.groupcontext

    def _get_recipient_key(self, protected_message):
        return self._kdf(
            self.groupcontext.recipient_keys[self.recipient_id],
            protected_message.opt.request_hash,
            self.recipient_id,
            "Key",
        )

    def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):
        if plaintext[0] not in (GET, FETCH):  # FIXME: "is safe"
            # FIXME: accept but return inner Unauthorized.
(Raising Unauthorized # here would just create an unprotected Unauthorized, which is not # what's spec'd for here) raise ProtectionInvalid("Request was not safe") basekey = self.groupcontext.recipient_keys[self.recipient_id] h = hashes.Hash(self.deterministic_hashfun) h.update(basekey) h.update(aad) h.update(plaintext) request_hash = h.finalize() if request_hash != protected_message.opt.request_hash: raise ProtectionInvalid( "Client's hash of the plaintext diverges from the actual request hash" ) # This is intended for the protection of the response, and the # later use in signature in the unprotect function is not happening # here anyway, neither is the later use for Echo requests request_id.request_hash = request_hash request_id.can_reuse_nonce = False # details needed for various operations, especially eAAD generation # not inline because the equivalent lambda would not be recognized by mypy # (workaround for ) @property def alg_aead(self): return self.groupcontext.alg_aead @property def hashfun(self): return self.groupcontext.hashfun @property def common_iv(self): return self.groupcontext.common_iv @property def id_context(self): return self.groupcontext.id_context @property def alg_signature(self): return self.groupcontext.alg_signature def verify_start(message): """Extract the unprotected COSE options from a message for the verifier to then pick a security context to actually verify the message. (Future versions may also report fields from both unprotected and protected, if the protected bag is ever used with OSCORE.). Call this only requests; for responses, you'll have to know the security context anyway, and there is usually no information to be gained.""" _, _, unprotected, _ = CanUnprotect._extract_encrypted0(message) return unprotected _getattr__ = deprecation_getattr( { "COSE_COUNTERSINGATURE0": "COSE_COUNTERSIGNATURE0", "Algorithm": "AeadAlgorithm", }, globals(), ) aiocoap-0.4.12/aiocoap/oscore_sitewrapper.py000066400000000000000000000255001472205122200211140ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module assists in creating OSCORE servers by proving a wrapper around a :class:aiocoap.resource.Site. It enforces no access control, but just indicates to the resources whether a client is authenticated by setting the request's remote property adaequately. """ import logging from typing import Optional import uuid import cbor2 import lakers import aiocoap from aiocoap import interfaces from aiocoap import oscore, error import aiocoap.pipe from .numbers.codes import FETCH, POST from .numbers.optionnumbers import OptionNumber from . import edhoc from aiocoap.transports.oscore import OSCOREAddress class OscoreSiteWrapper(interfaces.Resource): def __init__(self, inner_site, server_credentials): self.log = logging.getLogger("coap-server.oscore-site") self._inner_site = inner_site self.server_credentials = server_credentials async def render(self, request): raise RuntimeError( "OscoreSiteWrapper can only be used through the render_to_pipe interface" ) async def needs_blockwise_assembly(self, request): raise RuntimeError( "OscoreSiteWrapper can only be used through the render_to_pipe interface" ) # FIXME: should there be a get_resources_as_linkheader that just forwards # all the others and indicates ;osc everywhere? 
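    # Usage sketch (hedged; SensorResource and the resource name are
    # placeholders, not part of this module): the wrapper is typically applied
    # once around a fully built site, together with the CredentialsMap holding
    # the contexts the server should accept.
    #
    #     site = aiocoap.resource.Site()
    #     site.add_resource(["sensor"], SensorResource())
    #     protected_site = OscoreSiteWrapper(site, server_credentials)
    #     ctx = await aiocoap.Context.create_server_context(protected_site)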
    async def render_to_pipe(self, pipe):
        request = pipe.request

        if request.opt.uri_path == (".well-known", "edhoc"):
            # We'll have to take that explicitly, otherwise we'd need to rely
            # on a resource to be prepared by the user in the site with a
            # cyclical reference closed after site construction
            await self._render_edhoc_to_pipe(pipe)
            return

        try:
            unprotected = oscore.verify_start(request)
        except oscore.NotAProtectedMessage:
            # ie. if no object_security present
            await self._inner_site.render_to_pipe(pipe)
            return

        if request.code not in (FETCH, POST):
            raise error.MethodNotAllowed

        try:
            sc = self.server_credentials.find_oscore(unprotected)
        except KeyError:
            if request.mtype == aiocoap.CON:
                raise error.Unauthorized("Security context not found")
            else:
                return

        try:
            unprotected, seqno = sc.unprotect(request)
        except error.RenderableError as e:
            # Note that this is flying out of the unprotection (ie. the
            # security context), which is trusted to not leak unintended
            # information in unencrypted responses. (By comparison, a
            # renderable exception flying out of a user
            # render_to_pipe could only be rendered to a
            # protected message, and we'd need to be wary of rendering errors
            # during to_message as well).
            #
            # Note that this clause is not a no-op: it protects the 4.01 Echo
            # recovery exception (which is also a ReplayError) from being
            # treated as such.
            raise e
        # The other errors could be ported there but would need some better NoResponse handling.
        except oscore.ReplayError:
            if request.mtype == aiocoap.CON:
                pipe.add_response(
                    aiocoap.Message(
                        code=aiocoap.UNAUTHORIZED, max_age=0, payload=b"Replay detected"
                    ),
                    is_last=True,
                )
            return
        except oscore.DecodeError:
            if request.mtype == aiocoap.CON:
                raise error.BadOption("Failed to decode COSE")
            else:
                return
        except oscore.ProtectionInvalid:
            if request.mtype == aiocoap.CON:
                raise error.BadRequest("Decryption failed")
            else:
                return

        unprotected.remote = OSCOREAddress(sc, request.remote)

        self.log.debug("Request %r was unprotected into %r", request, unprotected)

        sc = sc.context_for_response()

        inner_pipe = aiocoap.pipe.IterablePipe(unprotected)
        pr_that_can_take_errors = aiocoap.pipe.error_to_message(inner_pipe, self.log)
        # FIXME: do not create a task but run this in here (can this become a
        # feature of the aiterable PR?)
        aiocoap.pipe.run_driving_pipe(
            pr_that_can_take_errors,
            self._inner_site.render_to_pipe(inner_pipe),
            name="OSCORE response rendering for %r" % unprotected,
        )

        async for event in inner_pipe:
            if event.exception is not None:
                # These are expected to be rare in handlers
                #
                # FIXME should we try to render them? (See also
                # run_driving_pipe). Just raising them
                # would definitely be bad, as they might be renderable and
                # then would hit the outer message.
                self.log.warning(
                    "Turning error raised from renderer into nondescript protected error %r",
                    event.exception,
                )
                message = aiocoap.Message(code=aiocoap.INTERNAL_SERVER_ERROR)
                is_last = True
            else:
                message = event.message
                is_last = event.is_last

            # FIXME: Around several places in the use of pipe (and
            # now even here), non-final events are hard-coded as observations.
            # This should shift toward the source telling, or the stream being
            # annotated as "eventually consistent resource states".
if not is_last: message.opt.observe = 0 protected_response, _ = sc.protect(message, seqno) if message.opt.observe is not None: # FIXME: should be done in protect, or by something else that # generally handles obs numbers better (sending the # oscore-reconstructed number is nice because it's consistent # with a proxy that doesn't want to keep a counter when it # knows it's OSCORE already), but starting this per obs with # zero (unless it was done on that token recently) would be # most efficient protected_response.opt.observe = sc.sender_sequence_number & 0xFFFFFFFF self.log.debug( "Response %r was encrypted into %r", message, protected_response ) pipe.add_response(protected_response, is_last=is_last) if event.is_last: break # The created task gets cancelled here because the __aiter__ result is # dropped and thus all interest in the inner_pipe goes away async def _render_edhoc_to_pipe(self, pipe): self.log.debug("Processing request as EDHOC message 1") # Conveniently we don't have to care for observation, and thus can treat the rendering to a pipeline as just a rendering request = pipe.request if request.code is not POST: raise error.MethodNotAllowed if any( o.number.is_critical() for o in request.opt.option_list() if o.number not in (OptionNumber.URI_PATH, OptionNumber.URI_HOST) ): # FIXME: This should be done by every resource handler (see # https://github.com/chrysn/aiocoap/issues/268) -- this is crude # but better than doing nothing (and because we're rendering to a # pipe, chances are upcoming mitigation might not catch this) raise error.BadOption if len(request.payload) == 0: raise error.BadRequest if request.payload[0:1] != cbor2.dumps(True): self.log.error( "Receivign message 3 as a standalone message is not supported yet" ) # FIXME: Add support for it raise error.BadRequest origin = request.get_request_uri(local_is_server=True).removesuffix( "/.well-known/edhoc" ) own_credential_object = self._get_edhoc_identity(origin) if own_credential_object is None: self.log.error( "Peer attempted EDHOC even though no EDHOC credentials are configured for %s", origin, ) raise error.NotFound # FIXME lakers: Shouldn't have to commit this early, might still look at EAD1 assert isinstance(own_credential_object.own_cred, dict) and list( own_credential_object.own_cred.keys() ) == [14], ( "So far can only process CCS style own credentials a la {14: ...}, own_cred = %r" % own_credential_object.own_cred ) responder = lakers.EdhocResponder( r=own_credential_object.own_key.d, cred_r=cbor2.dumps(own_credential_object.own_cred[14], canonical=True), ) c_i, ead_1 = responder.process_message_1(request.payload[1:]) if ead_1 is not None: self.log.error("Aborting EDHOC: EAD1 present") raise error.BadRequest used_own_identifiers = ( self.server_credentials.find_all_used_contextless_oscore_kid() ) # can't have c_r==c_i used_own_identifiers.add(c_i) # FIXME try larger ones too, but currently they wouldn't work in Lakers candidates = [cbor2.dumps(i) for i in range(-24, 24)] candidates = [c for c in candidates if c not in used_own_identifiers] if not candidates: # FIXME: LRU or timeout the contexts raise error.InternalServerError("Too many contexts") c_r = candidates[0] message_2 = responder.prepare_message_2( own_credential_object.own_cred_style.as_lakers(), c_r, None ) credentials_entry = edhoc.EdhocResponderContext( responder, c_i, c_r, self.server_credentials, self.log, ) # FIXME we shouldn't need arbitrary keys self.server_credentials[":" + uuid.uuid4().hex] = credentials_entry pipe.add_response( 
aiocoap.Message(code=aiocoap.CHANGED, payload=message_2), is_last=True ) def _get_edhoc_identity(self, origin: str) -> Optional[edhoc.EdhocCredentials]: """With lakers-python 0.3.1, we can effectively only have one identity per host; expect this to change once we gain access to EAD1 (plus more when there are more methods or cipher suites) """ # That this works is a flaw of the credentials format by itself candidate = self.server_credentials.get(origin + "/*") if not isinstance(candidate, edhoc.EdhocCredentials): # FIXME not really a pair needed is it? return None return candidate aiocoap-0.4.12/aiocoap/pipe.py000066400000000000000000000306361472205122200161400ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import asyncio from collections import namedtuple import functools from . import error from .numbers import INTERNAL_SERVER_ERROR class Pipe: """Low-level meeting point between a request and a any responses that come back on it. A single request message is placed in the Pipe at creation time. Any responses, as well as any exception happening in the course of processing, are passed back to the requester along the Pipe. A response can carry an indication of whether it is final; an exception always is. This object is used both on the client side (where the Context on behalf of the application creates a Pipe and passes it to the network transports that send the request and fill in any responses) and on the server side (where the Context creates one for an incoming request and eventually lets the server implementation populate it with responses). This currently follows a callback dispatch style. (It may be developed into something where only awaiting a response drives the proces, though). Currently, the requester sets up the object, connects callbacks, and then passes the Pipe on to whatever creates the response. The creator of responses is notified by the Pipe of a loss of interest in a response when there are no more callback handlers registered by registering an on_interest_end callback. As the response callbacks need to be already in place when the Pipe is passed on to the responder, the absence event callbacks is signalled by callign the callback immediately on registration. To accurately model "loss of interest", it is important to use the two-phase setup of first registering actual callbacks and then producing events and/or placing on_interest_end callbacks; this is not clearly expressed in type or state yet. (One possibility would be for the Pipe to carry a preparation boolean, and which prohibits event sending during preparation and is_interest=True callback creation afterwards). This was previously named PlumbingRequest. **Stability** Sites and resources implemented by provinding a :meth:`~aiocoap.interfaces.Resource.render_to_pipe` method can stably use the :meth:`add_response` method of a Pipe (or something that quacks like it). They should not rely on :meth:`add_exception` but rather just raise the exception, and neither register :meth:`on_event` handlers (being the sole producer of events) nor hook to :meth:`on_interest_end` (instead, they can use finally clauses or async context managers to handle any cleanup when the cancellation of the render task indicates the peer's loss of interest). 
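    A sketch of that stable pattern (hedged; the resource class and payloads
    are illustrative only, not part of this module)::

        class Clock(aiocoap.resource.Resource):
            async def render_to_pipe(self, pipe):
                # A non-final response, as in an observation
                pipe.add_response(
                    aiocoap.Message(code=aiocoap.CONTENT, payload=b"12:00"),
                    is_last=False,
                )
                # The final response; errors would instead just be raised
                pipe.add_response(
                    aiocoap.Message(code=aiocoap.CONTENT, payload=b"12:01"),
                    is_last=True,
                )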
""" Event = namedtuple("Event", ("message", "exception", "is_last")) # called by the initiator of the request def __init__(self, request, log): self.request = request self.log = log self._event_callbacks = [] """list[(callback, is_interest)], or None during event processing, or False when there were no more event callbacks and an the on_interest_end callbacks have already been called""" def __repr__(self): return "<%s at %#x around %r with %r callbacks (thereof %r interests)>" % ( type(self).__name__, id(self), self.request, len(self._event_callbacks) if self._event_callbacks else self._event_callbacks, sum(1 for (e, is_interest) in self._event_callbacks if is_interest) if self._event_callbacks else self._event_callbacks, ) def _any_interest(self): return any(is_interest for (cb, is_interest) in self._event_callbacks) def poke(self): """Ask the responder for a life sign. It is up to the responder to ignore this (eg. because the responder is the library/application and can't be just gone), to issue a generic transport-dependent 'ping' to see whether the connection is still alive, or to retransmit the request if it is an observation over an unreliable channel. In any case, no status is reported directly to the poke, but if whatever the responder does fails, it will send an appropriate error message as a response.""" raise NotImplementedError() def on_event(self, callback, is_interest=True): """Call callback on any event. The callback must return True to be called again after an event. Callbacks must not produce new events or deregister unrelated event handlers. If is_interest=False, the callback will not be counted toward the active callbacks, and will receive a (None, None, is_last=True) event eventually. To unregister the handler, call the returned closure; this can trigger on_interest_end callbacks. """ self._event_callbacks.append((callback, is_interest)) return functools.partial(self._unregister_on_event, callback) def _unregister_on_event(self, callback): if self._event_callbacks is False: # They wouldn't be called any more so they're already dropped.a # It's OK that the caller cleans up after itself: Sure it could # register an on_interest_end, but that's really not warranted if # all it wants to know is whether it'll have to execute cleanup # when it's shutting down or not. 
return self._event_callbacks = [ (cb, i) for (cb, i) in self._event_callbacks if callback is not cb ] if not self._any_interest(): self._end() def on_interest_end(self, callback): """Register a callback that will be called exactly once -- either right now if there is not even a current indicated interest, or at a last event, or when no more interests are present""" if self._event_callbacks is False: # Happens, for example, when a proxy receives multiple requests on a single token self.log.warning( "on_interest_end callback %r added after %r has already ended", callback, self, ) callback() return if self._any_interest(): self._event_callbacks.append( ( lambda e: ((callback(), False) if e.is_last else (None, True))[1], False, ) ) else: callback() def _end(self): cbs = self._event_callbacks self._event_callbacks = False tombstone = self.Event(None, None, True) [cb(tombstone) for (cb, _) in cbs] # called by the responding side def _add_event(self, event): if self._event_callbacks is False: # Happens, for example, when a proxy receives multiple requests on a single token self.log.warning( "Response %r added after %r has already ended", event, self ) return for cb, is_interest in self._event_callbacks[:]: keep_calling = cb(event) if not keep_calling: if self._event_callbacks is False: # All interest was just lost during the callback return self._event_callbacks.remove((cb, is_interest)) if not self._any_interest(): self._end() def add_response(self, response, is_last=False): self._add_event(self.Event(response, None, is_last)) def add_exception(self, exception): self._add_event(self.Event(None, exception, True)) def run_driving_pipe(pipe, coroutine, name=None): """Create a task from a coroutine where the end of the coroutine produces a terminal event on the pipe, and lack of interest in the pipe cancels the task. The coroutine will typically produce output into the pipe; that connection is set up by the caller like as in ``run_driving_pipe(pipe, render_to(pipe))``. The create task is not returned, as the only sensible operation on it would be cancellation and that's already set up from the pipe. """ async def wrapped(): try: await coroutine except Exception as e: pipe.add_exception(e) # Not doing anything special about cancellation: it indicates the # peer's loss of interest, so there's no use in sending anythign out to # someone not listening any more task = asyncio.create_task( wrapped(), name=name, ) pipe.on_interest_end(task.cancel) def error_to_message(old_pr, log): """Given a pipe set up by the requester, create a new pipe to pass on to a responder. 
Any exceptions produced by the responder will be turned into terminal responses on the original pipe, and loss of interest is forwarded.""" from .message import Message next_pr = Pipe(old_pr.request, log) def on_event(event): if event.message is not None: old_pr.add_response(event.message, event.is_last) return not event.is_last e = event.exception if isinstance(e, error.RenderableError): # the repr() here is quite imporant for garbage collection log.info( "Render request raised a renderable error (%s), responding accordingly.", repr(e), ) try: msg = e.to_message() if msg is None: # This deserves a separate check because the ABC checks # that should ensure that the default to_message method is # never used in concrete classes fails due to the metaclass # conflict between ABC and Exceptions raise ValueError( "Exception to_message failed to produce a message on %r" % e ) except Exception as e2: log.error( "Rendering the renderable exception failed: %r", e2, exc_info=e2 ) msg = Message(code=INTERNAL_SERVER_ERROR) old_pr.add_response(msg, is_last=True) else: log.error( "An exception occurred while rendering a resource: %r", e, exc_info=e ) old_pr.add_response(Message(code=INTERNAL_SERVER_ERROR), is_last=True) return False remove_interest = next_pr.on_event(on_event) old_pr.on_interest_end(remove_interest) return next_pr class IterablePipe: """A stand-in for a Pipe that the requesting party can use instead. It should behave just like a Pipe to the responding party, but the caller does not register on_event handlers and instead iterates asynchronously over the events. Note that the PR can be aitered over only once, and does not support any additional hook settings once asynchronous iteration is started; this is consistent with the usage pattern of pipes. """ def __init__(self, request): self.request = request self.__on_interest_end = [] # FIXME: This is unbounded -- pipes should gain support for # backpressure. self.__queue = asyncio.Queue() def on_interest_end(self, callback): try: self.__on_interest_end.append(callback) except AttributeError: raise RuntimeError( "Attempted to declare interest in the end of a IterablePipe on which iteration already started" ) from None def __aiter__(self): i = self.Iterator(self.__queue, self.__on_interest_end) del self.__on_interest_end return i def _add_event(self, e): self.__queue.put_nowait(e) def add_response(self, response, is_last=False): self._add_event(Pipe.Event(response, None, is_last)) def add_exception(self, exception): self._add_event(Pipe.Event(None, exception, True)) class Iterator: def __init__(self, queue, on_interest_end): self.__queue = queue self.__on_interest_end = on_interest_end async def __anext__(self): return await self.__queue.get() def __del__(self): # This is pretty reliable as the iterator is only created and # referenced in the desugaring of the `async for`. for c in self.__on_interest_end: c() aiocoap-0.4.12/aiocoap/protocol.py000066400000000000000000001423061472205122200170420ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module contains the classes that are responsible for keeping track of messages: * :class:`Context` roughly represents the CoAP endpoint (basically a UDP socket) -- something that can send requests and possibly can answer incoming requests. Incoming requests are processed in tasks created by the context. 
* a :class:`Request` gets generated whenever a request gets sent to keep track of the response Logging ~~~~~~~ Several constructors of the Context accept a logger name; these names go into the construction of a Python logger. Log events will be emitted to these on different levels, with "warning" and above being a practical default for things that should may warrant reviewing by an operator: * DEBUG is used for things that occur even under perfect conditions. * INFO is for things that are well expected, but might be interesting during testing a network of nodes and not just when debugging the library. (This includes timeouts, retransmissions, and pings.) * WARNING is for everything that indicates a malbehaved peer. These don't *necessarily* indicate a client bug, though: Things like requesting a nonexistent block can just as well happen when a resource's content has changed between blocks. The library will not go out of its way to determine whether there is a plausible explanation for the odd behavior, and will report something as a warning in case of doubt. * ERROR is used when something clearly went wrong. This includes irregular connection terminations and resource handler errors (which are demoted to error responses), and can often contain a backtrace. """ import asyncio import weakref import time from typing import Optional, List from . import defaults from .credentials import CredentialsMap from .message import Message from .messagemanager import MessageManager from .tokenmanager import TokenManager from .pipe import Pipe, run_driving_pipe, error_to_message from . import interfaces from . import error from .numbers import INTERNAL_SERVER_ERROR, NOT_FOUND, CONTINUE, SHUTDOWN_TIMEOUT import warnings import logging class Context(interfaces.RequestProvider): """Applications' entry point to the network A :class:`.Context` coordinates one or more network :mod:`.transports` implementations and dispatches data between them and the application. The application can start requests using the message dispatch methods, and set a :class:`resources.Site` that will answer requests directed to the application as a server. On the library-internals side, it is the prime implementation of the :class:`interfaces.RequestProvider` interface, creates :class:`Request` and :class:`Response` classes on demand, and decides which transport implementations to start and which are to handle which messages. **Context creation and destruction** The following functions are provided for creating and stopping a context: .. note:: A typical application should only ever create one context, even (or especially when) it acts both as a server and as a client (in which case a server context should be created). A context that is not used any more must be shut down using :meth:`.shutdown()`, but typical applications will not need to because they use the context for the full process lifetime. .. automethod:: create_client_context .. automethod:: create_server_context .. automethod:: shutdown **Dispatching messages** CoAP requests can be sent using the following functions: .. automethod:: request If more control is needed, you can create a :class:`Request` yourself and pass the context to it. **Other methods and properties** The remaining methods and properties are to be considered unstable even when the project reaches a stable version number; please file a feature request for stabilization if you want to reliably access any of them. 
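    **Example** A minimal client exchange, sketched for orientation (the URI
    is a placeholder)::

        import asyncio
        import aiocoap

        async def main():
            ctx = await aiocoap.Context.create_client_context()
            msg = aiocoap.Message(code=aiocoap.GET, uri="coap://host.example/resource")
            response = await ctx.request(msg).response
            print(response.code, response.payload)
            await ctx.shutdown()

        asyncio.run(main())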
""" def __init__( self, loop=None, serversite=None, loggername="coap", client_credentials=None, server_credentials=None, ): self.log = logging.getLogger(loggername) self.loop = loop or asyncio.get_event_loop() self.serversite = serversite self.request_interfaces = [] self.client_credentials = client_credentials or CredentialsMap() self.server_credentials = server_credentials or CredentialsMap() # # convenience methods for class instanciation # async def _append_tokenmanaged_messagemanaged_transport( self, message_interface_constructor ): tman = TokenManager(self) mman = MessageManager(tman) transport = await message_interface_constructor(mman) mman.message_interface = transport tman.token_interface = mman self.request_interfaces.append(tman) async def _append_tokenmanaged_transport(self, token_interface_constructor): tman = TokenManager(self) transport = await token_interface_constructor(tman) tman.token_interface = transport self.request_interfaces.append(tman) @classmethod async def create_client_context( cls, *, loggername="coap", loop=None, transports: Optional[List[str]] = None ): """Create a context bound to all addresses on a random listening port. This is the easiest way to get a context suitable for sending client requests. :meta private: (not actually private, just hiding from automodule due to being grouped with the important functions) """ if loop is None: loop = asyncio.get_event_loop() self = cls(loop=loop, serversite=None, loggername=loggername) selected_transports = transports or defaults.get_default_clienttransports( loop=loop ) # FIXME make defaults overridable (postponed until they become configurable too) for transportname in selected_transports: if transportname == "udp6": from .transports.udp6 import MessageInterfaceUDP6 await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceUDP6.create_client_transport_endpoint( mman, log=self.log, loop=loop ) ) elif transportname == "simple6": from .transports.simple6 import MessageInterfaceSimple6 await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceSimple6.create_client_transport_endpoint( mman, log=self.log, loop=loop ) ) elif transportname == "tinydtls": from .transports.tinydtls import MessageInterfaceTinyDTLS await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceTinyDTLS.create_client_transport_endpoint( mman, log=self.log, loop=loop ) ) elif transportname == "tcpclient": from .transports.tcp import TCPClient await self._append_tokenmanaged_transport( lambda tman: TCPClient.create_client_transport(tman, self.log, loop) ) elif transportname == "tlsclient": from .transports.tls import TLSClient await self._append_tokenmanaged_transport( lambda tman: TLSClient.create_client_transport( tman, self.log, loop, self.client_credentials ) ) elif transportname == "ws": from .transports.ws import WSPool await self._append_tokenmanaged_transport( lambda tman: WSPool.create_transport( tman, self.log, loop, client_credentials=self.client_credentials ) ) elif transportname == "oscore": from .transports.oscore import TransportOSCORE oscoretransport = TransportOSCORE(self, self) self.request_interfaces.append(oscoretransport) else: raise RuntimeError( "Transport %r not know for client context creation" % transportname ) return self @classmethod async def create_server_context( cls, site, bind=None, *, loggername="coap-server", loop=None, _ssl_context=None, multicast=[], server_credentials=None, transports: Optional[List[str]] = None, ): """Create 
a context, bound to all addresses on the CoAP port (unless otherwise specified in the ``bind`` argument). This is the easiest way to get a context suitable both for sending client and accepting server requests. The ``bind`` argument, if given, needs to be a 2-tuple of IP address string and port number, where the port number can be None to use the default port. If ``multicast`` is given, it needs to be a list of (multicast address, interface name) tuples, which will all be joined. (The IPv4 style of selecting the interface by a local address is not supported; users may want to use the netifaces package to arrive at an interface name for an address). As a shortcut, the list may also contain interface names alone. Those will be joined for the 'all CoAP nodes' groups of IPv4 and IPv6 (with scopes 2 and 5) as well as the respective 'all nodes' groups in IPv6. Under some circumstances you may already need a context to pass into the site for creation; this is typically the case for servers that trigger requests on their own. For those cases, it is usually easiest to pass None in as a site, and set the fully constructed site later by assigning to the ``serversite`` attribute. :meta private: (not actually private, just hiding from automodule due to being grouped with the important functions) """ if loop is None: loop = asyncio.get_event_loop() self = cls( loop=loop, serversite=site, loggername=loggername, server_credentials=server_credentials, ) multicast_done = not multicast selected_transports = transports or defaults.get_default_servertransports( loop=loop ) for transportname in selected_transports: if transportname == "udp6": from .transports.udp6 import MessageInterfaceUDP6 await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceUDP6.create_server_transport_endpoint( mman, log=self.log, loop=loop, bind=bind, multicast=multicast ) ) multicast_done = True # FIXME this is duplicated from the client version, as those are client-only anyway elif transportname == "simple6": from .transports.simple6 import MessageInterfaceSimple6 await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceSimple6.create_client_transport_endpoint( mman, log=self.log, loop=loop ) ) elif transportname == "tinydtls": from .transports.tinydtls import MessageInterfaceTinyDTLS await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceTinyDTLS.create_client_transport_endpoint( mman, log=self.log, loop=loop ) ) # FIXME end duplication elif transportname == "tinydtls_server": from .transports.tinydtls_server import MessageInterfaceTinyDTLSServer await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceTinyDTLSServer.create_server( bind, mman, log=self.log, loop=loop, server_credentials=self.server_credentials, ) ) elif transportname == "simplesocketserver": from .transports.simplesocketserver import MessageInterfaceSimpleServer await self._append_tokenmanaged_messagemanaged_transport( lambda mman: MessageInterfaceSimpleServer.create_server( bind, mman, log=self.log, loop=loop ) ) elif transportname == "tcpserver": from .transports.tcp import TCPServer await self._append_tokenmanaged_transport( lambda tman: TCPServer.create_server(bind, tman, self.log, loop) ) elif transportname == "tcpclient": from .transports.tcp import TCPClient await self._append_tokenmanaged_transport( lambda tman: TCPClient.create_client_transport(tman, self.log, loop) ) elif transportname == "tlsserver": if _ssl_context is not None: from 
.transports.tls import TLSServer await self._append_tokenmanaged_transport( lambda tman: TLSServer.create_server( bind, tman, self.log, loop, _ssl_context ) ) elif transportname == "tlsclient": from .transports.tls import TLSClient await self._append_tokenmanaged_transport( lambda tman: TLSClient.create_client_transport( tman, self.log, loop, self.client_credentials ) ) elif transportname == "ws": from .transports.ws import WSPool await self._append_tokenmanaged_transport( # None, None: Unlike the other transports this has a server/client generic creator, and only binds if there is some bind lambda tman: WSPool.create_transport( tman, self.log, loop, client_credentials=self.client_credentials, server_bind=bind or (None, None), server_context=_ssl_context, ) ) elif transportname == "oscore": from .transports.oscore import TransportOSCORE oscoretransport = TransportOSCORE(self, self) self.request_interfaces.append(oscoretransport) else: raise RuntimeError( "Transport %r not know for server context creation" % transportname ) if not multicast_done: self.log.warning( "Multicast was requested, but no multicast capable transport was selected." ) # This is used in tests to wait for externally launched servers to be ready self.log.debug("Server ready to receive requests") return self async def shutdown(self): """Take down any listening sockets and stop all related timers. After this coroutine terminates, and once all external references to the object are dropped, it should be garbage-collectable. This method takes up to :const:`aiocoap.numbers.constants.SHUTDOWN_TIMEOUT` seconds, allowing transports to perform any cleanup implemented in them (such as orderly connection shutdown and cancelling observations, where the latter is currently not implemented). :meta private: (not actually private, just hiding from automodule due to being grouped with the important functions) """ self.log.debug("Shutting down context") done, pending = await asyncio.wait( [ asyncio.create_task( ri.shutdown(), name="Shutdown of %r" % ri, ) for ri in self.request_interfaces ], timeout=SHUTDOWN_TIMEOUT, ) for item in done: await item if pending: # Apart from being useful to see, this also ensures that developers # see the error in the logs during test suite runs -- and the error # should be easier to follow than the "we didn't garbage collect # everything" errors we see anyway (or otherwise, if the error is # escalated into a test failure) self.log.error( "Shutdown timeout exceeded, returning anyway. Interfaces still busy: %s", pending, ) # FIXME: determine how official this should be, or which part of it is # public -- now that BlockwiseRequest uses it. (And formalize what can # change about messages and what can't after the remote has been thusly # populated). 
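    # Hedged illustration of the ``bind`` and ``multicast`` arguments
    # documented on create_server_context above (the site object and interface
    # name are placeholders):
    #
    #     ctx = await Context.create_server_context(
    #         site,
    #         bind=("::", None),   # all addresses, default CoAP port
    #         multicast=["eth0"],  # join the all-CoAP-nodes groups on eth0
    #     )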
async def find_remote_and_interface(self, message): if message.remote is None: raise error.MissingRemoteError() for ri in self.request_interfaces: if await ri.fill_or_recognize_remote(message): return ri raise RuntimeError("No request interface could route message") def request(self, request_message, handle_blockwise=True): if handle_blockwise: return BlockwiseRequest(self, request_message) pipe = Pipe(request_message, self.log) # Request sets up callbacks at creation result = Request(pipe, self.loop, self.log) async def send(): try: request_interface = await self.find_remote_and_interface( request_message ) request_interface.request(pipe) except Exception as e: pipe.add_exception(e) return self.loop.create_task( send(), name="Request processing of %r" % result, ) return result # the following are under consideration for moving into Site or something # mixed into it def render_to_pipe(self, pipe): """Fill a pipe by running the site's render_to_pipe interface and handling errors.""" pr_that_can_receive_errors = error_to_message(pipe, self.log) run_driving_pipe( pr_that_can_receive_errors, self._render_to_pipe(pipe), name="Rendering for %r" % pipe.request, ) async def _render_to_pipe(self, pipe): if self.serversite is None: pipe.add_response( Message(code=NOT_FOUND, payload=b"not a server"), is_last=True ) return return await self.serversite.render_to_pipe(pipe) class BaseRequest(object): """Common mechanisms of :class:`Request` and :class:`MulticastRequest`""" class BaseUnicastRequest(BaseRequest): """A utility class that offers the :attr:`response_raising` and :attr:`response_nonraising` alternatives to waiting for the :attr:`response` future whose error states can be presented either as an unsuccessful response (eg. 4.04) or an exception. It also provides some internal tools for handling anything that has a :attr:`response` future and an :attr:`observation`""" @property async def response_raising(self): """An awaitable that returns if a response comes in and is successful, otherwise raises generic network exception or a :class:`.error.ResponseWrappingError` for unsuccessful responses. Experimental Interface.""" response = await self.response if not response.code.is_successful(): raise error.ResponseWrappingError(response) return response @property async def response_nonraising(self): """An awaitable that rather returns a 500ish fabricated message (as a proxy would return) instead of raising an exception. Experimental Interface.""" # FIXME: Can we smuggle error_to_message into the underlying pipe? # That should make observe notifications into messages rather # than exceptions as well, plus it has fallbacks for `e.to_message()` # raising. try: return await self.response except error.RenderableError as e: return e.to_message() except Exception: return Message(code=INTERNAL_SERVER_ERROR) class Request(interfaces.Request, BaseUnicastRequest): # FIXME: Implement timing out with REQUEST_TIMEOUT here def __init__(self, pipe, loop, log): self._pipe = pipe self.response = loop.create_future() if pipe.request.opt.observe == 0: self.observation = ClientObservation() else: self.observation = None self._runner = self._run() self._runner.send(None) def process(event): try: # would be great to have self or the runner as weak ref, but # see ClientObservation.register_callback comments -- while # that is around, we can't weakref here. 
self._runner.send(event) return True except StopIteration: return False self._stop_interest = self._pipe.on_event(process) self.log = log self.response.add_done_callback(self._response_cancellation_handler) def _response_cancellation_handler(self, response): # Propagate cancellation to the runner (if interest in the first # response is lost, there won't be observation items to pull out), but # not general completion (because if it's completed and not cancelled, # eg. when an observation is active) if self.response.cancelled() and self._runner is not None: # Dropping the only reference makes it stop with GeneratorExit, # similar to a cancelled task self._runner = None self._stop_interest() # Otherwise, there will be a runner still around, and it's its task to # call _stop_interest. @staticmethod def _add_response_properties(response, request): response.request = request def _run(self): # FIXME: This is in iterator form because it used to be a task that # awaited futures, and that code could be easily converted to an # iterator. I'm not sure that's a bad state here, but at least it # should be a more conscious decision to make this an iterator rather # than just having it happen to be one. # # FIXME: check that responses come from the same remmote as long as we're assuming unicast first_event = yield None if first_event.message is not None: self._add_response_properties(first_event.message, self._pipe.request) self.response.set_result(first_event.message) else: self.response.set_exception(first_event.exception) if not isinstance(first_event.exception, error.Error): self.log.warning( "An exception that is not an aiocoap Error was raised " "from a transport; please report this as a bug in " "aiocoap: %r", first_event.exception, ) if self.observation is None: if not first_event.is_last: self.log.error( "Pipe indicated more possible responses" " while the Request handler would not know what to" " do with them, stopping any further request." ) self._stop_interest() return if first_event.is_last: self.observation.error(error.NotObservable()) return if first_event.message.opt.observe is None: self.log.error( "Pipe indicated more possible responses" " while the Request handler would not know what to" " do with them, stopping any further request." ) self._stop_interest() return # variable names from RFC7641 Section 3.4 v1 = first_event.message.opt.observe t1 = time.time() while True: # We don't really support cancellation of observations yet (see # https://github.com/chrysn/aiocoap/issues/92), but at least # stopping the interest is a way to free the local resources after # the first observation update, and to make the MID handler RST the # observation on the next. # FIXME: there *is* now a .on_cancel callback, we should at least # hook into that, and possibly even send a proper cancellation # then. 
next_event = yield True if self.observation.cancelled: self._stop_interest() return if next_event.exception is not None: self.observation.error(next_event.exception) if not next_event.is_last: self._stop_interest() if not isinstance(next_event.exception, error.Error): self.log.warning( "An exception that is not an aiocoap Error was " "raised from a transport during an observation; " "please report this as a bug in aiocoap: %r", next_event.exception, ) return self._add_response_properties(next_event.message, self._pipe.request) if next_event.message.opt.observe is not None: # check for reordering v2 = next_event.message.opt.observe t2 = time.time() is_recent = ( (v1 < v2 and v2 - v1 < 2**23) or (v1 > v2 and v1 - v2 > 2**23) or ( t2 > t1 + self._pipe.request.transport_tuning.OBSERVATION_RESET_TIME ) ) if is_recent: t1 = t2 v1 = v2 else: # the terminal message is always the last is_recent = True if is_recent: self.observation.callback(next_event.message) if next_event.is_last: self.observation.error(error.ObservationCancelled()) return if next_event.message.opt.observe is None: self.observation.error(error.ObservationCancelled()) self.log.error( "Pipe indicated more possible responses" " while the Request handler would not know what to" " do with them, stopping any further request." ) self._stop_interest() return class BlockwiseRequest(BaseUnicastRequest, interfaces.Request): def __init__(self, protocol, app_request): self.protocol = protocol self.log = self.protocol.log.getChild("blockwise-requester") self.response = protocol.loop.create_future() if app_request.opt.observe is not None: self.observation = ClientObservation() else: self.observation = None self._runner = protocol.loop.create_task( self._run_outer( app_request, self.response, weakref.ref(self.observation) if self.observation is not None else lambda: None, self.protocol, self.log, ), name="Blockwise runner for %r" % app_request, ) self.response.add_done_callback(self._response_cancellation_handler) def _response_cancellation_handler(self, response_future): # see Request._response_cancellation_handler if self.response.cancelled(): self._runner.cancel() @classmethod async def _run_outer(cls, app_request, response, weak_observation, protocol, log): try: await cls._run(app_request, response, weak_observation, protocol, log) except asyncio.CancelledError: pass # results already set except Exception as e: logged = False if not response.done(): logged = True response.set_exception(e) obs = weak_observation() if app_request.opt.observe is not None and obs is not None: logged = True obs.error(e) if not logged: # should be unreachable log.error( "Exception in BlockwiseRequest runner neither went to response nor to observation: %s", e, exc_info=e, ) # This is a class method because that allows self and self.observation to # be freed even when this task is running, and the task to stop itself -- # otherwise we couldn't know when users just "forget" about a request # object after using its response (esp. in observe cases) and leave this # task running. @classmethod async def _run(cls, app_request, response, weak_observation, protocol, log): # we need to populate the remote right away, because the choice of # blocks depends on it. await protocol.find_remote_and_interface(app_request) size_exp = app_request.remote.maximum_block_size_exp if app_request.opt.block1 is not None: warnings.warn( "Setting a block1 option in a managed block-wise transfer is deprecated. 
Instead, set request.remote.maximum_block_size_exp to the desired value", DeprecationWarning, stacklevel=2, ) assert ( app_request.opt.block1.block_number == 0 ), "Unexpected block number in app_request" assert ( not app_request.opt.block1.more ), "Unexpected more-flag in app_request" # this is where the library user can traditionally pass in size # exponent hints into the library. size_exp = app_request.opt.block1.size_exponent # Offset in the message in blocks of size_exp. Whoever changes size_exp # is responsible for updating this number. block_cursor = 0 while True: # ... send a chunk if size_exp >= 6: # FIXME from maximum_payload_size fragmentation_threshold = app_request.remote.maximum_payload_size else: fragmentation_threshold = 2 ** (size_exp + 4) if ( app_request.opt.block1 is not None or len(app_request.payload) > fragmentation_threshold ): current_block1 = app_request._extract_block( block_cursor, size_exp, app_request.remote.maximum_payload_size ) if block_cursor == 0: current_block1.opt.size1 = len(app_request.payload) else: current_block1 = app_request blockrequest = protocol.request(current_block1, handle_blockwise=False) blockresponse = await blockrequest.response # store for future blocks to ensure that the next blocks will be # sent from the same source address (in the UDP case; for many # other transports it won't matter). carrying along locally set block size limitation if ( app_request.remote.maximum_block_size_exp < blockresponse.remote.maximum_block_size_exp ): blockresponse.remote.maximum_block_size_exp = ( app_request.remote.maximum_block_size_exp ) app_request.remote = blockresponse.remote if blockresponse.opt.block1 is None: if blockresponse.code.is_successful() and current_block1.opt.block1: log.warning( "Block1 option completely ignored by server, assuming it knows what it is doing." ) # FIXME: handle 4.13 and retry with the indicated size option break block1 = blockresponse.opt.block1 log.debug( "Response with Block1 option received, number = %d, more = %d, size_exp = %d.", block1.block_number, block1.more, block1.size_exponent, ) if block1.block_number != current_block1.opt.block1.block_number: raise error.UnexpectedBlock1Option("Block number mismatch") if size_exp == 7: block_cursor += len(current_block1.payload) // 1024 else: block_cursor += 1 while block1.size_exponent < size_exp: block_cursor *= 2 size_exp -= 1 if not current_block1.opt.block1.more: if block1.more or blockresponse.code == CONTINUE: # treating this as a protocol error -- letting it slip # through would misrepresent the whole operation as an # over-all 2.xx (successful) one. raise error.UnexpectedBlock1Option( "Server asked for more data at end of body" ) break # checks before preparing the next round: if blockresponse.opt.observe: # we're not *really* interested in that block, we just sent an # observe option to indicate that we'll want to observe the # resulting representation as a whole log.warning( "Server answered Observe in early Block1 phase, cancelling the erroneous observation." 
) blockrequest.observe.cancel() if block1.more: # FIXME i think my own server is dowing this wrong # if response.code != CONTINUE: # raise error.UnexpectedBlock1Option("more-flag set but no Continue") pass else: if not blockresponse.code.is_successful(): break else: # ignoring (discarding) the successul intermediate result, waiting for a final one continue lower_observation = None if app_request.opt.observe is not None: if blockresponse.opt.observe is not None: lower_observation = blockrequest.observation else: obs = weak_observation() if obs: obs.error(error.NotObservable()) del obs assert blockresponse is not None, "Block1 loop broke without setting a response" blockresponse.opt.block1 = None # FIXME check with RFC7959: it just says "send requests similar to the # requests in the Block1 phase", what does that mean? using the last # block1 as a reference for now, especially because in the # only-one-request-block case, that's the original request we must send # again and again anyway assembled_response = await cls._complete_by_requesting_block2( protocol, current_block1, blockresponse, log ) response.set_result(assembled_response) # finally set the result if lower_observation is not None: # FIXME this can all be simplified a lot since it's no more # expected that observations shut themselves down when GC'd. obs = weak_observation() del weak_observation if obs is None: lower_observation.cancel() return future_weak_observation = protocol.loop.create_future() # packing this up because its destroy callback needs to reference the subtask subtask = asyncio.create_task( cls._run_observation( app_request, lower_observation, future_weak_observation, protocol, log, ), name="Blockwise observation for %r" % app_request, ) future_weak_observation.set_result( weakref.ref(obs, lambda obs: subtask.cancel()) ) obs.on_cancel(subtask.cancel) del obs await subtask @classmethod async def _run_observation( cls, original_request, lower_observation, future_weak_observation, protocol, log ): weak_observation = await future_weak_observation # we can use weak_observation() here at any time, because whenever that # becomes None, this task gets cancelled try: async for block1_notification in lower_observation: log.debug("Notification received") full_notification = await cls._complete_by_requesting_block2( protocol, original_request, block1_notification, log ) log.debug("Reporting completed notification") weak_observation().callback(full_notification) # FIXME verify that this loop actually ends iff the observation # was cancelled -- otherwise find out the cause(s) or make it not # cancel under indistinguishable circumstances weak_observation().error(error.ObservationCancelled()) except asyncio.CancelledError: return except Exception as e: weak_observation().error(e) finally: # We generally avoid idempotent cancellation, but we may have # reached this point either due to an earlier cancellation or # without one if not lower_observation.cancelled: lower_observation.cancel() @classmethod async def _complete_by_requesting_block2( cls, protocol, request_to_repeat, initial_response, log ): # FIXME this can probably be deduplicated against BlockwiseRequest if ( initial_response.opt.block2 is None or initial_response.opt.block2.more is False ): initial_response.opt.block2 = None return initial_response if initial_response.opt.block2.block_number != 0: log.error("Error assembling blockwise response (expected first block)") raise error.UnexpectedBlock2() assembled_response = initial_response last_response = initial_response 
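        # Each pass of the loop below requests the next Block2 chunk based on
        # what has been assembled so far; incoming blocks are appended via
        # _append_response_block until the server clears the more-flag.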
while True: current_block2 = request_to_repeat._generate_next_block2_request( assembled_response ) current_block2 = current_block2.copy(remote=initial_response.remote) blockrequest = protocol.request(current_block2, handle_blockwise=False) last_response = await blockrequest.response if last_response.opt.block2 is None: log.warning( "Server sent non-blockwise response after having started a blockwise transfer. Blockwise transfer cancelled, accepting single response." ) return last_response block2 = last_response.opt.block2 log.debug( "Response with Block2 option received, number = %d, more = %d, size_exp = %d.", block2.block_number, block2.more, block2.size_exponent, ) try: assembled_response._append_response_block(last_response) except error.Error as e: log.error("Error assembling blockwise response, passing on error %r", e) raise if block2.more is False: return assembled_response class ClientObservation: """An interface to observe notification updates arriving on a request. This class does not actually provide any of the observe functionality, it is purely a container for dispatching the messages via asynchronous iteration. It gets driven (ie. populated with responses or errors including observation termination) by a Request object. """ def __init__(self): self.callbacks = [] self.errbacks = [] self.cancelled = False self._on_cancel = [] self._latest_response = None # the analogous error is stored in _cancellation_reason when cancelled. def __aiter__(self): """`async for` interface to observations. This is the preferred interface to obtaining observations.""" it = self._Iterator() self.register_callback(it.push, _suppress_deprecation=True) self.register_errback(it.push_err, _suppress_deprecation=True) return it class _Iterator: def __init__(self): self._future = asyncio.get_event_loop().create_future() def push(self, item): if self._future.done(): # we don't care whether we overwrite anything, this is a lossy queue as observe is lossy self._future = asyncio.get_event_loop().create_future() self._future.set_result(item) def push_err(self, e): if self._future.done(): self._future = asyncio.get_event_loop().create_future() self._future.set_exception(e) async def __anext__(self): f = self._future try: result = await self._future # FIXME see `await servobs._trigger` comment: might waiting for # the original future not yield the first future's result when # a quick second future comes in in a push? if f is self._future: self._future = asyncio.get_event_loop().create_future() return result except (error.NotObservable, error.ObservationCancelled): # only exit cleanly when the server -- right away or later -- # states that the resource is not observable any more # FIXME: check whether an unsuccessful message is still passed # as an observation result (or whether it should be) raise StopAsyncIteration def __del__(self): if self._future.done(): try: # Fetch the result so any errors show up at least in the # finalizer output self._future.result() except (error.ObservationCancelled, error.NotObservable): # This is the case at the end of an observation cancelled # by the server. pass except error.NetworkError: # This will already have shown up in the main result too. pass except (error.LibraryShutdown, asyncio.CancelledError): pass # Anything else flying out of this is unexpected and probably a # library error # When this function is removed, we can finally do cleanup better. 
Right
    # now, someone could register a callback that doesn't hold any references,
    # so we can't just stop the request when nobody holds a reference to this
    # any more. Once we're all in pull mode, we can make the `process` function
    # that sends data in here use a weak reference (because any possible
    # recipient would need to hold a reference to self or the iterator, and
    # thus _run).

    def register_callback(self, callback, _suppress_deprecation=False):
        """Call the callback whenever a response to the message comes in, and
        pass the response to it.

        The use of this function is deprecated: Use the asynchronous iteration
        interface instead."""
        if not _suppress_deprecation:
            warnings.warn(
                "register_callback on observe results is deprecated: Use `async for notify in request.observation` instead.",
                DeprecationWarning,
                stacklevel=2,
            )
        if self.cancelled:
            return

        self.callbacks.append(callback)
        if self._latest_response is not None:
            callback(self._latest_response)

    def register_errback(self, callback, _suppress_deprecation=False):
        """Call the callback whenever something goes wrong with the
        observation, and pass an exception to the callback. After such a
        callback is called, no more callbacks will be issued.

        The use of this function is deprecated: Use the asynchronous iteration
        interface instead."""
        if not _suppress_deprecation:
            warnings.warn(
                "register_errback on observe results is deprecated: Use `async for notify in request.observation` instead.",
                DeprecationWarning,
                stacklevel=2,
            )
        if self.cancelled:
            callback(self._cancellation_reason)
            return
        self.errbacks.append(callback)

    def callback(self, response):
        """Notify all listeners of an incoming response"""

        self._latest_response = response

        for c in self.callbacks:
            c(response)

    def error(self, exception):
        """Notify registered listeners that the observation went wrong. This
        can only be called once."""

        if self.errbacks is None:
            raise RuntimeError(
                "Error raised in an already cancelled ClientObservation"
            ) from exception
        for c in self.errbacks:
            c(exception)

        self.cancel()
        self._cancellation_reason = exception

    def cancel(self):
        # FIXME determine whether this is called by anything other than error,
        # and make it private so there is always a _cancellation_reason
        """Cease to generate observation or error events. This will not
        generate an error by itself.

        This function is only needed while register_callback and
        register_errback are around; once their deprecations are acted on,
        dropping the asynchronous iterator will automatically cancel the
        observation."""

        assert not self.cancelled, "ClientObservation cancelled twice"

        # make sure things go wrong when someone tries to continue this
        self.errbacks = None
        self.callbacks = None

        self.cancelled = True
        while self._on_cancel:
            self._on_cancel.pop()()

        self._cancellation_reason = None

    def on_cancel(self, callback):
        if self.cancelled:
            callback()
        self._on_cancel.append(callback)

    def __repr__(self):
        return "<%s %s at %#x>" % (
            type(self).__name__,
            "(cancelled)"
            if self.cancelled
            else "(%s call-, %s errback(s))" % (len(self.callbacks), len(self.errbacks)),
            id(self),
        )


class ServerObservation:
    def __init__(self):
        self._accepted = False
        self._trigger = asyncio.get_event_loop().create_future()
        # A deregistration is "early" if it happens before the response message
        # is actually sent; calling deregister() in that time (typically during
        # `render()`) will not send an unsuccessful response message but just
        # set this flag which is set to None as soon as it is too late for an
        # early deregistration.
# This mechanism is temporary until more of aiocoap behaves like # Pipe which does not suffer from this limitation. self._early_deregister = False self._late_deregister = False def accept(self, cancellation_callback): self._accepted = True self._cancellation_callback = cancellation_callback def deregister(self, reason=None): if self._early_deregister is False: self._early_deregister = True return warnings.warn( "Late use of ServerObservation.deregister() is" " deprecated, use .trigger with an unsuccessful value" " instead", DeprecationWarning, ) self.trigger( Message(code=INTERNAL_SERVER_ERROR, payload=b"Resource became unobservable") ) def trigger(self, response=None, *, is_last=False): """Send an updated response; if None is given, the observed resource's rendering will be invoked to produce one. `is_last` can be set to True to indicate that no more responses will be sent. Note that an unsuccessful response will be the last no matter what is_last says, as such a message always terminates a CoAP observation.""" if is_last: self._late_deregister = True if self._trigger.done(): # we don't care whether we overwrite anything, this is a lossy queue as observe is lossy self._trigger = asyncio.get_event_loop().create_future() self._trigger.set_result(response) aiocoap-0.4.12/aiocoap/proxy/000077500000000000000000000000001472205122200160025ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/proxy/__init__.py000066400000000000000000000003621472205122200201140ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Container module, see submodules: * :mod:`.client` -- using CoAP via a proxy server * :mod:`.server` -- running a proxy server """ aiocoap-0.4.12/aiocoap/proxy/client.py000066400000000000000000000030501472205122200176300ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import warnings from ..message import UndecidedRemote from .. import interfaces from ..util import hostportsplit class ProxyForwarder(interfaces.RequestProvider): """Object that behaves like a Context but only provides the request function and forwards all messages to a proxy. 
    This is not a proxy itself, it is just the interface for an external
    one."""

    def __init__(self, proxy_address, context):
        if "://" not in proxy_address:
            warnings.warn(
                "Proxy addresses without scheme are deprecated, "
                "please specify like `coap://host`, `coap+tcp://ip:port` "
                "etc.",
                DeprecationWarning,
            )
            proxy_address = "coap://" + proxy_address

        self.proxy_address = UndecidedRemote.from_pathless_uri(proxy_address)
        self.context = context

    proxy = property(lambda self: self.proxy_address)

    def request(self, message, **kwargs):
        if not isinstance(message.remote, UndecidedRemote):
            raise ValueError(
                "Message already has a configured "
                "remote, set .opt.uri_{host,port} instead of remote"
            )

        host, port = hostportsplit(message.remote.hostinfo)
        message.opt.uri_port = port
        message.opt.uri_host = host

        message.opt.proxy_scheme = self.proxy_address.scheme
        message.remote = self.proxy_address
        return self.context.request(message)
aiocoap-0.4.12/aiocoap/proxy/server.py000066400000000000000000000411321472205122200176630ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Basic implementation of CoAP-CoAP proxying

This is work in progress and not yet part of the API."""

import asyncio
import functools
import ipaddress
import logging
import warnings

from .. import numbers, interfaces, message, error, util, resource
from ..numbers import codes
from ..blockwise import Block1Spool, Block2Cache
from ..pipe import Pipe


class CanNotRedirect(error.ConstructionRenderableError):
    message = "Proxy redirection failed"


class NoUriSplitting(CanNotRedirect):
    code = codes.NOT_IMPLEMENTED
    message = "URI splitting not implemented, please use Proxy-Scheme."


class IncompleteProxyUri(CanNotRedirect):
    code = codes.BAD_REQUEST
    message = "Proxying requires Proxy-Scheme and Uri-Host"


class NotAForwardProxy(CanNotRedirect):
    code = codes.PROXYING_NOT_SUPPORTED
    message = "This is a reverse proxy, not a forward one."
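
# A forward proxy could be wired up roughly like this (a sketch, not executed
# as part of this module; the bind address is made up):
#
#     outgoing = await Context.create_client_context()
#     await Context.create_server_context(ForwardProxy(outgoing), bind=("::1", 5683))
#
# Any CanNotRedirect raised while applying redirections is turned into the
# error response named by its `code` attribute (see Proxy.render below).
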
class NoSuchHostname(CanNotRedirect):
    code = codes.NOT_FOUND
    message = ""


class CanNotRedirectBecauseOfUnsafeOptions(CanNotRedirect):
    code = codes.BAD_OPTION

    def __init__(self, options):
        self.message = "Unsafe options in request: %s" % (
            ", ".join(str(o.number) for o in options)
        )


def raise_unless_safe(request, known_options):
    """Raise a BAD_OPTION CanNotRedirect unless all options in request are
    safe to forward or known"""

    known_options = set(known_options).union(
        {
            # it is expected that every proxy is aware of these options even though
            # one of them often doesn't need touching
            numbers.OptionNumber.URI_HOST,
            numbers.OptionNumber.URI_PORT,
            numbers.OptionNumber.URI_PATH,
            numbers.OptionNumber.URI_QUERY,
            # handled by the Context
            numbers.OptionNumber.BLOCK1,
            numbers.OptionNumber.BLOCK2,
            # handled by the proxy resource
            numbers.OptionNumber.OBSERVE,
        }
    )

    unsafe_options = [
        o
        for o in request.opt.option_list()
        if o.number.is_unsafe() and o.number not in known_options
    ]
    if unsafe_options:
        raise CanNotRedirectBecauseOfUnsafeOptions(unsafe_options)


class Proxy(interfaces.Resource):
    # other than in special cases, we're trying to be transparent wrt blockwise transfers
    interpret_block_options = False

    def __init__(self, outgoing_context, logger=None):
        super().__init__()
        # Provide variables for render_to_pipe
        # FIXME this is copied from aiocoap.resource's __init__ -- but in the
        # long run proxying shouldn't rely on that anyway but implement
        # render_to_pipe right on its own
        self._block1 = Block1Spool()
        self._block2 = Block2Cache()

        self.outgoing_context = outgoing_context
        self.log = logger or logging.getLogger("proxy")

        self._redirectors = []

    def add_redirector(self, redirector):
        self._redirectors.append(redirector)

    def apply_redirection(self, request):
        for r in self._redirectors:
            result = r.apply_redirection(request)
            if result is not None:
                return result
        return None

    async def needs_blockwise_assembly(self, request):
        return self.interpret_block_options

    async def render(self, request):
        # FIXME I'd rather let the application do with the message whatever it
        # wants. Everything the responder needs of the request should be
        # extracted beforehand.
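        # Descriptive note on the flow below: the incoming request is stripped
        # of its exchange identifiers, rewritten by the configured
        # redirectors, forwarded through self.outgoing_context, and the
        # response is sanitized (raise_unless_safe plus clearing
        # mtype/mid/remote/token) before being handed back to our own server
        # side.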
        request = request.copy(mid=None, token=None)

        try:
            request = self.apply_redirection(request)
        except CanNotRedirect as e:
            return e.to_message()

        if request is None:
            response = await super().render(request)
            if response is None:
                raise IncompleteProxyUri("No matching proxy rule")
            return response

        try:
            response = await self.outgoing_context.request(
                request, handle_blockwise=self.interpret_block_options
            ).response
        except error.TimeoutError:
            return message.Message(code=numbers.codes.GATEWAY_TIMEOUT)

        raise_unless_safe(response, ())

        response.mtype = None
        response.mid = None
        response.remote = None
        response.token = None

        return response

    # Not inheriting from them because we do *not* want the .render() in the
    # resolution tree (it can't deal with None requests, which are used among
    # proxy implementations)
    async def render_to_pipe(self, pipe: Pipe) -> None:
        await resource.Resource.render_to_pipe(self, pipe)  # type: ignore


class ProxyWithPooledObservations(Proxy, interfaces.ObservableResource):
    def __init__(self, outgoing_context, logger=None):
        super(ProxyWithPooledObservations, self).__init__(outgoing_context, logger)
        self._outgoing_observations = {}

    @staticmethod
    def _cache_key(request):
        return request.get_cache_key([numbers.optionnumbers.OptionNumber.OBSERVE])

    def _peek_observation_for(self, request):
        """Return the augmented request (see _get_observation_for) towards a
        resource, or raise KeyError"""
        cachekey = self._cache_key(request)

        return self._outgoing_observations[cachekey]

    def _get_observation_for(self, request):
        """Return an existing augmented request towards a resource or create one.

        An augmented request is an observation request that has some
        additional properties (__users, __cachekey, __latest_response), which
        are used in ProxyWithPooledObservations to immediately serve responses
        from observed resources, and to tear the observations down again."""

        # see ProxiedResource.render
        request = request.copy(mid=None, remote=None, token=None)
        request = self.apply_redirection(request)

        cachekey = self._cache_key(request)

        try:
            obs = self._outgoing_observations[cachekey]
        except KeyError:
            obs = self._outgoing_observations[cachekey] = self.outgoing_context.request(
                request
            )
            obs.__users = set()
            obs.__cachekey = cachekey
            obs.__latest_response = None
            # this becomes a cached response right after the .response comes
            # in (so only use this after waiting for it), and gets updated
            # when new responses arrive.

            def when_first_request_done(result, obs=obs):
                obs.__latest_response = result.result()

            obs.response.add_done_callback(when_first_request_done)

            def cb(incoming_message, obs=obs):
                self.log.info(
                    "Received incoming message %r, relaying it to %d clients",
                    incoming_message,
                    len(obs.__users),
                )
                obs.__latest_response = incoming_message
                for observationserver in set(obs.__users):
                    observationserver.trigger(incoming_message.copy())

            obs.observation.register_callback(cb)

            def eb(exception, obs=obs):
                if obs.__users:
                    code = numbers.codes.INTERNAL_SERVER_ERROR
                    payload = b""
                    if isinstance(exception, error.RenderableError):
                        code = exception.code
                        payload = exception.message.encode("ascii")
                    self.log.debug(
                        "Received error %r, which did not lead to unregistration of the clients. Actively deregistering them with %s %r.",
                        exception,
                        code,
                        payload,
                    )
                    for u in list(obs.__users):
                        u.trigger(message.Message(code=code, payload=payload))
                    if obs.__users:
                        self.log.error(
                            "Observations survived sending them an error message."
                        )
                else:
                    self.log.debug(
                        "Received error %r, but that seems to have been passed on cleanly to the observers as they are gone by now.",
                        exception,
                    )

            obs.observation.register_errback(eb)

        return obs

    def _add_observation_user(self, clientobservationrequest, serverobservation):
        clientobservationrequest.__users.add(serverobservation)

    def _remove_observation_user(self, clientobservationrequest, serverobservation):
        clientobservationrequest.__users.remove(serverobservation)
        # give the request that just cancelled time to be dealt with before
        # dropping the __latest_response
        asyncio.get_event_loop().call_soon(
            self._consider_dropping, clientobservationrequest
        )

    def _consider_dropping(self, clientobservationrequest):
        if not clientobservationrequest.__users:
            self.log.debug(
                "Last client of observation went away, deregistering with server."
            )
            self._outgoing_observations.pop(clientobservationrequest.__cachekey)
            if not clientobservationrequest.observation.cancelled:
                clientobservationrequest.observation.cancel()

    async def add_observation(self, request, serverobservation):
        """As ProxiedResource is intended to be just the proxy's interface
        toward the Context, accepting observations is handled here, where the
        observations handling can be defined by the subclasses."""

        try:
            clientobservationrequest = self._get_observation_for(request)
        except CanNotRedirect:
            pass  # just don't accept the observation, the rest will be taken care of at rendering
        else:
            self._add_observation_user(clientobservationrequest, serverobservation)
            serverobservation.accept(
                functools.partial(
                    self._remove_observation_user,
                    clientobservationrequest,
                    serverobservation,
                )
            )

    async def render(self, request):
        # FIXME this is evaluated twice in the implementation (once here, but
        # unless it's an observation what matters is inside the super call),
        # maybe this needs to hook in differently than by subclassing and
        # calling super.
        self.log.info("render called")
        redirected_request = request.copy()

        try:
            redirected_request = self.apply_redirection(redirected_request)
            if redirected_request is None:
                return await super().render(request)
            clientobservationrequest = self._peek_observation_for(redirected_request)
        except (KeyError, CanNotRedirect) as e:
            if not isinstance(e, CanNotRedirect) and request.opt.observe is not None:
                self.log.warning(
                    "No matching observation found: request is %r (cache key %r), outgoing observations %r",
                    redirected_request,
                    self._cache_key(redirected_request),
                    self._outgoing_observations,
                )

                return message.Message(
                    code=numbers.codes.BAD_OPTION,
                    payload="Observe option can not be proxied without active observation.".encode(
                        "utf8"
                    ),
                )
            self.log.debug(
                "Request is not an observation or can't be proxied, passing it on to regular proxying mechanisms."
            )
            return await super(ProxyWithPooledObservations, self).render(request)
        else:
            self.log.info(
                "Serving request using latest cached response of %r",
                clientobservationrequest,
            )
            await clientobservationrequest.response
            cached_response = clientobservationrequest.__latest_response
            cached_response.mid = None
            cached_response.token = None
            cached_response.remote = None
            cached_response.mtype = None
            return cached_response


class ForwardProxy(Proxy):
    def apply_redirection(self, request):
        request = request.copy()

        if request.opt.proxy_uri is not None:
            raise NoUriSplitting
        if request.opt.proxy_scheme is None:
            return super().apply_redirection(request)
        if request.opt.uri_host is None:
            raise IncompleteProxyUri

        raise_unless_safe(
            request,
            (
                numbers.OptionNumber.PROXY_SCHEME,
                numbers.OptionNumber.URI_HOST,
                numbers.OptionNumber.URI_PORT,
            ),
        )

        request.remote = message.UndecidedRemote(
            request.opt.proxy_scheme,
            util.hostportjoin(request.opt.uri_host, request.opt.uri_port),
        )
        request.opt.proxy_scheme = None
        request.opt.uri_port = None
        forward_host = request.opt.uri_host
        try:
            # I'd prefer to not do if-by-try, but the ipaddress doesn't seem to
            # offer any other choice
            ipaddress.ip_address(request.opt.uri_host)
            warnings.warn(
                "URI-Host looks like IPv6 but has no square "
                "brackets. This is deprecated, see "
                "https://github.com/chrysn/aiocoap/issues/216",
                DeprecationWarning,
            )
        except ValueError:
            pass
        else:
            request.opt.uri_host = None
        if forward_host.startswith("["):
            # IPv6 or future literals are not recognized by ipaddress which
            # does not look at host-encoded form
            request.opt.uri_host = None

        # Maybe the URI-Host matches a known forwarding -- in that case, catch that.
        redirected = super(ForwardProxy, self).apply_redirection(request)
        if redirected is not None:
            return redirected

        return request


class ForwardProxyWithPooledObservations(ForwardProxy, ProxyWithPooledObservations):
    pass


class ReverseProxy(Proxy):
    def __init__(self, *args, **kwargs):
        import warnings

        warnings.warn(
            "ReverseProxy has become moot due to proxy operation "
            "changes, just instantiate Proxy and set the appropriate "
            "redirectors",
            DeprecationWarning,
            stacklevel=1,
        )
        super().__init__(*args, **kwargs)


class ReverseProxyWithPooledObservations(ReverseProxy, ProxyWithPooledObservations):
    pass


class Redirector:
    def apply_redirection(self, request):
        return None


class NameBasedVirtualHost(Redirector):
    def __init__(self, match_name, target, rewrite_uri_host=False, use_as_proxy=False):
        self.match_name = match_name
        self.target = target
        self.rewrite_uri_host = rewrite_uri_host
        self.use_as_proxy = use_as_proxy

    def apply_redirection(self, request):
        raise_unless_safe(request, ())

        if self._matches(request.opt.uri_host):
            if self.use_as_proxy:
                request.opt.proxy_scheme = request.remote.scheme
            if self.rewrite_uri_host:
                request.opt.uri_host, _ = util.hostportsplit(self.target)
            request.unresolved_remote = self.target
            return request

    def _matches(self, hostname):
        return hostname == self.match_name


class SubdomainVirtualHost(NameBasedVirtualHost):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.rewrite_uri_host:
            raise TypeError(
                "rewrite_uri_host makes no sense with subdomain virtual hosting"
            )

    def _matches(self, hostname):
        return hostname.endswith("." + self.match_name)


class UnconditionalRedirector(Redirector):
    def __init__(self, target, use_as_proxy=False):
        self.target = target
        self.use_as_proxy = use_as_proxy

    def apply_redirection(self, request):
        raise_unless_safe(request, ())

        if self.use_as_proxy:
            request.opt.proxy_scheme = request.remote.scheme
        request.unresolved_remote = self.target
        return request


class SubresourceVirtualHost(Redirector):
    def __init__(self, path, target):
        self.path = tuple(path)
        self.target = target

    def apply_redirection(self, request):
        raise_unless_safe(request, ())

        if self.path == request.opt.uri_path[: len(self.path)]:
            request.opt.uri_path = request.opt.uri_path[len(self.path) :]
            request.unresolved_remote = self.target
            return request
aiocoap-0.4.12/aiocoap/resource.py000066400000000000000000000424051472205122200170270ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Basic resource implementations

A resource in URL / CoAP / REST terminology is the thing identified by a URI.

Here, a :class:`.Resource` is the place where server functionality is
implemented. In many cases, there exists one persistent Resource object for a
given resource (eg. a ``TimeResource()`` is responsible for serving the
``/time`` location).

On the other hand, an aiocoap server context accepts only one thing as its
serversite, and that is a Resource too (typically of the :class:`Site` class).

Resources are most easily implemented by deriving from :class:`.Resource` and
implementing ``render_get``, ``render_post`` and similar coroutine methods.
Those take a single request message object and must return a
:class:`aiocoap.Message` object or raise an
:class:`.error.RenderableError` (eg. ``raise UnsupportedMediaType()``).

To serve more than one resource on a site, use the :class:`Site` class to
dispatch requests based on the Uri-Path header.
"""

import hashlib
import warnings

from . import message
from . import meta
from . import error
from . import interfaces
from .numbers.contentformat import ContentFormat
from .numbers.codes import Code
from .pipe import Pipe
from .util.linkformat import Link, LinkFormat


def hashing_etag(request: message.Message, response: message.Message):
    """Helper function for render_get handlers that allows them to use ETags
    based on the payload's hash value

    Run this on your request and response before returning from render_get; it
    is safe to use this function with all kinds of responses, it will only act
    on 2.05 Content messages (and those with no code set, which defaults to
    that for GET requests).

    The hash used is the first 8 bytes of the sha1 sum of the payload. Note
    that this method is not ideal from a server performance point of view (a
    file server, for example, might want to hash only the stat() result of a
    file instead of reading it in full), but it saves bandwidth for the simple
    cases.
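
    For example, a GET request that already carries the ETag of the payload
    ``b'hello'`` is answered with 2.03 Valid and an empty payload: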
    >>> from aiocoap import *
    >>> req = Message(code=GET)
    >>> hash_of_hello = b'\\xaa\\xf4\\xc6\\x1d\\xdc\\xc5\\xe8\\xa2'
    >>> req.opt.etags = [hash_of_hello]
    >>> resp = Message(code=CONTENT)
    >>> resp.payload = b'hello'
    >>> hashing_etag(req, resp)
    >>> resp                                            # doctest: +ELLIPSIS
    <aiocoap.Message ...>
    """

    if response.code != Code.CONTENT and response.code is not None:
        return

    response.opt.etag = hashlib.sha1(response.payload).digest()[:8]
    if request.opt.etags is not None and response.opt.etag in request.opt.etags:
        response.code = Code.VALID
        response.payload = b""


class _ExposesWellknownAttributes:
    def get_link_description(self):
        # FIXME which formats are acceptable, and how much escaping and
        # list-to-separated-string conversion needs to happen here
        ret = {}
        if hasattr(self, "ct"):
            ret["ct"] = str(self.ct)
        if hasattr(self, "rt"):
            ret["rt"] = self.rt
        if hasattr(self, "if_"):
            ret["if"] = self.if_
        return ret


class Resource(_ExposesWellknownAttributes, interfaces.Resource):
    """Simple base implementation of the :class:`interfaces.Resource`
    interface

    The render method delegates content creation to ``render_$method`` methods
    (``render_get``, ``render_put`` etc), and responds appropriately to
    unsupported methods. Those methods may return messages without a response
    code; the default render method will set an appropriate successful code
    ("Content" for GET/FETCH, "Deleted" for DELETE, "Changed" for anything
    else). The render method will also fill in the request's no_response code
    into the response (see :meth:`.interfaces.Resource.render`) if none was
    set.

    Moreover, this class provides a ``get_link_description`` method as used by
    .well-known/core to expose a resource's ``.ct``, ``.rt`` and ``.if_``
    (alternative name for ``if`` as that's a Python keyword) attributes.
    Details can be added by overriding the method to return a more
    comprehensive dictionary, and resources can be hidden completely by
    returning None.
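
    For example, a minimal read-only resource could look like this (a sketch;
    the class name is made up)::

        class HelloResource(Resource):
            async def render_get(self, request):
                # the code defaults to 2.05 Content for GET
                return aiocoap.Message(payload=b"hello world")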
""" async def needs_blockwise_assembly(self, request): return True async def render(self, request): if not request.code.is_request(): raise error.UnsupportedMethod() m = getattr(self, "render_%s" % str(request.code).lower(), None) if not m: raise error.UnallowedMethod() response = await m(request) if response is message.NoResponse: warnings.warn( "Returning NoResponse is deprecated, please return a" " regular response with a no_response option set.", DeprecationWarning, ) response = message.Message(no_response=26) if response.code is None: if request.code in (Code.GET, Code.FETCH): response_default = Code.CONTENT elif request.code == Code.DELETE: response_default = Code.DELETED else: response_default = Code.CHANGED response.code = response_default if response.opt.no_response is None: response.opt.no_response = request.opt.no_response return response async def render_to_pipe(self, pipe: Pipe): # Silence the deprecation warning if isinstance(self, interfaces.ObservableResource): # See interfaces.Resource.render_to_pipe return await interfaces.ObservableResource._render_to_pipe(self, pipe) return await interfaces.Resource._render_to_pipe(self, pipe) class ObservableResource(Resource, interfaces.ObservableResource): def __init__(self): super(ObservableResource, self).__init__() self._observations = set() async def add_observation(self, request, serverobservation): self._observations.add(serverobservation) def _cancel(self=self, obs=serverobservation): self._observations.remove(serverobservation) self.update_observation_count(len(self._observations)) serverobservation.accept(_cancel) self.update_observation_count(len(self._observations)) def update_observation_count(self, newcount): """Hook into this method to be notified when the number of observations on the resource changes.""" def updated_state(self, response=None): """Call this whenever the resource was updated, and a notification should be sent to observers.""" for o in self._observations: o.trigger(response) def get_link_description(self): link = super(ObservableResource, self).get_link_description() link["obs"] = None return link async def render_to_pipe(self, request: Pipe): # Silence the deprecation warning return await interfaces.ObservableResource._render_to_pipe(self, request) def link_format_to_message( request: message.Message, linkformat: LinkFormat, default_ct=ContentFormat.LINKFORMAT, ) -> message.Message: """Given a LinkFormat object, render it to a response message, picking a suitable conent format from a given request. It returns a Not Acceptable response if something unsupported was queried. It makes no attempt to modify the URI reference literals encoded in the LinkFormat object; they have to be suitably prepared by the caller.""" ct = request.opt.accept if ct is None: ct = default_ct if ct == ContentFormat.LINKFORMAT: payload = str(linkformat).encode("utf8") else: return message.Message(code=Code.NOT_ACCEPTABLE) return message.Message(payload=payload, content_format=ct) # Convenience attribute to set as ct on resources that use # link_format_to_message as their final step in the request handler # # mypy doesn't like attributes on functions, requiring some `type: ignore` for # this, but the alternatives (a __call__able class) have worse docs. link_format_to_message.supported_ct = " ".join( # type: ignore str(int(x)) for x in (ContentFormat.LINKFORMAT,) ) class WKCResource(Resource): """Read-only dynamic resource list, suitable as .well-known/core. 
    This resource renders a link_header.LinkHeader object (which describes a
    collection of resources) as application/link-format (RFC 6690).

    The list to be rendered is obtained from a function passed into the
    constructor; typically, that function would be a bound
    Site.get_resources_as_linkheader() method.

    This resource also provides server `implementation information link`_;
    server authors are invited to override this by passing an own URI as the
    `impl_info` parameter, and can disable it by passing None.

    .. _`implementation information link`: https://tools.ietf.org/html/draft-bormann-t2trg-rel-impl-00"""

    ct = link_format_to_message.supported_ct  # type: ignore

    def __init__(self, listgenerator, impl_info=meta.library_uri, **kwargs):
        super().__init__(**kwargs)
        self.listgenerator = listgenerator
        self.impl_info = impl_info

    async def render_get(self, request):
        links = self.listgenerator()

        if self.impl_info is not None:
            links.links = links.links + [Link(href=self.impl_info, rel="impl-info")]

        filters = []
        for q in request.opt.uri_query:
            try:
                k, v = q.split("=", 1)
            except ValueError:
                continue  # no =, not a relevant filter

            if v.endswith("*"):

                def matchexp(x, v=v):
                    return x.startswith(v[:-1])

            else:

                def matchexp(x, v=v):
                    return x == v

            if k in ("rt", "if", "ct"):
                filters.append(
                    lambda link: any(
                        matchexp(part)
                        for part in (" ".join(getattr(link, k, ()))).split(" ")
                    )
                )
            elif k in ("href",):  # x.href is single valued
                filters.append(lambda link: matchexp(getattr(link, k)))
            else:
                filters.append(
                    lambda link: any(matchexp(part) for part in getattr(link, k, ()))
                )

        while filters:
            links.links = filter(filters.pop(), links.links)
        links.links = list(links.links)

        response = link_format_to_message(request, links)

        if (
            request.opt.uri_query
            and not links.links
            and request.remote.is_multicast_locally
        ):
            if request.opt.no_response is None:
                # If the filter does not match, multicast requests should not
                # be responded to -- that's equivalent to a "no_response on
                # 2.xx" option.
                response.opt.no_response = 0x02

        return response


class PathCapable:
    """Class that indicates that a resource promises to parse the uri_path
    option, and can thus be given requests for
    :meth:`~.interfaces.Resource.render`-ing that contain a uri_path"""


class Site(interfaces.ObservableResource, PathCapable):
    """Typical root element that gets passed to a :class:`Context` and contains
    all the resources that can be found when the endpoint gets accessed as a
    server.

    This provides easy registration of static resources. Add resources at
    absolute locations using the :meth:`.add_resource` method.

    For example, the site at

    >>> site = Site()
    >>> site.add_resource(["hello"], Resource())

    will have requests to </hello> rendered by the new resource.

    You can add another Site (or another instance of :class:`PathCapable`) as
    well; those will be nested and integrally reported in a WKCResource. The
    path of a site should not end with an empty string (ie. a slash in the
    URI) -- the child site's own root resource will then have the trailing
    slash address. Subsites can not have link-header attributes on their own
    (eg. `rt`) and will never respond to a request that does not at least
    contain a single slash after the given path part.

    For example,

    >>> batch = Site()
    >>> batch.add_resource(["light1"], Resource())
    >>> batch.add_resource(["light2"], Resource())
    >>> batch.add_resource([], Resource())
    >>> s = Site()
    >>> s.add_resource(["batch"], batch)

    will have the three created resources rendered at </batch/light1>,
    </batch/light2> and </batch/>.
    If it is necessary to respond to requests to </batch> or report its
    attributes in .well-known/core in addition to the above, a non-PathCapable
    resource can be added with the same path. This is usually considered an odd
    design, not fully supported, and for example doesn't support removal of
    resources from the site.
    """

    def __init__(self):
        self._resources = {}
        self._subsites = {}

    async def needs_blockwise_assembly(self, request):
        try:
            child, subrequest = self._find_child_and_pathstripped_message(request)
        except KeyError:
            return True
        else:
            return await child.needs_blockwise_assembly(subrequest)

    def _find_child_and_pathstripped_message(self, request):
        """Given a request, find the child that will handle it, and strip all
        path components from the request that are covered by the child's
        position within the site. Returns the child and a request with a path
        shortened by the components in the child's path, or raises a
        KeyError.

        While producing stripped messages, this adds a ._original_request_uri
        attribute to the messages which holds the request URI before the
        stripping is started. That allows internal components to access the
        original URI until there is a variation of the request API that allows
        accessing this in a better usable way."""

        original_request_uri = getattr(
            request,
            "_original_request_uri",
            request.get_request_uri(local_is_server=True),
        )

        if request.opt.uri_path in self._resources:
            stripped = request.copy(uri_path=())
            stripped._original_request_uri = original_request_uri
            return self._resources[request.opt.uri_path], stripped

        if not request.opt.uri_path:
            raise KeyError()

        remainder = [request.opt.uri_path[-1]]
        path = request.opt.uri_path[:-1]
        while path:
            if path in self._subsites:
                res = self._subsites[path]
                if remainder == [""]:
                    # sub-sites should see their root resource like sites
                    remainder = []
                stripped = request.copy(uri_path=remainder)
                stripped._original_request_uri = original_request_uri
                return res, stripped
            remainder.insert(0, path[-1])
            path = path[:-1]
        raise KeyError()

    async def render(self, request):
        try:
            child, subrequest = self._find_child_and_pathstripped_message(request)
        except KeyError:
            raise error.NotFound()
        else:
            return await child.render(subrequest)

    async def add_observation(self, request, serverobservation):
        try:
            child, subrequest = self._find_child_and_pathstripped_message(request)
        except KeyError:
            return

        try:
            await child.add_observation(subrequest, serverobservation)
        except AttributeError:
            pass

    def add_resource(self, path, resource):
        if isinstance(path, str):
            raise ValueError("Paths should be tuples or lists of strings")
        if isinstance(resource, PathCapable):
            self._subsites[tuple(path)] = resource
        else:
            self._resources[tuple(path)] = resource

    def remove_resource(self, path):
        try:
            del self._subsites[tuple(path)]
        except KeyError:
            del self._resources[tuple(path)]

    def get_resources_as_linkheader(self):
        links = []

        for path, resource in self._resources.items():
            if hasattr(resource, "get_link_description"):
                details = resource.get_link_description()
            else:
                details = {}
            if details is None:
                continue
            lh = Link("/" + "/".join(path), **details)

            links.append(lh)

        for path, resource in self._subsites.items():
            if hasattr(resource, "get_resources_as_linkheader"):
                for link in resource.get_resources_as_linkheader().links:
                    links.append(
                        Link("/" + "/".join(path) + link.href, link.attr_pairs)
                    )
        return LinkFormat(links)

    async def render_to_pipe(self, request: Pipe):
        try:
            child, subrequest = self._find_child_and_pathstripped_message(
                request.request
            )
        except KeyError:
            raise error.NotFound()
        else:
            # FIXME consider carefully whether this switching-around is good.
            # It probably is.
            request.request = subrequest
            return await child.render_to_pipe(request)
aiocoap-0.4.12/aiocoap/resourcedirectory/000077500000000000000000000000001472205122200203755ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/resourcedirectory/__init__.py000066400000000000000000000006401472205122200225060ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Implementations of RFC9176_

Currently, this only contains the :mod:`client` module. There is an
implementation of the server side contained in aiocoap, but it is currently
accessible only as :mod:`command line application <aiocoap.cli.rd>`.

.. _RFC9176: https://datatracker.ietf.org/doc/html/rfc9176
"""
aiocoap-0.4.12/aiocoap/resourcedirectory/client/000077500000000000000000000000001472205122200216535ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/resourcedirectory/client/__init__.py000066400000000000000000000005221472205122200237630ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Client components for RFC9176_

Currently, this only contains the :mod:`register` module, which helps
utilizing the registration interface of a resource directory.

.. _RFC9176: https://datatracker.ietf.org/doc/html/rfc9176
"""
aiocoap-0.4.12/aiocoap/resourcedirectory/client/register.py000066400000000000000000000341111472205122200240510ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Client components for registering with a resource-directory server"""

import asyncio
from urllib.parse import urljoin, urlparse
import logging
from socket import getfqdn

from ...util.linkformat import link_header

from ...message import Message
from ...numbers import GET, POST, DELETE, SERVICE_UNAVAILABLE, NOT_FOUND

__all__ = ["Registerer"]


class Registerer:
    """Implementation of the client side of the registration at a resource
    directory.

    Until the object is :meth:`shut down <shutdown>`, it keeps the
    registration alive. It works both for registering its own context as well
    as for registering others (taking the role of a commissioning tool).

    The :attr:`state` attribute is kept up to date with an informal
    representation of whether the registration is currently active.

    If any step in the registration fails, the object will not retry
    indefinitely, and it will back off to earlier steps; after a limited
    number of retries after the last successful step, the object permanently
    enters a failed state. (In future extension, it might listen for external
    events that allow it to restart heuristics, like a new network interface
    coming up).

    The registration does not observe the resource list of the registered host
    (yet?), so registrations are only kept alive and never updated."""

    def __init__(
        self,
        context,
        rd=None,
        lt=90000,
        name_from_hostname=None,
        link_source=None,
        registration_parameters={},
        loggername="coap-rd-registerer",
    ):
        """Use a ``context`` to create a registration at the resource
        directory at ``rd`` (defaulting to "find an RD yourself"; URIs should
        have no path component, unless the user wishes to sidestep the URI
        discovery step). It will be renewed every ``lt`` seconds.

        The registration data will be obtained by querying the context's
        site's ``.well-known/core`` resource, unless another source URI is
        given in ``link_source``, in which case this object acts as a
        Commissioning Tool.
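
        A minimal use could look like this (a sketch; the RD address is made
        up, and the server context is assumed to have a populated
        ``serversite``)::

            registerer = Registerer(context, rd="coap://rd.example", lt=3600)
            # ... serve as usual ...
            await registerer.shutdown()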
        Parameters to pass with the registration can be given in
        ``registration_parameters``. ``lt`` and ``base`` default to the
        constructor arguments ``lt`` and ``link_source``, respectively.

        If ``name_from_hostname`` is True (or by default if ``ep`` is not
        present in ``registration_parameters``), the ``ep`` and ``d``
        registration parameters are derived from the host name."""

        self._context = context
        self._link_source = link_source
        self._link_data = None  #: Message
        self._lt = lt
        self._initial_rd = rd

        self._directory_resource = None
        self._registration_resource = None

        self._registration_parameters = dict(registration_parameters)
        if name_from_hostname or (
            name_from_hostname is None and "ep" not in registration_parameters
        ):
            ep, _, d = getfqdn().partition(".")
            self._registration_parameters["ep"] = ep
            if d:
                self._registration_parameters["d"] = d

        self.log = logging.getLogger(loggername)

        self._set_state("starting")
        self._task = asyncio.create_task(self._run())

    def __repr__(self):
        return "<%s at %#x: registering at %s as %s (currently %s)>" % (
            type(self).__name__,
            id(self),
            self._registration_resource or self._directory_resource or self._initial_rd,
            self._registration_parameters,
            self.state,
        )

    def _set_state(self, newstate):
        self.log.debug("Entering state %s", newstate)
        self.state = newstate

    async def _fill_directory_resource(self, blacklist=set()):
        # FIXME: this should at some point catch network errors (short of
        # falling back to "RD discovery failed, backing off"), but that needs
        # falling back to other discovery methods here, and i don't know how
        # this will be done yet

        if self._directory_resource is not None:
            return

        if self._initial_rd is None:
            # FIXME: can't access DHCP options generically, dunno about SLAAC.
            # It seems to be a sane assumption that the best thing to do is to
            # assume we're on a big host and multicast is cheap here.
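            # Descriptive note: ff05::fd is the site-local scope instance of
            # the "All CoRE Resource Directories" IPv6 multicast address
            # registered for RD discovery in RFC 9176; the lookup below asks
            # any listening RD for its registration interface.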
            self._directory_resource = await self._discovery_directory_uri(
                "coap://[ff05::fd]", blacklist=blacklist
            )
        else:
            components = urlparse(self._initial_rd)
            if not components.scheme or not components.netloc:
                raise ValueError("Explicit RD URIs need to contain scheme and path")

            if components.path:
                if self._initial_rd in blacklist:
                    raise self._UnrecoverableError(
                        "Explicitly configured RD was blacklisted"
                    )
                else:
                    self._directory_resource = self._initial_rd
            else:
                self._directory_resource = await self._discovery_directory_uri(
                    self._initial_rd, blacklist=blacklist
                )

    async def _discovery_directory_uri(self, host, blacklist=set()):
        lookup_uri = urljoin(host, "/.well-known/core?rt=core.rd")

        try:
            # FIXME: this should be able to deal with multicasts
            response = await self._context.request(
                Message(code=GET, uri=lookup_uri, accept=40)
            ).response_raising
            links = link_header.parse(response.payload.decode("utf8"))
        except (UnicodeDecodeError, link_header.ParseException):
            self.log.error("Error parsing the RD's self description")
            raise

        addresses = [
            link.get_target(response.get_request_uri())
            for link in links.links
            if "core.rd" in " ".join(link.rt).split(" ")
        ]
        unfiltered_addresses = len(addresses)
        addresses = [a for a in addresses if a not in blacklist]
        if not addresses:
            if len(addresses) != unfiltered_addresses:
                raise self._UnrecoverableError(
                    "All discovered Directory Resources are blacklisted"
                )
            else:
                raise self._UnrecoverableError(
                    "No registration interface found in RD's response"
                )

        if len(addresses) > 1:
            self.log.warning(
                "More than one registration interface found," " picking the first"
            )

        return addresses[0]

    async def _obtain_link_data(self):
        """Store a message describing the data to be POSTed to the
        registration interface.

        This needs to be in :class:`Message` format, but doesn't need to have
        any particular code set yet (that gets set later anyway), so in
        effect, the response message from the GET request can be returned as
        is.
""" if self._link_source is None: self._link_data = Message( content_format=40, payload=str( self._context.serversite.get_resources_as_linkheader() ).encode("utf8"), ) else: self._link_data = await self._context.request( Message(code=GET, uri=urljoin(self._link_source, "/.well-known/core")) ).response_raising class _RetryableError(RuntimeError): """Raised when an initial registration or update rails in a way that warrants rediscovery of the RD""" class _UnrecoverableError(RuntimeError): """Raised when the RD registration process runs out of options (typically with a descriptive message)""" async def _request_with_retries(self, message): # FIXME: response_nonraising gives 5.00 now, but for debugging we might # want to show something better, and for URI discovery, we should not # consider this a final error response = await self._context.request(message).response_nonraising unavailable_retries = 0 while response.code == SERVICE_UNAVAILABLE and response.opt.max_age is not None: if unavailable_retries > 6: raise self._RetryableError( "RD responded with Service Unavailable too often" ) self.log.info("RD asked to retry the operation later") await asyncio.sleep(max(response.opt.max_age, 2 ** (unavailable_retries))) response = await self._context.request(message).response_nonraising return response async def _register(self): initial_message = self._link_data.copy(code=POST, uri=self._directory_resource) base_query = {} if self._lt != 90000: base_query["lt"] = str(self._lt) if self._link_source is not None: base_query["base"] = self._link_source query = dict(base_query, **self._registration_parameters) initial_message.opt.uri_query = initial_message.opt.uri_query + tuple( "%s=%s" % (k, v) for (k, v) in query.items() ) response = await self._request_with_retries(initial_message) if not response.code.is_successful(): raise self._RetryableError( "RD responded with odd error: %s / %r" % (response.code, response.payload) ) if not response.opt.location_path: raise self._RetryableError("RD responded without a location") # FIXME this should probably be available from the API, and consider location_query etc self._registration_resource = urljoin( response.get_request_uri(), "/" + "/".join(response.opt.location_path) ) async def _renew_registration(self): update_message = Message(code=POST, uri=self._registration_resource) response = await self._request_with_retries(update_message) if response.code == NOT_FOUND: raise self._RetryableError("RD forgot about the registration") if not response.code.is_successful(): raise self._RetryableError( "RD responded with odd error: %s / %r" % (response.code, response.payload) ) async def _run(self): obtain = asyncio.create_task(self._obtain_link_data()) try: await self._run_inner(obtain) except asyncio.CancelledError: self._set_state("cancelled") pass except self._UnrecoverableError as e: self._set_state("failed") self.log.error("Aborting RD discovery: %s", e.args[0]) except Exception as e: self._set_state("failed") self.log.error( "An error occurred during RD registration, not pursuing registration any further:", exc_info=e, ) finally: obtain.cancel() async def _run_inner(self, obtain): errors = 0 errors_max = 5 failed_initialization = set() try_reuse_discovery = False while True: if try_reuse_discovery: try_reuse_discovery = False else: self._set_state("discovering") for i in range(4): if i: self.log.info("Waiting to retry RD discovery") await asyncio.sleep(2 * 3 ** (i - 1)) # arbitrary fall-off await self._fill_directory_resource(blacklist=failed_initialization) break 
                else:
                    self.log.error("Giving up RD discovery")
                    break

            await obtain

            self._set_state("registering")
            try:
                await self._register()
            except self._RetryableError as e:
                errors += 1
                if errors < errors_max:
                    self.log.warning(
                        "Initial registration failed (%s), blacklisting RD URI and retrying discovery",
                        e,
                    )
                    failed_initialization.add(self._directory_resource)
                    self._directory_resource = None
                    continue
                else:
                    self.log.error(
                        "Giving up after too many failed initial registrations"
                    )
                    break

            # registration is active, keep it that way.

            # things look good enough to forget about past bad experiences.
            # could move this to the end of the following loop if worries come
            # up of having picked a bad RD that supports registration but not
            # registration updates
            errors = 0
            failed_initialization = set()
            try_reuse_discovery = True

            while True:
                self._set_state("registered")

                # renew 60 seconds before timeout, unless that's before the 75% mark (then wait for that)
                await asyncio.sleep(
                    self._lt - 60 if self._lt > 240 else self._lt * 3 // 4
                )

                self._set_state("renewing")

                try:
                    await self._renew_registration()
                except self._RetryableError as e:
                    self.log.warning(
                        "Registration update failed (%s), retrying with new registration",
                        e,
                    )
                    break

    async def shutdown(self):
        """Delete the registration.

        This will not raise any resulting error messages but just log them,
        same as any errors occurring during the registration will only be
        logged."""
        self._task.cancel()

        if self._registration_resource is None:
            return

        try:
            await self._context.request(
                Message(code=DELETE, uri=self._registration_resource)
            ).response_raising
        except Exception as e:
            self.log.error("Error deregistering from the RD", exc_info=e)
aiocoap-0.4.12/aiocoap/tokenmanager.py000066400000000000000000000262641472205122200176560ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import functools
import random

from . import error
from . import interfaces

# To be used sparingly here: This deals with request / responses on the token
# layer. But the layer below won't even know that messages are responses, so it
# can't make the informed decisions we make here.
from .numbers.types import NON

from .pipe import Pipe


class TokenManager(interfaces.RequestInterface, interfaces.TokenManager):
    def __init__(self, context):
        self.context = context

        self._token = random.randint(0, 65535)
        self.outgoing_requests = {}
        """Unfinished outgoing requests (identified by token and remote)"""
        self.incoming_requests = {}
        """Unfinished incoming requests.

        ``(token, remote): (Pipe, stopper)`` where stopper is a function that
        unregisters the Pipe event handler and thus indicates to the server
        the discontinued interest"""

        self.log = self.context.log
        self.loop = self.context.loop

        # self.token_interface = … -- needs to be set post-construction, because the token_interface in its constructor already needs to get its manager

    def __repr__(self):
        return "<%s for %s>" % (
            type(self).__name__,
            getattr(self, "token_interface", "(unbound)"),
        )

    @property
    def client_credentials(self):
        return self.context.client_credentials

    async def shutdown(self):
        while self.incoming_requests:
            key = next(iter(self.incoming_requests.keys()))
            (_, stop) = self.incoming_requests.pop(key)
            # This cancels them, not sending anything.
            #
            # FIXME should we? (RST? 5.00 Server Shutdown? An RST would only
            # work if we pushed this further down the shutdown chain; a 5.00 we
            # could raise in the task.)
            stop()
        self.incoming_requests = None

        while self.outgoing_requests:
            key = next(iter(self.outgoing_requests.keys()))
            request = self.outgoing_requests.pop(key)
            request.add_exception(error.LibraryShutdown())
        self.outgoing_requests = None

        await self.token_interface.shutdown()

    def next_token(self):
        """Reserve and return a new Token for request."""
        # TODO: add proper Token handling
        self._token = (self._token + 1) % (2**64)
        return self._token.to_bytes(8, "big").lstrip(b"\0")

    #
    # implement the tokenmanager interface
    #

    def dispatch_error(self, exception, remote):
        if self.outgoing_requests is None:
            # Not entirely sure where it is so far; better just raise a warning
            # than an exception later, nothing terminally bad should come of
            # this error.
            self.log.warning(
                "Internal shutdown sequence mismatch: error dispatched through tokenmanager after shutdown"
            )
            return

        # NetworkError is what we promise users to raise from request etc; if
        # it's already a NetworkError and possibly more descriptive (eg. a
        # TimeoutError), we'll just let it through (and thus allow
        # differentiated handling eg. in application-level retries).
        if not isinstance(exception, error.NetworkError):
            cause = exception
            exception = error.NetworkError(str(exception))
            exception.__cause__ = cause

        # The stopping calls would pop items from the pending requests --
        # iterating once, extracting the stoppers and then calling them en
        # batch
        stoppers = []
        for key, request in self.outgoing_requests.items():
            (token, request_remote) = key
            if request_remote == remote:
                stoppers.append(
                    lambda request=request, exception=exception: request.add_exception(
                        exception
                    )
                )

        for (_, _r), (_, stopper) in self.incoming_requests.items():
            if remote == _r:
                stoppers.append(stopper)
        for stopper in stoppers:
            stopper()

    def process_request(self, request):
        key = (request.token, request.remote)

        if key in self.incoming_requests:
            # This is either an "I consider that token invalid, probably forgot
            # about it, but here's a new request" or renewed interest in an
            # observation, which gets modelled as a new request at this level
            self.log.debug("Incoming request overrides existing request")
            # Popping: FIXME Decide if one of them is sufficient (see `del self.incoming_requests[key]` below)
            (pipe, stop) = self.incoming_requests.pop(key)
            stop()

        pipe = Pipe(request, self.log)

        # FIXME: what can we pass down to the token_interface? certainly not
        # the request, but maybe the request with a response filter applied?
        def on_event(ev):
            if ev.message is not None:
                m = ev.message
                # FIXME: should this code warn if token or remote are set?
                m.token = request.token
                m.remote = request.remote.as_response_address()

                if m.mtype is None and request.mtype is NON:
                    # Default to sending NON to NON requests; rely on the
                    # default (CON if stand-alone else ACK) otherwise.
                    m.mtype = NON
                self.token_interface.send_message(
                    m,
                    # No more interest from *that* remote; as it's the only
                    # thing keeping the PR alive, it'll go its course of
                    # vanishing for lack of interest (as it would if
                    # stop were called from its other possible caller,
                    # the start of process_request when a new request comes
                    # in on the same token)
                    stop,
                )
            else:
                # It'd be tempting to raise here, but typically being called
                # from a task, it wouldn't propagate any further either, and at
                # least here we have a logger.
                self.log.error(
                    "Requests shouldn't receive errors at the level of a TokenManager any more, but this did: %s",
                    ev,
                )
            if not ev.is_last:
                return True

        def on_end():
            if key in self.incoming_requests:
                # It may not be, especially if it was popped in `(pipe, stop) = self.incoming_requests.pop(key)` above
                # FIXME Decide if one of them is sufficient
                del self.incoming_requests[key]
            # no further cleanup to do here: any piggybackable ack was already flushed
            # out by the first response, and if there was not even a
            # NoResponse, something went wrong above (and we can't tell easily
            # here).

        stop = pipe.on_event(on_event)
        pipe.on_interest_end(on_end)

        self.incoming_requests[key] = (pipe, stop)

        self.context.render_to_pipe(pipe)

    def process_response(self, response):
        key = (response.token, response.remote)
        if key not in self.outgoing_requests:
            # maybe it was a multicast...
            key = (response.token, None)

        try:
            request = self.outgoing_requests[key]
        except KeyError:
            self.log.info("Response %r could not be matched to any request", response)
            return False
        else:
            self.log.debug("Response %r matched to request %r", response, request)

        # FIXME: there's a multicast aspect to that as well
        #
        # Is it necessary to look into .opt.observe here, wouldn't that better
        # be done by the higher-level code that knows about CoAP options?
        # Maybe, but at some point in TokenManager we *have* to look into the
        # options to see whether to expect a short- or long-running token.
        # Still, it would be an option not to send an is_last here and *always*
        # have the higher-level code indicate loss of interest in that exchange
        # when it detects that no more observations will follow.
        final = not (
            request.request.opt.observe == 0 and response.opt.observe is not None
        )

        if final:
            self.outgoing_requests.pop(key)

        request.add_response(response, is_last=final)
        return True

    #
    # implement RequestInterface
    #

    async def fill_or_recognize_remote(self, message):
        return await self.token_interface.fill_or_recognize_remote(message)

    def request(self, request):
        msg = request.request

        assert msg.code.is_request(), "Message code is not valid for request"

        # This might easily change, but right now, relying on the Context to
        # fill_remote early makes steps easier here.
        assert msg.remote is not None, "Remote not pre-populated"

        # FIXME: pick a suitably short one where available, and a longer one
        # for observations if many short ones are already in-flight
        msg.token = self.next_token()

        self.log.debug(
            "Sending request - Token: %s, Remote: %s", msg.token.hex(), msg.remote
        )

        # A request sent over the multicast interface will only return a single
        # response and otherwise behave quite like an anycast request (which is
        # probably intended).
        if msg.remote.is_multicast:
            self.log.warning("Sending request to multicast via unicast request method")
            key = (msg.token, None)
        else:
            key = (msg.token, msg.remote)

        self.outgoing_requests[key] = request
        request.on_interest_end(
            functools.partial(self.outgoing_requests.pop, key, None)
        )

        try:
            send_canceller = self.token_interface.send_message(
                msg, lambda: request.add_exception(error.MessageError)
            )
        except Exception as e:
            request.add_exception(e)
            return

        if send_canceller is not None:
            # This needs to be called both when the requester cancels the
            # request, and when a response to the CON request comes in via a
            # different CON when the original ACK was lost, so the retransmits
            # can stop.
            #
            # FIXME: This might need slightly sharper conditions: A fresh CON
            # should be sufficient to stop retransmits of a CON in a first
            # request, but when refreshing an observation, only an ACK tells us
            # that the updated observation got through. Also, multicast needs
            # to be an exception, but that generally needs handling here.
            #
            # It may be that it'd be wise to reduce the use of send_canceller
            # to situations when the request is actually cancelled, and pass
            # some information to the token_interface about whether it should
            # keep an eye out for responses on that token and cancel
            # transmission accordingly.
            request.on_event(lambda ev: (send_canceller(), False)[1], is_interest=False)
aiocoap-0.4.12/aiocoap/transports/000077500000000000000000000000001472205122200170405ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/transports/__init__.py000066400000000000000000000014741472205122200211570ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Container module for transports

Transports are expected to be the modular backends of aiocoap, and implement
the specifics of eg. TCP, WebSockets or SMS, possibly divided by backend
implementations as well.

Transports are not part of the API, so the class descriptions in the modules
are purely informational.

Multiple transports can be used in parallel in a single :class:`.Context`, and
are loaded in a particular sequence. Some transports will grab all addresses
of a given protocol, so they might not be practical to combine. Which
transports are started in a given Context follows the
:func:`.defaults.get_default_clienttransports` function.

The available transports are:

.. the files in this directory.
"""
aiocoap-0.4.12/aiocoap/transports/generic_udp.py000066400000000000000000000052561472205122200217060ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

from aiocoap import interfaces, error, util
from aiocoap import COAP_PORT, Message


class GenericMessageInterface(interfaces.MessageInterface):
    """GenericMessageInterface is not a standalone implementation of a
    message interface. It does implement everything between the
    MessageInterface and a not yet fully specified interface of "bound
    UDP sockets".

    It delegates sending through the address objects (which persist through
    some time, given this is some kind of bound-socket scenario).
    The user must:

    * set up a ._pool after construction with a shutdown and a connect method
    * provide their addresses with a send(bytes) method
    * pass incoming data to the _received_datagram and _received_exception
      methods
    """

    def __init__(self, mman: interfaces.MessageManager, log, loop):
        self._mman = mman
        self._log = log
        self._loop = loop

    # Callbacks to be hooked up by the user of the class; feed data on to the
    # message manager

    def _received_datagram(self, address, datagram):
        try:
            message = Message.decode(datagram, remote=address)
        except error.UnparsableMessage:
            self._log.warning("Ignoring unparsable message from %s", address)
            return

        self._mman.dispatch_message(message)

    def _received_exception(self, address, exception):
        self._mman.dispatch_error(exception, address)

    # Implementations of MessageInterface

    def send(self, message):
        if self._mman is None:
            self._log.info(
                "Not sending message %r: transport is already shutting down.", message
            )
        else:
            message.remote.send(message.encode())

    async def shutdown(self):
        await self._pool.shutdown()
        self._mman = None

    async def determine_remote(self, request):
        if request.requested_scheme not in ("coap", None):
            return None

        if request.unresolved_remote is not None:
            host, port = util.hostportsplit(request.unresolved_remote)
            port = port or COAP_PORT
        elif request.opt.uri_host:
            host = request.opt.uri_host
            port = request.opt.uri_port or COAP_PORT
        else:
            raise ValueError(
                "No location found to send message to (neither in .opt.uri_host nor in .remote)"
            )

        result = await self._pool.connect((host, port))
        if request.remote.maximum_block_size_exp < result.maximum_block_size_exp:
            result.maximum_block_size_exp = request.remote.maximum_block_size_exp
        return result
aiocoap-0.4.12/aiocoap/transports/oscore.py000066400000000000000000000310401472205122200207020ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

# WORK IN PROGRESS: TransportEndpoint has been renamed to MessageInterface
# here, but actually we'll be providing a RequestInterface -- that's one of the
# reasons why RequestInterface, TokenInterface and MessageInterface were split
# in the first place.

"""This module implements a RequestProvider for OSCORE. As such, it takes
routing ownership of requests that it has a security context available for,
and sends off the protected messages via another transport.

This transport is a bit different from the others because it doesn't have its
dedicated URI scheme, but purely relies on preconfigured contexts.

So far, this transport only deals with outgoing requests, and does not help in
building an OSCORE server. (Some code that could be used here in future
resides in `contrib/oscore-plugtest/plugtest-server` as the `ProtectedSite`
class.)

In outgoing requests, this transport automatically handles Echo options that
appear to come from RFC8613 Appendix B.1.2 style servers. They indicate that
the server could not process the request initially, but could do so if the
client retransmits it with an appropriate Echo value.

Unlike other transports that could (at least in theory) be present multiple
times in :attr:`aiocoap.protocol.Context.request_interfaces` (eg. because
there are several bound sockets), this is only useful once in there, as it has
no own state, picks the OSCORE security context from the CoAP
:attr:`aiocoap.protocol.Context.client_credentials` when populating the remote
field, and handles any populated request based on its
remote.security_context property alone.
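
For example, a context can be preconfigured by loading a credentials map into
the client context (a sketch only; ``my-context/`` stands in for a directory
holding a filesystem based OSCORE context):

>>> c = await aiocoap.Context.create_client_context()  # doctest: +SKIP
>>> c.client_credentials.load_from_dict(
...     {'coap://localhost/*': {'oscore': {'basedir': 'my-context/'}}})  # doctest: +SKIP

With such an entry in place, requests matching the key are routed through this
transport without further changes to the application code.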
""" from collections import namedtuple from functools import wraps from .. import interfaces, credentials, edhoc, oscore from ..numbers import UNAUTHORIZED, MAX_REGULAR_BLOCK_SIZE_EXP def _requires_ua(f): @wraps(f) def wrapper(self): if self.underlying_address is None: raise ValueError( "No underlying address populated that could be used to derive a hostinfo" ) return f(self) return wrapper class OSCOREAddress( namedtuple("_OSCOREAddress", ["security_context", "underlying_address"]), interfaces.EndpointAddress, ): """Remote address type for :class:`TransportOSCORE`.""" def __repr__(self): return "<%s in context %r to %r>" % ( type(self).__name__, self.security_context, self.underlying_address, ) @property @_requires_ua def hostinfo(self): return self.underlying_address.hostinfo @property @_requires_ua def hostinfo_local(self): return self.underlying_address.hostinfo_local @property @_requires_ua def uri_base(self): return self.underlying_address.uri_base @property @_requires_ua def uri_base_local(self): return self.underlying_address.uri_base_local @property @_requires_ua def scheme(self): return self.underlying_address.scheme @property def authenticated_claims(self): return self.security_context.authenticated_claims is_multicast = False is_multicast_locally = False maximum_payload_size = 1024 maximum_block_size_exp = MAX_REGULAR_BLOCK_SIZE_EXP @property def blockwise_key(self): if hasattr(self.security_context, "groupcontext"): # it's an aspect, and all aspects work compatibly as long as data # comes from the same recipient ID -- taking the group recipient # key for that one which is stable across switches between pairwise # and group mode detail = self.security_context.groupcontext.recipient_keys[ self.security_context.recipient_id ] else: detail = self.security_context.recipient_key return (self.underlying_address.blockwise_key, detail) class TransportOSCORE(interfaces.RequestProvider): def __init__(self, context, forward_context): self._context = context self._wire = forward_context if self._context.loop is not self._wire.loop: # TransportOSCORE is not designed to bridge loops -- would probably # be possible, but incur confusion that is most likely well avoidable raise ValueError("Wire and context need to share an asyncio loop") self.loop = self._context.loop self.log = self._context.log # Keep current requests. This is not needed for shutdown purposes (see # .shutdown), but because Python 3.6.4 (but not 3.6.5, and not at least # some 3.5) would otherwise cancel OSCORE tasks mid-observation. This # manifested itself as . self._tasks = set() # # implement RequestInterface # async def fill_or_recognize_remote(self, message): if isinstance(message.remote, OSCOREAddress): return True if message.opt.oscore is not None: # double oscore is not specified; using this fact to make `._wire # is ._context` an option return False if message.opt.uri_path == (".well-known", "edhoc"): # FIXME better criteria based on next-hop? 
            return False

        try:
            secctx = self._context.client_credentials.credentials_from_request(message)
        except credentials.CredentialsMissingError:
            return False

        # FIXME: it'd be better to have a "get me credentials *of this type* if they exist"
        if isinstance(secctx, oscore.CanProtect) or isinstance(
            secctx, edhoc.EdhocCredentials
        ):
            message.remote = OSCOREAddress(secctx, message.remote)
            self.log.debug(
                "Selecting OSCORE transport based on context %r for new request %r",
                secctx,
                message,
            )
            return True
        else:
            return False

    def request(self, request):
        t = self.loop.create_task(
            self._request(request),
            name="OSCORE request %r" % request,
        )
        self._tasks.add(t)

        def done(t, _tasks=self._tasks, _request=request):
            _tasks.remove(t)
            try:
                t.result()
            except Exception as e:
                _request.add_exception(e)

        t.add_done_callback(done)

    async def _request(self, request) -> None:
        """Process a request including any pre-flights or retries

        Retries by this coroutine are limited to actionable authenticated
        errors, i.e. those where it is ensured that even though the request is
        encrypted twice, it is still only processed once.

        This coroutine sets the result of request.request on completion;
        otherwise it raises and relies on its done callback to propagate the
        error.
        """
        msg = request.request

        secctx = msg.remote.security_context

        if isinstance(secctx, edhoc.EdhocCredentials):
            if secctx._established_context is None:
                secctx._established_context = (
                    msg.remote.security_context.establish_context(
                        wire=self._wire,
                        underlying_address=msg.remote.underlying_address,
                        underlying_proxy_scheme=msg.opt.proxy_scheme,
                        underlying_uri_host=msg.opt.uri_host,
                        logger=self.log.getChild("edhoc"),
                    )
                )

            # FIXME: Who should drive retries here? We probably don't retry if
            # it fails immediately, but what happens with the request that
            # finds this broken, will it recurse?
            secctx = await secctx._established_context

        def protect(echo):
            if echo is None:
                msg_to_protect = msg
            else:
                if msg.opt.echo:
                    self.log.warning(
                        "Overwriting the requested Echo value with the one to answer a 4.01 Unauthorized"
                    )
                msg_to_protect = msg.copy(echo=echo)

            protected, original_request_seqno = secctx.protect(msg_to_protect)
            protected.remote = msg.remote.underlying_address

            wire_request = self._wire.request(protected)

            return (wire_request, original_request_seqno)

        wire_request, original_request_seqno = protect(None)

        # tempting as it would be, we can't access the request as a
        # Pipe here, because it is a BlockwiseRequest to handle
        # outer blockwise.
        # (Might be a good idea to model those after Pipe too,
        # though).

        def _check(more, unprotected_response):
            if more and not unprotected_response.code.is_successful():
                self.log.warning(
                    "OSCORE protected message contained observe, but unprotected code is unsuccessful. Ignoring the observation."
) return False return more try: protected_response = await wire_request.response # Offer secctx to switch over for reception based on the header # data (similar to how the server address switches over when # receiving a response to a request sent over multicast) unprotected = oscore.verify_start(protected_response) secctx = secctx.context_from_response(unprotected) unprotected_response, _ = secctx.unprotect( protected_response, original_request_seqno ) if ( unprotected_response.code == UNAUTHORIZED and unprotected_response.opt.echo is not None ): # Assist the server in B.1.2 Echo receive window recovery self.log.info( "Answering the server's 4.01 Unauthorized / Echo as part of OSCORE B.1.2 recovery" ) wire_request, original_request_seqno = protect( unprotected_response.opt.echo ) protected_response = await wire_request.response unprotected_response, _ = secctx.unprotect( protected_response, original_request_seqno ) unprotected_response.remote = OSCOREAddress( secctx, protected_response.remote ) self.log.debug( "Successfully unprotected %r into %r", protected_response, unprotected_response, ) # FIXME: if i could tap into the underlying Pipe, that'd # be a lot easier -- and also get rid of the awkward _check # code moved into its own function just to avoid duplication. more = protected_response.opt.observe is not None more = _check(more, unprotected_response) request.add_response(unprotected_response, is_last=not more) if not more: return async for protected_response in wire_request.observation: unprotected_response, _ = secctx.unprotect( protected_response, original_request_seqno ) more = protected_response.opt.observe is not None more = _check(more, unprotected_response) unprotected_response.remote = OSCOREAddress( secctx, protected_response.remote ) self.log.debug( "Successfully unprotected %r into %r", protected_response, unprotected_response, ) # FIXME: discover is_last from the underlying response request.add_response(unprotected_response, is_last=not more) if not more: return request.add_exception( NotImplementedError( "End of observation" " should have been indicated in is_last, see above lines" ) ) finally: # FIXME: no way yet to cancel observations exists yet, let alone # one that can be used in a finally clause (ie. won't raise # something else if the observation terminated server-side) pass # if wire_request.observation is not None: # wire_request.observation.cancel() async def shutdown(self): # Nothing to do here yet; the individual requests will be shut down by # their underlying transports pass aiocoap-0.4.12/aiocoap/transports/rfc8323common.py000066400000000000000000000205341472205122200217210ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Common code for the tcp and the ws modules, both of which are based on RFC8323 mechanisms, but differ in their underlying protocol implementations (asyncio stream vs. websockets module) far enough that they only share small portions of their code""" import asyncio from typing import Optional from aiocoap import Message from aiocoap import optiontypes, util from aiocoap.numbers.codes import CSM, PING, PONG, RELEASE, ABORT from aiocoap import error class CloseConnection(Exception): """Raised in RFC8323 common processing to trigger a connection shutdown on the TCP / WebSocket side. 
The TCP / WebSocket side should send the exception's argument on to the token manager, close the connection, and does not need to perform further logging.""" class RFC8323Remote: """Mixin for Remotes for all the common RFC8323 processing Implementations still need the per-transport parts, especially a _send_message and an _abort_with implementation. """ # CSM received from the peer. The receive hook should abort suitably when # receiving a non-CSM message and this is not set yet. _remote_settings: Optional[Message] # Parameter usually set statically per implementation _my_max_message_size = 1024 * 1024 def __init__(self): self._remote_settings = None is_multicast = False is_multicast_locally = False # implementing interfaces.EndpointAddress def __repr__(self): return "<%s at %#x, hostinfo %s, local %s>" % ( type(self).__name__, id(self), self.hostinfo, self.hostinfo_local, ) @property def hostinfo(self): # keeping _remote_hostinfo and _local_hostinfo around structurally rather than in # hostinfo / hostinfo_local form looks odd now, but on the long run the # remote should be able to tell the message what its default Uri-Host # value is return util.hostportjoin(*self._remote_hostinfo) @property def hostinfo_local(self): return util.hostportjoin(*self._local_hostinfo) @property def uri_base(self): if self._local_is_server: raise error.AnonymousHost( "Client side of %s can not be expressed as a URI" % self._ctx._scheme ) else: return self._ctx._scheme + "://" + self.hostinfo @property def uri_base_local(self): if self._local_is_server: return self._ctx._scheme + "://" + self.hostinfo_local else: raise error.AnonymousHost( "Client side of %s can not be expressed as a URI" % self._ctx._scheme ) @property def maximum_block_size_exp(self): if self._remote_settings is None: # This is assuming that we can do BERT, so a first Block1 would be # exponent 7 but still only 1k -- because by the time we send this, # we typically haven't seen a CSM yet, so we'd be stuck with 6 # because 7959 says we can't increase the exponent... # # FIXME: test whether we're properly using lower block sizes if # server says that szx=7 is not OK. return 7 max_message_size = (self._remote_settings or {}).get("max-message-size", 1152) has_blockwise = (self._remote_settings or {}).get("block-wise-transfer", False) if max_message_size > 1152 and has_blockwise: return 7 return 6 # FIXME: deal with smaller max-message-size @property def maximum_payload_size(self): # see maximum_payload_size of interfaces comment slack = 100 max_message_size = (self._remote_settings or {}).get("max-message-size", 1152) has_blockwise = (self._remote_settings or {}).get("block-wise-transfer", False) if max_message_size > 1152 and has_blockwise: return ((max_message_size - 128) // 1024) * 1024 + slack return 1024 + slack # FIXME: deal with smaller max-message-size @property def blockwise_key(self): return (self._remote_hostinfo, self._local_hostinfo) # Utility methods for implementing an RFC8323 transport def _send_initial_csm(self): my_csm = Message(code=CSM) # this is a tad awkward in construction because the options objects # were designed under the assumption that the option space is constant # for all message codes. 
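        # For reference: option 2 is Max-Message-Size and option 4 is
        # Block-Wise-Transfer in RFC 8323's signaling option space -- the same
        # numbers that _process_signaling below reads back from the peer's CSM.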
        block_length = optiontypes.UintOption(2, self._my_max_message_size)
        my_csm.opt.add_option(block_length)
        supports_block = optiontypes.UintOption(4, 0)
        my_csm.opt.add_option(supports_block)

        self._send_message(my_csm)

    def _process_signaling(self, msg):
        if msg.code == CSM:
            if self._remote_settings is None:
                self._remote_settings = {}
            for opt in msg.opt.option_list():
                # FIXME: this relies on the relevant option numbers to be
                # opaque; message parsing should already use the appropriate
                # option types, or re-think the way options are parsed
                if opt.number == 2:
                    self._remote_settings["max-message-size"] = int.from_bytes(
                        opt.value, "big"
                    )
                elif opt.number == 4:
                    self._remote_settings["block-wise-transfer"] = True
                elif opt.number.is_critical():
                    self.abort("Option not supported", bad_csm_option=opt.number)
                else:
                    pass  # ignoring elective CSM options
        elif msg.code in (PING, PONG, RELEASE, ABORT):
            # not expecting data in any of them as long as Custody is not implemented
            for opt in msg.opt.option_list():
                if opt.number.is_critical():
                    self.abort("Unknown critical option")
                else:
                    pass

            if msg.code == PING:
                pong = Message(code=PONG, token=msg.token)
                self._send_message(pong)
            elif msg.code == PONG:
                pass
            elif msg.code == RELEASE:
                # The behavior SHOULD be enhanced to answer outstanding
                # requests, but it is unclear to which extent this side may
                # still use the connection.
                self.log.info(
                    "Received Release, closing on this end (options: %s)", msg.opt
                )
                raise CloseConnection(
                    error.RemoteServerShutdown("Peer released connection")
                )
            elif msg.code == ABORT:
                self.log.warning("Received Abort (options: %s)", msg.opt)
                raise CloseConnection(
                    error.RemoteServerShutdown("Peer aborted connection")
                )
        else:
            self.abort("Unknown signalling code")

    def abort(self, errormessage=None, bad_csm_option=None):
        self.log.warning("Aborting connection: %s", errormessage)
        abort_msg = Message(code=ABORT)
        if errormessage is not None:
            abort_msg.payload = errormessage.encode("utf8")
        if bad_csm_option is not None:
            bad_csm_option_option = optiontypes.UintOption(2, bad_csm_option)
            abort_msg.opt.add_option(bad_csm_option_option)
        self._abort_with(abort_msg)

    async def release(self):
        """Send Release message, (not implemented:) wait for connection to be
        actually closed by the peer.

        Subclasses should extend this to await closing of the connection,
        especially if they'd get into lock-up states otherwise (as would
        WebSockets).
        """
        self.log.info("Releasing connection %s", self)
        release_msg = Message(code=RELEASE)
        self._send_message(release_msg)

        try:
            # FIXME: we could wait for the peer to close the connection, but a)
            # that'll need some work on the interface between this module and
            # ws/tcp, and b) we have no peers to test this with that would
            # produce any sensible data (as aiocoap on release just closes).
            pass
        except asyncio.CancelledError:
            self.log.warning(
                "Connection %s was not closed by peer in time after release", self
            )
aiocoap-0.4.12/aiocoap/transports/simple6.py000066400000000000000000000266611472205122200210040ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This module implements a MessageInterface for UDP based on the asyncio
DatagramProtocol.
This is a simple version that works only for clients (by creating a dedicated unbound but connected socket for each communication partner) and probably not with multicast (it is assumed to be unsafe for multicast), which can be expected to work even on platforms where the :mod:`.udp6` module can not be made to work (Android, OSX, Windows for missing ``recvmsg`` and socket options, or any event loops that don't have an add_reader method). Note that the name of the module is a misnomer (and the module is likely to be renamed): Nothing in it is IPv6 specific; the socket is created using whichever address family the OS chooses based on the given host name. One small but noteworthy detail about this transport is that it does not distinguish between IP literals and host names. As a result, requests and responses from remotes will appear to arrive from a remote whose netloc is the requested name, not an IP literal. This transport is experimental, likely to change, and not fully tested yet (because the test suite is not yet ready to matrix-test the same tests with different transport implementations, and because it still fails in proxy blockwise tests). For one particular use case, this may be usable for servers in a sense: If (and only if) all incoming requests are only ever sent from clients that were previously addressed as servers by the running instance. (This is generally undesirable as it greatly limits the usefulness of the server, but is used in LwM2M setups). As such a setup makes demands on the peer that are not justified by the CoAP specification (in particular, that it send requests from a particular port), this should still only be used for cases where the udp6 transport is unavailable due to platform limitations. """ import asyncio from collections import OrderedDict import socket from typing import Tuple from aiocoap import error from aiocoap import interfaces from ..numbers import COAP_PORT, constants from ..util import hostportjoin from .generic_udp import GenericMessageInterface class _Connection(asyncio.DatagramProtocol, interfaces.EndpointAddress): def __init__( self, ready_callback, message_interface: "GenericMessageInterface", stored_sockaddr, ): self._ready_callback = ready_callback self._message_interface = message_interface # This gets stored in the _Connection because not all implementations # of datagram transports will expose the get_extra_info('socket') # (right now, all I knew do), or their backend might not be a connected # socket (like in uvloop), so the information can't be just obtained # from the transport, but is needed to implement .hostinfo # # If _Connections become used in other contexts (eg. tinydtls starts # using them), it might be a good idea to move all this into a subclass # and split it from the pure networking stuff. self.hostinfo = hostportjoin( stored_sockaddr[0], None if stored_sockaddr[1] == COAP_PORT else stored_sockaddr[1], ) self._stage = "initializing" #: Status property purely for debugging def __repr__(self): return "<%s at %#x on transport %s, %s>" % ( type(self).__name__, id(self), getattr(self, "_transport", "(none)"), self._stage, ) # address interface is_multicast = False is_multicast_locally = False scheme = "coap" # Unlike for other remotes, this is settable per instance. 
    maximum_block_size_exp = constants.MAX_REGULAR_BLOCK_SIZE_EXP

    # statically initialized in init
    hostinfo = None
    uri_base = property(lambda self: "coap://" + self.hostinfo)

    @property
    def hostinfo_local(self):
        # This can only be done on a best-effort base here. Unlike the below
        # hostinfo (see comments there), there is no easy way around this, so
        # if there are still implementations out that don't do the extras,
        # that's it and the calling site should reconsider whether they need
        # something that can not be determined. (Some more effort could go into
        # falling back to get_extra_info('socket').getsockname(), but that
        # should really be fixed in the transport provider).
        if not hasattr(self, "_transport"):
            raise RuntimeError(
                "Simple6 does not have defined local host info in current stage %s"
                % self._stage
            )
        sockname = self._transport.get_extra_info("sockname")
        if sockname is None:
            raise RuntimeError(
                "Simple6 can not determine local address from the underlying UDP implementation"
            )
        return hostportjoin(*sockname[:2])

    uri_base_local = property(lambda self: "coap://" + self.hostinfo_local)

    @property
    def blockwise_key(self):
        # Not pulling in hostinfo_local as that's unreliable anyway -- if we
        # change identity and the (UDP server) follows its requests across our
        # changing port, that's probably fine.
        return self.hostinfo

    # fully disabled because some implementations of asyncio don't make the
    # information available; going the easy route and storing it for all (see
    # attribute population in __init__)
    #
    # FIXME continued: probably this is, and the above is not (or should not be)

    # @property
    # def hostinfo(self):
    #     print("ACCESSING HOSTINFO")
    #     host, port = self._transport.get_extra_info('socket').getpeername()[:2]
    #     if port == COAP_PORT:
    #         port = None
    #     # FIXME this should use some of the _plainaddress mechanisms of the udp6 addresses
    #     return hostportjoin(host, port)

    # datagram protocol interface

    def connection_made(self, transport):
        self._transport = transport
        self._ready_callback()
        self._stage = "active"
        del self._ready_callback

    def datagram_received(self, data, address):
        self._message_interface._received_datagram(self, data)

    def error_received(self, exception):
        self._message_interface._received_exception(self, exception)

    def connection_lost(self, exception):
        if exception is None:
            pass
        else:
            self._new_error_callback(self, exception)

    # whatever it is _DatagramClientSocketpoolSimple6 expects
    # ... because generic_udp expects it from _DatagramClientSocketpoolSimple6

    def send(self, data):
        self._transport.sendto(data, None)

    async def shutdown(self):
        self._stage = "shutting down"
        self._transport.abort()
        del self._message_interface
        self._stage = "destroyed"


class _DatagramClientSocketpoolSimple6:
    """This class is used to explore what a Python/asyncio abstraction around
    a hypothetical "UDP connections" mechanism could look like.

    Assume there were a socket variety that had UDP messages (ie. unreliable,
    unordered, boundary-preserving) but that can do an accept() like a TCP
    listening socket can, and can create outgoing connection-ish sockets from
    the listening port.
That interface would be usable for all UDP-based CoAP transport implementations; this particular implementation, due to limitations of POSIX sockets (and the additional limitations imposed on it like not using PKTINFO) provides the interface, but only implements the outgoing part, and will not allow setting the outgoing port or interface.""" max_sockets = 64 # FIXME (new_message_callback, new_error_callback) should probably rather # be one object with a defined interface; either that's the # MessageInterfaceSimple6 and stored accessibly (so the Protocol can know # which MessageInterface to talk to for sending), or we move the # MessageInterface out completely and have that object be the Protocol, # and the Protocol can even send new packages via the address def __init__(self, loop, mi: "GenericMessageInterface"): # using an OrderedDict to implement an LRU cache as it's suitable for that purpose according to its documentation self._sockets: OrderedDict[Tuple[str, int], _Connection] = OrderedDict() self._loop = loop self._message_interface = mi async def _maybe_purge_sockets(self): while len(self._sockets) >= self.max_sockets: # more of an if oldaddr, oldest = next(iter(self._sockets.items())) await oldest.shutdown() del self._sockets[oldaddr] async def connect(self, sockaddr): """Create a new socket with a given remote socket address Note that the sockaddr does not need to be fully resolved or complete, as it is not used for matching incoming packages; ('host.example.com', 5683) is perfectly OK (and will create a different outgoing socket that ('hostalias.example.com', 5683) even if that has the same address, for better or for worse). For where the general underlying interface is concerned, it is not yet fixed at all when this must return identical objects.""" protocol = self._sockets.get(sockaddr) if protocol is not None: self._sockets.move_to_end(sockaddr) return protocol await self._maybe_purge_sockets() ready = asyncio.get_running_loop().create_future() try: transport, protocol = await self._loop.create_datagram_endpoint( lambda: _Connection( lambda: ready.set_result(None), self._message_interface, sockaddr ), remote_addr=sockaddr, ) except socket.gaierror as e: raise error.ResolutionError( "No address information found for requests to %r" % (sockaddr,) ) from e await ready # # Enable this to easily make every connection to localhost a new one # # during testing # import random # sockaddr = sockaddr + (random.random(),) # FIXME twice: 1., those never get removed yet (should timeout or # remove themselves on error), and 2., this is racy against a shutdown right after a connect self._sockets[sockaddr] = protocol return protocol async def shutdown(self): # preventing the creation of new sockets early on, and generally # breaking cycles del self._message_interface if self._sockets: done, pending = await asyncio.wait( [ asyncio.create_task( s.shutdown(), name="Socket shutdown of %r" % s, ) for s in self._sockets.values() ] ) for item in done: await item del self._sockets class MessageInterfaceSimple6(GenericMessageInterface): @classmethod async def create_client_transport_endpoint(cls, ctx, log, loop): self = cls(ctx, log, loop) # Cyclic reference broken during shutdown self._pool = _DatagramClientSocketpoolSimple6(self._loop, self) return self async def recognize_remote(self, remote): return isinstance(remote, _Connection) and remote in self._pool._sockets aiocoap-0.4.12/aiocoap/transports/simplesocketserver.py000066400000000000000000000203471472205122200233510ustar00rootroot00000000000000# 
SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This module implements a MessageInterface for UDP based on the asyncio
DatagramProtocol.

This is a simple version that works only for servers bound to a single unicast
address. It provides a server backend in situations when :mod:`.udp6` is
unavailable and :mod:`.simple6` needs to be used for clients.

While it is in theory capable of sending requests too, it should not be used
like that, because it won't receive ICMP errors (see below).

Shortcomings
------------

* This implementation does not receive ICMP errors. This violates the CoAP
  standard and can lead to unnecessary network traffic, bad user experience
  (when used for client requests) or even network attack amplification.

* The server can not be used with the "any-address" (``::``, ``0.0.0.0``).
  If it were allowed to bind there, it would not receive any indication from
  the operating system as to which of its own addresses a request was sent,
  and could not send the response with the appropriate sender address.

  (The :mod:`udp6` transport does not suffer that shortcoming;
  simplesocketserver is typically only used when that is unavailable).

  With simplesocketserver, you need to explicitly give the IP address of your
  server in the ``bind`` argument of
  :meth:`aiocoap.protocol.Context.create_server_context`.

* This transport is experimental and likely to change.
"""

import asyncio
from collections import namedtuple

from .. import error
from ..numbers import COAP_PORT, constants
from .. import interfaces
from .generic_udp import GenericMessageInterface
from ..util import hostportjoin
from .. import defaults


class _Address(
    namedtuple("_Address", ["serversocket", "address"]), interfaces.EndpointAddress
):
    # hashability and equality follow from being a namedtuple
    def __repr__(self):
        return "<%s.%s via %s to %s>" % (
            __name__,
            type(self).__name__,
            self.serversocket,
            self.address,
        )

    def send(self, data):
        self.serversocket._transport.sendto(data, self.address)

    # EndpointAddress interface

    is_multicast = False
    is_multicast_locally = False

    # Unlike for other remotes, this is settable per instance.
    maximum_block_size_exp = constants.MAX_REGULAR_BLOCK_SIZE_EXP

    @property
    def hostinfo(self):
        # `host` already contains the interface identifier, so throwing away
        # scope and interface identifier
        host, port, *_ = self.address
        if port == COAP_PORT:
            port = None
        return hostportjoin(host, port)

    @property
    def uri_base(self):
        return self.scheme + "://" + self.hostinfo

    @property
    def hostinfo_local(self):
        return self.serversocket.hostinfo_local

    @property
    def uri_base_local(self):
        return self.scheme + "://" + self.hostinfo_local

    scheme = "coap"

    @property
    def blockwise_key(self):
        return self.address


class _DatagramServerSocketSimple(asyncio.DatagramProtocol):
    # To be overridden by tinydtls_server
    _Address = _Address

    @classmethod
    async def create(
        cls, bind, log, loop, message_interface: "GenericMessageInterface"
    ):
        if bind is None or bind[0] in ("::", "0.0.0.0", "", None):
            # If you feel tempted to remove this check, think about what
            # happens if two configured addresses can both route to a
            # requesting endpoint, how that endpoint is supposed to react to a
            # response from the other address, and if that case is not likely
            # to ever happen in your field of application, think about what you
            # tell the first user where it does happen anyway.
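            # (In other words, a concrete address such as
            # bind=("192.0.2.1", None) is required here -- the value being just
            # an illustrative documentation-range address.)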
            raise ValueError("The transport can not be bound to any-address.")

        ready = asyncio.get_running_loop().create_future()

        transport, protocol = await loop.create_datagram_endpoint(
            lambda: cls(ready.set_result, message_interface, log),
            local_addr=bind,
            reuse_port=defaults.has_reuse_port(),
        )

        # Conveniently, we only bind to a single port (because we need to know
        # the return address, not because we insist we know the local
        # hostinfo), and can thus store the local hostinfo without distinction
        protocol.hostinfo_local = hostportjoin(
            bind[0], bind[1] if bind[1] != COAP_PORT else None
        )

        self = await ready
        self._loop = loop

        return self

    def __init__(
        self, ready_callback, message_interface: "GenericMessageInterface", log
    ):
        self._ready_callback = ready_callback
        self._message_interface = message_interface
        self.log = log

    async def shutdown(self):
        self._transport.abort()

    # interface like _DatagramClientSocketpoolSimple6

    async def connect(self, sockaddr):
        # FIXME this is not regularly tested either
        self.log.warning(
            "Sending initial messages via a server socket is not recommended"
        )
        # A legitimate case is when something stores return addresses as
        # URI(part)s and not as remotes. (In similar transports this'd also be
        # the case if the address's connection is dropped from the pool, but
        # that doesn't happen here since there is no pooling as there is no
        # per-connection state).

        # getaddrinfo is not only needed to resolve any host names (which
        # would not be recognized otherwise), but also to get a complete (host,
        # port, zoneinfo, whatwasthefourth) tuple from what is passed in as a
        # (host, port) tuple.
        addresses = await self._loop.getaddrinfo(
            *sockaddr, family=self._transport.get_extra_info("socket").family
        )
        if not addresses:
            raise error.NetworkError("No addresses found for %s" % sockaddr[0])
        # FIXME could do happy eyeballs
        address = addresses[0][4]

        address = self._Address(self, address)
        return address

    # datagram protocol interface

    def connection_made(self, transport):
        self._transport = transport
        self._ready_callback(self)
        del self._ready_callback

    def datagram_received(self, data, sockaddr):
        self._message_interface._received_datagram(self._Address(self, sockaddr), data)

    def error_received(self, exception):
        # This is why this whole implementation is a bad idea (but still the best we got on some platforms)
        self.log.warning(
            "Ignoring error because it can not be mapped to any connection: %s",
            exception,
        )

    def connection_lost(self, exception):
        if exception is None:
            pass  # regular shutdown
        else:
            self.log.error("Received unexpected connection loss: %s", exception)


class MessageInterfaceSimpleServer(GenericMessageInterface):
    # for alteration by tinydtls_server
    _default_port = COAP_PORT
    _serversocket = _DatagramServerSocketSimple

    @classmethod
    async def create_server(
        cls, bind, ctx: interfaces.MessageManager, log, loop, *args, **kwargs
    ):
        self = cls(ctx, log, loop)

        bind = bind or ("::", None)
        # Interpret None as 'default port', but still allow to bind to 0 for
        # servers that want a random port (eg. when the service URLs are
        # advertised out-of-band anyway). LwM2M clients should use simple6
        # instead as outlined there.
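        # For illustration (addresses hypothetical): bind=("192.0.2.1", None)
        # resolves to this class's default port, while bind=("192.0.2.1", 0)
        # asks the OS for a random port -- modulo the per-transport port
        # offset applied below.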
        bind = (
            bind[0],
            self._default_port
            if bind[1] is None
            else bind[1] + (self._default_port - COAP_PORT),
        )

        # Cyclic reference broken during shutdown
        self._pool = await self._serversocket.create(bind, log, self._loop, self)  # type: ignore

        return self

    async def recognize_remote(self, remote):
        # FIXME: This is never tested (as is the connect method) because all
        # tests create client contexts client-side (which don't build a
        # simplesocketserver), and because even when a server context is
        # created, there's a simple6 that grabs such addresses before a request
        # is sent out
        return isinstance(remote, _Address) and remote.serversocket is self._pool
aiocoap-0.4.12/aiocoap/transports/tcp.py000066400000000000000000000366021472205122200202070ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import asyncio
import socket
from logging import Logger
from typing import Dict, Optional, Set, Tuple

from aiocoap.transports import rfc8323common
from aiocoap import interfaces, error, util
from aiocoap import COAP_PORT, Message
from aiocoap import defaults


def _extract_message_size(data: bytes):
    """Read out the full length of a CoAP message represented by data.

    Returns None if data is too short to read the (full) length.

    The number returned is the number of bytes that has to be read into data
    to start reading the next message; it consists of a constant term, the
    token length and the extended length of options-plus-payload."""

    if not data:
        return None

    length = data[0] >> 4
    tokenoffset = 2
    tkl = data[0] & 0x0F

    if length >= 13:
        if length == 13:
            extlen = 1
            offset = 13
        elif length == 14:
            extlen = 2
            offset = 269
        else:
            extlen = 4
            offset = 65805
        if len(data) < extlen + 1:
            return None
        tokenoffset = 2 + extlen
        length = int.from_bytes(data[1 : 1 + extlen], "big") + offset

    return tokenoffset, tkl, length


def _decode_message(data: bytes) -> Message:
    tokenoffset, tkl, _ = _extract_message_size(data)
    if tkl > 8:
        raise error.UnparsableMessage("Overly long token")
    code = data[tokenoffset - 1]
    token = data[tokenoffset : tokenoffset + tkl]

    msg = Message(code=code, token=token)

    msg.payload = msg.opt.decode(data[tokenoffset + tkl :])

    return msg


def _encode_length(length: int):
    if length < 13:
        return (length, b"")
    elif length < 269:
        return (13, (length - 13).to_bytes(1, "big"))
    elif length < 65805:
        return (14, (length - 269).to_bytes(2, "big"))
    else:
        return (15, (length - 65805).to_bytes(4, "big"))


def _serialize(msg: Message) -> bytes:
    data_list = [msg.opt.encode()]
    if msg.payload:
        data_list += [b"\xff", msg.payload]
    data = b"".join(data_list)

    length, extlen = _encode_length(len(data))

    tkl = len(msg.token)
    if tkl > 8:
        raise ValueError("Overly long token")

    return b"".join(
        (bytes(((length << 4) | tkl,)), extlen, bytes((msg.code,)), msg.token, data)
    )


class TcpConnection(
    asyncio.Protocol, rfc8323common.RFC8323Remote, interfaces.EndpointAddress
):
    # currently, both the protocol and the EndpointAddress are the same object.
    # if, at a later point in time, the keepaliving of TCP connections should
    # depend on whether the library user still keeps a usable address around,
    # those functions could be split.
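    # A worked example for the module-level length encoding above (added for
    # illustration): _encode_length(5) == (5, b"") fits the header nibble
    # directly, while _encode_length(300) == (14, b"\x00\x1f") because
    # 300 - 269 == 31; _extract_message_size applies the matching offsets when
    # parsing the first header byte back.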
def __init__(self, ctx, log, loop, *, is_server) -> None: super().__init__() self._ctx = ctx self.log = log self.loop = loop self._spool = b"" self._remote_settings = None self._transport: Optional[asyncio.Transport] = None self._local_is_server = is_server @property def scheme(self): return self._ctx._scheme def _send_message(self, msg: Message): self.log.debug("Sending message: %r", msg) assert ( self._transport is not None ), "Attempted to send message before connection" self._transport.write(_serialize(msg)) def _abort_with(self, abort_msg): if self._transport is not None: self._send_message(abort_msg) self._transport.close() else: # FIXME: find out how this happens; i've only seen it after nmap # runs against an aiocoap server and then shutting it down. # "poisoning" the object to make sure this can not be exploited to # bypass the server shutdown. self._ctx = None # implementing asyncio.Protocol def connection_made(self, transport): self._transport = transport ssl_object = transport.get_extra_info("ssl_object") if ssl_object is not None: server_name = getattr(ssl_object, "indicated_server_name", None) else: server_name = None # `host` already contains the interface identifier, so throwing away # scope and interface identifier self._local_hostinfo = transport.get_extra_info("sockname")[:2] self._remote_hostinfo = transport.get_extra_info("peername")[:2] def none_default_port(sockname): return ( sockname[0], None if sockname[1] == self._ctx._default_port else sockname[1], ) self._local_hostinfo = none_default_port(self._local_hostinfo) self._remote_hostinfo = none_default_port(self._remote_hostinfo) # SNI information available if server_name is not None: if self._local_is_server: self._local_hostinfo = (server_name, self._local_hostinfo[1]) else: self._remote_hostinfo = (server_name, self._remote_hostinfo[1]) self._send_initial_csm() def connection_lost(self, exc): # FIXME react meaningfully: # * send event through pool so it can propagate the error to all # requests on the same remote # * mark the address as erroneous so it won't be recognized by # fill_or_recognize_remote self._ctx._dispatch_error(self, exc) def data_received(self, data): # A rope would be more efficient here, but the expected case is that # _spool is b"" and spool gets emptied soon -- most messages will just # fit in a single TCP package and not be nagled together. # # (If this does become a bottleneck, say self._spool = SomeRope(b"") # and barely change anything else). 
self._spool += data while True: msglen = _extract_message_size(self._spool) if msglen is None: break msglen = sum(msglen) if msglen > self._my_max_message_size: self.abort("Overly large message announced") return if msglen > len(self._spool): break msg = self._spool[:msglen] try: msg = _decode_message(msg) except error.UnparsableMessage: self.abort("Failed to parse message") return msg.remote = self self.log.debug("Received message: %r", msg) self._spool = self._spool[msglen:] if msg.code.is_signalling(): try: self._process_signaling(msg) except rfc8323common.CloseConnection as e: self._ctx._dispatch_error(self, e.args[0]) self._transport.close() continue if self._remote_settings is None: self.abort("No CSM received") return self._ctx._dispatch_incoming(self, msg) def eof_received(self): # FIXME: as with connection_lost, but less noisy if announced # FIXME: return true and initiate own shutdown if that is what CoAP prescribes pass def pause_writing(self): # FIXME: do something ;-) pass def resume_writing(self): # FIXME: do something ;-) pass # RFC8323Remote.release recommends subclassing this, but there's no easy # awaitable here yet, and no important business to finish, timeout-wise. class _TCPPooling: # implementing TokenInterface def send_message(self, message, messageerror_monitor): # Ignoring messageerror_monitor: CoAP over reliable transports has no # way of indicating that a particular message was bad, it always shuts # down the complete connection if message.code.is_response(): no_response = (message.opt.no_response or 0) & ( 1 << message.code.class_ - 1 ) != 0 if no_response: return message.opt.no_response = None message.remote._send_message(message) # used by the TcpConnection instances def _dispatch_incoming(self, connection, msg): if msg.code == 0: pass if msg.code.is_response(): self._tokenmanager.process_response(msg) # ignoring the return value; unexpected responses can be the # asynchronous result of cancelled observations else: self._tokenmanager.process_request(msg) def _dispatch_error(self, connection, exc): self._evict_from_pool(connection) if self._tokenmanager is None: if exc is not None: self.log.warning("Ignoring late error during shutdown: %s", exc) else: # it's just a regular connection loss, that's to be expected during shutdown pass return self._tokenmanager.dispatch_error(exc, connection) # for diverting behavior of _TLSMixIn _scheme = "coap+tcp" _default_port = COAP_PORT class TCPServer(_TCPPooling, interfaces.TokenInterface): def __init__(self) -> None: self._pool: Set[TcpConnection] = set() self.log: Optional[Logger] = None self.server = None @classmethod async def create_server( cls, bind, tman: interfaces.TokenManager, log, loop, *, _server_context=None ): self = cls() self._tokenmanager = tman self.log = log # self.loop = loop bind = bind or ("::", None) bind = ( bind[0], bind[1] + (self._default_port - COAP_PORT) if bind[1] else self._default_port, ) def new_connection(): c = TcpConnection(self, log, loop, is_server=True) self._pool.add(c) return c try: server = await loop.create_server( new_connection, bind[0], bind[1], ssl=_server_context, reuse_port=defaults.has_reuse_port(), ) except socket.gaierror as e: raise error.ResolutionError( "No local bindable address found for %s" % bind[0] ) from e self.server = server return self def _evict_from_pool(self, connection): # May easily happen twice, once when an error comes in and once when # the connection is (subsequently) closed. 
        if connection in self._pool:
            self._pool.remove(connection)

    # implementing TokenInterface

    async def fill_or_recognize_remote(self, message):
        if (
            message.remote is not None
            and isinstance(message.remote, TcpConnection)
            and message.remote._ctx is self
        ):
            return True
        return False

    async def shutdown(self):
        self.log.debug("Shutting down server %r", self)
        self._tokenmanager = None
        self.server.close()
        # Since server has been closed, we won't be getting any *more*
        # connections, so we can process them all now:
        shutdowns = [
            asyncio.create_task(
                c.release(),
                name="Close client %s" % c,
            )
            for c in self._pool
        ]
        shutdowns.append(
            asyncio.create_task(
                self.server.wait_closed(), name="Close server %s" % self
            ),
        )
        # There is at least one member, so we can just .wait()
        await asyncio.wait(shutdowns)


class TCPClient(_TCPPooling, interfaces.TokenInterface):
    def __init__(self) -> None:
        self._pool: Dict[
            Tuple[str, int], TcpConnection
        ] = {}  #: (host, port) -> connection
        # note that connections are filed by host name, so different names for
        # the same address might end up with different connections, which is
        # probably okay for TCP, and crucial for later work with TLS.
        self.log: Optional[Logger] = None
        self.loop: Optional[asyncio.AbstractEventLoop] = None

        self.credentials = None

    async def _spawn_protocol(self, message):
        if message.unresolved_remote is None:
            host = message.opt.uri_host
            port = message.opt.uri_port or self._default_port
            if host is None:
                raise ValueError(
                    "No location found to send message to (neither in .opt.uri_host nor in .remote)"
                )
        else:
            host, port = util.hostportsplit(message.unresolved_remote)
            port = port or self._default_port

        if (host, port) in self._pool:
            return self._pool[(host, port)]

        try:
            _, protocol = await self.loop.create_connection(
                lambda: TcpConnection(self, self.log, self.loop, is_server=False),
                host,
                port,
                ssl=self._ssl_context_factory(message.unresolved_remote),
            )
        except socket.gaierror as e:
            raise error.ResolutionError(
                "No address information found for requests to %r" % host
            ) from e
        except OSError as e:
            raise error.NetworkError("Connection failed to %r" % host) from e

        self._pool[(host, port)] = protocol

        return protocol

    # for diverting behavior of TLSClient
    def _ssl_context_factory(self, hostinfo):
        return None

    def _evict_from_pool(self, connection):
        keys = []
        for k, p in self._pool.items():
            if p is connection:
                keys.append(k)
        # should really be zero or one
        for k in keys:
            self._pool.pop(k)

    @classmethod
    async def create_client_transport(
        cls, tman: interfaces.TokenManager, log, loop, credentials=None
    ):
        # this is not actually asynchronous, and even though the interface
        # between the context and the creation of interfaces is not fully
        # standardized, this stays in the other interfaces' style.
        self = cls()
        self._tokenmanager = tman
        self.log = log
        self.loop = loop
        # used by the TLS variant; FIXME not well thought through
        self.credentials = credentials
        return self

    # implementing TokenInterface

    async def fill_or_recognize_remote(self, message):
        if (
            message.remote is not None
            and isinstance(message.remote, TcpConnection)
            and message.remote._ctx is self
        ):
            return True

        if message.requested_scheme == self._scheme:
            # FIXME: This could pool outgoing connections.
            # (Checking if an incoming connection is a pool candidate is
            # probably overkill because even if a URI can be constructed from a
            # ephemeral client port, nobody but us can use it, and we can just
            # set .remote).
message.remote = await self._spawn_protocol(message) return True return False async def shutdown(self): self.log.debug("Shutting down any outgoing connections on on %r", self) self._tokenmanager = None shutdowns = [ asyncio.create_task( c.release(), name="Close client %s" % c, ) for c in self._pool.values() ] if not shutdowns: # wait is documented to require a non-empty set return await asyncio.wait(shutdowns) aiocoap-0.4.12/aiocoap/transports/tinydtls.py000066400000000000000000000350751472205122200212760ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module implements a MessageInterface that handles coaps:// using a wrapped tinydtls library. This currently only implements the client side. To have a test server, run:: $ git clone https://github.com/obgm/libcoap.git --recursive $ cd libcoap $ ./autogen.sh $ ./configure --with-tinydtls --disable-shared --disable-documentation $ make $ ./examples/coap-server -k secretPSK (Using TinyDTLS in libcoap is important; with the default OpenSSL build, I've seen DTLS1.0 responses to DTLS1.3 requests, which are hard to debug.) The test server with its built-in credentials can then be accessed using:: $ echo '{"coaps://localhost/*": {"dtls": {"psk": {"ascii": "secretPSK"}, "client-identity": {"ascii": "client_Identity"}}}}' > testserver.json $ ./aiocoap-client coaps://localhost --credentials testserver.json While it is planned to allow more programmatical construction of the credentials store, the currently recommended way of storing DTLS credentials is to load a structured data object into the client_credentials store of the context: >>> c = await aiocoap.Context.create_client_context() # doctest: +SKIP >>> c.client_credentials.load_from_dict( ... {'coaps://localhost/*': {'dtls': { ... 'psk': b'secretPSK', ... 'client-identity': b'client_Identity', ... }}}) # doctest: +SKIP where, compared to the JSON example above, byte strings can be used directly rather than expressing them as 'ascii'/'hex' (`{'hex': '30383135'}` style works as well) to work around JSON's limitation of not having raw binary strings. Bear in mind that the aiocoap CoAPS support is highly experimental; for example, while requests to this server do complete, error messages are still shown during client shutdown. """ import asyncio import weakref import functools import time import warnings from ..util import hostportjoin, hostportsplit from ..message import Message from .. import interfaces, error from ..numbers import COAPS_PORT from ..credentials import CredentialsMissingError # tinyDTLS passes address information around in its session data, but the way # it's used here that will be ignored; this is the data that is sent to / read # from the tinyDTLS functions _SENTINEL_ADDRESS = "::1" _SENTINEL_PORT = 1234 DTLS_EVENT_CONNECT = 0x01DC DTLS_EVENT_CONNECTED = 0x01DE DTLS_EVENT_RENEGOTIATE = 0x01DF LEVEL_NOALERT = 0 # seems only to be issued by tinydtls-internal events # from RFC 5246 LEVEL_WARNING = 1 LEVEL_FATAL = 2 CODE_CLOSE_NOTIFY = 0 # tinydtls can not be debugged in the Python way; if you need to get more # information out of it, use the following line: # dtls.setLogLevel(dtls.DTLS_LOG_DEBUG) # FIXME this should be exposed by the dtls wrapper DTLS_TICKS_PER_SECOND = 1000 DTLS_CLOCK_OFFSET = time.time() # Currently kept a bit private by not inheriting from NetworkError -- thus # they'll be wrapped in a NetworkError when they fly out of a request. 
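# (In practice that means a failing request surfaces an error.NetworkError
# with one of the exceptions below as its underlying cause; the exact wrapping
# is up to the request machinery.)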
class CloseNotifyReceived(Exception):
    """The DTLS connection a request was sent on was closed by the server
    while the request was being processed"""


class FatalDTLSError(Exception):
    """The DTLS connection a request was sent on raised a fatal error while
    the request was being processed"""


class DTLSClientConnection(interfaces.EndpointAddress):
    # FIXME not only does this not do error handling, it seems not to even
    # survive its 2**16th message exchange.

    is_multicast = False
    is_multicast_locally = False
    hostinfo = None  # stored at initialization time
    uri_base = property(lambda self: "coaps://" + self.hostinfo)
    # Not necessarily very usable given we don't implement responding to server
    # connection, but valid anyway
    uri_base_local = property(lambda self: "coaps://" + self.hostinfo_local)
    scheme = "coaps"

    @property
    def hostinfo_local(self):
        # See TCP's.hostinfo_local
        host, port, *_ = self._transport.get_extra_info("socket").getsockname()
        if port == COAPS_PORT:
            port = None
        return hostportjoin(host, port)

    @property
    def blockwise_key(self):
        return (self._host, self._port, self._pskId, self._psk)

    def __init__(self, host, port, pskId, psk, coaptransport):
        self._ready = False
        self._queue = []  # stores sent packages while connection is being built

        self._host = host
        self._port = port
        self._pskId = pskId
        self._psk = psk
        self.coaptransport = coaptransport
        self.hostinfo = hostportjoin(host, None if port == COAPS_PORT else port)

        self._startup = asyncio.ensure_future(self._start())

    def _remove_from_pool(self):
        """Remove self from the MessageInterfaceTinyDTLS's pool, so that it
        will not be used in new requests.

        This is idempotent (to allow quick removal and still remove it in a
        finally clause) and not thread safe."""
        poolkey = (self._host, self._port, self._pskId)
        if self.coaptransport._pool.get(poolkey) is self:
            del self.coaptransport._pool[poolkey]

    def send(self, message):
        if self._queue is not None:
            self._queue.append(message)
        else:
            # most of the time that will have returned long ago
            self._retransmission_task.cancel()

            self._dtls_socket.write(self._connection, message)

            self._retransmission_task = asyncio.create_task(
                self._run_retransmissions(),
                name="DTLS handshake retransmissions",
            )

    log = property(lambda self: self.coaptransport.log)

    def _build_accessor(self, method, deadvalue):
        """Think self._build_accessor('_write')() == self._write(), just that
        it's returning a weak wrapper that allows refcounting-based GC to
        happen when the remote falls out of use"""
        weakself = weakref.ref(self)

        def wrapper(*args, __weakself=weakself, __method=method, __deadvalue=deadvalue):
            self = __weakself()
            if self is None:
                warnings.warn(
                    "DTLS module did not shut down the DTLSSocket "
                    "perfectly; it still tried to call %s in vain" % __method
                )
                return __deadvalue
            return getattr(self, __method)(*args)

        wrapper.__name__ = "_build_accessor(%s)" % method
        return wrapper

    async def _start(self):
        from DTLSSocket import dtls

        self._dtls_socket = None

        self._connection = None

        try:
            self._transport, _ = await self.coaptransport.loop.create_datagram_endpoint(
                self.SingleConnection.factory(self),
                remote_addr=(self._host, self._port),
            )

            self._dtls_socket = dtls.DTLS(
                read=self._build_accessor("_read", 0),
                write=self._build_accessor("_write", 0),
                event=self._build_accessor("_event", 0),
                pskId=self._pskId,
                pskStore={self._pskId: self._psk},
            )
            self._connection = self._dtls_socket.connect(
                _SENTINEL_ADDRESS, _SENTINEL_PORT
            )

            self._retransmission_task = asyncio.create_task(
                self._run_retransmissions(),
                name="DTLS handshake retransmissions",
            )
            self._connecting = asyncio.get_running_loop().create_future()
            await self._connecting

            queue = self._queue
            self._queue = None

            for message in queue:
                # could be a tad more efficient by stopping the retransmissions
                # in a go, then doing just the punch line and then starting it,
                # but practically this will be a single thing most of the time
                # anyway
                self.send(message)

            return
        except Exception as e:
            self.coaptransport.ctx.dispatch_error(e, self)
        finally:
            if self._queue is None:
                # all worked, we're done here
                return

            self.shutdown()

    async def _run_retransmissions(self):
        while True:
            when = self._dtls_socket.checkRetransmit() / DTLS_TICKS_PER_SECOND
            if when == 0:
                return
            now = time.time() - DTLS_CLOCK_OFFSET
            await asyncio.sleep(when - now)

    def shutdown(self):
        self._remove_from_pool()

        self._startup.cancel()
        self._retransmission_task.cancel()

        if self._connection is not None:
            # This also does what `.close()` does -- that would happen at
            # __del__, but let's wait for that.
            self._dtls_socket.resetPeer(self._connection)

        # doing this here allows the dtls socket to send a final word, but
        # by closing this, we protect the nascent next connection from any
        # delayed ICMP errors that might still wind up in the old socket
        self._transport.close()

    def __del__(self):
        # Breaking the loops between the DTLS object and this here to allow for
        # an orderly Alert (fatal, close notify) to go out -- and also because
        # DTLSSocket throws `TypeError: 'NoneType' object is not subscriptable`
        # from its destructor while the cyclical dependency is taken down.
        self.shutdown()

    def _inject_error(self, e):
        """Put an error to all pending operations on this remote, just as if
        it were raised inside the main loop."""
        self.coaptransport.ctx.dispatch_error(e, self)
        self.shutdown()

    # dtls callbacks

    def _read(self, sender, data):
        # ignoring sender: it's only _SENTINEL_*

        try:
            message = Message.decode(data, self)
        except error.UnparsableMessage:
            self.log.warning("Ignoring unparsable message from %s", sender)
            return len(data)

        self.coaptransport.ctx.dispatch_message(message)

        return len(data)

    def _write(self, recipient, data):
        # ignoring recipient: it's only _SENTINEL_*

        try:
            t = self._transport
        except Exception:
            # tinydtls sends callbacks very very late during shutdown (ie.
            # `hasattr` and `AttributeError` are all not available any more,
            # and even if the DTLSClientConnection class had a ._transport, it
            # would already be gone), and it seems even a __del__ doesn't help
            # break things up into the proper sequence.
            return 0
        t.sendto(data)
        return len(data)

    def _event(self, level, code):
        if (level, code) == (LEVEL_NOALERT, DTLS_EVENT_CONNECT):
            return
        elif (level, code) == (LEVEL_NOALERT, DTLS_EVENT_CONNECTED):
            self._connecting.set_result(True)
        elif (level, code) == (LEVEL_FATAL, CODE_CLOSE_NOTIFY):
            self._inject_error(CloseNotifyReceived())
        elif level == LEVEL_FATAL:
            self._inject_error(FatalDTLSError(code))
        else:
            self.log.warning("Unhandled alert level %d code %d", level, code)

    # transport protocol

    class SingleConnection:
        @classmethod
        def factory(cls, parent):
            return functools.partial(cls, weakref.ref(parent))

        def __init__(self, parent):
            self.parent = parent  #: DTLSClientConnection

        def connection_made(self, transport):
            # only for shutdown
            self.transport = transport

        def connection_lost(self, exc):
            pass

        def error_received(self, exc):
            parent = self.parent()
            if parent is None:
                self.transport.close()
                return
            parent._inject_error(exc)

        def datagram_received(self, data, addr):
            parent = self.parent()
            if parent is None:
                self.transport.close()
                return
            parent._dtls_socket.handleMessage(parent._connection, data)


class MessageInterfaceTinyDTLS(interfaces.MessageInterface):
    def __init__(self, ctx: interfaces.MessageManager, log, loop):
        self._pool: weakref.WeakValueDictionary = weakref.WeakValueDictionary(
            {}
        )  # see _connection_for_address
        self.ctx = ctx
        self.log = log
        self.loop = loop

    def _connection_for_address(self, host, port, pskId, psk):
        """Return a DTLSConnection to a given address. This will always give
        the same result for the same host/port combination, at least for as
        long as that result is kept alive (eg. by messages referring to it in
        their .remote) and while the connection has not failed."""
        try:
            return self._pool[(host, port, pskId)]
        except KeyError:
            self.log.info(
                "No DTLS connection active to (%s, %s, %s), creating one",
                host,
                port,
                pskId,
            )
            connection = DTLSClientConnection(host, port, pskId, psk, self)
            self._pool[(host, port, pskId)] = connection
            return connection

    @classmethod
    async def create_client_transport_endpoint(
        cls, ctx: interfaces.MessageManager, log, loop
    ):
        return cls(ctx, log, loop)

    async def recognize_remote(self, remote):
        return (
            isinstance(remote, DTLSClientConnection) and remote in self._pool.values()
        )

    async def determine_remote(self, request):
        if request.requested_scheme != "coaps":
            return None

        if request.unresolved_remote:
            host, port = hostportsplit(request.unresolved_remote)
            port = port or COAPS_PORT
        elif request.opt.uri_host:
            host = request.opt.uri_host
            port = request.opt.uri_port or COAPS_PORT
        else:
            raise ValueError(
                "No location found to send message to (neither in .opt.uri_host nor in .remote)"
            )

        dtlsparams = self.ctx.client_credentials.credentials_from_request(request)
        try:
            pskId, psk = dtlsparams.as_dtls_psk()
        except AttributeError:
            raise CredentialsMissingError(
                "Credentials for requested URI are not compatible with DTLS-PSK"
            )

        result = self._connection_for_address(host, port, pskId, psk)
        return result

    def send(self, message):
        message.remote.send(message.encode())

    async def shutdown(self):
        remaining_connections = list(self._pool.values())
        for c in remaining_connections:
            c.shutdown()
aiocoap-0.4.12/aiocoap/transports/tinydtls_server.py000066400000000000000000000263521472205122200226600ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This module implements a MessageInterface that serves coaps:// using a
wrapped tinydtls library.
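For a quick start, an unmodified aiocoap server program can opt in to this
transport through the environment, as described at the end of this
introduction (a sketch; ``server.py`` stands in for any program that sets up
a server context with DTLS-PSK credentials)::

    $ AIOCOAP_DTLSSERVER_ENABLED=1 python3 server.py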
Bear in mind that the aiocoap CoAPS support is highly experimental and
incomplete.

Unlike other transports this is *not* enabled automatically in general, as it
is limited to servers bound to a single address for implementation reasons.
(Basically, because it is built on the simplesocketserver rather than the
udp6 server -- that can change in future, though).

Until either the implementation is changed or binding arguments are extended
(allowing different transports to bind to per-transport addresses or ports),
a DTLS server will only be enabled if the AIOCOAP_DTLSSERVER_ENABLED
environment variable is set, or tinydtls_server is listed explicitly in
AIOCOAP_SERVER_TRANSPORT.
"""

# Comparing this to the tinydtls transport, things are a bit easier as we don't
# expect to send the first DTLS payload (thus don't need the queue), and don't
# need that clean a cleanup (at least if we assume that the clients all shut
# down on their own anyway).
#
# Then again, keeping connections live for as long as someone holds their
# address (eg. by some "pool with N strong references, and the rest are weak"
# and just go away on overflow unless someone keeps the address alive) would be
# more convenient here.

import asyncio
from collections import OrderedDict
import time

from ..numbers import COAPS_PORT, constants
from .generic_udp import GenericMessageInterface
from .. import error, interfaces
from . import simplesocketserver
from .simplesocketserver import _DatagramServerSocketSimple
from .tinydtls import (
    LEVEL_NOALERT,
    LEVEL_FATAL,
    DTLS_EVENT_CONNECT,
    DTLS_EVENT_CONNECTED,
    CODE_CLOSE_NOTIFY,
    CloseNotifyReceived,
    DTLS_TICKS_PER_SECOND,
    DTLS_CLOCK_OFFSET,
    FatalDTLSError,
)

# tinyDTLS passes address information around in its session data, but the way
# it's used here that will be ignored; this is the data that is sent to / read
# from the tinyDTLS functions
_SENTINEL_ADDRESS = "::1"
_SENTINEL_PORT = 1234

# While we don't have retransmissions set up, this helps work around issues of
# dropped packets from sending in rapid succession
_SEND_SLEEP_WORKAROUND = 0


class _AddressDTLS(interfaces.EndpointAddress):
    # no slots here, thus no equality other than identity, which is good

    def __init__(self, protocol, underlying_address):
        from DTLSSocket import dtls

        self._protocol = protocol
        self._underlying_address = simplesocketserver._Address(
            protocol, underlying_address
        )

        self._dtls_socket = None

        self._psk_store = SecurityStore(protocol._server_credentials)

        self._dtls_socket = dtls.DTLS(
            # FIXME: Use accessors like tinydtls (but are they needed? maybe
            # shutdown sequence is just already better here...)
            read=self._read,
            write=self._write,
            event=self._event,
            pskId=b"The socket needs something there but we'll never use it",
            pskStore=self._psk_store,
        )
        self._dtls_session = dtls.Session(_SENTINEL_ADDRESS, _SENTINEL_PORT)

        self._retransmission_task = asyncio.create_task(
            self._run_retransmissions(),
            name="DTLS server handshake retransmissions",
        )

        self.log = protocol.log

    is_multicast = False
    is_multicast_locally = False
    hostinfo = property(lambda self: self._underlying_address.hostinfo)
    uri_base = property(lambda self: "coaps://" + self.hostinfo)
    hostinfo_local = property(lambda self: self._underlying_address.hostinfo_local)
    uri_base_local = property(lambda self: "coaps://" + self.hostinfo_local)
    scheme = "coaps"

    authenticated_claims = property(lambda self: [self._psk_store._claims])

    # Unlike for other remotes, this is settable per instance.
maximum_block_size_exp = constants.MAX_REGULAR_BLOCK_SIZE_EXP @property def blockwise_key(self): return (self._underlying_address.blockwise_key, self._psk_store._claims) # implementing GenericUdp addresses def send(self, message): self._dtls_socket.write(self._dtls_session, message) # dtls callbacks def _read(self, sender, data): # ignoring sender: it's only _SENTINEL_* self._protocol._message_interface._received_plaintext(self, data) return len(data) def _write(self, recipient, data): if ( _SEND_SLEEP_WORKAROUND and len(data) > 13 and data[0] == 22 and data[13] == 14 ): time.sleep(_SEND_SLEEP_WORKAROUND) self._underlying_address.send(data) return len(data) def _event(self, level, code): if (level, code) == (LEVEL_NOALERT, DTLS_EVENT_CONNECT): return elif (level, code) == (LEVEL_NOALERT, DTLS_EVENT_CONNECTED): # No need to react to "connected": We're not the ones sending the first message return elif (level, code) == (LEVEL_FATAL, CODE_CLOSE_NOTIFY): self._inject_error(CloseNotifyReceived()) elif level == LEVEL_FATAL: self._inject_error(FatalDTLSError(code)) else: self.log.warning("Unhandled alert level %d code %d", level, code) # own helpers copied and adjusted from tinydtls def _inject_error(self, e): # this includes "was shut down" with a CloseNotifyReceived e """Put an error to all pending operations on this remote, just as if it were raised inside the main loop.""" self._protocol._message_interface._received_exception(self, e) self._retransmission_task.cancel() self._protocol._connections.pop(self._underlying_address.address) # This is a bit more defensive than the one in tinydtls as it starts out in # waiting, and RFC6347 indicates on a brief glance that the state machine # could go from waiting to some other state later on, so we (re)trigger it # whenever something comes in async def _run_retransmissions(self): when = self._dtls_socket.checkRetransmit() / DTLS_TICKS_PER_SECOND if when == 0: return # FIXME: Find out whether the DTLS server is ever supposed to send # retransmissions in the first place (this part was missing an import # and it never showed). 
        now = time.time() - DTLS_CLOCK_OFFSET
        await asyncio.sleep(when - now)
        self._retransmission_task = asyncio.create_task(
            self._run_retransmissions(),
            name="DTLS server handshake retransmissions",
        )


class _DatagramServerSocketSimpleDTLS(_DatagramServerSocketSimple):
    _Address = _AddressDTLS  # type: ignore

    max_sockets = 64

    def __init__(self, *args, **kwargs):
        self._connections = OrderedDict()  # analogous to simple6's _sockets
        return super().__init__(*args, **kwargs)

    async def connect(self, sockaddr):
        # Even if we opened a connection, it wouldn't have the same security
        # properties as the incoming one that it's probably supposed to replace
        # would have had
        raise RuntimeError("Sending initial messages via a DTLSServer is not supported")

    # Overriding to use GoingThroughMessageDecryption adapter
    @classmethod
    async def create(cls, bind, log, loop, message_interface):
        wrapped_interface = GoingThroughMessageDecryption(message_interface)
        self = await super().create(bind, log, loop, wrapped_interface)
        # self._security_store left uninitialized to ease subclassing from
        # SimpleSocketServer; should be set before using this any further
        return self

    # Overriding as now we do need to manage the pool
    def datagram_received(self, data, sockaddr):
        if sockaddr in self._connections:
            address = self._connections[sockaddr]
            self._connections.move_to_end(sockaddr)
        else:
            address = self._Address(self, sockaddr)
            self._connections[sockaddr] = address
        self._message_interface._received_datagram(address, data)

    def _maybe_purge_sockets(self):
        while len(self._connections) >= self.max_sockets:  # more of an if
            oldaddr, oldest = next(iter(self._connections.items()))
            # FIXME custom error?
            oldest._inject_error(
                error.LibraryShutdown("Connection is being closed for lack of activity")
            )


class GoingThroughMessageDecryption:
    """Wrapper around GenericMessageInterface that puts incoming data through
    the DTLS context stored with the address"""

    def __init__(self, plaintext_interface: "GenericMessageInterface"):
        self._plaintext_interface = plaintext_interface

    def _received_datagram(self, address, data):
        # Put it into the DTLS processor; that'll forward any actually
        # contained decrypted datagrams on to _received_plaintext
        address._retransmission_task.cancel()
        address._dtls_socket.handleMessage(address._dtls_session, data)
        address._retransmission_task = asyncio.create_task(
            address._run_retransmissions(),
            name="DTLS server handshake retransmissions",
        )

    def _received_exception(self, address, exception):
        self._plaintext_interface._received_exception(address, exception)

    def _received_plaintext(self, address, data):
        self._plaintext_interface._received_datagram(address, data)


class SecurityStore:
    """Wrapper around a CredentialsMap that makes it accessible to the
    dict-like object DTLSSocket expects.

    Not only does this convert interfaces, it also adds a back channel: As
    DTLSSocket wouldn't otherwise report who authenticated, this is tracking
    access and storing the claims associated with the used key for later use.

    Therefore, SecurityStore objects are created per connection and not per
    security store.
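
    A minimal sketch of the dict-like accesses DTLSSocket performs on this
    object (the identity and key are illustrative only)::

        store = SecurityStore(server_credentials)
        if b"Client_identity" in store:      # resolved via find_dtls_psk
            psk = store[b"Client_identity"]  # also records the claims

    After the handshake, the claims associated with the key that was used are
    available in ``store._claims``.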
""" def __init__(self, server_credentials): self._server_credentials = server_credentials self._claims = None def keys(self): return self def __contains__(self, key): try: self._server_credentials.find_dtls_psk(key) return True except KeyError: return False def __getitem__(self, key): (psk, claims) = self._server_credentials.find_dtls_psk(key) if self._claims not in (None, claims): # I didn't know it could do that -- how would we know which is the # one it eventually picked? raise RuntimeError("DTLS stack tried accessing different keys") self._claims = claims return psk class MessageInterfaceTinyDTLSServer(simplesocketserver.MessageInterfaceSimpleServer): _default_port = COAPS_PORT _serversocket = _DatagramServerSocketSimpleDTLS @classmethod async def create_server( cls, bind, ctx: interfaces.MessageManager, log, loop, server_credentials ): self = await super().create_server(bind, ctx, log, loop) self._pool._server_credentials = server_credentials return self async def shutdown(self): remaining_connections = list(self._pool._connections.values()) for c in remaining_connections: c._inject_error(error.LibraryShutdown("Shutting down")) await super().shutdown() aiocoap-0.4.12/aiocoap/transports/tls.py000066400000000000000000000022161472205122200202150ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """ CoAP-over-TLS transport (early work in progress) Right now this is running on self-signed, hard-coded certificates with default SSL module options. To use this, generate keys as with:: $ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 5 -nodes and state your hostname (eg. localhost) when asked for the Common Name. """ from .tcp import TCPClient, TCPServer from aiocoap import COAPS_PORT class _TLSMixIn: _scheme = "coaps+tcp" _default_port = COAPS_PORT class TLSServer(_TLSMixIn, TCPServer): @classmethod async def create_server(cls, bind, tman, log, loop, server_context): return await super().create_server( bind, tman, log, loop, _server_context=server_context ) class TLSClient(_TLSMixIn, TCPClient): def _ssl_context_factory(self, hostinfo): c = self.credentials.ssl_client_context(self._scheme, hostinfo) if c is None: import ssl c = ssl.create_default_context() c.set_alpn_protocols(["coap"]) return c aiocoap-0.4.12/aiocoap/transports/udp6.py000066400000000000000000000640201472205122200202720ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module implements a MessageInterface for UDP based on a variation of the asyncio DatagramProtocol. This implementation strives to be correct and complete behavior while still only using a single socket; that is, to be usable for all kinds of multicast traffic, to support server and client behavior at the same time, and to work correctly even when multiple IPv6 and IPv4 (using V4MAPPED style addresses) interfaces are present, and any of the interfaces has multiple addresses. This requires using some standardized but not necessarily widely ported features: ``IPV6_RECVPKTINFO`` to determine incoming packages' destination addresses (was it multicast) and to return packages from the same address, ``IPV6_JOIN_GROUP`` for multicast membership management and ``recvmsg`` to obtain data configured with the above options. 
The need for ``AI_V4MAPPED`` and ``AI_ADDRCONFIG`` is not manifest in the code
because the latter on its own is insufficient to enable seamless
interoperability with IPv4+IPv6 servers on IPv4-only hosts; instead,
short-lived sockets are created to assess which addresses are routable. This
should correctly deal with situations in which a client has an IPv6 ULA
assigned but no route, no matter whether the server advertises global IPv6
addresses or addresses inside that ULA. It can not deal with situations in
which the host has a default IPv6 route, but that route is not actually
usable.

To the author's knowledge, there is no standardized mechanism for receiving
ICMP errors in such a setup. On Linux, ``IPV6_RECVERR`` and ``MSG_ERRQUEUE``
are used to receive ICMP errors from the socket; on other platforms, a
warning is emitted that ICMP errors are ignored. Using the :mod:`.simple6`
transport is recommended on such platforms when working as a client only.

Beyond the above error handling, no attempts are made to fall back to a
kind-of-correct or limited-functionality behavior if these options are
unavailable, for the resulting code would be hard to maintain ("``ifdef``
hell") or would cause odd bugs for users (eg. servers that stop working when
an additional IPv6 address gets assigned). If the module does not work for
you, and the options can not be added easily to your platform, consider using
the :mod:`.simple6` module instead.
"""

import asyncio
import contextvars
import errno
import os
import socket
import ipaddress
import struct
import weakref
from collections import namedtuple

from ..message import Message
from ..numbers import constants
from .. import defaults
from .. import error
from .. import interfaces
from ..numbers import COAP_PORT
from ..util.asyncio.recvmsg import (
    RecvmsgDatagramProtocol,
    create_recvmsg_datagram_endpoint,
)
from ..util.asyncio.getaddrinfo_addrconfig import (
    getaddrinfo_routechecked as getaddrinfo,
)
from ..util import hostportjoin, hostportsplit
from ..util import socknumbers

"""The `struct in6_pktinfo` from RFC3542"""
_in6_pktinfo = struct.Struct("16sI")

_ipv6_unspecified = socket.inet_pton(socket.AF_INET6, "::")
_ipv4_unspecified = socket.inet_pton(socket.AF_INET6, "::ffff:0.0.0.0")


class InterfaceOnlyPktinfo(bytes):
    """A thin wrapper over bytes that represent a pktinfo built just to select
    an outgoing interface.

    This must not be treated any different than a regular pktinfo, and is just
    tagged for better debug output. (Ie. if this is replaced everywhere with
    plain `bytes`, things must still work)."""


class UDP6EndpointAddress(interfaces.EndpointAddress):
    """Remote address type for :class:`MessageInterfaceUDP6`.

    Remote address is stored in form of a socket address; local address can be
    roundtripped by opaque pktinfo data. For purposes of equality (and thus
    hashing), the local address is *not* checked. Neither is the scopeid that
    is part of the socket address.
>>> interface = type("FakeMessageInterface", (), {}) >>> if1_name = socket.if_indextoname(1) >>> local = UDP6EndpointAddress(socket.getaddrinfo('127.0.0.1', 5683, type=socket.SOCK_DGRAM, family=socket.AF_INET6, flags=socket.AI_V4MAPPED)[0][-1], interface) >>> local.is_multicast False >>> local.hostinfo '127.0.0.1' >>> all_coap_link1 = UDP6EndpointAddress(socket.getaddrinfo('ff02:0:0:0:0:0:0:fd%1', 1234, type=socket.SOCK_DGRAM, family=socket.AF_INET6)[0][-1], interface) >>> all_coap_link1.is_multicast True >>> all_coap_link1.hostinfo == '[ff02::fd%{}]:1234'.format(if1_name) True >>> all_coap_site = UDP6EndpointAddress(socket.getaddrinfo('ff05:0:0:0:0:0:0:fd', 1234, type=socket.SOCK_DGRAM, family=socket.AF_INET6)[0][-1], interface) >>> all_coap_site.is_multicast True >>> all_coap_site.hostinfo '[ff05::fd]:1234' >>> all_coap4 = UDP6EndpointAddress(socket.getaddrinfo('224.0.1.187', 5683, type=socket.SOCK_DGRAM, family=socket.AF_INET6, flags=socket.AI_V4MAPPED)[0][-1], interface) >>> all_coap4.is_multicast True """ def __init__(self, sockaddr, interface, *, pktinfo=None): self.sockaddr = sockaddr self.pktinfo = pktinfo self._interface = weakref.ref(interface) scheme = "coap" interface = property(lambda self: self._interface()) # Unlike for other remotes, this is settable per instance. maximum_block_size_exp = constants.MAX_REGULAR_BLOCK_SIZE_EXP def __hash__(self): return hash(self.sockaddr[:-1]) def __eq__(self, other): return self.sockaddr[:-1] == other.sockaddr[:-1] def __repr__(self): return "<%s %s%s>" % ( type(self).__name__, self.hostinfo, " (locally %s)" % self._repr_pktinfo() if self.pktinfo is not None else "", ) @staticmethod def _strip_v4mapped(address): """Turn anything that's a valid input to ipaddress.IPv6Address into a user-friendly string that's either an IPv6 or an IPv4 address. This also compresses (normalizes) the IPv6 address as a convenient side effect.""" address = ipaddress.IPv6Address(address) mapped = address.ipv4_mapped if mapped is not None: return str(mapped) return str(address) def _plainaddress(self): """Return the IP adress part of the sockaddr in IPv4 notation if it is mapped, otherwise the plain v6 address including the interface identifier if set.""" if self.sockaddr[3] != 0: try: scopepart = "%" + socket.if_indextoname(self.sockaddr[3]) except Exception: # could be an OS error, could just be that there is no function of this name, as it is on Android scopepart = "%" + str(self.sockaddr[3]) else: scopepart = "" if "%" in self.sockaddr[0]: # Fix for Python 3.6 and earlier that reported the scope information # in the IP literal (3.7 consistently expresses it in the tuple slot 3) scopepart = "" return self._strip_v4mapped(self.sockaddr[0]) + scopepart def _repr_pktinfo(self): """What repr(self.pktinfo) would be if that were not a plain untyped bytestring""" addr, interface = _in6_pktinfo.unpack_from(self.pktinfo) if interface == 0: interface = "" else: try: interface = "%" + socket.if_indextoname(interface) except Exception as e: interface = "%%%d(%s)" % (interface, e) return "%s%s" % (self._strip_v4mapped(addr), interface) def _plainaddress_local(self): """Like _plainaddress, but on the address in the pktinfo. Unlike _plainaddress, this does not contain the interface identifier.""" addr, interface = _in6_pktinfo.unpack_from(self.pktinfo) return self._strip_v4mapped(addr) @property def netif(self): """Textual interface identifier of the explicitly configured remote interface, or the interface identifier reported in an incoming link-local message. 
None if not set.""" index = self.sockaddr[3] return socket.if_indextoname(index) if index else None @property def hostinfo(self): port = self.sockaddr[1] if port == COAP_PORT: port = None # plainaddress: don't assume other applications can deal with v4mapped addresses return hostportjoin(self._plainaddress(), port) @property def hostinfo_local(self): host = self._plainaddress_local() port = self.interface._local_port() if port == 0: raise ValueError("Local port read before socket has bound itself") if port == COAP_PORT: port = None return hostportjoin(host, port) @property def uri_base(self): return "coap://" + self.hostinfo @property def uri_base_local(self): return "coap://" + self.hostinfo_local @property def is_multicast(self): return ipaddress.ip_address(self._plainaddress().split("%", 1)[0]).is_multicast @property def is_multicast_locally(self): return ipaddress.ip_address(self._plainaddress_local()).is_multicast def as_response_address(self): if not self.is_multicast_locally: return self # Create a copy without pktinfo, as responses to messages received to # multicast addresses can not have their request's destination address # as source address return type(self)(self.sockaddr, self.interface) @property def blockwise_key(self): return (self.sockaddr, self.pktinfo) class SockExtendedErr( namedtuple( "_SockExtendedErr", "ee_errno ee_origin ee_type ee_code ee_pad ee_info ee_data" ) ): _struct = struct.Struct("IbbbbII") @classmethod def load(cls, data): # unpack_from: recvmsg(2) says that more data may follow return cls(*cls._struct.unpack_from(data)) class MessageInterfaceUDP6(RecvmsgDatagramProtocol, interfaces.MessageInterface): def __init__(self, ctx: interfaces.MessageManager, log, loop): self._ctx = ctx self.log = log self.loop = loop self._shutting_down = ( None #: Future created and used in the .shutdown() method. ) self.ready = asyncio.get_running_loop().create_future() #: Future that gets fullfilled by connection_made (ie. don't send before this is done; handled by ``create_..._context`` # This is set while a send is underway to determine in the # error_received call site whom we were actually sending something to. # This is a workaround for the abysmal error handling unconnected # sockets have :-/ # # FIXME: Figure out whether aiocoap can at all support a context being # used with multiple aiocoap contexts, and if not, raise an error early # rather than just-in-case doing extra stuff here. self._remote_being_sent_to = contextvars.ContextVar( "_remote_being_sent_to", default=None ) def _local_port(self): # FIXME: either raise an error if this is 0, or send a message to self # to force the OS to decide on a port. Right now, this reports wrong # results while the first message has not been sent yet. return self.transport.get_extra_info("socket").getsockname()[1] @classmethod async def _create_transport_endpoint( cls, sock, ctx: interfaces.MessageManager, log, loop, multicast=[] ): try: sock.setsockopt(socket.IPPROTO_IPV6, socknumbers.IPV6_RECVPKTINFO, 1) except NameError: raise RuntimeError( "RFC3542 PKTINFO flags are unavailable, unable to create a udp6 transport." ) if socknumbers.HAS_RECVERR: sock.setsockopt(socket.IPPROTO_IPV6, socknumbers.IPV6_RECVERR, 1) # i'm curious why this is required; didn't IPV6_V6ONLY=0 already make # it clear that i don't care about the ip version as long as everything looks the same? sock.setsockopt(socket.IPPROTO_IP, socknumbers.IP_RECVERR, 1) else: log.warning( "Transport udp6 set up on platform without RECVERR capability. 
ICMP errors will be ignored." ) for address_string, interface_string in sum( map( # Expand shortcut of "interface name means default CoAP all-nodes addresses" lambda i: [(a, i) for a in constants.MCAST_ALL] if isinstance(i, str) else [i], multicast, ), [], ): address = ipaddress.ip_address(address_string) interface = socket.if_nametoindex(interface_string) if isinstance(address, ipaddress.IPv4Address): s = struct.pack( "4s4si", address.packed, socket.inet_aton("0.0.0.0"), interface ) try: sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, s) except OSError: log.warning("Could not join IPv4 multicast group") elif isinstance(address, ipaddress.IPv6Address): s = struct.pack("16si", address.packed, interface) try: sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, s) except OSError: log.warning("Could not join IPv6 multicast group") else: raise RuntimeError("Unknown address format") transport, protocol = await create_recvmsg_datagram_endpoint( loop, lambda: cls(ctx, log=log, loop=loop), sock=sock ) await protocol.ready return protocol @classmethod async def create_client_transport_endpoint( cls, ctx: interfaces.MessageManager, log, loop ): sock = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) return await cls._create_transport_endpoint(sock, ctx, log, loop) @classmethod async def create_server_transport_endpoint( cls, ctx: interfaces.MessageManager, log, loop, bind, multicast ): bind = bind or ("::", None) # Interpret None as 'default port', but still allow to bind to 0 for # servers that want a random port (eg. when the service URLs are # advertised out-of-band anyway, or in LwM2M clients) bind = (bind[0], COAP_PORT if bind[1] is None else bind[1]) # The later bind() does most of what getaddr info usually does # (including resolving names), but is missing out subtly: It does not # populate the zone identifier of an IPv6 address, making it impossible # without a getaddrinfo (or manual mapping of the name to a number) to # bind to a specific link-local interface try: addriter = getaddrinfo( loop, log, bind[0], bind[1], ) try: bind = await addriter.__anext__() except StopAsyncIteration: raise RuntimeError( "getaddrinfo returned zero-length list rather than erring out" ) except socket.gaierror: raise error.ResolutionError( "No local bindable address found for %s" % bind[0] ) try: additional = await addriter.__anext__() except StopAsyncIteration: pass except Exception as e: log.error( "Ignoring exception raised when checking for additional addresses that match the bind address", exc_info=e, ) else: log.warning( "Multiple addresses to bind to, only selecting %r and discarding %r and any later", bind, additional, ) sock = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) if defaults.has_reuse_port(): # I doubt that there is any platform that supports RECVPKTINFO but # not REUSEPORT, but why take chances. 
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) sock.bind(bind) return await cls._create_transport_endpoint(sock, ctx, log, loop, multicast) async def shutdown(self): self._shutting_down = asyncio.get_running_loop().create_future() self.transport.close() await self._shutting_down del self._ctx def send(self, message): ancdata = [] if message.remote.pktinfo is not None: ancdata.append( (socket.IPPROTO_IPV6, socknumbers.IPV6_PKTINFO, message.remote.pktinfo) ) assert ( self._remote_being_sent_to.get(None) is None ), "udp6.MessageInterfaceUDP6.send was reentered in a single task" self._remote_being_sent_to.set(message.remote) try: self.transport.sendmsg( message.encode(), ancdata, 0, message.remote.sockaddr ) finally: self._remote_being_sent_to.set(None) async def recognize_remote(self, remote): return isinstance(remote, UDP6EndpointAddress) and remote.interface == self async def determine_remote(self, request): if request.requested_scheme not in ("coap", None): return None if request.unresolved_remote is not None: host, port = hostportsplit(request.unresolved_remote) port = port or COAP_PORT elif request.opt.uri_host: host = request.opt.uri_host if host.startswith("[") and host.endswith("]"): host = host[1:-1] port = request.opt.uri_port or COAP_PORT else: raise ValueError( "No location found to send message to (neither in .opt.uri_host nor in .remote)" ) # Take aside the zone identifier. While it can pass through getaddrinfo # in some situations (eg. 'fe80::1234%eth0' will give 'fe80::1234' # scope eth0, and similar for ff02:: addresses), in others (eg. ff05::) # it gives 'Name or service not known'. if "%" in host: host, zone = host.split("%", 1) try: zone = socket.if_nametoindex(zone) except OSError: raise error.ResolutionError("Invalid zone identifier %s" % zone) else: zone = None try: # Note that this is our special addrinfo that ensures there is a # route. ip, port, flowinfo, scopeid = await getaddrinfo( self.loop, self.log, host, port, ).__anext__() except socket.gaierror: raise error.ResolutionError( "No address information found for requests to %r" % host ) if zone is not None: # Still trying to preserve the information returned (libc can't do # it as described at # ) # in case something sane does come out of that. if scopeid != 0 and scopeid != zone: self.log.warning( "Resolved address of %s came with zone ID %d whereas explicit ID %d takes precedence", host, scopeid, zone, ) scopeid = zone # We could be done here and return UDP6EndpointAddress(the reassembled # sockaddr, self), but: # # Linux (unlike FreeBSD) takes the sockaddr's scope ID only for # link-local scopes (as per ipv6(7), and discards it otherwise. It does # need the information of the selected interface, though, in order to # pick the right outgoing interface. Thus, we provide it in the local # portion. if scopeid: # "Any" does not include "even be it IPv4" -- the underlying family # unfortunately needs to be set, or Linux will refuse to send. 
if ipaddress.IPv6Address(ip).ipv4_mapped is None: local_source = _ipv6_unspecified else: local_source = _ipv4_unspecified local = InterfaceOnlyPktinfo(_in6_pktinfo.pack(local_source, scopeid)) else: local = None sockaddr = ip, port, flowinfo, scopeid result = UDP6EndpointAddress(sockaddr, self, pktinfo=local) if request.remote.maximum_block_size_exp < result.maximum_block_size_exp: result.maximum_block_size_exp = request.remote.maximum_block_size_exp return result # # implementing the typical DatagramProtocol interfaces. # # note from the documentation: we may rely on connection_made to be called # before datagram_received -- but sending immediately after context # creation will still fail def connection_made(self, transport): """Implementation of the DatagramProtocol interface, called by the transport.""" self.ready.set_result(True) self.transport = transport def datagram_msg_received(self, data, ancdata, flags, address): """Implementation of the RecvmsgDatagramProtocol interface, called by the transport.""" pktinfo = None for cmsg_level, cmsg_type, cmsg_data in ancdata: if ( cmsg_level == socket.IPPROTO_IPV6 and cmsg_type == socknumbers.IPV6_PKTINFO ): pktinfo = cmsg_data else: self.log.info( "Received unexpected ancillary data to recvmsg: level %d, type %d, data %r", cmsg_level, cmsg_type, cmsg_data, ) if pktinfo is None: self.log.warning( "Did not receive requested pktinfo ancdata on message from %s", address ) try: message = Message.decode( data, UDP6EndpointAddress(address, self, pktinfo=pktinfo) ) except error.UnparsableMessage: self.log.warning("Ignoring unparsable message from %s", address) return try: self._ctx.dispatch_message(message) except BaseException as exc: # Catching here because util.asyncio.recvmsg inherits # _SelectorDatagramTransport's bad handling of callback errors; # this is the last time we have a log at hand. self.log.error( "Exception raised through dispatch_message: %s", exc, exc_info=exc ) raise def datagram_errqueue_received(self, data, ancdata, flags, address): assert ( flags == socknumbers.MSG_ERRQUEUE ), "Received non-error data through the errqueue" pktinfo = None errno_value = None for cmsg_level, cmsg_type, cmsg_data in ancdata: assert ( cmsg_level == socket.IPPROTO_IPV6 ), "Received non-IPv6 protocol through the errqueue" if cmsg_type == socknumbers.IPV6_RECVERR: extended_err = SockExtendedErr.load(cmsg_data) self.log.debug("Socket error recevied, details: %s", extended_err) errno_value = extended_err.ee_errno elif ( cmsg_level == socket.IPPROTO_IPV6 and cmsg_type == socknumbers.IPV6_PKTINFO ): pktinfo = cmsg_data else: self.log.info( "Received unexpected ancillary data to recvmsg errqueue: level %d, type %d, data %r", cmsg_level, cmsg_type, cmsg_data, ) remote = UDP6EndpointAddress(address, self, pktinfo=pktinfo) # not trying to decode a message from data -- that works for # "connection refused", doesn't work for "no route to host", and # anyway, when an icmp error comes back, everything pending from that # port should err out. try: text = os.strerror(errno_value) symbol = errno.errorcode.get(errno_value, None) symbol = "" if symbol is None else f"{symbol}, " self._ctx.dispatch_error( OSError(errno_value, f"{text} ({symbol}received through errqueue)"), remote, ) except BaseException as exc: # Catching here because util.asyncio.recvmsg inherits # _SelectorDatagramTransport's bad handling of callback errors; # this is the last time we have a log at hand. 
            self.log.error(
                "Exception raised through dispatch_error: %s", exc, exc_info=exc
            )
            raise

    def error_received(self, exc):
        """Implementation of the DatagramProtocol interface, called by the
        transport."""
        remote = self._remote_being_sent_to.get()
        if remote is None:
            self.log.info(
                "Error received in situation with no way to determine which sending caused the error; this should be accompanied by an error in another code path: %s",
                exc,
            )
            return
        try:
            self._ctx.dispatch_error(exc, remote)
        except BaseException as exc:
            # Catching here because util.asyncio.recvmsg inherits
            # _SelectorDatagramTransport's bad handling of callback errors;
            # this is the last time we have a log at hand.
            self.log.error(
                "Exception raised through dispatch_error: %s", exc, exc_info=exc
            )
            raise

    def connection_lost(self, exc):
        # TODO better error handling -- find out what can cause this at all
        # except for a shutdown
        if exc is not None:
            self.log.error("Connection lost: %s", exc)

        if self._shutting_down is None:
            self.log.error("Connection loss was not expected.")
        else:
            self._shutting_down.set_result(None)
aiocoap-0.4.12/aiocoap/transports/ws.py000066400000000000000000000437121472205122200200520ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""
This module implements a TokenInterface for `CoAP over WebSockets`_.

.. _`CoAP over WebSockets`: https://tools.ietf.org/html/rfc8323#section-4

As with CoAP-over-TCP, while the transport distinguishes a connection
initiator ("WebSocket (and TCP) client") and a receiver ("WebSocket (and TCP)
server"), both sides can take both roles in CoAP (ie. as a CoAP server and a
CoAP client).

As the WebSocket client can not possibly be connected to (even by the same
server -- once the connection is closed, it's gone and even a new one likely
has a different port), aiocoap does not allow expressing their addresses in
URIs (given they wouldn't serve their purpose as URLs and don't provide any
stability either). Requests to a CoAP-over-WS client can be made by assigning
the remote to an outgoing request.

Port choice
-----------

Unlike the other transports, CoAP-over-WS is specified with a privileged port
(port 80) as the default port. This is impractical for aiocoap servers for
two reasons:

* Unless explicitly configured, aiocoap is typically run as an unprivileged
  user (and has no provisions in place to receive a socket by other means
  than opening it).

* Where a CoAP-over-WS proxy is run, there is often a "proper" website
  running on the same port on a full HTTP server. That server is usually
  capable of forwarding requests, whereas the ``websockets`` module used by
  aiocoap is in no position to serve websites, nor to proxy to an underlying
  server.

The recommended setup is therefore to run a full web server at port 80, and
configure it to proxy incoming requests for WebSockets at `/.well-known/coap`
to aiocoap's server, which defaults to binding to port 8683.

The port choice of outgoing connections, or the interpretation of the
protocol's default port (ie. the port implied by ``coap+ws://hostname/``) is
of course unaffected by this.

.. warning:: Due to a shortcoming of aiocoap's way of specifying ports to
   bind to, if a port is explicitly stated to bind to, CoAP-over-WS will bind
   to that port plus 3000 (resulting in the abovementioned 8683 for 5683).

   If TLS server keys are given, the TLS server is launched on the next port
   after the HTTP server (typically 8684).
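
For illustration (a sketch, assuming an already populated ``site`` resource
tree): a server created like this serves plain CoAP on port 5683 and, due to
the port shift described above, CoAP-over-WS on port 8683::

    import aiocoap

    async def main():
        context = await aiocoap.Context.create_server_context(
            site, bind=("::", 5683)
        )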
Using on pyodide_ ----------------- When use on pyodide_, instead of using the ``websockets`` module, a simplified client-only back-end is used, which utilizes the browser's WebSocket API. .. _pyodide: https://pyodide.org/ """ from __future__ import annotations from typing import Dict, List from collections import namedtuple import asyncio import functools import http import weakref from aiocoap import Message, interfaces, ABORT, util, error from aiocoap.transports import rfc8323common from ..credentials import CredentialsMap from ..defaults import is_pyodide if is_pyodide: import aiocoap.util.pyodide_websockets as websockets else: import websockets # type: ignore def _decode_message(data: bytes) -> Message: codeoffset = 1 tokenoffset = 2 tkl = data[0] if tkl > 8: raise error.UnparsableMessage("Overly long token") code = data[codeoffset] token = data[tokenoffset : tokenoffset + tkl] msg = Message(code=code, token=token) msg.payload = msg.opt.decode(data[tokenoffset + tkl :]) return msg def _serialize(msg: Message) -> bytes: tkl = len(msg.token) if tkl > 8: raise ValueError("Overly long token") data = [ bytes( ( tkl, msg.code, ) ), msg.token, msg.opt.encode(), ] if msg.payload: data += [b"\xff", msg.payload] return b"".join(data) PoolKey = namedtuple("PoolKey", ("scheme", "hostinfo")) class WSRemote(rfc8323common.RFC8323Remote, interfaces.EndpointAddress): _connection: websockets.WebSocketCommonProtocol # Only used to ensure that remotes are associated to the right pool -- not # that there'd be any good reason to have multiple of those. _pool: weakref.ReferenceType[WSPool] scheme = None # Override property -- it's per instance here def __init__( self, pool, connection, loop, log, *, scheme, local_hostinfo=None, remote_hostinfo=None, ): super().__init__() self._pool = weakref.ref(pool) self._connection = connection self.loop = loop self.log = log self._local_is_server = isinstance( connection, websockets.WebSocketServerProtocol ) if local_hostinfo is None: self._local_hostinfo = self._connection.local_address[:2] else: self._local_hostinfo = local_hostinfo if remote_hostinfo is None: self._remote_hostinfo = self._connection.remote_address[:2] else: self._remote_hostinfo = remote_hostinfo self.scheme = scheme # Goes both for client and for server ends; on the server end, it # ensures that role reversal URIs can be used even when passed as URIs # and not as remotes (although that's of course only possible locally). 
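        # Key under which this remote is (or will be) findable in the WSPool;
        # scheme and hostinfo together identify the connection endpoint.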
self._poolkey = PoolKey(self.scheme, self.hostinfo) # Necessary for RFC8323Remote def _abort_with(self, msg, *, close_code=1002): # Like _send_message, this may take actual time -- but unlike there, # there's no need to regulate back-pressure self.loop.create_task( self._abort_with_waiting(msg, close_code=close_code), name="Abortion WebSocket sonnection with %r" % msg, ) # Unlike _send_message, this is pulled out of the the _abort_with function # as it's also used in _run_recv_loop async def _abort_with_waiting(self, msg, *, close_code): self.log.debug("Aborting with message: %r", msg) try: await self._connection.send(_serialize(msg)) except Exception as e: self.log.error("Sending to a WebSocket should not raise errors", exc_info=e) await self._connection.close(code=close_code) def _send_message(self, msg): # FIXME overhaul back-pressure model async def send(): self.log.debug("Sending message: %r", msg) try: await self._connection.send(_serialize(msg)) except Exception as e: self.log.error( "Sending to a WebSocket should not raise errors", exc_info=e ) self.loop.create_task( send(), name="WebSocket sending of %r" % msg, ) async def release(self): await super().release() try: await self._connection.wait_closed() except asyncio.CancelledError: self.log.warning( "Connection %s was not closed by peer in time after release", self ) class WSPool(interfaces.TokenInterface): _outgoing_starting: Dict[PoolKey, asyncio.Task] _pool: Dict[PoolKey, WSRemote] _servers: List[websockets.WebSocketServer] def __init__(self, tman, log, loop) -> None: self.loop = loop self._pool = {} self._outgoing_starting = {} self._servers = [] # See where it is used for documentation, remove when not needed any more self._in_shutdown = False self._tokenmanager = tman self.log = log self._client_credentials: CredentialsMap @classmethod async def create_transport( cls, tman: interfaces.TokenManager, log, loop, *, client_credentials, server_bind=None, server_context=None, ): self = cls(tman, log, loop) self._client_credentials = client_credentials if server_bind: host, port = server_bind if port is None: port = 8683 elif port != 0: # FIXME see module documentation port = port + 3000 server = await websockets.serve( functools.partial(self._new_connection, scheme="coap+ws"), host, port, subprotocols=["coap"], process_request=self._process_request, ping_interval=None, # "SHOULD NOT be used" ) self._servers.append(server) if server_context is not None: server = await websockets.serve( functools.partial(self._new_connection, scheme="coaps+ws"), host, port + 1, subprotocols=["coap"], process_request=self._process_request, ping_interval=None, # "SHOULD NOT be used" ssl=server_context, ) self._servers.append(server) return self # Helpers for WebScoket server async def _new_connection(self, websocket, path=None, *, scheme): # ignoring path: Already checked in _process_request # # (path is present up to 10.0 and absent in 10.1; keeping it around to # stay compatible with different versions). 
hostheader = websocket.request_headers["Host"] if hostheader.count(":") > 1 and "[" not in hostheader: # Workaround for websockets version before # https://github.com/aaugustin/websockets/issues/802 # # To be removed once a websockets version with this fix can be # depended on hostheader = ( "[" + hostheader[: hostheader.rfind(":")] + "]" + hostheader[hostheader.rfind(":") :] ) local_hostinfo = util.hostportsplit(hostheader) remote = WSRemote( self, websocket, self.loop, self.log, scheme=scheme, local_hostinfo=local_hostinfo, ) self._pool[remote._poolkey] = remote await self._run_recv_loop(remote) @staticmethod async def _process_request(path, request_headers): if path != "/.well-known/coap": return (http.HTTPStatus.NOT_FOUND, [], b"") # Continue with WebSockets return None # Helpers for WebScoket client def _connect(self, key: PoolKey): self._outgoing_starting[key] = self.loop.create_task( self._connect_task(key), name="WebSocket connection opening to %r" % (key,), ) async def _connect_task(self, key: PoolKey): try: ssl_context = self._client_credentials.ssl_client_context( key.scheme, key.hostinfo ) hostinfo_split = util.hostportsplit(key.hostinfo) ws_scheme = {"coap+ws": "ws", "coaps+ws": "wss"}[key.scheme] if ws_scheme == "ws" and ssl_context is not None: raise ValueError( "An SSL context was provided for a remote accessed via a plaintext websockets." ) if ws_scheme == "wss": if is_pyodide: if ssl_context is not None: raise ValueError( "The pyodide websocket implementation can't be configured with a non-default context -- connections created in a browser will always use the browser's CA set." ) else: if ssl_context is None: # Like with TLSClient, we need to pass in a default # context and can't rely on the websocket library to # use a default one when wss is requested (it doesn't) import ssl ssl_context = ssl.create_default_context() websocket = await websockets.connect( "%s://%s/.well-known/coap" % (ws_scheme, key.hostinfo), subprotocols=["coap"], ping_interval=None, ssl=ssl_context, ) remote = WSRemote( self, websocket, self.loop, self.log, scheme=key.scheme, remote_hostinfo=hostinfo_split, ) assert remote._poolkey == key, "Pool key construction is inconsistent" self._pool[key] = remote self.loop.create_task( self._run_recv_loop(remote), name="WebSocket receive loop for %r" % (key,), ) return remote finally: del self._outgoing_starting[key] # Implementation of TokenInterface async def fill_or_recognize_remote(self, message): if isinstance(message.remote, WSRemote) and message.remote._pool() is self: return True if message.requested_scheme in ("coap+ws", "coaps+ws"): key = PoolKey(message.requested_scheme, message.remote.hostinfo) if key in self._pool: message.remote = self._pool[key] if message.remote._connection.open: return True # else try opening a new one if key not in self._outgoing_starting: self._connect(key) # It's a bit unorthodox to wait for an (at least partially) # established connection in fill_or_recognize_remote, but it's # not completely off off either, and it makes it way easier to # not have partially initialized remotes around message.remote = await self._outgoing_starting[key] return True return False def send_message(self, message, messageerror_monitor): # Ignoring messageerror_monitor: CoAP over reliable transports has no # way of indicating that a particular message was bad, it always shuts # down the complete connection if message.code.is_response(): no_response = (message.opt.no_response or 0) & ( 1 << message.code.class_ - 1 ) != 0 if no_response: return 
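        # The option has served its purpose (making the suppression decision
        # above); clear it so it does not get serialized onto the wire.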
message.opt.no_response = None message.remote._send_message(message) async def shutdown(self): self._in_shutdown = True self.log.debug("Shutting down any connections on %r", self) client_shutdowns = [ asyncio.create_task( c.release(), name="Close connection %s" % c, ) for c in self._pool.values() ] server_shutdowns = [] while self._servers: s = self._servers.pop() # We could do something like # >>> for websocket in s.websockets: # >>> del websocket.logger.extra['websocket'] # to reduce the reference loops # (websocket.logger.extra['websocket'] == websocket), but as the # tests actually do run a GC collection once and that gets broken # up, it's not worth adding fragilty here s.close() server_shutdowns.append( asyncio.create_task(s.wait_closed(), name="Close server %s" % s), ) # Placing client shutdowns before server shutdowns to give them a # chance to send out Abort messages; the .close() method could be more # helpful here by stopping new connections but letting us finish off # the old ones shutdowns = client_shutdowns + server_shutdowns if shutdowns: # wait is documented to require a non-empty set await asyncio.wait(shutdowns) # Incoming message processing async def _run_recv_loop(self, remote): remote._send_initial_csm() while True: try: received = await remote._connection.recv() except websockets.exceptions.ConnectionClosed: # This check is purely needed to silence the warning printed # from tokenmanager, "Internal shutdown sequence msismatch: # error dispatched through tokenmanager after shutdown" -- and # is a symptom of https://github.com/chrysn/aiocoap/issues/284 # and of the odd circumstance that we can't easily cancel the # _run_recv_loop tasks (as we should while that issue is # unresolved) in the shutdown handler. if not self._in_shutdown: # FIXME if deposited somewhere, mark that as stale? self._tokenmanager.dispatch_error( error.RemoteServerShutdown("Peer closed connection"), remote ) return if not isinstance(received, bytes): await remote._abort_with_waiting( Message(code=ABORT, payload=b"Text frame received"), close_code=1003 ) return try: msg = _decode_message(received) except error.UnparsableMessage: await remote._abort_with_waiting( Message(code=ABORT, payload=b"Message parsing error"), close_code=1007, ) return msg.remote = remote if msg.code.is_signalling(): try: remote._process_signaling(msg) except rfc8323common.CloseConnection as e: self._tokenmanager.dispatch_error(e.args[0], msg.remote) self._pool.pop(remote._poolkey) await remote._connection.close() continue if remote._remote_settings is None: remote.abort("No CSM received") return if msg.code.is_response(): self._tokenmanager.process_response(msg) # ignoring the return value; unexpected responses can be the # asynchronous result of cancelled observations else: self._tokenmanager.process_request(msg) aiocoap-0.4.12/aiocoap/util/000077500000000000000000000000001472205122200155765ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/util/__init__.py000066400000000000000000000162031472205122200177110ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Tools not directly related with CoAP that are needed to provide the API These are only part of the stable API to the extent they are used by other APIs -- for example, you can use the type constructor of :class:`ExtensibleEnumMeta` when creating an :class:`aiocoap.numbers.optionnumbers.OptionNumber`, but don't expect it to be usable in a stable way for own extensions. 
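
For example (an illustration of the behavior, not a stability promise),
:class:`ExtensibleIntEnum` subclasses such as
:class:`aiocoap.numbers.optionnumbers.OptionNumber` construct ad-hoc members
for values they do not know::

    from aiocoap.numbers.optionnumbers import OptionNumber

    OptionNumber(11)     # a known member, OptionNumber.URI_PATH
    OptionNumber(65000)  # unknown, but still a usable OptionNumber instance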
Most functions are available in submodules; some of them may only have components that are exclusively used internally and never part of the public API even in the limited fashion stated above. .. toctree:: :glob: aiocoap.util.* """ import urllib.parse from warnings import warn import enum import sys class ExtensibleEnumMeta(enum.EnumMeta): """Metaclass that provides a workaround for https://github.com/python/cpython/issues/118650 (_repr_html_ is not allowed on enum) for versions before that is fixed""" if sys.version_info < (3, 13, 0, "beta", 1): @classmethod def __prepare__(metacls, cls, bases, **kwd): enum_dict = super().__prepare__(cls, bases, **kwd) class PermissiveEnumDict(type(enum_dict)): def __setitem__(self, key, value): if key == "_repr_html_": # Bypass _EnumDict, go directly for the regular dict # behavior it falls back to for regular items dict.__setitem__(self, key, value) else: super().__setitem__(key, value) permissive_dict = PermissiveEnumDict() dict.update(permissive_dict, enum_dict.items()) vars(permissive_dict).update(vars(enum_dict).items()) return permissive_dict class ExtensibleIntEnum(enum.IntEnum, metaclass=ExtensibleEnumMeta): """Similar to Python's enum.IntEnum, this type can be used for named numbers which are not comprehensively known, like CoAP option numbers.""" def __repr__(self): return "<%s %d%s>" % ( type(self).__name__, self, ' "%s"' % self.name if hasattr(self, "name") else "", ) def __str__(self): return self.name if hasattr(self, "name") else int.__str__(self) def _repr_html_(self): import html if hasattr(self, "name"): return f'{html.escape(self.name)}' else: return f'{int(self)}' @classmethod def _missing_(cls, value): """Construct a member, sidestepping the lookup (because we know the lookup already failed, and there is no singleton instance to return)""" new_member = int.__new__(cls, value) new_member._value_ = value cls._value2member_map_[value] = new_member return new_member if sys.version_info < (3, 11, 5): # backport of https://github.com/python/cpython/pull/106666 # # Without this, Python versions up to 3.11.4 (eg. 3.11.2 in Debian # Bookworm) fail the copy used to modify messages without mutating them # when attempting to access the _name_ of an unknown option. def __copy__(self): return self def __deepcopy__(self, memo): return self def hostportjoin(host, port=None): """Join a host and optionally port into a hostinfo-style host:port string >>> hostportjoin('example.com') 'example.com' >>> hostportjoin('example.com', 1234) 'example.com:1234' >>> hostportjoin('127.0.0.1', 1234) '127.0.0.1:1234' This is lax with respect to whether host is an IPv6 literal in brackets or not, and accepts either form; IP-future literals that do not contain a colon must be already presented in their bracketed form: >>> hostportjoin('2001:db8::1') '[2001:db8::1]' >>> hostportjoin('2001:db8::1', 1234) '[2001:db8::1]:1234' >>> hostportjoin('[2001:db8::1]', 1234) '[2001:db8::1]:1234' """ if ":" in host and not (host.startswith("[") and host.endswith("]")): host = "[%s]" % host if port is None: hostinfo = host else: hostinfo = "%s:%d" % (host, port) return hostinfo def hostportsplit(hostport): """Like urllib.parse.splitport, but return port as int, and as None if not given. 
    Also, it allows giving IPv6 addresses like a netloc:

    >>> hostportsplit('foo')
    ('foo', None)
    >>> hostportsplit('foo:5683')
    ('foo', 5683)
    >>> hostportsplit('[::1%eth0]:56830')
    ('::1%eth0', 56830)
    """

    pseudoparsed = urllib.parse.SplitResult(None, hostport, None, None, None)
    try:
        return pseudoparsed.hostname, pseudoparsed.port
    except ValueError:
        if "[" not in hostport and hostport.count(":") > 1:
            raise ValueError(
                "Could not parse network location. "
                "Beware that when IPv6 literals are expressed in URIs, they "
                "need to be put in square brackets to distinguish them from "
                "port numbers."
            )
        raise


def quote_nonascii(s):
    """Like urllib.parse.quote, but explicitly only escaping non-ascii
    characters.

    This function is deprecated due to its use of the irrelevant "being an
    ASCII character" property (when instead RFC3986 productions like
    "unreserved" should be used), and due for removal when aiocoap's URI
    processing is overhauled the next time.
    """

    return "".join(chr(c) if c <= 127 else "%%%02X" % c for c in s.encode("utf8"))


class Sentinel:
    """Class for sentinel that can only be compared for identity. No efforts
    are taken to make these singletons; it is up to the users to always refer
    to the same instance, which is typically defined on module level.

    This value can intentionally not be serialized in CBOR or JSON:

    >>> import json # Not using CBOR as that may not be present for tests
    >>> FIXME = Sentinel("FIXME")
    >>> json.dumps([1, FIXME, 3])
    Traceback (most recent call last):
    TypeError: Object of type Sentinel is not JSON serializable
    """

    def __init__(self, label):
        self._label = label

    def __repr__(self):
        return "<%s>" % self._label


def deprecation_getattr(_deprecated_aliases: dict, _globals: dict):
    """Factory for a module-level ``__getattr__`` function

    This creates deprecation warnings whenever a module level item by one of
    the keys of the alias dict is accessed by its old name rather than by its
    new name (which is in the values):

    >>> FOOBAR = 42
    >>>
    >>> __getattr__ = deprecation_getattr({'FOOBRA': 'FOOBAR'}, globals())
    """

    def __getattr__(name):
        if name in _deprecated_aliases:
            modern = _deprecated_aliases[name]
            warn(
                f"{name} is deprecated, use {modern} instead",
                DeprecationWarning,
                stacklevel=2,
            )
            return _globals[modern]
        raise AttributeError(f"module {__name__} has no attribute {name}")

    return __getattr__
aiocoap-0.4.12/aiocoap/util/asyncio/000077500000000000000000000000001472205122200172435ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/util/asyncio/__init__.py000066400000000000000000000002571472205122200213600ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Extensions to asyncio and workarounds around its shortcomings"""
aiocoap-0.4.12/aiocoap/util/asyncio/getaddrinfo_addrconfig.py000066400000000000000000000101431472205122200242620ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Helper module around getaddrinfo shortcomings"""

import socket
import itertools
import errno


async def getaddrinfo_routechecked(loop, log, host, port):
    """Variant of getaddrinfo that works like AI_ADDRCONFIG should probably
    work.

    This is not only a workaround for 12377_, but goes beyond it by checking
    routability. Even with 12377 fixed, AI_ADDRCONFIG would just avoid
    sending AAAA requests (if no v6 addresses were available at all).
It would still not filter out responses that are simply not routable -- the typical example being that a host may be in a network with a ULA, so it needs to send AAAA requests because they might resolve to the ULA, but if a global address comes back, we still don't want that address passed to the application. The family and flags arguments are not accepted, because they are what is being altered (and it'd be hard to tell which combinations would work). It roughly behaves as if flags were set to AI_ADDRCONFIG | AI_V4MAPPED, and family to AF_INET6. The type and protocol are fixed to SOCK_DGRAM / IPPROTO_UDP, because only there we know that connect is side-effect free. This function also differs from getaddrinfo in that it returns an asynchronous iterator (we don't want to check addresses we don't care about), and in that it only returns the sockaddr (because the rest is fixed anyway). .. _12377: https://sourceware.org/bugzilla/show_bug.cgi?id=12377 """ # As an implementation note, we can't go through the AI_V4MAPPED feature, # because if we used that, we'd only get V4 responses if there are no V6 # addresses. As we deliberately want to produce results on V4 addresses # when both are provided (conditional on the V6 ones not being routable), # we need to convert -- and then the quick way to do things is to use # AF_UNSPEC and distinguish later. addrinfo = await loop.getaddrinfo( host, port, family=socket.AF_UNSPEC, type=socket.SOCK_DGRAM, proto=socket.IPPROTO_UDP, # Still setting that -- it spares us the traffic of pointless requests. flags=socket.AI_ADDRCONFIG, ) if any(a[0] not in (socket.AF_INET6, socket.AF_INET) for a in addrinfo): log.warning("Addresses outside of INET and INET6 families ignored.") v6_addresses = ( sockaddr for (family, *_, sockaddr) in addrinfo if family == socket.AF_INET6 ) # Dress them just as AI_V4MAPPED would do. Note that we can't do (ip, port) # instead of sockaddr b/c it's destructured already for the family check, # and at that time it may be a different AF with more members. v4_addresses = ( ("::ffff:" + sockaddr[0], sockaddr[1], 0, 0) for (family, *_, sockaddr) in addrinfo if family == socket.AF_INET ) yielded = 0 for ip, port, flowinfo, scope_id in itertools.chain(v6_addresses, v4_addresses): # Side-step connectivity test when explicitly giving an address. This # should be purely a cosmetic change, but given we try not to give IP # addresses special treatment over host names, it needs to be done # somewhere if we don't want connection attempts to explicit V6 # addresses to err with "No address information found" on V4-only # hosts. if ip != host: with socket.socket( family=socket.AF_INET6, type=socket.SOCK_DGRAM, proto=socket.IPPROTO_UDP ) as tempsock: try: tempsock.connect((ip, port)) except OSError as e: if e.errno == errno.ENETUNREACH: continue yield (ip, port, flowinfo, scope_id) yielded += 1 if not yielded: # That's the behavior of getaddrinfo -- not return empty (so we # shouldn't return empty just b/c everything was filtered either). Do # we need to be any more specific? The gaierrors are caught later # anyway... raise socket.gaierror aiocoap-0.4.12/aiocoap/util/asyncio/recvmsg.py000066400000000000000000000152151472205122200212670ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT from ..
import socknumbers from asyncio import BaseProtocol from asyncio.transports import BaseTransport class RecvmsgDatagramProtocol(BaseProtocol): """Callback interface similar to asyncio.DatagramProtocol, but dealing with recvmsg data.""" def datagram_msg_received(self, data, ancdata, flags, address): """Called when some datagram is received.""" def datagram_errqueue_received(self, data, ancdata, flags, address): """Called when some data is received from the error queue""" def error_received(self, exc): """Called when a send or receive operation raises an OSError.""" def _set_result_unless_cancelled(fut, result): """Helper setting the result only if the future was not cancelled.""" if fut.cancelled(): return fut.set_result(result) class RecvmsgSelectorDatagramTransport(BaseTransport): """A simple loop-independent transport that largely mimics DatagramTransport but interfaces a RecvmsgDatagramProtocol. This does not implement any flow control, based on the assumption that it's not needed, for CoAP has its own flow control mechanisms.""" max_size = 4096 # Buffer size passed to recvmsg() -- should suffice for a full MTU packet and ample ancdata def __init__(self, loop, sock, protocol, waiter): super().__init__(extra={"socket": sock}) self.__sock = sock # Persisted outside of sock because when GC breaks a reference cycle, # it can happen that the sock gets closed before this; we have to hope # that no new file gets opened and registered in the meantime. self.__sock_fileno = sock.fileno() self._loop = loop self._protocol = protocol loop.call_soon(protocol.connection_made, self) # only start reading when connection_made() has been called import weakref # We could add error handling in here like this: # ``` # self = s() # if self is None or self.__sock is None: # # The read event happened briefly before .close() was called, # # but late enough that the caller of close did not yield to let # # the event out; when remove_reader was then called, the # # pending event was not removed, so it fires now that the # # socket is already closed. (Depending on the GC's whims, self # # may or may not have been GC'd, but if it wasn't yet, the # # closed state is indicated by the lack of a __sock.) # # # # Thus, silently (preferably with an ICMP error, but really # # can't do that)... # return # ``` # That was done tentatively while debugging errors flying out of # _read_ready, but it turned out that this was not the actual error # source. Thus, I'm not adding the handler and assuming that close's # remove_reader is not racing against callbacks, and thus that s() is # always valid while the transport is around (and the weakref is really # only used to break up the reference cycles to ensure the GC is not # needed here).
def rr(s=weakref.ref(self)): s()._read_ready() loop.call_soon(loop.add_reader, self.__sock_fileno, rr) loop.call_soon(_set_result_unless_cancelled, waiter, None) def close(self): if self.__sock is None: return if not self._loop.is_closed(): self._loop.call_soon(self._protocol.connection_lost, None) self._loop.remove_reader(self.__sock_fileno) self.__sock.close() self.__sock = None self._protocol = None self._loop = None def __del__(self): if self.__sock is not None: self.close() def _read_ready(self): if socknumbers.HAS_RECVERR: try: data, ancdata, flags, addr = self.__sock.recvmsg( self.max_size, 1024, socknumbers.MSG_ERRQUEUE ) except (BlockingIOError, InterruptedError): pass except OSError as exc: if ( repr(exc) == "OSError('received malformed or improperly truncated ancillary data',)" ): pass # workaround for https://bitbucket.org/pypy/pypy/issues/2649/recvmsg-with-empty-err-queue-raises-odd else: self._protocol.error_received(exc) except Exception as exc: self._fatal_error(exc, "Fatal read error on datagram transport") else: self._protocol.datagram_errqueue_received(data, ancdata, flags, addr) # copied and modified from _SelectorDatagramTransport try: data, ancdata, flags, addr = self.__sock.recvmsg( self.max_size, 1024 ) # TODO: find a way for the application to tell the transport how much data is expected except (BlockingIOError, InterruptedError): pass except OSError as exc: self._protocol.error_received(exc) except Exception as exc: self._fatal_error(exc, "Fatal read error on datagram transport") else: self._protocol.datagram_msg_received(data, ancdata, flags, addr) def sendmsg(self, data, ancdata, flags, address): try: self.__sock.sendmsg((data,), ancdata, flags, address) return except OSError as exc: self._protocol.error_received(exc) return except Exception as exc: self._fatal_error(exc, "Fatal write error on datagram transport") return async def create_recvmsg_datagram_endpoint(loop, factory, sock): """Create a datagram connection that uses recvmsg rather than recvfrom, and a RecvmsgDatagramProtocol protocol type. This is used like the create_datagram_endpoint method of an asyncio loop, but implemented in a generic way using the loop's add_reader method; thus, it's not a method of the loop but an independent function. Due to the way it is used in aiocoap, socket is not an optional argument here; it could be, were this module ever split off into a standalone package. """ sock.setblocking(False) protocol = factory() waiter = loop.create_future() transport = RecvmsgSelectorDatagramTransport(loop, sock, protocol, waiter) try: await waiter # see https://github.com/PyCQA/pycodestyle/issues/703 except: # noqa: E722 transport.close() raise return transport, protocol aiocoap-0.4.12/aiocoap/util/asyncio/timeoutdict.py000066400000000000000000000040631472205122200221520ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import asyncio from typing import Dict, Generic, TypeVar K = TypeVar("K") V = TypeVar("V") class TimeoutDict(Generic[K, V]): """A dict-ish type whose entries live on a timeout; adding and accessing an item each refreshes the timeout. The timeout is a lower bound; items may live up to twice as long. The container is implemented incompletely, with additions made on demand. This is not thread safe.
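A minimal usage sketch (an illustration, not taken from the package; the key and timeout values are made up, and the code is assumed to run inside an asyncio task, since expiry is scheduled on the running loop)::

    d: TimeoutDict[str, int] = TimeoutDict(timeout=5.0)
    d["token"] = 42      # the first access arms the expiry timer
    value = d["token"]   # any later access refreshes the entry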
""" def __init__(self, timeout: float): self.timeout = timeout """Timeout set on any access This can be changed at runtime, but changes only take effect """ self._items: Dict[K, V] = {} """The actual dictionary""" self._recently_accessed = None """Items accessed since the timeout last fired""" self._timeout = None """Canceler for the timeout function""" # Note: Without a __del__ implementation that even cancels, the object # will be kept alive by the main loop for a timeout def __getitem__(self, key): result = self._items[key] self._accessed(key) return result def __setitem__(self, key, value): self._items[key] = value self._accessed(key) def _start_over(self): """Clear _recently_accessed, set the timeout""" self._timeout = asyncio.get_running_loop().call_later(self.timeout, self._tick) self._recently_accessed = set() def _accessed(self, key): """Mark a key as recently accessed""" if self._timeout is None: self._start_over() # No need to add the key, it'll live for this duration anyway else: self._recently_accessed.add(key) def _tick(self): self._items = { k: v for (k, v) in self._items.items() if k in self._recently_accessed } if self._items: self._start_over() else: self._timeout = None self._recently_accessed = None aiocoap-0.4.12/aiocoap/util/cli.py000066400000000000000000000131231472205122200167170ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Helpers for creating server-style applications in aiocoap Note that these are not particular to aiocoap, but are used at different places in aiocoap and thus shared here.""" import argparse import sys import logging import asyncio import signal class ActionNoYes(argparse.Action): """Simple action that automatically manages --{,no-}something style options""" # adapted from Omnifarious's code on # https://stackoverflow.com/questions/9234258/in-python-argparse-is-it-possible-to-have-paired-no-something-something-arg#9236426 def __init__(self, option_strings, dest, default=True, required=False, help=None): assert len(option_strings) == 1, "ActionNoYes takes only one option name" assert option_strings[0].startswith( "--" ), "ActionNoYes options must start with --" super().__init__( ["--" + option_strings[0][2:], "--no-" + option_strings[0][2:]], dest, nargs=0, const=None, default=default, required=required, help=help, ) def __call__(self, parser, namespace, values, option_string=None): if option_string.startswith("--no-"): setattr(namespace, self.dest, False) else: setattr(namespace, self.dest, True) class AsyncCLIDaemon: """Helper for creating daemon-style CLI prorgrams. Note that this currently doesn't create a Daemon in the sense of doing a daemon-fork; that could be added on demand, though. Subclass this and implement the :meth:`start` method as an async function; it will be passed all the constructor's arguments. When all setup is complete and the program is operational, return from the start method. Implement the :meth:`shutdown` coroutine and to do cleanup; what actually runs your program will, if possible, call that and await its return. Two usage patterns for this are supported: * Outside of an async context, run run ``MyClass.sync_main()``, typically in the program's ``if __name__ == "__main__":`` section. In this mode, the loop that is started is configured to safely shut down the loop when SIGINT is received. 
* To run a subclass of this in an existing loop, start it with ``MyClass(...)`` (possibly passing in the loop to run it on if not already in an async context), and then await its ``.initializing`` future. To stop it, await its ``.shutdown()`` method. Note that with this usage pattern, the :meth:`.stop()` method has no effect; servers that ``.stop()`` themselves need to signal their desire to be shut down through other channels (but that is an atypical case). """ def __init__(self, *args, **kwargs): loop = kwargs.pop("loop", None) if loop is None: loop = asyncio.get_running_loop() self.__exitcode = loop.create_future() self.initializing = loop.create_task( self.start(*args, **kwargs), name="Initialization of %r" % (self,) ) def stop(self, exitcode): """Stop the operation (and exit sync_main) at the next convenience.""" self.__exitcode.set_result(exitcode) @classmethod async def _async_main(cls, *args, **kwargs): """Run the application in an AsyncIO main loop, shutting down cleanly on keyboard interrupt. This is not exposed publicly as it messes with the loop, and we only do that with loops created in sync_main. """ main = cls(*args, **kwargs) try: asyncio.get_running_loop().add_signal_handler( signal.SIGTERM, lambda: main.__exitcode.set_result(143), ) except NotImplementedError: # Impossible on win32 -- just won't make that clean of a shutdown. pass try: await main.initializing # This is the time when we'd signal setup completion by the parent # exiting in case of a daemon setup, or to any other process # management. logging.info("Application ready.") # Common options are 143 or 0 # ( and # ) exitcode = await main.__exitcode except KeyboardInterrupt: logging.info("Keyboard interrupt received, shutting down") sys.exit(3) else: sys.exit(exitcode) finally: if main.initializing.done() and main.initializing.exception(): # The exception in initializing is what we are just watching # fly by. No need to trigger it again, and running shutdown # would be even weirder. pass else: # May be done, then it's a no-op, or we might have received a # signal during startup in which case we better fetch the # result and shut down cleanly again await main.initializing # And no matter whether that happened during initialization # (which now has finished) or due to a regular signal... await main.shutdown() @classmethod def sync_main(cls, *args, **kwargs): """Run the application in an AsyncIO main loop, shutting down cleanly on keyboard interrupt.""" asyncio.run(cls._async_main(*args, **kwargs)) aiocoap-0.4.12/aiocoap/util/contenttype.py000066400000000000000000000021651472205122200205300ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Helpers around content types This uses the terminology clarified in 1_, and primarily deals with content types in their usual string representation. Unless content types get used a lot more in aiocoap, this provides only accessors to some of their relevant properties, without aiming to build semantically accessible objects to encapsulate them. ..
_1: https://tools.ietf.org/html/draft-bormann-core-media-content-type-format-01""" def categorize(contenttype: str): """Return 'cbor', 'json' or 'link-format' if the content type indicates it is that format itself or derived from it.""" media_type, *_ = contenttype.split(";") _, _, subtype = media_type.partition("/") if subtype == "cbor-seq" or subtype.endswith("+cbor-seq"): return "cbor-seq" if subtype == "cbor" or subtype.endswith("+cbor"): return "cbor" if subtype == "json" or subtype.endswith("+json"): return "json" if media_type == "application/link-format": return "link-format" return None aiocoap-0.4.12/aiocoap/util/cryptography_additions.py000066400000000000000000000045511472205122200227460ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """ Workaround for https://github.com/pyca/cryptography/issues/5557 These functions could be methods to `cryptography.hazmat.primitives.asymmetric.ed25519.{Ed25519PrivateKey, Ed25519PublicKey}`, respectively, and are currently implemented manually or using ge25519. These conversions are not too critical in that they do not run on data an attacker can send arbitrarily (in the most dynamic situation, the keys are distributed through a KDC aka. group manager). """ from cryptography.hazmat.primitives.asymmetric import ed25519, x25519 from cryptography.hazmat.primitives import serialization def sk_to_curve25519(ed: ed25519.Ed25519PrivateKey) -> x25519.X25519PrivateKey: raw = ed.private_bytes( encoding=serialization.Encoding.Raw, format=serialization.PrivateFormat.Raw, encryption_algorithm=serialization.NoEncryption(), ) # as proposed in https://github.com/pyca/cryptography/issues/5557#issuecomment-739339132 from cryptography.hazmat.primitives import hashes hasher = hashes.Hash(hashes.SHA512()) hasher.update(raw) h = bytearray(hasher.finalize()) # curve25519 clamping h[0] &= 248 h[31] &= 127 h[31] |= 64 return x25519.X25519PrivateKey.from_private_bytes(h[0:32]) def pk_to_curve25519(ed: ed25519.Ed25519PublicKey) -> x25519.X25519PublicKey: raw = ed.public_bytes( encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw, ) # This is libsodium's crypto_sign_ed25519_pk_to_curve25519 translated into # the Python module ge25519. from ge25519 import ge25519, ge25519_p3 from fe25519 import fe25519 if ge25519.has_small_order(raw) != 0: raise RuntimeError("Point has small order") # frombytes in libsodium appears to be the same as # frombytes_negate_vartime; as ge25519 only implements the from_bytes # version, we have to do the root check manually. A = ge25519_p3.from_bytes(raw) if A.root_check: raise RuntimeError("Root check failed") if not A.is_on_main_subgroup(): raise RuntimeError("Point is not on the main subgroup") one_minus_y = fe25519.one() - A.Y x = A.Y + fe25519.one() x = x * one_minus_y.invert() return x25519.X25519PublicKey.from_public_bytes(bytes(x.to_bytes())) aiocoap-0.4.12/aiocoap/util/linkformat.py000066400000000000000000000026411472205122200203210ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module contains in-place modifications to the LinkHeader module to satisfy RFC6690 constraints. It is a general nursery for what aiocoap needs of link-format management before any of this is split out into its own package.
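For illustration, parsing and re-serializing shows the compact output of the ``Link.__str__`` below (attributes joined with ";" and no spaces, unlike the vendored link_header module; the resource name and attributes in this example are made up):

>>> str(parse('</sensors/temp>;rt="temperature-c";if="sensor"'))
'</sensors/temp>;rt="temperature-c";if="sensor"'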
""" from .vendored import link_header class LinkFormat(link_header.LinkHeader): def __str__(self): return ",".join(str(link) for link in self.links) class Link(link_header.Link): # This is copy-pasted from the link_header module's code, just replacing # the '; ' with ';'. # # Original copyright Michael Burrows , distributed under # the BSD license def __str__(self): def str_pair(key, value): if value is None: return key # workaround to accomodate copper # elif RE_ONLY_TOKEN.match(value) or key.endswith('*'): # return '%s=%s' % (key, value) else: return '%s="%s"' % (key, value.replace('"', r"\"")) return ";".join( ["<%s>" % self.href] + [str_pair(key, value) for key, value in self.attr_pairs] ) def parse(linkformat): data = link_header.parse(linkformat) data.__class__ = LinkFormat for link in data.links: link.__class__ = Link return data aiocoap-0.4.12/aiocoap/util/linkformat_pygments.py000066400000000000000000000024521472205122200222470ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT from pygments import token, lexers from pygments.lexer import RegexLexer, bygroups __all__ = ["LinkFormatLexer"] class LinkFormatLexer(RegexLexer): name = "LinkFormatLexer" mimetypes = ["application/link-format"] tokens = { "root": [ ( "(<)([^>]*)(>)", bygroups(token.Punctuation, token.Name.Label, token.Punctuation), "maybe-end", ) ], "maybe-end": [ # Whitespace is not actually allowed, but produced by the pretty printer (";\\s*", token.Punctuation, "attribute"), (",\\s*", token.Punctuation, "root"), ], "attribute": [ ( '([^,;=]+)((=)("[^"]*"|[^,;"]+))?', bygroups( token.Name.Attribute, None, token.Operator, token.String.Symbol ), "maybe-end", ), ], } def _register(): if "LinkFormatLexer" not in lexers.LEXERS: lexers.LEXERS["LinkFormatLexer"] = ( "aiocoap.util.linkformat_pygments", "LinkFormatLexer", (), (), LinkFormatLexer.mimetypes, ) aiocoap-0.4.12/aiocoap/util/prettyprint.py000066400000000000000000000160031472205122200205540ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """A pretty-printer for known mime types""" import json import re import pygments import pygments.lexers import pygments.formatters from aiocoap.util import linkformat, contenttype from aiocoap.util.linkformat_pygments import _register _register() MEDIATYPE_HEXDUMP = "text/vnd.aiocoap.hexdump" def lexer_for_mime(mime): """A wrapper around pygments.lexers.get_lexer_for_mimetype that takes subtypes into consideration and catches the custom hexdump mime type.""" if mime == MEDIATYPE_HEXDUMP: return pygments.lexers.HexdumpLexer() if mime == "text/plain;charset=utf8": # We have fall-throughs in place anwyay, no need to go through a no-op # TextLexer raise pygments.util.ClassNotFound try: return pygments.lexers.get_lexer_for_mimetype(mime) except pygments.util.ClassNotFound: mime = re.sub( "^([^/]+)/.*\\+([^;]+)(;.*)?$", lambda args: args[1] + "/" + args[2], mime ) return pygments.lexers.get_lexer_for_mimetype(mime) def pretty_print(message): """Given a CoAP message, reshape its payload into something human-readable. The return value is a triple (infos, mime, text) where text represents the payload, mime is a type that could be used to syntax-highlight the text (not necessarily related to the original mime type, eg. 
a report of some binary data that's shaped like Markdown could use a markdown mime type), and infos is a list of additional remarks (like the reason for a hex dump or the original mime type). >>> from aiocoap import Message >>> def build(payload, request_cf, response_cf): ... response = Message(payload=payload, content_format=response_cf) ... request = Message(accept=request_cf) ... response.request = request ... return response >>> pretty_print(Message(payload=b"Hello", content_format=0)) ([], 'text/plain;charset=utf8', 'Hello') >>> print(pretty_print(Message(payload=b'{"hello":"world"}', content_format=50))[-1]) { "hello": "world" } >>> # Erroneous inputs still go to the pretty printer as long as they're >>> # Unicode >>> pretty_print(Message(payload=b'{"hello":"world', content_format=50)) (['Invalid JSON not re-formatted'], 'application/json', '{"hello":"world') >>> pretty_print(Message(payload=b'<>,', content_format=40)) (['Invalid application/link-format content was not re-formatted'], 'application/link-format', '<>,') >>> pretty_print(Message(payload=b'a', content_format=60)) # doctest: +ELLIPSIS (['Showing hex dump of application/cbor payload: CBOR value is invalid'], 'text/vnd.aiocoap.hexdump', '00000000 61 ... """ infos = [] info = infos.append cf = message.opt.content_format or message.request.opt.accept if cf is None: content_type = "type unknown" elif cf.is_known(): content_type = cf.media_type if cf.encoding != "identity": info( "Content format is %s in %s encoding; treating as " "application/octet-stream because decompression is not " "supported yet" % (cf.media_type, cf.encoding) ) else: content_type = "type %d" % cf category = contenttype.categorize(content_type) show_hex = None if linkformat is not None and category == "link-format": try: decoded = message.payload.decode("utf8") try: parsed = linkformat.link_header.parse(decoded) except linkformat.link_header.ParseException: info("Invalid application/link-format content was not re-formatted") return (infos, "application/link-format", decoded) else: info("application/link-format content was re-formatted") prettyprinted = ",\n".join(str(link) for link in parsed.links) return (infos, "application/link-format", prettyprinted) except ValueError: # Handled later pass elif category in ("cbor", "cbor-seq"): if category == "cbor-seq": # Faking an indefinite length CBOR array is the easiest way to # parse an array into a list-like data structure, especially as # long as we don't indicate precise locations of invalid CBOR # anyway payload = b"\x9f" + message.payload + b"\xff" else: payload = message.payload try: import cbor_diag formatted = cbor_diag.cbor2diag(payload) if category == "cbor-seq": info("CBOR sequence message shown as array in Diagnostic Notation") else: info("CBOR message shown in Diagnostic Notation") # It's not exactly CDDL, but it's close enough that the syntax # highlighting looks OK, and tolerant enough to not complain about # missing leading barewords and "=" signs return (infos, "text/x-cddl", formatted) except ImportError: show_hex = "No CBOR pretty-printer available" except ValueError: show_hex = "CBOR value is invalid" elif category == "json": try: decoded = message.payload.decode("utf8") except ValueError: pass else: try: parsed = json.loads(decoded) except ValueError: info("Invalid JSON not re-formatted") return (infos, "application/json", decoded) else: info("JSON re-formatted and indented") formatted = json.dumps(parsed, indent=4) return (infos, "application/json", formatted) # That's about the formats
we do for now. if show_hex is None: try: text = message.payload.decode("utf8") except UnicodeDecodeError: show_hex = "Message can not be parsed as UTF-8" else: return (infos, "text/plain;charset=utf8", text) info( "Showing hex dump of %s payload%s" % ( content_type if cf is not None else "untyped", ": " + show_hex if show_hex is not None else "", ) ) data = message.payload # Not the most efficient hex dumper, but we won't stream video over # this anyway formatted = [] offset = 0 while data: line, data = data[:16], data[16:] formatted.append( "%08x " % offset + " ".join("%02x" % line[i] if i < len(line) else "  " for i in range(8)) + " " + " ".join( "%02x" % line[i] if i < len(line) else "  " for i in range(8, 16) ) + " |" + "".join(chr(x) if 32 <= x < 127 else "." for x in line) + "|\n" ) offset += len(line) if offset % 16 != 0: formatted.append("%08x\n" % offset) return (infos, MEDIATYPE_HEXDUMP, "".join(formatted)) aiocoap-0.4.12/aiocoap/util/pyodide_websockets.py000066400000000000000000000111601472205122200220350ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module provides a slimmed-down replacement of the websockets_ module (that regularly powers :mod:`aiocoap.transports.ws`) -- but implemented through pyodide_'s JavaScript adapter towards the WebSocket module of the hosting browser. It aims to be a drop-in replacement that provides the parts that can be implemented in the browser, and to provide practical errors on the used entry points. It will not go out of its way to mimic every aspect of the websockets module, but restrain itself to what ``.ws`` needs. **Future development:** The module can probably be extended to cover all the implementable functionality of websockets, and provide meaningful errors on all its items. When that happens, it should be split out of aiocoap. .. _websockets: https://websockets.readthedocs.io/ .. _pyodide: https://pyodide.org/ """ import asyncio class WebSocketCommonProtocol: pass class WebSocketClientProtocol(WebSocketCommonProtocol): def __init__(self, socket): # Note that this is an under-implemented constructor -- the real thing # is in `connect()` which is async enough to do more. self._socket = socket # FIXME: This is a workaround for WebSockets' shortcomings, while # WebSocketStreams are not deployed (see # https://developer.chrome.com/articles/websocketstream/ for details) self._queue = asyncio.Queue() # The initial setting doesn't matter too much because we're not handing # it out before setting this to True ... still feels cleaner this way. self.open = False async def recv(self): (etype, event) = await self._queue.get() if etype == "message": if isinstance(event.data, str): # FIXME: Test this return event.data return bytes((await event.data.arrayBuffer()).to_py()) elif etype == "close": raise exceptions.ConnectionClosed() elif etype == "error": raise exceptions.WebSocketException("Connection error") else: raise RuntimeError("Unknown event in queue") async def send(self, msg): from js import Blob, Uint8Array blob = Blob.new([Uint8Array.new(msg)]) self._socket.send(blob) # FIXME: It'd be preferable if we could make this an unassigned property # that'd raise if anybody tried to access it (for neither do we know the # value, nor could anything useful be done with it), but as things are, # we'll have to rely on all users' sensibilities to not send around # addresses that are not globally usable.
(The port, indicating the default # port, is an outright lie, though.) local_address = ("localhost", None) def on_message(self, event): self._queue.put_nowait(("message", event)) def on_error(self, event): self.open = False self._queue.put_nowait(("error", event)) def on_close(self, event): self.open = False self._queue.put_nowait(("close", event)) async def connect( uri, subprotocols=None, ping_interval=20, ssl=None ) -> WebSocketClientProtocol: from pyodide.ffi.wrappers import add_event_listener from js import WebSocket if ssl is not None: raise ValueError("SSL can not be configured within the browser WebSocket API") socket = WebSocket.new(uri, subprotocols) # Ignoring ping_interval: We can't tell what the browser does, and it may # be right or not. proto = WebSocketClientProtocol(socket) add_event_listener( socket, "open", lambda e, q=proto._queue: q.put_nowait(("open", e)) ) add_event_listener(socket, "message", proto.on_message) add_event_listener(socket, "error", proto.on_error) add_event_listener(socket, "close", proto.on_close) (etype, event) = await proto._queue.get() if etype != "open": raise exceptions.WebSocketException("Failed to connect") proto.open = True return proto class exceptions: """A class that is a good-enough approximation of ``websockets.exceptions`` to get away with a single file implementing pyodide_websockets.""" class WebSocketException(Exception): pass class ConnectionClosed(WebSocketException): pass # Mocks required by the aiocoap.transports.ws module expecting a full implementation class WebSocketServerProtocol: def __init__(self, *args, **kwargs): raise RuntimeError("Web sockets in web browsers can not be used as servers") WebSocketServer = WebSocketServerProtocol def serve(*args, **kwargs): WebSocketServer() aiocoap-0.4.12/aiocoap/util/socknumbers.py000066400000000000000000000045251472205122200205110ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This module contains numeric constants that would be expected in the socket module, but are not exposed there. This gathers both socket numbers that can be present in the socket module (eg. the PKTINFO constants) but are not in some versions (eg. on macOS before https://bugs.python.org/issue35569 is fixed) and platform dependent constants that are not generally available at all (the ERR constants). Where available, the CPython-private IN module is used to obtain some platform specific constants. Any hints on where to get them from in a more reliable way are appreciated; possible options are parsing C header files (at build time?) or interacting with shared libraries for obtaining the symbols. The right way would probably be including them in Python in an "other constants defined on this platform for sockets" module or dictionary. As of 2024, most of these are not needed any more; this module will be removed in favor of directly accessing `socket` constants once Python 3.13 support is dropped (see [issue 352](https://github.com/chrysn/aiocoap/issues/352)).
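For illustration, consumers of this module (such as the udp6 transport) are expected to guard their use of these constants roughly as sketched below; this is an assumption-laden sketch rather than a verbatim quote of that module, with ``sock`` standing in for an IPv6 datagram socket::

    import socket
    from aiocoap.util import socknumbers

    if socknumbers.HAS_RECVERR:
        # sock is assumed to be an AF_INET6 / SOCK_DGRAM socket
        sock.setsockopt(socket.IPPROTO_IPV6, socknumbers.IPV6_RECVERR, 1)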
""" import sys try: from socket import IPV6_PKTINFO, IPV6_RECVPKTINFO except ImportError: if sys.platform == "linux": # Not sure if here are any Linux builds at all where this is # unavailable IPV6_PKTINFO = 50 IPv6_RECVPKTINFO = 49 elif sys.platform == "darwin": # when __APPLE_USE_RFC_3542 is defined / as would be when # https://bugs.python.org/issue35569 is fixed IPV6_PKTINFO = 46 IPV6_RECVPKTINFO = 61 # Not attempting to make any guesses for other platforms; the udp6 module # will fail to import where it needs the specifics try: from IN import IPV6_RECVERR, IP_RECVERR # type: ignore except ImportError: if sys.platform == "linux": IPV6_RECVERR = 25 IP_RECVERR = 11 # for https://bitbucket.org/pypy/pypy/issues/2648/ try: from socket import MSG_ERRQUEUE except ImportError: if sys.platform == "linux": MSG_ERRQUEUE = 8192 # type: ignore HAS_RECVERR = "IP_RECVERR" in locals() and "MSG_ERRQUEUE" in locals() """Indicates whether the discovered constants indicate that the Linux `setsockopt(IPV6, RECVERR)` / `recvmsg(..., MSG_ERRQUEUE)` mechanism is available""" aiocoap-0.4.12/aiocoap/util/uri.py000066400000000000000000000015031472205122200167460ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Tools that I'd like to have in urllib.parse""" import string #: "unreserved" characters from RFC3986 unreserved = string.ascii_letters + string.digits + "-._~" #: "sub-delims" characters from RFC3986 sub_delims = "!$&'()*+,;=" def quote_factory(safe_characters): """Return a quote function that escapes all characters not in the safe_characters iterable.""" safe_set = set(ord(x) for x in safe_characters) if any(c >= 128 for c in safe_set): raise ValueError("quote_factory does not support non-ASCII safe characters") def quote(input_string): encoded = input_string.encode("utf8") return "".join(chr(x) if x in safe_set else "%%%02X" % x for x in encoded) return quote aiocoap-0.4.12/aiocoap/util/vendored/000077500000000000000000000000001472205122200174045ustar00rootroot00000000000000aiocoap-0.4.12/aiocoap/util/vendored/link_header.py000066400000000000000000000276521472205122200222370ustar00rootroot00000000000000# SPDX-FileCopyrightText: Michael Burrows # # SPDX-License-Identifier: BSD-3-Clause """ Parse and format link headers according to RFC 5988 "Web Linking". Usage (assuming a suitable headers object in the environment): >>> headers['Link'] = str(LinkHeader([Link("http://example.com/foo", rel="self"), ... Link("http://example.com", rel="up")])) >>> headers['Link'] '; rel=self, ; rel=up' >>> parse(headers['Link']) LinkHeader([Link('http://example.com/foo', rel='self'), Link('http://example.com', rel='up')]) Blank and missing values roundtrip correctly: >>> format_link(parse('; obs; if="core.s"; foo=""')) '<; obs; if=core.s; foo="">' Conversions to and from json-friendly list-based structures are also provided: >>> parse(headers['Link']).to_py() [['http://example.com/foo', [['rel', 'self']]], ['http://example.com', [['rel', 'up']]]] >>> str(LinkHeader([['http://example.com/foo', [['rel', 'self']]], ... ['http://example.com', [['rel', 'up']]]])) '; rel=self, ; rel=up' For further information see parse(), LinkHeader and Link. """ import re from typing import Dict from urllib.parse import urljoin __all__ = [ "parse", "format_links", "format_link", "LinkHeader", "Link", "ParseException", ] SINGLE_VALUED_ATTRS = ["rel", "anchor", "rev", "media", "title", "title*", "type"] # # Regexes for link header parsing. 
TOKEN and QUOTED in particular should conform to RFC2616. # # Acknowledgement: The QUOTED regexp is based on # http://stackoverflow.com/questions/249791/regexp-for-quoted-string-with-escaping-quotes/249937#249937 # # Trailing spaces are consumed by each pattern. The RE_HREF pattern also allows for any leading spaces. # QUOTED = ( r'"((?:[^"\\]|\\.)*)"' # double-quoted strings with backslash-escaped double quotes ) TOKEN = r"([^()<>@,;:\"\[\]?={}\s]+)" # non-empty sequence of non-separator characters RE_COMMA_HREF = re.compile( r" *,? *< *([^>]*) *> *" ) # includes ',' separator; no attempt to check URI validity RE_ONLY_TOKEN = re.compile(r"^%(TOKEN)s$" % locals()) RE_ATTR = re.compile(r"%(TOKEN)s *(?:= *(%(TOKEN)s|%(QUOTED)s))? *" % locals()) RE_SEMI = re.compile(r"; *") RE_COMMA = re.compile(r", *") def parse(header): """Parse a link header string, returning a LinkHeader object: >>> parse('<http://example.com/foo>; rel="foo bar", <http://example.com>; rel=up; type=text/html') LinkHeader([Link('http://example.com/foo', rel='foo bar'), Link('http://example.com', rel='up', type='text/html')]) ParseException is raised in the event that the input string is not parsed completely: >>> parse('<http://example.com> error') #doctest: +SKIP Traceback (most recent call last): ... ParseException: ('link_header.parse() failed near %s', "'error'") """ scanner = _Scanner(header) links = [] while scanner.scan(RE_COMMA_HREF): href = scanner[1] attrs = [] while scanner.scan(RE_SEMI): if scanner.scan(RE_ATTR): attr_name, token, quoted = scanner[1], scanner[3], scanner[4] if quoted is not None: attrs.append([attr_name, quoted.replace(r"\"", '"')]) elif token is not None: attrs.append([attr_name, token]) else: attrs.append([attr_name, None]) links.append(Link(href, attrs)) if scanner.buf: raise ParseException("link_header.parse() failed near %s", repr(scanner.buf)) return LinkHeader(links) def format_links(*args, **kwargs): return str(LinkHeader(*args, **kwargs)) def format_link(*args, **kwargs): return str(Link(*args, **kwargs)) class ParseException(Exception): pass class LinkHeader(object): """Represents a sequence of links that can be formatted together as a link header.""" def __init__(self, links=None): """Initializes a LinkHeader object with a list of Link objects or with a list of parameters from which Link objects can be created: >>> LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')]) LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')]) >>> LinkHeader([['http://example.com/foo', [['rel', 'foo']]], ['http://example.com', [['rel', 'up']]]]) LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')]) The Link objects can be accessed afterwards via the `links` property.
String conversion follows the spec: >>> str(LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')])) '<http://example.com/foo>; rel=foo, <http://example.com>; rel=up' Conversion to json-friendly list-based structures: >>> LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')]).to_py() [['http://example.com/foo', [['rel', 'foo']]], ['http://example.com', [['rel', 'up']]]] """ self.links = [ link if isinstance(link, Link) else Link(*link) for link in links or [] ] def to_py(self): """Supports list conversion: >>> LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')]).to_py() [['http://example.com/foo', [['rel', 'foo']]], ['http://example.com', [['rel', 'up']]]] """ return [link.to_py() for link in self.links] def __repr__(self): return "LinkHeader([%s])" % ", ".join(repr(link) for link in self.links) def __str__(self): """Formats a link header: >>> str(LinkHeader([Link('http://example.com/foo', rel='foo'), Link('http://example.com', rel='up')])) '<http://example.com/foo>; rel=foo, <http://example.com>; rel=up' """ return ", ".join(str(link) for link in self.links) def links_by_attr_pairs(self, pairs): """Lists links that have attribute pairs matching all the supplied pairs: >>> parse('<http://example.com/foo>; rel="foo", <http://example.com>; rel="up"' ... ).links_by_attr_pairs([('rel', 'up')]) [Link('http://example.com', rel='up')] """ return [ link for link in self.links if all([key, value] in link.attr_pairs for key, value in pairs) ] class Link(object): """Represents a single link.""" def __init__(self, href, attr_pairs=None, **kwargs): """Initializes a Link object with an href and attributes either in the form of a sequence of key/value pairs &/or as keyword arguments. The sequence form allows attribute names to be repeated. Attributes may be accessed subsequently via the `attr_pairs` property. String conversion follows the spec: >>> str(Link('http://example.com', [('foo', 'bar'), ('foo', 'baz')], rel='self')) '<http://example.com>; foo=bar; foo=baz; rel=self' Conversion to json-friendly list-based structures: >>> Link('http://example.com', [('foo', 'bar'), ('foo', 'baz')], rel='self').to_py() ['http://example.com', [['foo', 'bar'], ['foo', 'baz'], ['rel', 'self']]] """ self.href = href self.attr_pairs = [ list(pair) for pair in (attr_pairs or []) + list(kwargs.items()) ] def to_py(self): """Convert to a json-friendly list-based structure: >>> Link('http://example.com', rel='foo').to_py() ['http://example.com', [['rel', 'foo']]] """ return [self.href, self.attr_pairs] def __repr__(self): """ >>> Link('http://example.com', rel='self') Link('http://example.com', rel='self') """ return "Link(%s)" % ", ".join( [repr(self.href)] + ["%s=%s" % (pair[0], repr(pair[1])) for pair in self.attr_pairs] ) def __str__(self): """Formats a single link: >>> str(Link('http://example.com/foo', [['rel', 'self']])) '<http://example.com/foo>; rel=self' >>> str(Link('http://example.com/foo', [['rel', '"quoted"'], ['type', 'text/html'], ['title*', "UTF-8'en'%e2%82%ac%20rates"]])) '<http://example.com/foo>; rel="\\\\"quoted\\\\""; type=text/html; title*=UTF-8\\'en\\'%e2%82%ac%20rates' Note that there is no explicit support for the title* attribute other than to output it unquoted. Where used, it is up to client applications to provide values that meet RFC2231 Section 7.
""" def str_pair(key, value): if value is None: return key elif RE_ONLY_TOKEN.match(value) or key.endswith("*"): return "%s=%s" % (key, value) else: return '%s="%s"' % (key, value.replace('"', r"\"")) return "; ".join( ["<%s>" % self.href] + [str_pair(key, value) for key, value in self.attr_pairs] ) def __getattr__(self, name): """ >>> Link('/', rel='self').rel 'self' >>> Link('/', hreflang='EN').hreflang ['EN'] >>> Link('/', foo='bar').foo ['bar'] >>> Link('/', [('foo', 'bar'), ('foo', 'baz')]).foo ['bar', 'baz'] >>> Link('/').rel #doctest: +ELLIPSIS Traceback (most recent call last): ... AttributeError: No attribute of type 'rel' present >>> Link('/').hreflang [] >>> Link('/').foo [] """ name_lower = name.lower() values = [value for key, value in self.attr_pairs if key.lower() == name_lower] if name in SINGLE_VALUED_ATTRS: if values: return values[0] else: raise AttributeError("No attribute of type %r present" % name_lower) return values def __contains__(self, name): """ >>> 'rel' in Link('/', rel='self') True >>> 'obs' in Link('/', obs=None) True >>> 'rel' in Link('/') False """ name_lower = name.lower() return any(key.lower() == name_lower for key, value in self.attr_pairs) def get_context(self, requested_resource_address): """Return the absolute URI of the context of a link. This is usually equals the base address the statement is about (eg. the requested URL if the link header was served in a successful HTTP GET request), but can be overridden by the anchor parameter. >>> Link('../', rel='index').get_context('http://www.example.com/book1/chapter1/') 'http://www.example.com/book1/chapter1/' >>> Link('', rel='next', anchor='../').get_context('http://www.example.com/book1/chapter1/') 'http://www.example.com/book1/' """ if "anchor" in self: return urljoin(requested_resource_address, self.anchor) return requested_resource_address def get_target(self, requested_resource_address): """Return the absolute URI of the target of a link. It is determined by joining the address from which the link header was retrieved with the link-value (inside angular brackets) according to RFC3986 section 5. >>> Link('../', rel='index').get_target('http://www.example.com/book1/chapter1/') 'http://www.example.com/book1/' >>> Link('', rel='next', anchor='../').get_target('http://www.example.com/book1/chapter1/') 'http://www.example.com/book1/chapter1/' """ return urljoin(requested_resource_address, self.href) class _Scanner(object): def __init__(self, buf): self.buf = buf self.match = None def __getitem__(self, key): return self.match.group(key) def scan(self, pattern): self.match = pattern.match(self.buf) if self.match: self.buf = self.buf[self.match.end() :] return self.match # For doctest headers: Dict[str, str] = {} aiocoap-0.4.12/clientGET-observe.py000077500000000000000000000016461472205122200170530ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This is a usage example of aiocoap that demonstrates how to implement a simple client. 
See the "Usage Examples" section in the aiocoap documentation for some more information.""" import logging import asyncio from aiocoap import * logging.basicConfig(level=logging.INFO) async def main(): protocol = await Context.create_client_context() request = Message(code=GET, uri="coap://vs0.inf.ethz.ch/obs", observe=0) pr = protocol.request(request) r = await pr.response print("First response: %s\n%r" % (r, r.payload)) async for r in pr.observation: print("Next result: %s\n%r" % (r, r.payload)) pr.observation.cancel() break print("Loop ended, sticking around") await asyncio.sleep(50) if __name__ == "__main__": asyncio.run(main()) aiocoap-0.4.12/clientGET.py000077500000000000000000000014771472205122200154120ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This is a usage example of aiocoap that demonstrates how to implement a simple client. See the "Usage Examples" section in the aiocoap documentation for some more information.""" import logging import asyncio from aiocoap import * logging.basicConfig(level=logging.INFO) async def main(): protocol = await Context.create_client_context() request = Message(code=GET, uri="coap://localhost/time") try: response = await protocol.request(request).response except Exception as e: print("Failed to fetch resource:") print(e) else: print("Result: %s\n%r" % (response.code, response.payload)) if __name__ == "__main__": asyncio.run(main()) aiocoap-0.4.12/clientPUT.py000077500000000000000000000020411472205122200154270ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This is a usage example of aiocoap that demonstrates how to implement a simple client. See the "Usage Examples" section in the aiocoap documentation for some more information.""" import logging import asyncio from aiocoap import * logging.basicConfig(level=logging.INFO) async def main(): """Perform a single PUT request to localhost on the default port, URI "/other/block". The request is sent 2 seconds after initialization. The payload is bigger than 1kB, and thus sent as several blocks.""" context = await Context.create_client_context() await asyncio.sleep(2) payload = b"The quick brown fox jumps over the lazy dog.\n" * 30 request = Message(code=PUT, payload=payload, uri="coap://localhost/other/block") response = await context.request(request).response print("Result: %s\n%r" % (response.code, response.payload)) if __name__ == "__main__": asyncio.run(main()) aiocoap-0.4.12/contrib/000077500000000000000000000000001472205122200146465ustar00rootroot00000000000000aiocoap-0.4.12/contrib/aiocoap-kivy-widget000077500000000000000000000211551472205122200204540ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """A demo that opens a Kivy window and exposes it as a text and color display via CoAP The example is more comprehensive than the GTK ``./aiocoap-widgets`` demo (in that it offers GUI for registering at an RD, and to display log output) because unlike GTK it can also be run on Android, where neither command line arguments nor debug output are easily accessible. 
""" import asyncio import logging from aiocoap import Context, Message, CHANGED, defaults from aiocoap.resource import Site, Resource, ObservableResource, WKCResource from aiocoap.resourcedirectory.client.register import Registerer from kivy.app import App from kivy.lang.builder import Builder from kivy.utils import escape_markup from widgets_common.kivy_resource import * kv = ''' TabbedPanel: do_default_tab: False TabbedPanelItem: text: 'Widgets' BoxLayout: padding: '5sp' spacing: '5sp' orientation: 'vertical' RstDocument: # Not that we'd intend to do any ReStructured text -- this is # just the most plain widget that just has a background color id: colorfield size_hint: 1, 1 background_color: (0, 0, 0, 1) Button: size_hint: 1, 0.1 id: btn text: 'Use CoAP discovery to write here or receive notifications of clicks.' TabbedPanelItem: text: 'RD' BoxLayout: pos: (0, 0) orientation: 'vertical' spacing: '5sp' BoxLayout: size_hint_max_y: '30sp' spacing: '5sp' orientation: 'horizontal' Label: text: 'RD URI:' TextInput: id: rd_uri text: 'coap://rd.coap.amsuess.com' multiline: False size_hint: 3, 1 BoxLayout: size_hint_max_y: '30sp' spacing: '5sp' orientation: 'horizontal' Label: text: 'Endpoint name:' TextInput: id: rd_ep text: 'kivy' multiline: False size_hint: 3, 1 BoxLayout: size_hint_max_y: '30sp' spacing: '5sp' orientation: 'horizontal' Label: text: 'Request reverse proxying:' CheckBox: id: rd_proxy size_hint: 3, 1 ToggleButton: size_hint_max_y: '30sp' id: rd_active text: 'Register' Label: size_hint_max_y: '30sp' id: rd_status text: '' Label: TabbedPanelItem: text: 'Logs' ScrollView: do_scroll_x: False do_scroll_y: True Label: id: logs markup: True # as recommended in ScrollView example size_hint_y: None height: self.texture_size[1] text_size: self.width, None ''' class KivyLogFormatter(logging.Formatter): """A logging.Formatter implementation that produces colored and escaped output for Kivy markup using widgets""" def usesTime(self): return True def color(self, level) -> str: normalized = (level - logging.DEBUG) / (logging.ERROR - logging.DEBUG) clipped = max(min(normalized, 1), 0) # gradient: white, green, red GREEN_X = 0.25 if clipped < GREEN_X: remapped = clipped / GREEN_X return (1 - remapped, 1, 1 - remapped) else: remapped = (clipped - GREEN_X) / (1 - GREEN_X) return (remapped ** 0.5, (1 - remapped) ** 0.5, 0) def formatMessage(self, record): color = bytes(int(c * 255) for c in self.color(record.levelno)).hex() return f'[color=808080][font_family=Roboto]{escape_markup(record.asctime)}[/font_family][/color] – [color={color}]{escape_markup(record.levelname)}[/color] – [color=808080]{escape_markup(record.name)}[/color] – {escape_markup(record.message)}' def formatException(self, record): result = super().formatException(record) return escape_markup(result) def formatStack(self, record): result = super().formatStack(record) return escape_markup(result) class HandlerToTextWidget(logging.Handler): """A logging handler that renders into a Kivy text widget, and truncates early output to avoid ever-increasing memory requirements""" BUFLEN = 80000 def __init__(self, widget): super().__init__() self._widget = widget self._formatter = KivyLogFormatter() def emit(self, record): old_text = self._widget.text if len(old_text) > self.BUFLEN: old_text = old_text[-self.BUFLEN:] old_text = "…" + old_text[old_text.find('\n'):] self._widget.text = old_text + self._formatter.format(record) + "\n" class CoAPDisplay(App): rd_task = None def build(self): built = Builder.load_string(kv) coap_logger = 
logging.getLogger('coap') coap_logger.setLevel(logging.DEBUG) coap_logger.addHandler(HandlerToTextWidget(built.ids.logs)) coap_logger.info("aiocoap's selected server transports are " + ":".join(defaults.get_default_servertransports())) for (name, f) in defaults.missing_module_functions.items(): missing = f() if missing: coap_logger.warning("Missing modules for feature %s: %s", name, missing) return built async def main(self): try: # Wrap main so that we have a chance of seeing exceptions raised # during CoAP setup await self._inner_main() except BaseException as e: log = logging.getLogger("coap") log.exception(e) raise e async def _inner_main(self): site = Site() site.add_resource(['text'], Text(self.root.ids.btn)) site.add_resource(['pressed'], PressState(self.root.ids.btn)) site.add_resource(['color'], Color(self.root.ids.colorfield, 'background_color')) site.add_resource(['.well-known', 'core'], WKCResource(site.get_resources_as_linkheader)) self.context = await Context.create_server_context(site, loggername='coap') self.root.ids.rd_active.bind(on_press=self.on_rd_active_pressed) def on_rd_active_pressed(self, btn): if btn.state == 'down': self.rd_task = asyncio.create_task(self.register_rd()) self.root.ids.rd_uri.disabled = True self.root.ids.rd_ep.disabled = True self.root.ids.rd_proxy.disabled = True else: btn.state = 'down' if self.rd_task is not None: self.rd_task.cancel() self.rd_task = None async def register_rd(self): try: rd_uri = self.root.ids.rd_uri.text rd_ep = self.root.ids.rd_ep.text rd_proxy = self.root.ids.rd_proxy.active params = {'ep': rd_ep} if rd_proxy: params['proxy'] = 'on' registerer = Registerer( self.context, rd=rd_uri, registration_parameters=params, loggername='coap.rd-ep', ) old_set_state = registerer._set_state def new_set_state(state): self.root.ids.rd_status.text = 'Current status: %s' % state return old_set_state(state) registerer._set_state = new_set_state await asyncio.Future() except Exception as e: self.root.ids.rd_status.text = 'Failed (%s)' % e logging.getLogger("coap").exception(e) except asyncio.CancelledError: self.root.ids.rd_status.text = 'Shutting down registration…' await registerer.shutdown() self.root.ids.rd_status.text = 'Registration terminated' finally: self.root.ids.rd_active.state = 'normal' self.root.ids.rd_uri.disabled = False self.root.ids.rd_ep.disabled = False self.root.ids.rd_proxy.disabled = False def on_start(self): asyncio.get_event_loop().create_task(self.main()) if __name__ == '__main__': asyncio.run(CoAPDisplay().async_run()) aiocoap-0.4.12/contrib/aiocoap-proxy.ipynb000066400000000000000000000026031472205122200205040ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "trusted": true }, "outputs": [], "source": [ "import micropip\n", "\n", "await micropip.install(\"aiocoap\")\n", "import aiocoap\n", "\n", "ctx = await aiocoap.Context.create_client_context()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "trusted": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": "Übergrößenträger = 特大の人 = 超大航母\n" } ], "source": [ "req = aiocoap.Message(code=aiocoap.GET, uri=\"coap://coap.me/blåbærsyltetøy\")\n", "req.remote = aiocoap.message.UndecidedRemote(\"coaps+ws\", \"proxy.coap.amsuess.com\")\n", "req.opt.proxy_scheme = \"coap\"\n", "print((await ctx.request(req).response).payload.decode(\"utf8\"))" ] } ], "metadata": { "kernelspec": { "display_name": "Python (Pyodide)", "language": "python", "name": "python" }, "language_info": { 
"codemirror_mode": { "name": "python", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8" }, "toc-autonumbering": false, "toc-showcode": false, "toc-showtags": false }, "nbformat": 4, "nbformat_minor": 4 }aiocoap-0.4.12/contrib/aiocoap-server.ipynb000066400000000000000000000077751472205122200206500ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "trusted": true }, "outputs": [], "source": [ "import micropip\n", "\n", "await micropip.install(\"aiocoap[prettyprint]\")\n", "import aiocoap\n", "\n", "ctx = await aiocoap.Context.create_client_context()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "trusted": true }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9d812769e6724f83ae4a6dc4839b77bd", "version_major": 2, "version_minor": 0 }, "text/plain": "FloatSlider(value=0.0)" }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "await micropip.install(\"ipywidgets\")\n", "import ipywidgets as widgets\n", "\n", "slider = widgets.FloatSlider()\n", "slider" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "trusted": true }, "outputs": [], "source": [ "from aiocoap import resource\n", "import cbor2\n", "\n", "\n", "class Position(resource.Resource):\n", " def __init__(self, widget):\n", " super().__init__()\n", " self.widget = widget\n", "\n", " async def render_get(self, request):\n", " # We should check some options here, or bug the aiocoap author\n", " # about better abstractions for typed resources\n", " return aiocoap.Message(\n", " content_format=60, payload=cbor2.dumps(self.widget.value)\n", " )\n", "\n", " async def render_put(self, request):\n", " self.widget.value = cbor2.loads(request.payload)\n", " return aiocoap.Message(code=aiocoap.CHANGED)\n", "\n", "\n", "root = resource.Site()\n", "root.add_resource(\n", " [\".well-known\", \"core\"], resource.WKCResource(root.get_resources_as_linkheader)\n", ")\n", "root.add_resource([\"position\"], Position(slider))\n", "ctx.serversite = root" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "trusted": true }, "outputs": [], "source": [ "from aiocoap.resourcedirectory.client.register import Registerer\n", "\n", "registerer = Registerer(\n", " ctx,\n", " \"coaps+ws://rd.coap.amsuess.com\",\n", " lt=60,\n", " registration_parameters={\n", " \"ep\": \"jupyter\",\n", " \"proxy\": \"on\",\n", " },\n", ")" ] } ], "metadata": { "kernelspec": { "display_name": "Python (Pyodide)", "language": "python", "name": "python" }, "language_info": { "codemirror_mode": { "name": "python", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8" }, "toc-autonumbering": false, "toc-showcode": false, "toc-showtags": false, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": { "5fb425d3185b4cce92be92c1375094d5": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": {} }, "63dd6a1378da41b7a73f6e3134b2fd04": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "SliderStyleModel", "state": { "description_width": "" } }, "9d812769e6724f83ae4a6dc4839b77bd": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": 
"FloatSliderModel", "state": { "behavior": "drag-tap", "layout": "IPY_MODEL_5fb425d3185b4cce92be92c1375094d5", "step": 0.1, "style": "IPY_MODEL_63dd6a1378da41b7a73f6e3134b2fd04" } } }, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 4 }aiocoap-0.4.12/contrib/aiocoap-widgets000077500000000000000000000075771472205122200176730ustar00rootroot00000000000000#!/usr/bin/env -S pipx run # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT # /// script # requires-python = ">= 3.10" # dependencies = [ # "aiocoap >= 0.4.8, < 0.5", # "cbor2", # "pygobject >= 3.49" # ] # /// """Graphical utility to represent typical CoAP interfaces""" import argparse from aiocoap import resource from aiocoap import Context from aiocoap.resourcedirectory.client.register import Registerer import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk import asyncio from gi.events import GLibEventLoopPolicy from widgets_common.gobject_resource import * class Slider(FloatResource, GobjectBacked): if_ = 'core.s' unit = 'degC' # FIXME that should be somewhere in core widget_property = 'value' def __init__(self, args): self.widget = Gtk.Scale() self.widget.props.draw_value = False self.backend_widget = self.widget.props.adjustment = Gtk.Adjustment() self.backend_widget.props.lower = -10 self.backend_widget.props.upper = 40 self.backend_widget.props.value = 20 super().__init__() class WindowTitle(StringResource, GobjectBacked): if_ = 'core.p' widget_property = 'title' def __init__(self, window): self.backend_widget = self.widget = window super().__init__() class ArgumentParser(argparse.ArgumentParser): def __init__(self): super().__init__(description=__doc__.split("\n")[0]) self.add_argument('--anyport', help="Bind to a non-default port (useful for multiple instances on one computer, and in connection with --register-at)", action='store_true') self.add_argument('--register', help="Resource directory to register at (uses multicast if not specified)", metavar='RD', nargs='?', action='append') self.add_argument('--register-as', help="Endpoint name (or name.domain) to register as at the resource directory (Default: %(default)s)", default='widget', metavar='EP') async def start_context(site, args): if args.anyport: ctx = await Context.create_client_context() ctx.serversite = site else: ctx = await Context.create_server_context(site) if args.register: ep, _, d = args.register_as.partition('.') registerer = Registerer(ctx, args.register[0], lt=120, registration_parameters=dict(ep=ep, d=d)) async def shutdown(): await registerer.shutdown() await ctx.shutdown() return shutdown else: return ctx.shutdown class WidgetApplication(Gtk.Application): def __init__(self): super().__init__() self.hold() self.connect("activate", lambda s: asyncio.create_task(self.start())) async def start(self): p = ArgumentParser() args = p.parse_args() win = Gtk.Window() win.connect("delete-event", lambda *args: self.release()) site = resource.Site() site.add_resource(['.well-known', 'core'], resource.WKCResource(site.get_resources_as_linkheader)) site.add_resource(['window-title'], WindowTitle(win)) # temporary show-all box = Gtk.VBox() win.add(box) rgb = RGBLight(None) box.add(rgb.widget) site.add_resource(['spot'], rgb) switch = Switch(None) box.add(switch.widget) site.add_resource(['switch'], switch) bulb = Bulb(None) box.add(bulb.widget) site.add_resource(['bulb'], bulb) slider = Slider(None) box.add(slider.widget) site.add_resource(['slider'], slider) bigbatch = 
SubsiteBatch(site) site.add_resource([], bigbatch) win.show_all() self.shutdown = await start_context(site, args) def sync_main(): asyncio.set_event_loop_policy(GLibEventLoopPolicy()) app = WidgetApplication() app.run() if __name__ == "__main__": sync_main() aiocoap-0.4.12/contrib/aiocoap.ipynb000066400000000000000000000073701472205122200173330ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "trusted": true }, "outputs": [], "source": [ "import micropip\n", "\n", "await micropip.install(\"aiocoap[prettyprint]\")\n", "import aiocoap\n", "\n", "ctx = await aiocoap.Context.create_client_context()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "trusted": true }, "outputs": [ { "data": { "text/html": "
    Message with code CONTENT, remote <WSRemote at 0x1e61de0, hostinfo rd.coap.amsuess.com, local localhost>\n
    1 option
    1. CONTENT_FORMAT: application/link-format

    application/link-format content was re-formatted

    </.well-known/core>; ct=40,\n</.well-known/rd>,\n</rd>; ct=40; rt=core.rd,\n</endpoint-lookup/>; ct=40; rt=core.rd-lookup-ep; obs,\n</resource-lookup/>; ct=40; rt=core.rd-lookup-res; obs,\n<https://christian.amsuess.com/tools/aiocoap/#version-0.4.7>; rel=impl-info\n
    \n
    ", "text/plain": ", 1 option(s), 260 byte(s) payload>" }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "req = aiocoap.Message(\n", " code=aiocoap.GET, uri=\"coaps+ws://rd.coap.amsuess.com/.well-known/core\"\n", ")\n", "await ctx.request(req).response" ] } ], "metadata": { "kernelspec": { "display_name": "Python (Pyodide)", "language": "python", "name": "python" }, "language_info": { "codemirror_mode": { "name": "python", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8" } }, "nbformat": 4, "nbformat_minor": 4 }aiocoap-0.4.12/contrib/android/000077500000000000000000000000001472205122200162665ustar00rootroot00000000000000aiocoap-0.4.12/contrib/android/.gitignore000066400000000000000000000000551472205122200202560ustar00rootroot00000000000000for-launcher/ for-buildozer/ venv-buildozer/ aiocoap-0.4.12/contrib/android/README.rst000066400000000000000000000025771472205122200177700ustar00rootroot00000000000000Files in here prepare ``../aiocoap-kivy-widget`` to be run on Android: * ``./prepare-launcher-code.py`` To run it this way, install the `Kivy Launcher`_ on the Andorid device, run ``./prepare-launcher-code.py`` in the git checkout. This creates several files in ``./for-launcher``; copy these files and directories from ``for-launcher`` into ``/storage/emulated/0/kivy/aiocoap-kivy-widget/`` on the Android device. Then, the demo is available through the Kivy Launcher. This is easy to run and takes no relevant resources, but can not easily contain advanced dependencies such as the cryptography package. * ``./build-with-buildozer.py`` To run it this way, run ``./build-with-buildozer.py android debug deploy run`` (or use any other arguments -- they will all be forwarded to buildozer_; ``debug`` will build the ``.apk``, and ``deploy run`` will install and launch it on an ADB connected Android device). This copies together all relevant files into ``./for-buildozer``, installs buildozer in a virtualenv in ``./venv-buildozer``, and runs buildozer. Note that buildozer may download significant amounts of SDKs and NDKs with questionable licenses, and possibly stores some of that in cache directories outside of this directory. .. _`Kivy launcher`: https://github.com/kivy/kivy-launcher .. _buildozer: https://buildozer.readthedocs.io/en/latest/quickstart.html aiocoap-0.4.12/contrib/android/build-with-buildozer.py000077500000000000000000000033171472205122200227140ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import sys import shutil from pathlib import Path import subprocess import venv out = Path(__file__).parent / "for-buildozer" out.mkdir(exist_ok=True) buildozer_venv = Path(__file__).parent / "venv-buildozer" contrib = Path(__file__).parent.parent with (out / "buildozer.spec").open("w") as buildozerspec: buildozerspec.write("""\ [app] title = aiocoap widget demo package.name = aiocoap_widget_demo package.domain = org.example source.dir = . source.include_exts = py version = 0.1 # docutils for kivy rst widget # # everything after that is aiocoap[prettyprint,oscore,ws] spelled out (without # aiocoap as that's copied in below). for some reason, ge25519's dependency on # fe25519 needs to be spelled out. 
requirements = python3,kivy,docutils,termcolor,cbor2,pygments,cbor-diag,cryptography,filelock,ge25519,fe25519,websockets
orientation = portrait
fullscreen = 0
android.permissions = INTERNET
# You may need to adjust this to your device
android.arch = armeabi-v7a
""")

shutil.copy(contrib / "aiocoap-kivy-widget", out / "main.py")
# We could use aiocoap from PyPI, but so far things only work with this
# branch's version.
shutil.copytree(contrib / "widgets_common", out / "widgets_common", dirs_exist_ok=True)
shutil.copytree(contrib.parent / "aiocoap", out / "aiocoap", dirs_exist_ok=True)

venv.create(buildozer_venv, with_pip=True)

subprocess.check_call(
    [
        "bash",
        "-e",
        "-c",
        """\
source ../venv-buildozer/bin/activate
pip install cython buildozer
buildozer $@
""",
        "buildozer",
    ]
    + sys.argv[1:],
    cwd=out,
)
aiocoap-0.4.12/contrib/android/prepare-launcher-code.py000077500000000000000000000031071472205122200230110ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

from pathlib import Path
import shutil
import urllib.request

out = Path(__file__).parent / "for-launcher"
out.mkdir(exist_ok=True)

contrib = Path(__file__).parent.parent

with (out / "android.txt").open("w") as androidtxt:
    androidtxt.write("title=aiocoap kivy widget\n")
    androidtxt.write("author=chrysn\n")
    androidtxt.write("orientation=portrait\n")

shutil.copytree(contrib / "widgets_common", out / "widgets_common", dirs_exist_ok=True)
shutil.copytree(contrib.parent / "aiocoap", out / "aiocoap", dirs_exist_ok=True)

wheels = [
    "https://files.pythonhosted.org/packages/d5/e1/af78b099196feaab7c0252108abc4f5cfd36d255ac47c4b4a695ff838bf9/cbor2-5.4.6-py3-none-any.whl",
    "https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl",
]

wheelfilenames = []
for w in wheels:
    with urllib.request.urlopen(w) as request:
        wheelname = w[w.rindex("/") + 1 :]
        with (out / wheelname).open("wb") as wheel_out:
            wheel_out.write(request.read())
    wheelfilenames.append(wheelname)

with (out / "main.py").open("w") as mainpy:
    mainpy.write("import sys\n")
    mainpy.write("from pathlib import Path\n")
    mainpy.write("_here = Path(__file__).parent\n")
    for w in wheelfilenames:
        mainpy.write(f"sys.path.append(str(_here / '{w}') + '/')\n")
    with (contrib / "aiocoap-kivy-widget").open() as infile:
        mainpy.write(infile.read())
aiocoap-0.4.12/contrib/deterministic-demo/000077500000000000000000000000001472205122200204335ustar00rootroot00000000000000
aiocoap-0.4.12/contrib/deterministic-demo/README.rst000066400000000000000000000017651472205122200221310ustar00rootroot00000000000000
Deterministic OSCORE demo
=========================

This shows the implementation of cachable-oscore_ that comes with aiocoap as
an experimental extension.

.. _cachable-oscore: https://tools.ietf.org/html/draft-amsuess-core-cachable-oscore-01

Security error
--------------

(A warning would really be understating it)

This uses statically configured keys for both parties, to keep the demo nice
and slim. As a consequence, as soon as the server is started a second time, it
will produce responses with the same partial IVs, resulting in nonce reuse and
thus probably the loss of all security properties. (It doesn't have any to
start with, because the private keys are hard-coded in the files...)
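By contrast, a deployment that wants to keep its security properties across
restarts would persist the context instead of hard-coding it. A minimal
sketch, assuming the ``FilesystemSecurityContext`` class that the OSCORE plug
test in this repository also uses (the ``./my-context`` directory name is made
up for this example)::

    from aiocoap import oscore

    # Sequence numbers (and the replay window) live in the directory, so a
    # restarted server continues after the last persisted partial IV instead
    # of reusing nonces.
    secctx = oscore.FilesystemSecurityContext("./my-context")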
Usage
-----

* Monitor traffic on your loopback interface
* Run the server
* Run the client
* Observe how two identical (except for transport details like token and
  message ID) requests are sent, and how different responses are created,
  signed (that's why they are large), returned and accepted by the client.
aiocoap-0.4.12/contrib/deterministic-demo/client.py000077500000000000000000000046671472205122200222760ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import logging
import asyncio

from aiocoap import *

logging.basicConfig(level=logging.INFO)

async def main():
    protocol = await Context.create_client_context()

    import aiocoap.oscore

    # Acting as Rikard Test 2 Entity 3
    protocol.client_credentials["coap://localhost/*"] = (
        aiocoap.oscore.SimpleGroupContext(
            algorithm=aiocoap.oscore.algorithms[aiocoap.oscore.DEFAULT_ALGORITHM],
            hashfun=aiocoap.oscore.hashfunctions[aiocoap.oscore.DEFAULT_HASHFUNCTION],
            alg_signature=aiocoap.oscore.Ed25519(),
            alg_group_enc=aiocoap.oscore.algorithms[aiocoap.oscore.DEFAULT_ALGORITHM],
            alg_pairwise_key_agreement=aiocoap.oscore.EcdhSsHkdf256(),
            group_id=bytes.fromhex("DD11"),
            master_secret=bytes.fromhex("11223344556677889900AABBCCDDEEFF"),
            master_salt=bytes.fromhex("1F2E3D4C5B6A7081"),
            # these are really moot given we only access this as a deterministic client
            sender_id=bytes.fromhex("52"),
            private_key=bytes.fromhex(
                "E550CD532B881D52AD75CE7B91171063E568F2531FBDFB32EE01D1910BCF810F"
            ),
            peers={
                bytes.fromhex("0A"): bytes.fromhex(
                    "CE616F28426EF24EDB51DBCEF7A23305F886F657959D4DF889DDFC0255042159"
                ),
                bytes.fromhex("51"): bytes.fromhex(
                    "2668BA6CA302F14E952228DA1250A890C143FDBA4DAED27246188B9E42C94B6D"
                ),
                # that's a new one for deterministic, and we build a key for it but no shared secret
                bytes.fromhex("dc"): None,
            },
            # requests in group mode would be sent with
            # ).pairwise_for(bytes.fromhex('0A'))
            # but here we want to request deterministically
        ).for_sending_deterministic_requests(bytes.fromhex("dc"), bytes.fromhex("0a"))
    )

    request = Message(code=GET, uri="coap://localhost/.well-known/core")
    response = await protocol.request(request).response
    print("Got response", response, response.payload)

    request = Message(code=GET, uri="coap://localhost/.well-known/core")
    response = await protocol.request(request).response
    print("Got response", response, response.payload)

if __name__ == "__main__":
    asyncio.run(main())
aiocoap-0.4.12/contrib/deterministic-demo/server.py000077500000000000000000000040171472205122200223200ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import logging

from aiocoap import oscore_sitewrapper
import aiocoap.resource as resource
import aiocoap
import asyncio

logging.basicConfig(level=logging.INFO)
logging.getLogger("coap-server").setLevel(logging.DEBUG)

async def main():
    # Resource tree creation
    root = resource.Site()
    root.add_resource(
        [".well-known", "core"], resource.WKCResource(root.get_resources_as_linkheader)
    )
    server_credentials = aiocoap.credentials.CredentialsMap()
    root = oscore_sitewrapper.OscoreSiteWrapper(root, server_credentials)

    protocol = await aiocoap.Context.create_server_context(root)

    # Keys from IETF109 plug test: Rikard Test 2 Entity 1
    server_credentials[":a"] = aiocoap.oscore.SimpleGroupContext(
        algorithm=aiocoap.oscore.algorithms[aiocoap.oscore.DEFAULT_ALGORITHM],
        hashfun=aiocoap.oscore.hashfunctions[aiocoap.oscore.DEFAULT_HASHFUNCTION],
        alg_signature=aiocoap.oscore.Ed25519(),
        alg_group_enc=aiocoap.oscore.algorithms[aiocoap.oscore.DEFAULT_ALGORITHM],
        alg_pairwise_key_agreement=aiocoap.oscore.EcdhSsHkdf256(),
        group_id=bytes.fromhex("DD11"),
        master_secret=bytes.fromhex("11223344556677889900AABBCCDDEEFF"),
        master_salt=bytes.fromhex("1F2E3D4C5B6A7081"),
        sender_id=bytes.fromhex("0A"),
        private_key=bytes.fromhex(
            "397CEB5A8D21D74A9258C20C33FC45AB152B02CF479B2E3081285F77454CF347"
        ),
        peers={
            bytes.fromhex("51"): bytes.fromhex(
                "2668BA6CA302F14E952228DA1250A890C143FDBA4DAED27246188B9E42C94B6D"
            ),
            bytes.fromhex("52"): bytes.fromhex(
                "5394E43633CDAC96F05120EA9F21307C9355A1B66B60A834B53E9BF60B1FB7DF"
            ),
            bytes.fromhex("dc"): aiocoap.oscore.DETERMINISTIC_KEY,
        },
    )

    await asyncio.get_running_loop().create_future()

if __name__ == "__main__":
    asyncio.run(main())
aiocoap-0.4.12/contrib/edhoc-demo-server.ipynb000066400000000000000000000106601472205122200212240ustar00rootroot00000000000000
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": "# Running EDHOC with the public demo server\n\nThis demo can be run on the public Jupyter instance at [try Jupyter](https://jupyter.org/try-jupyter/lab/index.html) by uploading this file." }, { "cell_type": "code", "execution_count": 1, "metadata": { "trusted": true }, "outputs": [], "source": [ "import micropip, pyodide\n", "\n", "# The currently required lakers version is unavailable in built Jupyter, but compatibly\n", "# available from the CDN (no guarantees from there: path might need adjustment any time)\n", "await micropip.install(\n", " \"https://cdn.jsdelivr.net/pyodide/dev/full/lakers_python-0.4.1-cp312-cp312-pyodide_2024_0_wasm32.whl\"\n", ")\n", "await micropip.install(\"aiocoap[oscore,prettyprint]\")" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "trusted": true }, "outputs": [], "source": [ "import aiocoap\n", "from aiocoap import *\n", "\n", "ctx = await Context.create_client_context()" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "trusted": true }, "outputs": [], "source": [ "for_demo = {\n", " \"coap://demo.coap.amsuess.com/*\": {\n", " \"edhoc-oscore\": {\n", " \"suite\": 2,\n", " \"method\": 3,\n", " \"own_cred\": {\"unauthenticated\": True},\n", " \"peer_cred\": {\n", " 14: {\n", " 2: \"demo.coap.amsuess.com\",\n", " 8: {\n", " 1: {\n", " 1: 2,\n", " 2: b\"\\0\",\n", " -1: 1,\n", " -2: bytes.fromhex(\n", " \"b9cc746df6641d55044478b29df019ef22b4d2e96ffcf8de85434e5d0f27c33c\"\n", " ),\n", " -3: bytes.fromhex(\n", " \"e14e87330d093b469b121c3d0e4d9452cb90036a6e209f21f37d35d2a05c426c\"\n", " ),\n", " }\n", " },\n", " }\n", " },\n", " }\n", " }\n", "}\n", "ctx.client_credentials.load_from_dict(for_demo)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "trusted": true }, "outputs": [ { "data": { "text/html": "
    Message with code CONTENT, remote <WSRemote at 0xd98518, hostinfo proxy.coap.amsuess.com, local localhost>\n
    1 option
    1. CONTENT_FORMAT: text/plain; charset=utf-8
    b'Used protocol: coap.\\nRequest came from [2a01:4f8:190:3064::6]:36479.\\nThe server address used [2a01:4f8:190:3064::6].\\nNo claims authenticated.'", "text/plain": ", 1 option(s), 141 byte(s) payload>" }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "req = Message(code=GET, uri=\"coap://demo.coap.amsuess.com/whoami\")\n", "# FIXME: We should have a prettier way to set a proxy\n", "req.opt.proxy_scheme = \"coap\"\n", "req.remote = aiocoap.message.UndecidedRemote.from_pathless_uri(\n", " \"coaps+ws://proxy.coap.amsuess.com\"\n", ")\n", "res = await ctx.request(req).response\n", "res" ] } ], "metadata": { "kernelspec": { "display_name": "Python (Pyodide)", "language": "python", "name": "python" }, "language_info": { "codemirror_mode": { "name": "python", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8" } }, "nbformat": 4, "nbformat_minor": 4 }
aiocoap-0.4.12/contrib/multicast-platform-evaluation000077500000000000000000000205331472205122200225730ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This script does not do anything really CoAP-related, but is a testing tool
for multicast messages. It binds to multicast addresses of different scopes
(ff02::1:2, ff05::1:5) on different ports (2002 and 2005), tries sending with
different settings (zone set in source or destination), and reports the
findings.

It was written to verify observations made with `ping` -- namely, that for
link-local addresses it is sufficient to give the zone identifier in the
target (`ping ff02::1%eth0` works), but that for wider scopes like site-local,
the zone identifier must be on the source address, even if that's empty
(`ping ff05::1 -I eth0`).

On Linux, it is required to set a source address for at least the ff05 class
of addresses (and that working mental model is reflected in any error
messages); FreeBSD behaves in a way that appears more sane and accepts a zone
identifier even in the destination address.
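The pattern that passes on both platforms in these tests is to pin the
sending interface via IPV6_PKTINFO ancillary data. A minimal sketch, using
the same `in6_pktinfo` struct defined below (`ifindex` as obtained from
socket.if_nametoindex)::

    pktinfo = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex)
    sender.sendmsg([b"payload"],
            [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, pktinfo)],
            0, ('ff05::1:5', 2005, 0, 0))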
""" import argparse import socket import struct import time in6_pktinfo = struct.Struct("16sI") p = argparse.ArgumentParser(description=__doc__) p.add_argument("ifname", help="Network interface name to bind to and try to send to") p.add_argument("bindaddr", help="Address to bind to on the interface (default: %(default)s)", default="::", nargs="?") p.add_argument("unicast_addr", help="Address to send the initial unicast test messages to (default: %(default)s)", default="::1", nargs="?") p.add_argument("nonlocal_multicast", help="Address to use for the zone-free multicast test (default: %(default)s", default="ff35:30:2001:db8::1", nargs="?") args = p.parse_args() ifindex = socket.if_nametoindex(args.ifname) listen2 = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) listen2.bind((args.bindaddr, 2002)) s = struct.pack('16si', socket.inet_pton(socket.AF_INET6, 'ff02::1:2'), ifindex) listen2.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, s) listen5 = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) listen5.bind((args.bindaddr, 2005)) s = struct.pack('16si', socket.inet_pton(socket.AF_INET6, 'ff05::1:5'), ifindex) listen5.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, s) sender = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) sender.sendmsg([b"Test1"], [], 0, ('::1', 2002)) sender.sendmsg([b"Test2"], [], 0, ('::1', 2005)) time.sleep(0.2) assert listen2.recv(1024, socket.MSG_DONTWAIT) == b"Test1" assert listen5.recv(1024, socket.MSG_DONTWAIT) == b"Test2" print("Unicast tests passed") # Setting no interface index # # ff02: On FreeBSD this fails on send with OSError, on Linux only at receiving # ff05 fails on Linux and works on FreeBSD, but that could be an artefact of default routes. print("Sending to ff02::2".format(args.ifname)) try: sender.sendmsg([b"Test3"], [], 0, ('ff02::1:2', 2002, 0, 0)) time.sleep(0.2) assert listen2.recv(1024, socket.MSG_DONTWAIT) == b"Test3" except (OSError, BlockingIOError) as e: print("Erred with %r, as expected" % e) else: print("That was not expected to work!") print("Sending to ff05::5".format(args.ifname)) try: sender.sendmsg([b"Test4"], [], 0, ('ff05::1:5', 2005, 0, ifindex)) time.sleep(0.2) assert listen5.recv(1024, socket.MSG_DONTWAIT) == b"Test4" except (OSError, BlockingIOError) as e: print("Erred with %r, as expected" % e) else: print("That was not expected to work!") # Setting the ifindex in the destination only works for ff02 and not for ff05 print("Sending to ff02::2%{}".format(args.ifname)) sender.sendmsg([b"Test5"], [], 0, ('ff02::1:2', 2002, 0, ifindex)) time.sleep(0.2) assert listen2.recv(1024, socket.MSG_DONTWAIT) == b"Test5" print("Sending to ff05::5%{}".format(args.ifname)) sender.sendmsg([b"Test6"], [], 0, ('ff05::1:5', 2005, 0, ifindex)) time.sleep(0.2) try: assert listen5.recv(1024, socket.MSG_DONTWAIT) == b"Test6" except BlockingIOError: print("Failed as expected for Linux") else: print("That was not expected to work!") # Setting the ifindex in the source works for both print("Sending to ff02::2 from ::%{}".format(args.ifname)) dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex) sender.sendmsg([b"Test7"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 0, ('ff02::1:2', 2002, 0, 0)) time.sleep(0.2) assert listen2.recv(1024, socket.MSG_DONTWAIT) == b"Test7" print("Sending to ff05::5 from {}".format(args.ifname)) dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex) sender.sendmsg([b"Test8"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 
        0, ('ff05::1:5', 2005, 0, 0))
time.sleep(0.2)
assert listen5.recv(1024, socket.MSG_DONTWAIT) == b"Test8"

# Setting both works on both (unlike when testing this with ping)

print("Sending to ff02::1:2%{} from ::%{}".format(args.ifname, args.ifname))
dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex)
sender.sendmsg([b"Test9"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 0, ('ff02::1:2', 2002, 0, ifindex))
time.sleep(0.2)
assert listen2.recv(1024, socket.MSG_DONTWAIT) == b"Test9"

print("Sending to ff05::1:5%{} from ::%{}".format(args.ifname, args.ifname))
dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex)
sender.sendmsg([b"Test10"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 0, ('ff05::1:5', 2005, 0, ifindex))
time.sleep(0.2)
assert listen5.recv(1024, socket.MSG_DONTWAIT) == b"Test10"

# global addresses shouldn't need a thing (there's routability), but still do

print("Binding to {} on {}".format(args.nonlocal_multicast, args.ifname))
listendb8 = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM)
listendb8.bind((args.bindaddr, 2008))
s = struct.pack('16si', socket.inet_pton(socket.AF_INET6, args.nonlocal_multicast), ifindex)
listendb8.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, s)

print("Sending to {}%{}".format(args.nonlocal_multicast, args.ifname))
sender.sendmsg([b"Test11"], [], 0, (args.nonlocal_multicast, 2008, 0, ifindex))
time.sleep(0.2)
try:
    assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test11"
except BlockingIOError:
    print("Failed as expected for Linux")
else:
    print("That was not expected to work!")

print("Sending to {} from {}".format(args.nonlocal_multicast, args.ifname))
dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex)
sender.sendmsg([b"Test12"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 0, (args.nonlocal_multicast, 2008, 0, 0))
time.sleep(0.2)
assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test12"

print("Sending to {} without interface".format(args.nonlocal_multicast))
sender.sendmsg([b"Test13"], [], 0, (args.nonlocal_multicast, 2008, 0, 0))
time.sleep(0.2)
try:
    assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test13"
except BlockingIOError:
    print("Failed as expected for Linux")
else:
    print("That was not expected to work!")

print("Now binding to {} without interface specification".format(args.nonlocal_multicast))
listendb8any = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM)
listendb8any.bind((args.bindaddr, 2018))
s = struct.pack('16si', socket.inet_pton(socket.AF_INET6, args.nonlocal_multicast), 0)
listendb8any.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, s)

print("Sending to {}%{}".format(args.nonlocal_multicast, args.ifname))
sender.sendmsg([b"Test14"], [], 0, (args.nonlocal_multicast, 2018, 0, ifindex))
time.sleep(0.2)
try:
    assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test14"
except BlockingIOError:
    print("Failed as expected")
else:
    print("That was not expected to work!")

print("Sending to {} from {}".format(args.nonlocal_multicast, args.ifname))
dest = in6_pktinfo.pack(socket.inet_pton(socket.AF_INET6, '::'), ifindex)
sender.sendmsg([b"Test15"], [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, dest)], 0, (args.nonlocal_multicast, 2018, 0, 0))
time.sleep(0.2)
try:
    assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test15"
except BlockingIOError:
    print("Failed as expected")
else:
    print("That was not expected to work!")

print("Sending to {} without interface".format(args.nonlocal_multicast))
sender.sendmsg([b"Test16"], [], 0, (args.nonlocal_multicast, 2018, 0, 0)) time.sleep(0.2) try: assert listendb8.recv(1024, socket.MSG_DONTWAIT) == b"Test16" except BlockingIOError: print("Failed as expected") else: print("That was not expected to work!") aiocoap-0.4.12/contrib/oscore-plugtest/000077500000000000000000000000001472205122200200055ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/.gitignore000066400000000000000000000000161472205122200217720ustar00rootroot00000000000000temp-contexts aiocoap-0.4.12/contrib/oscore-plugtest/common-context/000077500000000000000000000000001472205122200227575ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/common-context/a/000077500000000000000000000000001472205122200231775ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/common-context/a/secret.json000066400000000000000000000001311472205122200253520ustar00rootroot00000000000000{ "secret_hex": "0102030405060708090a0b0c0d0e0f10", "salt_hex": "9e7ca92223786340" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/a/settings.json000066400000000000000000000001331472205122200257270ustar00rootroot00000000000000{ "algorithm": "AES-CCM-16-64-128", "sender-id_hex": "", "recipient-id_hex": "01" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/b/000077500000000000000000000000001472205122200232005ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/common-context/b/secret.json000066400000000000000000000001311472205122200253530ustar00rootroot00000000000000{ "secret_hex": "0102030405060708090a0b0c0d0e0f10", "salt_hex": "9e7ca92223786340" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/b/settings.json000066400000000000000000000001331472205122200257300ustar00rootroot00000000000000{ "algorithm": "AES-CCM-16-64-128", "recipient-id_hex": "", "sender-id_hex": "01" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/c/000077500000000000000000000000001472205122200232015ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/common-context/c/secret.json000066400000000000000000000001311472205122200253540ustar00rootroot00000000000000{ "secret_hex": "0102030405060708090a0b0c0d0e0f10", "salt_hex": "9e7ca92223786340" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/c/settings.json000066400000000000000000000002041472205122200257300ustar00rootroot00000000000000{ "algorithm": "AES-CCM-16-64-128", "sender-id_hex": "", "recipient-id_hex": "01", "id-context_hex": "37cbf3210017a2d3" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/d/000077500000000000000000000000001472205122200232025ustar00rootroot00000000000000aiocoap-0.4.12/contrib/oscore-plugtest/common-context/d/secret.json000066400000000000000000000001311472205122200253550ustar00rootroot00000000000000{ "secret_hex": "0102030405060708090a0b0c0d0e0f10", "salt_hex": "9e7ca92223786340" } aiocoap-0.4.12/contrib/oscore-plugtest/common-context/d/settings.json000066400000000000000000000002041472205122200257310ustar00rootroot00000000000000{ "algorithm": "AES-CCM-16-64-128", "recipient-id_hex": "", "sender-id_hex": "01", "id-context_hex": "37cbf3210017a2d3" } aiocoap-0.4.12/contrib/oscore-plugtest/plugtest-client000077500000000000000000000416161472205122200230660ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """A client suitable for running the OSCORE plug test series against a given server See https://github.com/EricssonResearch/OSCOAP for the test suite 
description.""" import argparse import asyncio import logging import signal import functools from pathlib import Path import sys from aiocoap import * from aiocoap import error from aiocoap import interfaces from aiocoap import credentials # In some nested combinations of unittest and coverage, the usually # provided-by-default inclusion of local files does not work. Ensuring the # local plugtest_common *can* be included. import os.path sys.path.append(os.path.dirname(__file__)) from plugtest_common import * class PlugtestClientProgram: async def run(self): p = argparse.ArgumentParser(description="Client for the OSCORE plug test.") p.add_argument("host", help="Hostname of the server") p.add_argument("contextdir", help="Directory name where to persist sequence numbers", type=Path) p.add_argument("testno", nargs="?", type=int, help="Test number to run (integer part); leave out for interactive mode") p.add_argument("--verbose", help="Show aiocoap debug messages", action='store_true') opts = p.parse_args() self.host = opts.host # this also needs to be called explicitly as only the # 'logging.warning()'-style functions will call it; creating a # sub-logger and logging from there makes the whole logging system not # emit the 'WARNING' prefixes that set apart log messages from regular # prints and also help the test suite catch warnings and errors if opts.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.WARNING) security_context_a = get_security_context('a', opts.contextdir / "a") security_context_c = get_security_context('c', opts.contextdir / "c") self.ctx = await Context.create_client_context() self.ctx.client_credentials[":ab"] = security_context_a self.ctx.client_credentials[":cd"] = security_context_c if opts.testno is not None: await self.run_test(opts.testno) else: next_testno = 0 delta = 1 while True: # Yes this is blocking, but since the tests usually terminate # by themselves ... *shrug* try: next = input("Enter a test number (empty input runs %s, q to quit): " % next_testno) except (KeyboardInterrupt, EOFError): keeprunning = False break if next == "q": keeprunning = False break if next: try: as_int = int(next) except ValueError: print("That's not a number.") continue else: if as_int - next_testno + delta in (0, 1): # Don't jump around randomly if user jumped around, # but otherwise do something sane delta = as_int - next_testno + delta next_testno = as_int print("Running test %s" % next_testno) try: await self.run_test(next_testno) except error.Error as e: print("Test failed with an exception:", e) print() next_testno += delta ctx = None async def run_with_shutdown(self): # Having SIGTERM cause a more graceful shutdown (even if it's not # asynchronously awaiting the shutdown, which would be impractical # since we're likely inside some unintended timeout already) allow for # output buffers to be flushed when the unit test program instrumenting # it terminates it. 
        loop = asyncio.get_running_loop()
        loop.add_signal_handler(signal.SIGTERM, loop.close)
        try:
            await self.run()
        finally:
            if self.ctx is not None:
                await self.ctx.shutdown()

    def use_context(self, which):
        if which is None:
            self.ctx.client_credentials.pop("coap://%s/*" % self.host, None)
        else:
            self.ctx.client_credentials["coap://%s/*" % self.host] = ":" + which

    async def run_test(self, testno):
        self.testno = testno
        testfun = self.__methods[testno]
        try:
            await getattr(self, testfun)()
        except oscore.NotAProtectedMessage as e:
            print("Response carried no Object-Security option, but was: %s %s"%(e.plain_message, e.plain_message.payload))
            raise

    __methods = {}
    def __implements_tests(numbers, __methods=__methods):
        def registerer(method):
            for n in numbers:
                __methods[n] = method.__name__
            return method
        return registerer

    @__implements_tests([0])
    async def test_plain(self):
        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/coap')
        self.use_context(None)

        response = await self.ctx.request(request).response

        print("Response:", response)
        additional_verify("Response had correct code", response.code, CONTENT)
        additional_verify("Response had correct payload", response.payload, b"Hello World!")
        additional_verify("Options as expected", response.opt, Message(content_format=0).opt)

    @__implements_tests([1, 2])
    async def test_hello12(self):
        if self.testno == 1:
            self.use_context("ab")
        else:
            self.use_context("cd")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/1')
        expected = {'content_format': 0}

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", unprotected_response.code, CONTENT)
        additional_verify("Options as expected", unprotected_response.opt, Message(**expected).opt)
        additional_verify("Payload as expected", unprotected_response.payload, b"Hello World!")

    @__implements_tests([3])
    async def test_hellotest3(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/2?first=1')
        expected = {'content_format': 0, 'etag': b'\x2b'}

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", unprotected_response.code, CONTENT)
        additional_verify("Options as expected", unprotected_response.opt, Message(**expected).opt)
        additional_verify("Payload as expected", unprotected_response.payload, b"Hello World!")

    @__implements_tests([4])
    async def test_hellotest4(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/3', accept=0)
        expected = {'content_format': 0, 'max_age': 5}

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", unprotected_response.code, CONTENT)
        additional_verify("Options as expected", unprotected_response.opt, Message(**expected).opt)
        additional_verify("Payload as expected", unprotected_response.payload, b"Hello World!")

    @__implements_tests([5])
    async def test_nonobservable(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/1', observe=0)
        request = self.ctx.request(request)

        unprotected_response = await request.response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", unprotected_response.code, CONTENT)
        additional_verify("Observe option is absent", unprotected_response.opt.observe, None)

        async for o in request.observation:
            print("Expectation failed: Observe events coming in.")

    @__implements_tests([6, 7])
    async def test_observable(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/observe%s' % (self.testno - 5), observe=0)
        request = self.ctx.request(request)

        unprotected_response = await request.response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", unprotected_response.code, CONTENT)
        additional_verify("Observe option present", unprotected_response.opt.observe is not None, True)

        payloads = [unprotected_response.payload]

        async for o in request.observation:
            # FIXME: where's the 'two' stuck?
            payloads.append(o.payload)
            print("Verify: Received message", o, o.payload)

            if len(payloads) == 2 and self.testno == 7:
                # FIXME: Not yet ready to send actual cancellations
                break

        expected_payloads = [b'one', b'two']
        if self.testno == 6:
            expected_payloads.append(b'Terminate Observe')
        additional_verify("Server gave the correct responses", payloads, expected_payloads)

    @__implements_tests([8])
    async def test_post(self):
        self.use_context("ab")

        request = Message(code=POST, uri='coap://' + self.host + '/oscore/hello/6', payload=b"\x4a", content_format=0)

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", CHANGED, unprotected_response.code)
        additional_verify("Options as expected", unprotected_response.opt, Message(content_format=0).opt)
        additional_verify("Payload as expected", unprotected_response.payload, b"\x4a")

    @__implements_tests([9])
    async def test_put_match(self):
        self.use_context("ab")

        request = Message(code=PUT, uri='coap://' + self.host + '/oscore/hello/7', payload=b"\x7a", content_format=0, if_match=[b"\x7b"])

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", CHANGED, unprotected_response.code)
        additional_verify("Options empty as expected", Message().opt, unprotected_response.opt)
        additional_verify("Payload absent as expected", unprotected_response.payload, b"")

    @__implements_tests([10])
    async def test_put_nonmatch(self):
        self.use_context("ab")

        request = Message(code=PUT, uri='coap://' + self.host + '/oscore/hello/7', payload=b"\x8a", content_format=0, if_none_match=True)

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", PRECONDITION_FAILED, unprotected_response.code)
        additional_verify("Options empty as expected", Message().opt, unprotected_response.opt)

    @__implements_tests([11])
    async def test_delete(self):
        self.use_context("ab")

        request = Message(code=DELETE, uri='coap://' + self.host + '/oscore/test')

        unprotected_response = await self.ctx.request(request).response

        print("Unprotected response:", unprotected_response)
        additional_verify("Code as expected", DELETED, unprotected_response.code)
        additional_verify("Options empty as expected", Message().opt, unprotected_response.opt)

    @__implements_tests([12, 13, 14])
    async def test_oscoreerror_server_reports_error(self):
        self.use_context("ab")
        secctx = self.ctx.client_credentials[":ab"]

        if self.testno == 12:
            expected_code = UNAUTHORIZED
            expected_error = oscore.NotAProtectedMessage
            # FIXME: frobbing the sender_id breaks sequence numbers...
            frobnicate_field = 'sender_id'
        elif self.testno == 13:
            expected_code = BAD_REQUEST
            expected_error = oscore.NotAProtectedMessage
            frobnicate_field = 'sender_key'
        elif self.testno == 14:
            expected_code = None
            expected_error = oscore.ProtectionInvalid
            frobnicate_field = 'recipient_key'

        unfrobnicated = getattr(secctx, frobnicate_field)
        if unfrobnicated == b'':
            setattr(secctx, frobnicate_field, b'spam')
        else:
            setattr(secctx, frobnicate_field, bytes((255 - x) for x in unfrobnicated))

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/1')
        try:
            unprotected_response = await self.ctx.request(request).response
        except expected_error as e:
            if expected_code is not None:
                if e.plain_message.code == expected_code:
                    print("Check passed: The server responded with unencrypted %s."%(expected_code))
                else:
                    print("Failed: Server responded with something unencrypted, but not the expected code %s: %s"%(expected_code, e.plain_message))
            else:
                print("Check passed: The validation failed. (%s)" % e)
        else:
            print("Failed: The validation passed.")
            print("Unprotected response:", unprotected_response)
        finally:
            setattr(secctx, frobnicate_field, unfrobnicated)
            # With a frobnicated sender_id, the stored context could not be
            # loaded for later use; making sure it's stored properly again.
            secctx._store()

    @__implements_tests([15])
    async def test_replay(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/1')
        unprotected_response = await self.ctx.request(request).response

        # make this _nonraising as soon as there's a proper context backend
        if unprotected_response.code != CONTENT:
            print("Failed: Request did not even pass before replay (%s)"%unprotected_response)
            return

        secctx = self.ctx.client_credentials[":ab"]
        secctx.sender_sequence_number -= 1

        try:
            unprotected_response = await self.ctx.request(request).response
        except oscore.NotAProtectedMessage as e:
            if e.plain_message.code == UNAUTHORIZED:
                print("Check passed: The server responded with unencrypted replay error.")
            else:
                print("Failed: Server responded with something unencrypted, but not the expected code %s: %s"%(e.plain_message.code, e.plain_message))
        else:
            print("Failed: The validation passed.")
            print("Unprotected response:", unprotected_response)

    @__implements_tests([16])
    async def test_nonoscore_server(self):
        self.use_context("ab")

        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/coap')
        try:
            response = await self.ctx.request(request).response
        except oscore.NotAProtectedMessage as e:
            if e.plain_message.code == BAD_OPTION:
                print("Passed: Server reported bad option.")
            elif e.plain_message.code == METHOD_NOT_ALLOWED:
                print("Dubious: Server reported Method Not Allowed.")
            else:
                print("Failed: Server reported %s" % e.plain_message.code)
        else:
            print("Failed: The server accepted an OSCORE message")

    @__implements_tests([17])
    async def test_unauthorized(self):
        request = Message(code=GET, uri='coap://' + self.host + '/oscore/hello/1')
        self.use_context(None)

        response = await self.ctx.request(request).response

        print("Response:", response, response.payload)
        additional_verify("Response had correct code", response.code, UNAUTHORIZED)

    # # unofficial blockwise tests start here
    #
    # @__implements_tests([16, 17])
    # async def test_block2(self):
    #     #request = Message(code=GET, uri='coap://' + self.host + '/oscore/block/' + {16: 'outer', 17: 'inner'}[self.testno])
    #     request = Message(code=GET, uri='coap://' + self.host + '/oscore/LargeResource')
    #
    #     expected = {'content_format': 0}
    #     unprotected_response = await self.ctx.request(request, handle_blockwise=True).response
    #     if self.testno == 17:
    #         # the library should probably strip that
    #         expected['block2'] = optiontypes.BlockOption.BlockwiseTuple(block_number=1, more=False, size_exponent=6)
    #
    #     print("Unprotected response:", unprotected_response)
    #     additional_verify("Code as expected", unprotected_response.code, CONTENT)
    #     additional_verify("Options as expected", unprotected_response.opt, Message(**expected).opt)

if __name__ == "__main__":
    asyncio.run(PlugtestClientProgram().run_with_shutdown())
aiocoap-0.4.12/contrib/oscore-plugtest/plugtest-server000077500000000000000000000232501472205122200231100ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""A server suitable for running the OSCORE plug test series against it

See https://github.com/EricssonResearch/OSCOAP for the test suite
description."""

import sys
import asyncio
import logging
import argparse
from pathlib import Path

import aiocoap
import aiocoap.oscore as oscore
from aiocoap.oscore_sitewrapper import OscoreSiteWrapper
import aiocoap.error as error
from aiocoap.util.cli import AsyncCLIDaemon
import aiocoap.resource as resource
from aiocoap.credentials import CredentialsMap
from aiocoap.cli.common import add_server_arguments, server_context_from_arguments
from aiocoap.transports.oscore import OSCOREAddress

# In some nested combinations of unittest and coverage, the usually
# provided-by-default inclusion of local files does not work. Ensuring the
# local plugtest_common *can* be included.
import os.path
sys.path.append(os.path.dirname(__file__))

from plugtest_common import *

class PleaseUseOscore(error.ConstructionRenderableError):
    code = aiocoap.UNAUTHORIZED
    message = "This is an OSCORE plugtest, please use option %d"%aiocoap.numbers.optionnumbers.OptionNumber.OSCORE

def additional_verify_request_options(reference, request):
    if request.opt.echo is not None:
        # Silently accepting Echo as that's an artefact of B.1.2 recovery
        reference.opt.echo = request.opt.echo
    additional_verify("Request options as expected", reference.opt, request.opt)

class PlugtestResource(resource.Resource):
    options = {}
    expected_options = {}

    async def render_get(self, request):
        reference = aiocoap.Message(**self.expected_options)
        if request.opt.observe is not None and 'observe' not in self.expected_options:
            # workaround for test 4 hitting on Hello1
            reference.opt.observe = request.opt.observe

        additional_verify_request_options(reference, request)

        return aiocoap.Message(payload=self.message.encode('ascii'), **self.options)

class Hello(PlugtestResource):
    options = {'content_format': 0}
    expected_options = {} # Uri-Path is stripped by the site

    message = "Hello World!"
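# How the resources below line up with the client: each numbered test in
# plugtest-client addresses a fixed path under /oscore/. For example, test 1
# boils down to (a sketch, assuming a server on localhost and an already
# configured client context `ctx`):
#
#     request = aiocoap.Message(code=aiocoap.GET, uri='coap://localhost/oscore/hello/1')
#     response = await ctx.request(request).response  # served by the Hello1 alias below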
Hello1 = Hello # same, just registered with the site for protected access

class Hello2(Hello):
    expected_options = {'uri_query': ['first=1']}
    options = {'etag': b"\x2b", **Hello1.options}

class Hello3(Hello):
    expected_options = {'accept': 0}
    options = {'max_age': 5, **Hello1.options}

class Observe(PlugtestResource, aiocoap.interfaces.ObservableResource):
    expected_options = {'observe': 0}
    options = {'content_format': 0}

    message = "one"

    async def add_observation(self, request, serverobservation):
        async def keep_entertained():
            await asyncio.sleep(2)
            serverobservation.trigger(aiocoap.Message(
                mtype=aiocoap.CON, code=aiocoap.CONTENT,
                payload=b"two", content_format=0,
                ))
            await asyncio.sleep(2)
            serverobservation.trigger(aiocoap.Message(
                mtype=aiocoap.CON, code=aiocoap.INTERNAL_SERVER_ERROR,
                payload=b"Terminate Observe", content_format=0,
                ))
        t = asyncio.create_task(keep_entertained())
        serverobservation.accept(t.cancel)

class Hello6(resource.Resource):
    async def render_post(self, request):
        additional_verify_request_options(aiocoap.Message(content_format=0), request)
        additional_verify("Request payload as expected", request.payload, b"\x4a")

        return aiocoap.Message(code=aiocoap.CHANGED, payload=b"\x4a", content_format=0)

class Hello7(resource.Resource):
    async def render_put(self, request):
        if request.opt.if_none_match:
            print("This looks like test 10b")
            additional_verify_request_options(aiocoap.Message(content_format=0, if_none_match=True), request)
            additional_verify("Request payload as expected", request.payload, b"\x8a")
            return aiocoap.Message(code=aiocoap.PRECONDITION_FAILED)
        else:
            print("This looks like test 9b")
            additional_verify_request_options(aiocoap.Message(content_format=0, if_match=[b"{"]), request)
            additional_verify("Request payload as expected", request.payload, b"z")
            return aiocoap.Message(code=aiocoap.CHANGED)

class DeleteResource(resource.Resource):
    async def render_delete(self, request):
        additional_verify_request_options(aiocoap.Message(), request)

        return aiocoap.Message(code=aiocoap.DELETED)

class BlockResource(PlugtestResource):
    expected_options = {}
    options = {'content_format': 0}

    message = "This is a large resource\n" + "0123456789" * 101

class InnerBlockMixin:
    # this might become general enough that it could replace response blockwise
    # handler some day -- right now, i'm only doing the absolute minimum
    # necessary to satisfy the use case

    inner_default_szx = aiocoap.MAX_REGULAR_BLOCK_SIZE_EXP

    async def render(self, request):
        response = await super().render(request)

        if request.opt.block2 is None:
            szx = self.inner_default_szx
            blockno = 0
        else:
            szx = request.opt.block2.size_exponent
            blockno = request.opt.block2.block_number

        return response._extract_block(blockno, szx)

class InnerBlockResource(InnerBlockMixin, BlockResource):
    pass

class SeqnoManager(resource.ObservableResource):
    def __init__(self, contextmap):
        super().__init__()
        self.contextmap = contextmap
        for c in self.contextmap.values():
            c.notification_hooks.append(self.updated_state)

    async def render_get(self, request):
        text = ""
        for name in ('b', 'd'):
            the_context = self.contextmap[':' + name]
            # this direct access is technically outside the interface for a
            # SecurityContext, but then again, there isn't one yet
            text += """In context %s, next seqno is %d (persisted up to %d)\n""" % (name.upper(), the_context.sender_sequence_number, the_context.sequence_number_persisted)
            if the_context.recipient_replay_window.is_initialized():
                index = the_context.recipient_replay_window._index
                bitfield = the_context.recipient_replay_window._bitfield
                # Useless for the internal representation, but much more readable
                while bitfield & 1:
                    bitfield >>= 1
                    index += 1
                print(index, bitfield)
                bitfield_values = [i + index for (i, v) in enumerate(bin(bitfield)[2:][::-1]) if v == '1']
                text += """I've seen all sequence numbers lower than %d%s.""" % (
                        index,
                        ", and also %s" % bitfield_values if bitfield else ""
                        )
            else:
                text += "The replay window is uninitialized"
            text += "\n"
        return aiocoap.Message(payload=text.encode('utf-8'), content_format=0)

    async def render_put(self, request):
        try:
            number = int(request.payload.decode('utf8'))
        except (ValueError, UnicodeDecodeError):
            raise aiocoap.error.BadRequest("Only numeric values are accepted.")
        raise NotImplementedError

class PlugtestSite(resource.Site):
    def __init__(self, server_credentials):
        super().__init__()

        self.add_resource(['.well-known', 'core'], resource.WKCResource(self.get_resources_as_linkheader))
        self.add_resource(['oscore', 'hello', 'coap'], Hello())
        self.add_resource(['oscore', 'hello', '1'], Hello1())
        self.add_resource(['oscore', 'hello', '2'], Hello2())
        self.add_resource(['oscore', 'hello', '3'], Hello3())
        self.add_resource(['oscore', 'hello', '6'], Hello6())
        self.add_resource(['oscore', 'hello', '7'], Hello7())
        self.add_resource(['oscore', 'observe1'], Observe())
        self.add_resource(['oscore', 'observe2'], Observe())
        self.add_resource(['oscore', 'test'], DeleteResource())
        self.add_resource(['oscore', 'block', 'outer'], BlockResource())
        self.add_resource(['oscore', 'block', 'inner'], InnerBlockResource())
        self.add_resource(['sequence-numbers'], SeqnoManager(server_credentials))

class PlugtestServerProgram(AsyncCLIDaemon):
    async def start(self):
        p = argparse.ArgumentParser(description="Server for the OSCORE plug test. Requires a test number to be present.")
        p.add_argument("contextdir", help="Directory name where to persist sequence numbers", type=Path)
        p.add_argument('--verbose', help="Increase log level", action='store_true')
        p.add_argument('--state-was-lost', help="Lose memory of the replay window, forcing B.1.2 recovery", action='store_true')
        add_server_arguments(p)
        opts = p.parse_args()

        if opts.verbose:
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.WARNING)

        server_credentials = CredentialsMap()
        server_credentials[':b'] = get_security_context('b', opts.contextdir / "b", opts.state_was_lost)
        server_credentials[':d'] = get_security_context('d', opts.contextdir / "d", opts.state_was_lost)

        site = PlugtestSite(server_credentials)
        site = OscoreSiteWrapper(site, server_credentials)

        self.context = await server_context_from_arguments(site, opts)

        print("Plugtest server ready.")
        sys.stdout.flush() # the unit tests might wait abundantly long for this otherwise

    async def shutdown(self):
        await self.context.shutdown()

if __name__ == "__main__":
    PlugtestServerProgram.sync_main()
aiocoap-0.4.12/contrib/oscore-plugtest/plugtest_common.py000066400000000000000000000045411472205122200236020ustar00rootroot00000000000000
# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import shutil
from pathlib import Path

import cbor2 as cbor

from aiocoap import oscore

contextdir = Path(__file__).parent / "common-context"

class LoggingFilesystemSecurityContext(oscore.FilesystemSecurityContext):
    def _extract_external_aad(self, message, request_id, local_is_sender):
        result = super()._extract_external_aad(message, request_id, local_is_sender)
        print(
            "Verify: External AAD: bytes.fromhex(%r), %r"
            % (result.hex(), cbor.loads(result))
        )
        return result
class NotifyingPlugtestSecurityContext(oscore.FilesystemSecurityContext):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.notification_hooks = []

    def notify(self):
        for x in self.notification_hooks:
            x()

    def post_seqnoincrease(self):
        super().post_seqnoincrease()
        self.notify()

    def _replay_window_changed(self):
        super()._replay_window_changed()
        self.notify()

class PlugtestFilesystemSecurityContext(
    LoggingFilesystemSecurityContext, NotifyingPlugtestSecurityContext
):
    pass

def get_security_context(
    contextname, contextcopy: Path, simulate_unclean_shutdown=False
):
    """Copy the base context (disambiguated by contextname in "ab", "cd")
    onto the path in contextcopy if it does not already exist, and load the
    resulting context with the given role. The context will be monkey-patched
    for debugging purposes.

    With the simulate_unclean_shutdown parameter set to True, any existing
    replay window is removed from the loaded state."""
    if not contextcopy.exists():
        contextcopy.parent.mkdir(parents=True, exist_ok=True)
        shutil.copytree((contextdir / contextname), contextcopy)
        print("Context %s copied to %s" % (contextname, contextcopy))
    secctx = PlugtestFilesystemSecurityContext(contextcopy)
    if simulate_unclean_shutdown:
        secctx.recipient_replay_window._index = None
        secctx.recipient_replay_window._bitfield = None
    return secctx

def additional_verify(description, lhs, rhs):
    if lhs == rhs:
        print("Additional verify passed: %s" % description)
    else:
        print("Additional verify failed (%s != %s): %s" % (lhs, rhs, description))
aiocoap-0.4.12/contrib/rd-plugtest/000077500000000000000000000000001472205122200191205ustar00rootroot00000000000000
aiocoap-0.4.12/contrib/rd-plugtest/registrant.py000077500000000000000000000125351472205122200216630ustar00rootroot00000000000000
#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""A very limited registrant-ep for Resource Directory plugtests.

It largely utilizes the client registration features already built into
aiocoap, but also offers a --simple option to run the simple registration
that's otherwise not needed in aiocoap, as the clients are featureful enough
to be able to do (and need features of) full registration.

Even though aiocoap typically does not emit such links, it produces the
spurious links from the plug test description to see whether the RD deals
with them well.

All three site descriptions from the plug test are selectable with --site; it
is up to the operator to pick the right one for the right test, and to decide
whether to register simply, regularly, or using a CT (which is easily
simulated by making the registrant simply-register at a rd-relay).

The --register-as parameter should have a usable default for the plug test.
"""

import argparse
from urllib.parse import urljoin

from aiocoap import Message, POST
from aiocoap.cli.common import add_server_arguments, server_context_from_arguments
from aiocoap.util.cli import AsyncCLIDaemon
from aiocoap.resource import Resource, Site, WKCResource
from aiocoap.util.linkformat import LinkFormat, Link

from aiocoap.resourcedirectory.client.register import Registerer

class DemoResource(Resource):
    async def render_get(self, request):
        if request.opt.accept == 41:
            return Message(
                payload=("%s" % self.text).encode("utf8"),
                content_format=41,
            )
        else:
            return Message(payload=self.text.encode("utf8"), content_format=0)

class Temp(DemoResource):
    rt = "temperature-c"
    text = "39.1°C"

class Light(DemoResource):
    rt = "light-lux"
    text = "There are four lights."

class Light3(DemoResource):
    rt = "light"
    ct = 0

def build_site_node1():
    site = Site()

    temp = Temp()
    site.add_resource(["sensors", "temp"], temp)
    light = Light()
    site.add_resource(["sensors", "light"], light)

    for x in (temp, light):
        x.if_ = "sensor"
        x.ct = 41

    def get_links():
        links = site.get_resources_as_linkheader()
        for l in links.links:
            if l.href == "/sensors/light":
                l.attr_pairs.append(("anchor", "coap://spurious.example.com:5683"))
        return LinkFormat(links.links)

    site.add_resource([".well-known", "core"], WKCResource(get_links))

    return site

def build_site_node2():
    site = Site()

    temp = Temp()
    site.add_resource(["temp"], temp)
    light = Light()
    site.add_resource(["light"], light)

    for x in (temp, light):
        x.ct = 0

    def get_links():
        links = site.get_resources_as_linkheader()
        links.links.append(Link("/t", anchor="sensors/temp", rel="alternate"))
        links.links.append(
            Link(
                "http://www.example.com/sensors/t123",
                anchor="sensors/temp",
                rel="describedby",
            )
        )
        return LinkFormat(links.links)

    site.add_resource([".well-known", "core"], WKCResource(get_links))

    return site

def build_site_node3():
    site = Site()

    for x in "left", "middle", "right":
        site.add_resource(["light", x], Light3())

    site.add_resource(
        [".well-known", "core"], WKCResource(site.get_resources_as_linkheader)
    )

    return site

class RDRegistrant(AsyncCLIDaemon):
    registerer = None

    async def start(self):
        p = argparse.ArgumentParser()
        p.add_argument(
            "rd_uri",
            help="Preconfigured address of the resource directory",
            nargs="?",
            default="coap://[ff05::fd]",
        )
        p.add_argument(
            "--simple",
            help="Run simple registration rather than full (incomplete: Never renews)",
            action="store_true",
        )
        p.add_argument(
            "--register-as",
            help="Endpoint name to register as (default: node$SITE)",
            default=None,
        )
        p.add_argument(
            "--site", help="Use a different resource / link layout", default=1, type=int
        )
        add_server_arguments(p)

        opts = p.parse_args()

        if opts.site == 1:
            site = build_site_node1()
        elif opts.site == 2:
            site = build_site_node2()
        elif opts.site == 3:
            site = build_site_node3()
        else:
            raise p.error("Invalid site value")

        if opts.register_as is None:
            opts.register_as = "node%s" % opts.site

        self.context = await server_context_from_arguments(site, opts)

        if opts.simple:
            rd_wkc = urljoin(
                opts.rd_uri, "/.well-known/core?ep=%s&lt=6000" % opts.register_as
            )
            await self.context.request(Message(code=POST, uri=rd_wkc)).response
        else:
            self.registerer = Registerer(
                self.context,
                opts.rd_uri,
                lt=120,
                registration_parameters={"ep": opts.register_as},
            )

    async def shutdown(self):
        if self.registerer is not None:
            await self.registerer.shutdown()
        await self.context.shutdown()

if __name__ == "__main__":
    RDRegistrant.sync_main()
aiocoap-0.4.12/contrib/rd-relay000077500000000000000000000130621472205122200163150ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """A minimal server that can be placed where an RD might be auto-discovered, and announces the resources on a full resource directory. This is "minimal" because it only does two things: * Let clients query for Resource Directories from an explicitly configured list of links * Accept simple registrations and forward them as simple registrations to a configured RD. This expects that the actual RD accepts con= on a simple registration, which will probably be disallowed. It does not (but could be extended to) discover RDs or their resources on its own, or allow the list of servers to be updated remotely. While basically usable (eg. on 6LoWPAN border routers), it is more considered a demo; the devices on which such functionality is expected to run might easily prefer a more constrained implementation.""" import argparse import logging from urllib.parse import urljoin import asyncio from aiocoap import Message, GET, POST, CHANGED, CONTENT from aiocoap.error import NotFound, MethodNotAllowed, AnonymousHost, BadRequest from aiocoap.resource import WKCResource, Site from aiocoap.cli.common import (add_server_arguments, server_context_from_arguments) from aiocoap.util.cli import AsyncCLIDaemon from aiocoap.util.linkformat import LinkFormat, Link, parse class RelayingWKC(WKCResource): def __init__(self): self.links = LinkFormat() self.forward = None super().__init__(listgenerator=lambda: self.links) async def setup(self, rd_uri): # FIXME: deduplicate with .client.register (esp. when that recognizes more than just multicast) rd_uri = rd_uri or 'coap://[ff05::fd]' rd_wkc = urljoin(rd_uri, '/.well-known/core?rt=core.rd*') if not rd_wkc.startswith(rd_uri): print("Ignoring path of given RD argument") # This would request to self too if the RD is on the LAN, but # fortunately .wk/c filtering triggers and we don't get a response from # self b/c there's nothing to report yet. 
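        # The filtered query below is expected to yield a link-format (ct=40)
        # CONTENT response; response_raising and the checks right after abort
        # setup() on anything else.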
uri_response = await self.context.request( Message(code=GET, uri=rd_wkc) ).response_raising if uri_response.code != CONTENT: raise RuntimeError("Unexpected response code %s" % uri_response.code) if uri_response.opt.content_format != 40: raise RuntimeError("Unexpected response content format %s" % uri_response.opt.content_format) links = parse(uri_response.payload.decode('utf8')) relayable = ("core.rd-lookup-ep", "core.rd-lookup-res", "core.rd-lookup-gp", "core.rd", "core.rd-group") for l in links.links: rt = (" ".join(l.rt)).split(" ") rt_relayable = [r for r in rt if r in relayable] if rt_relayable: target = l.get_target(uri_response.get_request_uri()) new = Link(target, rt=" ".join(rt_relayable), rel="x-see-also") print("Exporting link: %s" % new) self.links.links.append(new) if "core.rd" in rt: print("Using that link as target for simple registration") self.forward = target async def render_post(self, request): if self.forward is None: raise MethodNotAllowed try: client_uri = request.remote.uri except AnonymousHost: raise BadRequest("explicit base required") # FIXME: deduplicate with aiocoap.cli.rd.SimpleRegistrationWKC args = request.opt.uri_query if any (a.startswith('base=') for a in args): raise BadRequest("base is not allowed in simple registrations") # FIXME: Could do some formal checks on those, like whether the lifetime is numeric async def work(): get_wkc = Message(code=GET, uri=urljoin(client_uri, '/.well-known/core')) response = await self.context.request(get_wkc).response_raising if response.code != CONTENT: raise RuntimeError("Odd response code from %s" % client_uri) fwd = Message(code=POST, uri=self.forward, content_format=response.opt.content_format, payload=response.payload, ) fwd.opt.uri_query = args + ('base=%s' % client_uri,) await self.context.request(fwd).response asyncio.ensure_future(work()) return Message(code=CHANGED) class RDRelayProgram(AsyncCLIDaemon): async def start(self): p = argparse.ArgumentParser() p.add_argument("-v", "--verbose", help="Be more verbose (repeat to debug)", action='count', dest="verbosity", default=0) p.add_argument("rd_uri", help="Preconfigured address of the resource directory", nargs='?') add_server_arguments(p) opts = p.parse_args() if opts.verbosity > 1: logging.basicConfig(level=logging.DEBUG) elif opts.verbosity == 1: logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.WARNING) site = Site() wkc = RelayingWKC() site.add_resource(['.well-known', 'core'], wkc) self.context = await server_context_from_arguments(site, opts) wkc.context = self.context await wkc.setup(opts.rd_uri) print("Resource directory relay operational") async def shutdown(self): await self.context.shutdown() if __name__ == "__main__": RDRelayProgram.sync_main() aiocoap-0.4.12/contrib/widgets_common/000077500000000000000000000000001472205122200176645ustar00rootroot00000000000000aiocoap-0.4.12/contrib/widgets_common/__init__.py000066400000000000000000000002701472205122200217740ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """These modules provide mechanisms shared between different widget demos""" aiocoap-0.4.12/contrib/widgets_common/gobject_resource.py000066400000000000000000000100061472205122200235570ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT from gi.repository import Gtk from gi.repository import Gdk from aiocoap import resource from aiocoap import Message, 
BAD_REQUEST

from .resource import *
from .util import _Throttler


class GobjectBacked(SenmlResource):
    """Provides a .value bound to the self.widget_property property of the
    self.backend_widget Gobject.

    The widget and widget_property need to be set before this class's
    __init__ is called."""

    def __init__(self):
        super().__init__()

        throttler = _Throttler(self.value_changed)
        self.backend_widget.connect(
            "notify::" + self.widget_property, lambda *k: throttler()
        )

    def _get_value(self):
        return self.backend_widget.get_property(self.widget_property)

    def _set_value(self, updated):
        self.backend_widget.set_property(self.widget_property, updated)

    value = property(_get_value, _set_value)


class Switch(BooleanResource, GobjectBacked):
    if_ = "core.s"  # FIXME export as read-only

    widget_property = "state"

    def __init__(self, args):
        self.widget = self.backend_widget = Gtk.Switch()
        super().__init__()


class Bulb(BooleanResource):
    if_ = "core.a"  # FIXME export as write-only

    def _get_value(self):
        # raise RuntimeError("This object is not readable")
        # this was a nice idea for a demo, but precludes even toggling
        return self.widget.props.icon_name == "weather-clear"

    def _set_value(self, new_state):
        self.widget.props.icon_name = {
            True: "weather-clear",
            False: "weather-clear-night",
        }[new_state]

    value = property(_get_value, _set_value)

    def __init__(self, args):
        super().__init__()
        self.widget = Gtk.Image()

    @ContenttypeRendered.empty_post_handler()
    def emptypost(self):
        self.value = not self.value


class RGBChannel(FloatResource, PythonBacked):
    if_ = "core.a"

    channel_name = property(lambda self: ("r", "g", "b")[self.channel])

    def __init__(self, channel):
        super().__init__()
        self.channel = channel
        self.value = 0

    # FIXME this doesn't cater for notifications


class RGBRoot(SubsiteBatch):
    @ContenttypeRendered.get_handler("text/plain;charset=utf-8", default=True)
    def __regular_get(self):
        return "#" + "".join("%02x" % int(255 * c.value) for c in self.site.channels)

    @ContenttypeRendered.put_handler("text/plain;charset=utf-8", default=True)
    def render_put(self, payload):
        if len(payload) == 7 and payload[0:1] == b"#":
            values = tuple(
                int(payload[i : i + 2].decode("ascii"), 16) / 255 for i in (1, 3, 5)
            )
        else:
            return Message(code=BAD_REQUEST)

        # FIXME it'd be nice to update them in a way so that our own
        # value_changed only fires once
        for i, v in enumerate(values):
            self.site.channels[i].value = v


class RGBLight(resource.Site):
    def __init__(self, args):
        super().__init__()

        self.channels = [RGBChannel(i) for i in range(3)]
        for c in self.channels:
            self.add_resource([c.channel_name], c)
        rgbroot = RGBRoot(self)
        self.add_resource([], rgbroot)
        rgbroot.add_valuechange_callback(self.trigger_repaint)

        self.widget = Gtk.DrawingArea()
        self.widget.set_size_request(200, 100)
        self.widget.connect("draw", self.cb_draw)

    def cb_draw(self, widget, cr):
        Gdk.cairo_set_source_rgba(
            cr,
            Gdk.RGBA(
                self.channels[0].value, self.channels[1].value, self.channels[2].value
            ),
        )
        cr.paint()

    def trigger_repaint(self):
        # this is assuming that GtkPaintable doesn't have its own window, which
        # seems not to have been the case for quite some time
        self.widget.queue_draw_area(
            0, 0, self.widget.get_allocated_width(), self.widget.get_allocated_height()
        )
aiocoap-0.4.12/contrib/widgets_common/kivy_resource.py000066400000000000000000000053741472205122200231360ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

from .resource import *
from .util import _Throttler
class KivyPropertyBacked(SenmlResource):
    """Provides a .value bound to the self.widget_property property of the
    self.backend_widget Kivy widget.

    The widget and widget_property need to be set before this class's
    __init__ is called."""

    def __init__(self):
        super().__init__()

        throttler = _Throttler(self.value_changed)
        self.backend_widget.bind(**{self.widget_property: lambda *args: throttler()})

    def _get_value(self):
        return getattr(self.backend_widget, self.widget_property)

    def _set_value(self, updated):
        setattr(self.backend_widget, self.widget_property, updated)

    value = property(_get_value, _set_value)


class Color(KivyPropertyBacked):
    def __init__(self, kivy_backend, widget_property):
        self.backend_widget = kivy_backend
        self.widget_property = widget_property
        super().__init__()

    @ContenttypeRendered.get_handler("text/plain;charset=utf-8", default=True)
    def __regular_get(self):
        return "#" + "".join("%02x" % int(255 * c) for c in self._get_value()[:3])

    # 65362 is the content format used for SAUL RGB values, also in verdigris
    # (manual clients will just send none)
    #
    # See also https://rustdoc.etonomy.org/riot_coap_handler_demos/saul/index.html
    @ContenttypeRendered.put_handler(65362, default=True)
    def render_put(self, payload):
        if len(payload) == 7 and payload[0:1] == b"#":
            values = tuple(
                int(payload[i : i + 2].decode("ascii"), 16) / 255 for i in (1, 3, 5)
            )
        else:
            return Message(code=BAD_REQUEST)

        self._set_value(values + (1,))

    def get_link_description(self):
        return {"saul": "ACT_LED_RGB", "if": "core.p"}


class Text(StringResource, KivyPropertyBacked):
    """A resource that represents a Kivy widget's text"""

    if_ = "core.p"

    widget_property = "text"

    def __init__(self, kivy_backend):
        self.backend_widget = kivy_backend
        super().__init__()


class PressState(BooleanResource, KivyPropertyBacked):
    """A resource that represents a Kivy widget's "pressed" property"""

    if_ = "core.s"

    widget_property = "state"

    # This is no good modelling (for a button modelled as press-release is hard
    # to observe with eventually consistent semantics only), but it works for a
    # first demo.
    def __init__(self, kivy_backend):
        self.backend_widget = kivy_backend
        super().__init__()

    # fake boolean, and don't allow setting
    def _get_value(self):
        return {"down": 1, "normal": 0}[super()._get_value()]

    value = property(_get_value)
aiocoap-0.4.12/contrib/widgets_common/resource.py000066400000000000000000000307401472205122200220710ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

from collections import namedtuple, defaultdict
import functools
import inspect
import abc
import json

import cbor2 as cbor

from aiocoap import resource, numbers, interfaces
from aiocoap import GET, PUT, POST, Message, CONTENT, CHANGED
from aiocoap.error import BadRequest, UnsupportedContentFormat, UnallowedMethod

_ContenttypeRenderedHandler = namedtuple(
    "_ContenttypeRenderedHandler",
    ("method", "accept", "contentformat", "implementation", "responseformat"),
)


# this could become an alternative to the resource.Resource currently implemented in aiocoap.resource
class ContenttypeRendered(
    resource._ExposesWellknownAttributes, interfaces.Resource, metaclass=abc.ABCMeta
):
    def __init_subclass__(cls):
        # __new__ code moved in here to use __ properties
        cls.__handlers = defaultdict(lambda: {})
        for member in vars(cls).values():
            if isinstance(member, _ContenttypeRenderedHandler):
                for accept in member.accept:
                    for contentformat in member.contentformat:
                        cls.__handlers[member.method][(accept, contentformat)] = (
                            member.implementation,
                            member.responseformat,
                        )

    @staticmethod
    def get_handler(accept, *, default=False):
        """Decorate a method with this to make it the GET handler for a given
        Accept value (or additionally the empty one if default=True). FIXME
        move to ContenttypeRendered

        Methods that accept a payload will get the payload passed as an
        argument, and be decided based on the Content-Format header (with the
        Accept header being ignored; possibly, that's a reason to split this
        decorator up per-method).

        The method will not be usable by its name any more. It is recommended
        to use a double-underscore name for thusly decorated methods (eg.
        __get_plain).

        The method has some freedom in the types it may return (None is
        treated as an empty payload, strings are encoded in UTF-8). It is
        unclear yet whether more complex conversions (eg. JSON, CBOR) will be
        supported by this or need additional decorators."""

        def wrapper(func):
            cf = numbers.media_types_rev[accept]
            return _ContenttypeRenderedHandler(
                GET, (cf, None) if default else (cf,), (None,), func, cf
            )

        return wrapper

    @staticmethod
    def put_handler(contentformat, *, default=False):
        def wrapper(func):
            if isinstance(contentformat, str):
                cf = numbers.media_types_rev[contentformat]
            else:
                cf = contentformat
            return _ContenttypeRenderedHandler(
                PUT, (None,), (cf, None) if default else (cf,), func, None
            )

        return wrapper

    @staticmethod
    def empty_post_handler():
        # i suppose this'll be replaced with something more generic when i add something that needs request or response payloads
        def wrapper(func):
            return _ContenttypeRenderedHandler(POST, (None,), (None,), func, None)

        return wrapper

    async def needs_blockwise_assembly(self, request):
        return True

    async def render(self, request):
        cf = request.opt.content_format
        acc = request.opt.accept
        raise_class = UnallowedMethod
        # FIXME: manually walking the MRO is not a nice way to go about this;
        # is there no other way to query the registered handlers according to
        # the regular inheritance patterns?
        for cls in type(self).mro():
            if not issubclass(cls, ContenttypeRendered) or cls is ContenttypeRendered:
                continue
            for_method = cls.__handlers.get(request.code, None)
            if for_method is None:
                continue
            raise_class = UnsupportedContentFormat
            handler, responseformat = for_method.get((acc, cf), (None, None))
            if handler is not None:
                break
        else:
            raise raise_class()

        sig = inspect.signature(handler)
        parameters = set(sig.parameters.keys())
        parameters.remove("self")
        kwargs = {}
        if request.payload and "payload" not in parameters:
            raise BadRequest("Unexpected payload")
        if request.opt.uri_query and "query" not in parameters:
            raise BadRequest("Unexpected query arguments")
        for p in parameters:
            if p == "payload":
                kwargs["payload"] = request.payload
            elif p == "request_uri":
                # BIG FIXME: This does not give the expected results due to the
                # URI path stripping in Site, and because Message gets the
                # requested authority wrong on the server side.
                kwargs["request_uri"] = request.get_request_uri()
            else:
                raise RuntimeError("Unexpected argument requested: %s" % p)

        payload = handler(self, **kwargs)
        if payload is None:
            payload = b""
        elif isinstance(payload, str):
            payload = payload.encode("utf8")

        return Message(
            code={GET: CONTENT, PUT: CHANGED}[request.code],
            payload=payload,
            content_format=responseformat,
            no_response=request.opt.no_response,
        )


class ObservableContenttypeRendered(ContenttypeRendered, interfaces.ObservableResource):
    def __init__(self):
        super().__init__()
        self._callbacks = set()

    async def add_observation(self, request, serverobservation):
        """Implementation of interfaces.ObservableResource"""
        callback = serverobservation.trigger
        self._callbacks.add(callback)
        remover = functools.partial(self._callbacks.remove, callback)
        serverobservation.accept(remover)

    def add_valuechange_callback(self, cb):
        """Call this when you want a callback outside of aiocoap called
        whenever value_changed is called, typically because the callback
        recipient would extract the state of the resource in a non-CoAP
        way."""
        self._callbacks.add(cb)

    def value_changed(self):
        """Call this whenever the object was modified in such a way that any
        rendition might change."""
        for c in self._callbacks:
            c()


class SenmlResource(ObservableContenttypeRendered):
    """A resource that has its state in .value; this class implements SenML
    getters and setters as well as plain text.
    Implementors need to provide a .value instance property as well as
    .jsonsenml_key / .cborsenml_key class properties for picking the right
    value key in the respective SenML serialization, and a .valuetype type
    that is used both for converting any text/plain'ly PUT string as well as
    for filtering (typically copy-constructing) data from SenML."""

    @ContenttypeRendered.get_handler("application/senml+json")
    def __jsonsenml_get(self, request_uri):
        return json.dumps([{"n": request_uri, self.jsonsenml_key: self.value}])

    @ContenttypeRendered.get_handler("application/senml+cbor")
    def __cborsenml_get(self, request_uri):
        return cbor.dumps([{0: request_uri, self.cborsenml_key: self.value}])

    @ContenttypeRendered.get_handler("text/plain;charset=utf-8", default=True)
    def __textplain_get(self):
        return str(self.value)

    @ContenttypeRendered.put_handler("application/senml+json")
    def __jsonsenml_set(self, payload, request_uri):
        try:
            new = json.loads(payload.decode("utf8"))
            if (
                len(new) != 1
                or new[0].get("bn", "") + new[0].get("n", "") != request_uri
            ):
                raise BadRequest("Not a single record pertaining to this resource")
            self.value = self.valuetype(new[0][self.jsonsenml_key])
        except (KeyError, ValueError):
            raise BadRequest()

    @ContenttypeRendered.put_handler("application/senml+cbor")
    def __cborsenml_set(self, payload, request_uri):
        try:
            new = cbor.loads(payload)
            if len(new) != 1 or new[0].get(-2, "") + new[0].get(0, "") != request_uri:
                raise BadRequest("Not a single record pertaining to this resource")
            self.value = self.valuetype(new[0][self.cborsenml_key])
        except (KeyError, ValueError):
            raise BadRequest()

    @ContenttypeRendered.put_handler("text/plain;charset=utf-8", default=True)
    def __textplain_set(self, payload):
        try:
            self.value = self.valuetype(payload.decode("utf8").strip())
        except ValueError:
            raise BadRequest()


class BooleanResource(SenmlResource):
    jsonsenml_key = "vb"
    cborsenml_key = 4
    valuetype = bool

    @ContenttypeRendered.get_handler("text/plain;charset=utf-8", default=True)
    def __textplain_get(self):
        return "01"[self.value]

    @ContenttypeRendered.put_handler("text/plain;charset=utf-8", default=True)
    def __textplain_set(self, payload):
        try:
            self.value = {"0": False, "1": True}[payload.decode("utf8").strip()]
        except (KeyError, ValueError):
            raise BadRequest()


class FloatResource(SenmlResource):
    jsonsenml_key = "v"
    cborsenml_key = 2
    valuetype = float


class StringResource(SenmlResource):
    jsonsenml_key = "vs"
    cborsenml_key = 3
    valuetype = str


class SubsiteBatch(ObservableContenttypeRendered):
    """An implementation of a CoRE interfaces batch that is the root resource
    of a subsite

    This currently depends on being added to the site after all other
    resources; it could enumerate them later, but it installs its own
    value_changed callbacks of other members at initialization time."""

    if_ = "core.b"

    def __init__(self, site):
        self.site = site
        super().__init__()

        # FIXME this ties in directly into resource.Site's privates, AND it
        # should actually react to changes in the site, AND overriding the
        # callback to install an own hook is not compatible with any other
        # ObservableResource implementations
        for subres in self.site._resources.values():
            if isinstance(subres, ObservableContenttypeRendered):
                subres.add_valuechange_callback(self.value_changed)
        for subsite in self.site._subsites.values():
            if not isinstance(subsite, resource.Site):
                continue  # can't access privates
            if () not in subsite._resources:
                continue  # no root, better not try
            rootres = subsite._resources[()]
            if not isinstance(rootres, SubsiteBatch):
                continue
            rootres.add_valuechange_callback(self.value_changed)

    def __get_records(self, request_uri):
        records = []
        # FIXME this ties in directly into resource.Site's privates
        for path, subres in self.site._resources.items():
            if isinstance(
                subres, SenmlResource
            ):  # this conveniently filters out self as well
                records.append(
                    {"n": "/".join(path), subres.jsonsenml_key: subres.value}
                )
        for path, subsite in self.site._subsites.items():
            if not isinstance(subsite, resource.Site):
                continue  # can't access privates
            if () not in subsite._resources:
                continue  # no root, better not try
            rootres = subsite._resources[()]
            if not isinstance(rootres, SubsiteBatch):
                continue
            for r in rootres.__get_records(request_uri):
                r = dict(**r)
                r.pop("bn", None)
                r["n"] = "/".join(path) + "/" + r["n"]
                records.append(r)
        records[0]["bn"] = request_uri
        return records

    @ContenttypeRendered.get_handler("application/senml+json", default=True)
    def __regular_get(self, request_uri):
        return json.dumps(self.__get_records(request_uri))


class PythonBacked(SenmlResource):
    """Provides a .value stored in regular Python, but pulls the
    .value_changed() trigger on every change"""

    def _set_value(self, value):
        changed = not hasattr(self, "_value") or self._value != value
        self._value = value
        if changed:
            self.value_changed()

    value = property(lambda self: self._value, _set_value)
aiocoap-0.4.12/contrib/widgets_common/util.py000066400000000000000000000011101472205122200212060ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import time


class _Throttler:
    """Wrapper around an argumentless function that silently drops calls if
    there are too many."""

    # FIXME i'd rather have the ObservableResource or even the observation
    # itself handle this

    def __init__(self, callback):
        self.callback = callback
        self.last = 0

    def __call__(self):
        now = time.time()
        if now - self.last < 0.2:
            return
        self.last = now
        self.callback()
aiocoap-0.4.12/doc/000077500000000000000000000000001472205122200137535ustar00rootroot00000000000000aiocoap-0.4.12/doc/LICENSE.rst000066400000000000000000000002261472205122200155670ustar00rootroot00000000000000LICENSE
=======

.. include:: ../LICENSES/MIT.txt

Files in ``aiocoap/util/vendored/`` may have different (but compatible and OSI
approved) licenses.
aiocoap-0.4.12/doc/README.doc000066400000000000000000000020441472205122200153770ustar00rootroot00000000000000index hack
==========

in order to have a readme that is usable for simple ReST builders like github,
the readme now contains hardwired links to readthedocs or local files.

sphinx (and thus readthedocs) sees, via the aiocoap_index.py sphinx extension,
a preprocessed version where all the static links to readthedocs are replaced
with sphinx-internal directives and properly resolved in the locally built
docs.

per-module documentation hack
=============================

pages similar to sphinx-apidoc's output are built by the sphinx module
aiocoap_index.py in the doc/module/ directory. sphinx-apidoc is not used here
because it would need to be invoked outside of sphinx (which is not practical
with tools like readthedocs), and because some customization is done (eg. for
aiocoap.numbers).

for both hacks, suggestions for cleaner solutions that keep the git tree free
of generated files are appreciated.
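for reference, the result of both hacks can be checked with a local build
using the default html builder (the output directory is only a suggestion):

    $ python3 -m sphinx doc/ build/sphinx/html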
man pages ========= html is the default output format, but man pages can also be generated: $ python3 -m sphinx -b man doc/ build/sphinx/man aiocoap-0.4.12/doc/aiocoap_index.py000066400000000000000000000124361472205122200171350ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import functools import re import os import textwrap import glob import os.path from docutils.parsers.rst.directives.misc import Include rtd_re = re.compile( "^\\.\\. _([^:]+): http://aiocoap.readthedocs.io/en/latest/(.*)\\.html$" ) filelink_re = re.compile("^\\.\\. _([^:]+): ([^:]+)$") def addrepl(replacements, pattern, prefix, linkbody): """Given details of a sphinx-ified link, place an entry in the replacements dictionary that is suitable for full-text substitution later on""" pretty = pattern.strip("`") if pretty != linkbody: sphinxlink = ":%s:`%s <%s>`" % (prefix, pretty, linkbody) else: sphinxlink = ":%s:`%s`" % (prefix, linkbody) replacements[pattern + "_"] = sphinxlink def modified_insert_input(include_lines, path, original=None): """A filter for the insert_input function that preprocesses the input in the following ways: * Remove all external hyperlink targets to readthedocs (and guess the way this link needs to be expressed in Sphinx) * Remove all local (relative) link targets * Replace words referencing them with the appropriate Sphinx reference""" new_lines = [] replacements = {} for line in include_lines: rtd_match = rtd_re.match(line) filelink_match = filelink_re.match(line) if rtd_match: pattern, linkbody = rtd_match.groups() prefix = "doc" addrepl(replacements, pattern, prefix, linkbody) elif filelink_match: # for things like LICENSE pattern, linkbody = filelink_match.groups() addrepl(replacements, pattern, "doc", linkbody) else: # only keep lines that are still relevant (that's most lines) new_lines.append(line) new_lines = [ functools.reduce(lambda s, rep: s.replace(*rep), replacements.items(), x) for x in new_lines ] original(new_lines, path) class IncludePreprocessed(Include): """Include a file (like the 'Include' directive), but preprocess its input as described in modified_insert_input.""" def run(self): self.state_machine.insert_input = functools.partial( modified_insert_input, original=self.state_machine.insert_input ) try: result = super().run() finally: del self.state_machine.insert_input return result def build_moduledocs(app): """Create per-module sources like sphinx-apidoc, but at build time and with customizations.""" srcdir = app.builder.srcdir moddir = srcdir / "module" os.makedirs(moddir, exist_ok=True) basedir = srcdir.parent docs = [ x.removesuffix(".py").replace("/", ".").replace(".__init__", "") for x in glob.glob("aiocoap/**/*.py", recursive=True, root_dir=basedir) ] for x in docs: commonstart = textwrap.dedent(f"""\ {x} module ==================================================================================== """) if x in ("aiocoap.numbers", "aiocoap.transports"): # They have explicit intros pointing out submodules and/or # describing any reexports text = commonstart + textwrap.dedent(f""" .. automodule:: {x} .. toctree:: :glob: {x}.* """) elif x in ("aiocoap",): # They have explicit intros listing submodules text = commonstart + textwrap.dedent(f""" .. 
automodule:: {x}
                """)
        elif x.startswith("aiocoap.cli."):
            if x in ("aiocoap.cli.defaults", "aiocoap.cli.common"):
                # These neither have a man page, nor do they go into the documentation
                continue

            executablename = "aiocoap-" + x.removeprefix("aiocoap.cli.")
            # no ".. automodule:: {x}" because the doc string is already used
            # by the argparse, and thus would be repeated
            text = textwrap.dedent(f"""
                {executablename}
                ==============================

                .. argparse::
                   :ref: {x}.build_parser
                   :prog: {executablename}
                """)
        else:
            text = commonstart + textwrap.dedent(f"""
                .. automodule:: {x}
                    :members:
                    :undoc-members:
                    :show-inheritance:
                """)
        docname = f"{moddir}/{x}.rst"

        if os.path.exists(docname) and open(docname).read() == text:
            continue
        else:
            with open(moddir / (x + ".rst"), "w") as outfile:
                outfile.write(text)

    for f in os.listdir(moddir):
        if f.endswith(".rst") and f.removesuffix(".rst") not in docs:
            os.unlink(moddir / f)


def setup(app):
    """Sphinx extension that builds the aiocoap index page from a non-sphinx
    and thus github-suitable ReST file, and also creates sphinx-apidoc style
    per-module pages customized to what I'd like to see there"""
    app.add_directive("include_preprocessed", IncludePreprocessed)
    app.connect("builder-inited", build_moduledocs)
aiocoap-0.4.12/doc/api.rst000066400000000000000000000044351472205122200152640ustar00rootroot00000000000000The aiocoap API
===============

This is about the Python API of the aiocoap library; see :doc:`design` for
notes on how CoAP concepts play into the API.

API stability
-------------

In preparation for a `semantically versioned`_ 1.0 release, some parts of
aiocoap are described as stable.

The library does not try to map the distinction between "public API" and
internal components in the sense of semantic versioning to Python's "public"
and "private" (``_``-prefixed) interfaces -- tying those together would mean
intrusive refactoring every time a previously internal mechanism is
stabilized.

Neither does it only document the public API, as that would mean that library
development would need to resort to working with code comments; that would
also impede experimentation, and migrating comments to docstrings would be
intrusive again. All modules' documentation can be searched, and most modules
are listed below.

Instead, functions, methods and properties in the library should only be
considered public (in the semantic versioning sense) if they are described as
"stable" in their documentation.

The documentation may limit how an interface may be used or what can be
expected from it. (For example, while a method may be typed to return a
particular class, the stable API may only guarantee that an instance of a
particular abstract base class is returned).

The ``__all__`` attributes of aiocoap modules try to represent semantic
publicality of their members (in accordance with PEP8); however, the
documentation is the authoritative source.

Modules with stable components
------------------------------
.. toctree::
    :titlesonly:

    module/aiocoap
    module/aiocoap.protocol
    module/aiocoap.message
    module/aiocoap.options
    module/aiocoap.interfaces
    module/aiocoap.error
    module/aiocoap.pipe
    module/aiocoap.defaults
    module/aiocoap.transports
    module/aiocoap.proxy
    module/aiocoap.proxy.client
    module/aiocoap.proxy.server
    module/aiocoap.numbers
    module/aiocoap.optiontypes
    module/aiocoap.resource
    module/aiocoap.resourcedirectory.client.register
    module/aiocoap.util
    module/aiocoap.cli
    module/aiocoap.meta
    module/aiocoap.oscore
    module/aiocoap.oscore_sitewrapper
    module/aiocoap.edhoc
    module/aiocoap.credentials

.. _`semantically versioned`: https://semver.org/
aiocoap-0.4.12/doc/conf.py000066400000000000000000000027011472205122200152520ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import sys
import os

# for aiocoap_index.py
sys.path.insert(0, os.path.abspath("."))
# for aiocoap.meta
sys.path.insert(0, os.path.abspath(".."))

import aiocoap.meta

extensions = [
    "sphinx.ext.autodoc",
    "aiocoap_index",
    "sphinxarg.ext",
]

source_suffix = ".rst"

# This is renamed and already has the default "index" in later Sphinx versions,
# but so far readthedocs renders this with Sphinx 1 (this is more easily
# addressed after an update to use pyproject.toml)
master_doc = "index"

project = "aiocoap"
copyright = "Christian Amsüss and the aiocoap contributors"

# The full version, including alpha/beta/rc tags.
release = aiocoap.meta.version
# The short X.Y version.
version = ".".join(release.split(".")[:2])

html_logo = "logo.svg"
html_favicon = "logo-square.svg"

autodoc_member_order = "bysource"

man_pages = [
    (
        "module/aiocoap.cli.client",
        "aiocoap-client",
        "query CoAP servers from the command line",
        "",
        1,
    ),
    (
        "module/aiocoap.cli.proxy",
        "aiocoap-proxy",
        "forward and reverse proxy server for CoAP",
        "",
        1,
    ),
    ("module/aiocoap.cli.rd", "aiocoap-rd", "Resource Directory server", "", 1),
    (
        "module/aiocoap.cli.fileserver",
        "aiocoap-fileserver",
        "File server for CoAP",
        "",
        1,
    ),
]
aiocoap-0.4.12/doc/design.rst000066400000000000000000000100771472205122200157630ustar00rootroot00000000000000CoAP API design notes
=====================

This documentation chapter is not a full-fledged guide yet; rather, it
highlights some points of how CoAP is expressed in aiocoap.

* Library as a proxy:

  A CoAP library and API can, to some extent, be viewed as a CoAP proxy and a
  CoAP transport protocol, respectively. This shapes what the library can do,
  and makes guidance on how to do it accessible -- for the CoAP specifications
  describe what proxies may do but say nothing about APIs.

  For example, splitting up large messages into block-wise chunks is something
  aiocoap does unless asked specifically not to; in its operation, it follows
  the guidance set out for proxies in RFC7959. Likewise, this is what
  justifies that aiocoap intermittently drops observe notifications. (Future
  releases might even take on intermediate proxies due to discovered
  alternative protocols).

  On the flip side, going all the way with this would mean that the
  application gets no choice in properties lost across proxies: The
  application could not decide whether reliable transport should be used.
  Furthermore, applied in full, the application could not use any proxy-unsafe
  options not supported by the library.

  In aiocoap, a balance is attempted. It behaves like a proxy for some
  convenience operations, which can be disabled as needed.
  It still allows the application author to set message properties for the
  first hop, and does not reject messages with proxy-unsafe options (trusting
  that no new proxy-unsafe options are unsafe for the limited thing the
  library does).

* Messages as exchange objects:

  In aiocoap, requests and responses on the server and client side are handed
  to the application as CoAP messages. This gives the application a lot of
  flexibility in terms of setting and reacting to options; it allows
  application authors to explore extensions to CoAP. It is also the style of
  API used by libcoap and gcoap / nanocoap.

  On the other hand, it makes it relatively verbose to write applications
  that exclusively operate on predefined patterns (like objects that can be
  rendered into a representation depending on content format negotiation, or
  getter-setter patterns using GET and PUT). Simplified handlers for such
  cases can be built on aiocoap; the ``contrib`` directory contains some
  exploratory examples.

  In combination with the abovementioned proxy paradigm, this can lead to
  some weirdness when messages are represented differently on different
  transports. The general approach currently taken is to build the
  application level messages like a CoAP-over-UDP message would be treated if
  UDP messages could be arbitrarily long (or possibly, with future changes to
  the internal block-wise mechanisms, using BERT). Notably, this means that
  applications that set Observe numbers manually should pack them into a
  4-byte integer (which the TCP transport would then elide); transports may,
  however, do any deduplication and then just forward to the application that
  there *is* still an Observe number set.

  This is all not set in stone, though, and open for further development.

  Handling of properties outside of code, options and payload is currently
  still a bit mixed: Most resides in custom attributes of the message (like
  :attr:`aiocoap.Message.remote` or :attr:`aiocoap.Message.mtype`); these are
  generally treated as hints and not always fully applicable. Some properties
  are also transported in options even though they are not exactly fitting
  here; for example, the No-Response option is used in responses to indicate
  to the stack that no response should be sent. The latter should be cleaned
  up.

* URIs and remotes:

  In general, aiocoap will aim for good user experience when the user passes
  in a URI per request, eg. by keeping TCP connections open for a bit. As the
  library can not keep track of URI strings, it is sometimes preferable to
  keep the :attr:`aiocoap.Message.remote` around and build requests based on
  that. This will ensure that aiocoap makes all efforts to keep established
  security contexts and connections around for as long as possible.
aiocoap-0.4.12/doc/examples.rst000066400000000000000000000020721472205122200163240ustar00rootroot00000000000000Usage Examples
==============

These files can serve as reference implementations for a simplistic server
and client. In order to test them, run ``./server.py`` in one terminal, and
use ``./clientGET.py`` and ``./clientPUT.py`` to interact with it.

The programs' source code should give you a good starting point to get
familiar with the library if you prefer reading code to reading tutorials.
Otherwise, you might want to have a look at the :doc:`guidedtour`, where the
relevant concepts are introduced and explained step by step.

.. note:: These example programs are not shipped in the library version of
    aiocoap.
    They are present if you followed the :ref:`installation-development`
    section of the installation instructions; otherwise, you can download them
    from the project website.

Client
------

.. literalinclude:: ../clientGET.py
   :language: python
   :linenos:
   :lines: 11-

.. literalinclude:: ../clientPUT.py
   :language: python
   :linenos:
   :lines: 11-

Server
------

.. literalinclude:: ../server.py
   :language: python
   :linenos:
   :lines: 11-
aiocoap-0.4.12/doc/faq.rst000066400000000000000000000110571472205122200152600ustar00rootroot00000000000000Frequently Answered Questions
=============================

(Not *actually* asked frequently -- actually, this is a bunch of notes that
users of the library should probably see at some point, while it is not clear
where to better put them).

* **Which platforms are supported?**

  aiocoap requires Python 3.10 (or PyPy 3.10), and should run on all
  operating systems supported by Python.

  Development and automated tests run on Linux, and this is where all listed
  features are supported.

  aiocoap generally runs on FreeBSD, Windows and macOS as well. Tests on
  FreeBSD are conducted manually; for Windows and macOS it's all largely
  relying on user feedback tracked in the `bug tracker for portability
  issues `_.

  Note that the main CoAP-over-UDP transport :mod:`udp6
  <aiocoap.transports.udp6>` is only on-by-default on Linux because other
  platforms have no way of receiving network errors from an unconnected
  socket. The simpler UDP transports used on the other platforms do not
  support all features, and in particular lack multicast support.

  aiocoap is agnostic of the backing asyncio implementation as long as it
  implements the functionality required by the transport (``add_reader`` for
  udp6, ``sockname`` extra for role reversal on simple6). It is known to work
  with uvloop_ and gbulb_.

  When aiocoap is used with pyodide_ (that is, in a web browser, with a
  Python interpreter compiled through emscripten), for example in a `Jupyter
  notebook`_, the "regular" transports are not functional. An alternative is
  provided transparently in the :mod:`WebSockets transport
  <aiocoap.transports.ws>`.

  .. _uvloop: https://uvloop.readthedocs.io/
  .. _gbulb: https://github.com/nathan-hoad/gbulb
  .. _pyodide: https://pyodide.org/
  .. _`Jupyter notebook`: https://jupyter.org/try-jupyter/lab/

* **How can a server be scaled up to use multiple cores?**

  Python is often considered weak around threading. While setups with
  multiple asyncio workers should conceptually work, the easiest way to
  parallelize is just to have multiple instances of your server running at
  the same time. This works when transports and platform support the
  SO_REUSEPORT option (this is the case on Linux with the default transports,
  but never on Windows), with which incoming requests are dispatched to any
  of the processes serving the port by the operating system.

  This requires an application design that has all its persistence managed
  outside the server process; that is typically the case with file system or
  database backed servers.

  (aiocoap internally does hold some state, but that is always per client,
  and the load balancing typically ensures that requests from the same
  client wind up in the same process.)

* **Why do I get a "The transport can not be bound to any-address." error
  message?**

  For your platform, the ``simplesocketserver`` module was selected. See
  :mod:`the simplesocketserver documentation
  <aiocoap.transports.simplesocketserver>` for why it can not bind to that
  address.

* **How is multicast supported?**

  Support for multicast is currently limited. On the server side, things are
  mostly ready.
  Groups are joined :meth:`at server creation
  <aiocoap.protocol.Context.create_server_context>`.

  On the client side, requests to multicast addresses can be sent, and while
  they are treated adequately on the protocol level (eg. will not send CON
  requests), the :meth:`request interface <aiocoap.protocol.Context.request>`
  only exposes the first response. Thus, it can be used in discovery
  situations as long as only one response is processed, but not yet to its
  full power of obtaining data from multiple devices.

  Note that multicast requests often require specification of an interface,
  as otherwise the request is underspecified. Thus, a typical command line
  request might look like this::

    ./aiocoap-client coap://'[ff02::fd%eth0]'/.well-known/core --non

* **aiocoap fails to start if IPv6 is disabled system-wide.**

  Yes. `Don't do that.`__ It is not a supported mode of operation with the
  default implementation.

  .. __: https://howtodisableipv6.com/

  Background details: The default transport of aiocoap uses APIs that are
  well specified for IPv6 and work there for both IPv4 and IPv6 packets.
  Explicitly re-implementing everything on v4 would not only be needless
  extra work, it would also be a portability problem as unlike for IPv6, the
  interfaces are not specified platform independently for IPv4. Moreover,
  that mode would be error prone because it wouldn't receive regular testing.
aiocoap-0.4.12/doc/guidedtour.rst000066400000000000000000000247261472205122200166710ustar00rootroot00000000000000Guided Tour through aiocoap
===========================

This page gets you started on the concepts used in aiocoap; it will assume
rough familiarity with what CoAP is, and a working knowledge of Python
development, but introduce you to asynchronous programming and explain some
CoAP concepts along with the aiocoap API.

If you are already familiar with asynchronous programming and/or some other
concepts involved, or if you prefer reading code to reading tutorials, you
might want to go after the :doc:`examples` instead.

.. note::

  If you want to give aiocoap a try without any installation, you can also
  `run it from your web browser` -- albeit with some limitations.
  :doc:`pyodide` describes how to use it and its differences from regular
  operation.

  You can still follow this guided tour; any changes will be noted to
  *pyodide users*.

First, some tools
-----------------

Before we get into programming, let's establish tools with which we can probe
a server, and a server itself. If you have not done it already, :ref:`install
aiocoap for development <installation-development>`.

*pyodide users* just install aiocoap :ref:`their regular way
<installation-pyodide>`. You can not run the commands we are using in this
section; just read through it, expected outputs are displayed anyway.

Start off with the sample server by running the following in a terminal
inside the aiocoap directory::

    $ ./server.py

.. note::

    The ``$`` sign indicates the prompt; you enter everything after it in a
    terminal shell. Lines not starting with a dollar sign are the program
    output, if any. Later on, we'll see lines starting with ``>>>``; those
    are run inside a Python interpreter.

    I recommend that you use the IPython_ interpreter. One useful feature for
    following through this tutorial is that you can copy full lines
    (including any ``>>>`` parts) to the clipboard and use the ``%paste``
    IPython command to run it, taking care of indentation etc.

This has started a CoAP server with some demo content, and keeps running
until you terminate it with Ctrl-C.
In a separate terminal, use :doc:`the aiocoap-client tool
<module/aiocoap.cli.client>` to send a GET request to the server::

    $ ./aiocoap-client coap://localhost/.well-known/core
    application/link-format content was re-formatted
    </.well-known/core>; ct="40",
    </time>; obs,
    </other/separate>,
    </other/block>; title="A large resource",
    </whoami>,
    </>; rel="impl-info"

The address we're using here is a resource on the local machine
(``localhost``) at the well-known location ``.well-known/core``, which in
CoAP is the go-to location if you don't know anything about the paths on the
server beforehand. It tells that there is a resource at the path ``/time``
that has the ``obs``\ ervable attribute, a resource at the path
``/.well-known/core``, and more at ``/other/...`` and ``/whoami``.

.. note:: Getting all lines in a single row or no color? Then there are third
    party modules missing. Run ``python3 -m aiocoap.cli.defaults`` to see
    which they are, or just go back to the :ref:`installation step
    <installation-development>` and make sure to include the "``[all]``"
    part.

.. note:: There can be a "(No newline at end of message)" line below your
    output. This just makes sure your prompt does not start in the middle of
    the screen. I'll just ignore that.

Let's see what ``/time`` gives us::

    $ ./aiocoap-client coap://localhost/time
    2021-12-07 10:08

The response should have arrived immediately: The client sent a message to
the server in which it requested the resource at ``/time``, and the server
could right away send a message back.

In contrast, ``/other/separate`` is slower::

    $ ./aiocoap-client coap://localhost/other/separate
    Three rings for the elven kings [abbreviated]

The response to this message comes back with a delay. Here, it is simulated
by the server; in real-life situations, this delay can stem from network
latency, servers waiting for some sensor to read out a value, slow hard
drives etc.

A request
---------

In order to run a similar request programmatically, we'll need a request
message::

    >>> from aiocoap import *
    >>> msg = Message(code=GET, uri="coap://localhost/other/separate")
    >>> print(msg)
    <aiocoap.Message at 0x...: no mtype, GET (no MID, empty token) remote None, 2 option(s)>

The message consists of several parts. The non-optional ones are largely
handled by aiocoap (message type, ID, token and remote are all None or empty
here and will be populated when the message is sent). The options are roughly
equivalent to what you might know as HTTP headers::

    >>> msg.opt
    <aiocoap.options.Options at 0x...: URI_HOST: localhost, URI_PATH: other / separate>

You might have noticed that the Uri-Path option is shown with some space
around the slash. This is because paths in CoAP are not a structured byte
string with slashes in it (as they are in HTTP), but actually repeated
options of a (UTF-8) string, which are represented as a tuple in Python::

    >>> msg.opt.uri_path
    ('other', 'separate')

Now to send that message as a request over the network, we'll need a network
protocol object. That has a request method, and can give a response (**bear
with me, these examples don't actually work**)::

    >>> protocol.request(msg).response
    <Future pending>

That is obviously not a proper response -- yet. If the protocol returned a
finished response, the program couldn't do any work in the meantime. Instead,
it returns a Future -- an object that will (at some time in the *future*)
contain the response. Because the Future is returned immediately, the user
can start other requests in parallel, or do other processing in the meantime.
For now, all we want is to wait until the response is ready::

    >>> await protocol.request(msg).response
    <aiocoap.Message at 0x...: Type.CON 2.05 Content (MID ..., token ...) remote <UDP6EndpointAddress [::ffff:127.0.0.1]>, 186 byte(s) payload>

Here, we have a successful message ("2.05 Content" is the rough equivalent of
HTTP's "200 OK", and the 186 bytes of payload look promising).
Until we can dissect that, we'll have to get those asynchronous things to
work properly, though.

Asynchronous operation
----------------------

To work interactively with asynchronous Python, start your Python interpreter
like this::

    $ python3 -m asyncio
    >>>

Users of the highly recommended IPython_ can continue in their existing
session, as support for the asynchronous shell is always available there.

*pyodide users* should rather use
``"coaps+ws://demo.coap.amsuess.com/other/separate"`` here, and change later
URIs accordingly. That is an online service that runs the same server, and
can be used without any local installation.

::

    >>> from aiocoap import *
    >>> protocol = await Context.create_client_context()
    >>> msg = Message(code=GET, uri="coap://localhost/other/separate")
    >>> response = await protocol.request(msg).response
    >>> print(response)
    <aiocoap.Message at 0x...: Type.CON 2.05 Content (MID ..., token ...) remote <UDP6EndpointAddress [::ffff:127.0.0.1]>, 186 byte(s) payload>

That's better!

Now the ``protocol`` object could also be created -- we need to start that
once to prepare a socket for all the requests we're sending later. That
doesn't actually take a long time, but could, depending on the operating
system.

.. note::

    If you want to pack any of the code into functions, these functions need
    to be asynchronous functions. When working in a ``.py`` file, the
    ``await`` keyword is not available outside, and you'll need to kick off
    your program using `asyncio.run`__.

    .. __: https://docs.python.org/3/library/asyncio-task.html#asyncio.run

    The same code as above packed up in a file would look like this::

        import asyncio
        from aiocoap import *

        async def main():
            protocol = await Context.create_client_context()
            msg = Message(code=GET, uri="coap://localhost/other/separate")
            response = await protocol.request(msg).response
            print(response)

        asyncio.run(main())

The response
------------

The response obtained in the main function is a message like the request
message, just that it has a different code (2.05 is of the successful 2.00
group), incidentally no options (because it's a very simple server), and
actual data.

The response code is represented in Python by an enum with some utility
functions; the remote address (actually remote-local address pair) is an
object too::

    >>> response.code
    <Successful Response Code 69 "2.05 Content">
    >>> response.code.is_successful()
    True
    >>> response.remote.hostinfo
    '[::ffff:127.0.0.1]'
    >>> response.remote.is_multicast
    False

The actual response message, the body, or the payload of the response, is
accessible in the payload property, and is always a bytestring::

    >>> response.payload
    b'Three rings for the elven kings [ abbreviated ]'

aiocoap does not yet provide utilities to parse the message according to its
content format (which would be accessed as ``response.opt.content_format``).

.. topic:: More asynchronous fun

   The other examples don't show simultaneous requests in flight, so let's
   have one with parallel requests:

   >>> async def main():
   ...     responses = [
   ...         protocol.request(Message(code=GET, uri=u)).response
   ...         for u
   ...         in ("coap://localhost/time", "coap://vs0.inf.ethz.ch/obs", "coap://coap.me/test")
   ...     ]
   ...     for f in asyncio.as_completed(responses):
   ...         response = await f
   ...         print("Response from {}: {}".format(response.get_request_uri(), response.payload))
   >>> await main()
   Response from coap://localhost/time: b'2016-12-07 18:16'
   Response from coap://vs0.inf.ethz.ch/obs: b'18:16:11'
   Response from coap://coap.me/test: b'welcome to the ETSI plugtest!
last change: 2016-12-06 16:02:33 UTC' This also shows that the response messages do keep some information of their original request (in particular, the request URI) with them to ease further parsing. .. The server side --------------- WIP This is currently the end of the guided tour; see the :mod:`aiocoap.resource` documentation for the server side until the tour covers that is complete. .. _IPython: http://ipython.org/ aiocoap-0.4.12/doc/index.rst000066400000000000000000000003231472205122200156120ustar00rootroot00000000000000.. include_preprocessed:: ../README.rst .. toctree:: :maxdepth: 1 installation guidedtour stateofoscore stateofedhoc api design examples tools pyodide faq news LICENSE aiocoap-0.4.12/doc/installation.rst000066400000000000000000000116671472205122200172210ustar00rootroot00000000000000Installing aiocoap ================== .. note:: The commands here will install aiocoap in your current environment. By default, that is your platform's user install directory. To keep that clean, or to use different sets or versions of libraries for different purposes, you may want to look into the `venv documentation`_, which explains both the concept of virtual environments and how they are used on different platforms. .. _`venv documentation`: https://docs.python.org/3/library/venv In most situations, it is recommended to install the latest released version of aiocoap. This is done using a simple:: $ pip3 install --upgrade "aiocoap[all]" (In some cases, the program is called ``pip`` only). .. _installation-development: Development version ------------------- If you want to play with aiocoap's internals or consider contributing to the project, the suggested way of operation is getting a Git checkout of the project:: $ git clone https://github.com/chrysn/aiocoap $ cd aiocoap You can then use the project from that location, or install it with :: $ pip3 install --upgrade ".[all,docs]" If you need to install the latest development version of aiocoap but do not plan on editing (eg. because you were asked in the course of a bug report to test something against the latest aiocoap version), you can install it directly from the web:: $ pip3 install --upgrade "git+https://github.com/chrysn/aiocoap#egg=aiocoap[all]" With the ``-e`` option, that is also a viable option if you want to modify aiocoap and pip's `choice of checkout directories`_ is suitable for you. .. _`Python package index`: https://pypi.python.org/pypi/aiocoap/ .. _`choice of checkout directories`: https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support Common errors ------------- When upstream libraries change, or when dependencies of used libraries are not there (eg. no C compiler, C libraries missing), the installation process can fail. On Debian based systems, it helps to install the packages ``python3-dev``, ``build-essential`` and ``autoconf``; generally, the error output will contain some hints as to what is missing. As a workaround, it can be helpful to not install with all extras, but replace the ``all`` with the extras you actually want from the list below. For example, if you see errors from DTLSSocket, rather than installing with ``[all,docs]``, you can leave out the ``tinydtls`` extra and install with ``[linkheader,oscore,prettyprint,docs]``. 
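
Continuing the development install example from above, such a stripped-down
command line could look like this (the individual extras are explained in the
next section)::

    $ pip3 install --upgrade ".[linkheader,oscore,prettyprint,docs]"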
Slimmer installations --------------------- As aiocoap does not strictly depend on many of the libraries that are installed when following the above recommendations, a setup can be stripped down by entering any combination of the below "extras" in the place of the ``all`` in the above lines, or leaving out the ``[all]`` expression for a minimal installation. The extras currently supported are: * ``oscore``: Required for the :mod:`aiocoap.transports.oscore` transport, as well as for using EDHOC. * ``tinydtls``: Required for using CoAP over DTLS. * ``ws``: Required for using CoAP over WebSockets. * ``prettyprint``: Allows using the ``--color`` and ``--pretty-print`` options of :doc:`module/aiocoap.cli.client`. * ``docs``: Installs tools needed to build the documentation (not part of ``all``). * ``linkheader``: Originally needed for generating and parsing files in RFC6690_ link format, eg. ``.well-known/core`` files. This extra does not contain any external dependencies, but was left in place for compatibility. Which libraries and versions are pulled in by this exactly is documented in the ``setup.py`` file. .. _RFC6690: https://tools.ietf.org/html/rfc6690 .. _installation-pyodide: On pyodide ---------- aiocoap can be run in a Python interpreter that is running in the browser called pyodide_. When using pyodide (either directly or through a `Jupyter notebook`_), ``pip`` is unavailable, but there is ``micropip`` to replace it. Installation is then done directly in the Python environment using:: >>> import micropip >>> await micropip.install("aiocoap[prettyprint,oscore]") See the :doc:`pyodide` section of the documentation on how aiocoap can be used there. .. _pyodide: https://pyodide.org/ .. _`Jupyter notebook`: https://jupyter.org/try-jupyter/ If you want to run an unreleased branch or test own code, get a Git checkout as described for development above, and run:: python3 -m build Then, copy the newly created file ``dist/aiocoap-${VERSION}-py3-none-any.whl`` to a file server on the public web. Make sure to leave the file name as is, because micropip will attempt to parse it. Then you can pass the URI of the file instead of the name "aiocoap" to micropip.install. Note that the server may need some CORS_ setup to allow loading of the file from foreign web sites. For that reason, running the ``http.server`` module as a web server on localhost creates an insufficient server. .. _CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing aiocoap-0.4.12/doc/logo-square.svg000066400000000000000000000161371472205122200167420ustar00rootroot00000000000000 The aiocoap library logo image/svg+xml The aiocoap library logo chrysn <chrysn@fsfe.org> The file is published under the same license as the aiocoap project (see its LICENSE file). aiocoap-0.4.12/doc/logo.svg000066400000000000000000000710501472205122200154370ustar00rootroot00000000000000 The aiocoap library logo image/svg+xml The aiocoap library logo chrysn <chrysn@fsfe.org> The file is published under the same license as the aiocoap project (see its LICENSE file). aiocoap-0.4.12/doc/news.rst000066400000000000000000000003701472205122200154610ustar00rootroot00000000000000Change log ========== This summarizes the changes between released versions. For a complete change log, see the git history. For details on the changes, see the respective git commits indicated at the start of the entry. .. 
include:: ../NEWS.rst
aiocoap-0.4.12/doc/pyodide.rst000066400000000000000000000036071472205122200161470ustar00rootroot00000000000000pyodide and Jupyter
===================

aiocoap can be run in pyodide_, a Python interpreter that runs in the browser. See :doc:`its section in the installation instructions <installation>` for how to install in this environment.

The recommended way to use pyodide is through a `Jupyter notebook`_. In a new "Notebook" of type "Python (Pyodide)", first perform the installation steps, and then use aiocoap like in the rest of the :doc:`guidedtour`.

Beware that when running in a web browser, no general purpose UDP or TCP connections can be created. The only transport that is available is the client role of :mod:`CoAP over WebSockets <aiocoap.transports.ws>`; any attempt to send requests to ``coap://``, ``coaps+tcp://`` or similar will fail with "RuntimeError: No request interface could route message" (or a NotImplementedError).

Also, browsers are picky about when they allow unencrypted HTTP connections, so the unsecure ``coap+ws://`` may be unavailable as well, leaving only ``coaps+ws://``. When going through the guided tour, it is suggested to just use ``coaps+ws://demo.coap.amsuess.com/`` as a server, as that is available without further setup.

Jupyter has the nice feature of allowing custom HTML to be shown in lieu of plain text \_\_repr\_\_esentations. For some types, this is implemented; for example, a message will show an HTML expandable view with options and payload, where options and other constants have tooltips indicating their numeric values.

These features should be available not only when using pyodide, but also when using aiocoap in a server side Python session in Jupyter, in which case any networking limitations of the hosting virtual machine may apply.

The ``./contrib`` directory of the aiocoap source code contains some example IPython notebooks that can be run right away.

.. _pyodide: https://pyodide.org/
.. _`Jupyter notebook`: https://jupyter.org/try-jupyter
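As a quick check that everything is in place, a request can be made right from the notebook. This is a minimal sketch along the lines of the :doc:`guidedtour` (it assumes the public demo server mentioned above is up, and picks its ``.well-known/core`` resource as a target)::

    >>> from aiocoap import *
    >>> protocol = await Context.create_client_context()
    >>> msg = Message(code=GET, uri="coaps+ws://demo.coap.amsuess.com/.well-known/core")
    >>> response = await protocol.request(msg).response
    >>> response.code
    <Successful Response Code 69 "2.05 Content">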
aiocoap-0.4.12/doc/release-checklist000066400000000000000000000015471472205122200172720ustar00rootroot00000000000000Notes to the maintainer on what to do during a release
======================================================

* Update NEWS file

* Bump versions in aiocoap/meta.py, and at the head of NEWS

* Commit: git commit -m "Prepare $VERSION release"

* Tag it: git tag -as "$VERSION" -m "$VERSION"

* Clean up: git stash -u; git clean -fxd

* Build it: python3 -m build

  (This builds, by default, both tar ball and wheel)

* Upload it: twine upload ./dist/aiocoap-"$VERSION"{.tar.gz,-py3-none-any.whl}

* Push it: git push github main:master --tags; git push gitlab main:master --tags; git push codeberg main --tags

* log in to readthedocs, add new tag as new version

* Bump versions to add a ".post0"

* Commit that: git commit -m "Bump version to .post0 to indicate development"

  (This serves to indicate that anything built from git is not identical to the last release.)
aiocoap-0.4.12/doc/stateofedhoc.rst000066400000000000000000000102101472205122200171510ustar00rootroot00000000000000EDHOC in aiocoap
================

Introducing EDHOC
-----------------

EDHOC (RFC9528_) is a key establishment protocol that shares many desirable properties of :doc:`OSCORE <stateofoscore>`: It can use any CoAP transport (as well as HTTP), and can traverse and benefit from proxies without granting them access to the communication.

.. _RFC9528: https://tools.ietf.org/html/rfc9528

Using EDHOC in aiocoap
----------------------

EDHOC credentials can be stored in the same JSON/CBOR/EDN based credentials files used with OSCORE and (D)TLS. For a client, a single entry describes both the client-side and the server-side credentials, as well as the URI(s) for which to use them. For a server, the presented credential for any requested origin (URI excluding the path, as that is the information available during EDHOC) is stored in one record, and one record per client lists and labels the known peers.

EDHOC example
-------------

This example sets up encrypted access to the file server demo from the generic command line client.

On the server side, generate a key for the server::

    $ aiocoap-keygen generate fileserver.cosekey --kid 0a --subject "s"
    {14: {2: "s", 8: {1: {1: 2, 2: h'0a', -1: 1, -2: ..., -3: ...}}}}

.. note:: The KIDs and subject length limitations that make all those be single-byte are subject to ongoing development in aiocoap and lakers.

The output of this is a public key, which goes into the credential file you create next as ``fileserver.cred.diag``::

    {
      "coap://[::1]/*": {          # replace with own public address
        "edhoc-oscore": {          # or leave for local testing
          "suite": 2,
          "method": 3,
          "own_cred_style": "by-key-id",
          "own_cred": ...,         # replace dots with the {14:...}
                                   # from before
          "private_key_file": "fileserver.cosekey",
        }
      },
    }

Now over to the client::

    $ aiocoap-keygen generate client.cosekey --kid 01 --subject "c"
    {14: {2: "c", 8: {1: {1: 2, 2: h'01', -1: 1, -2: ..., -3: ...}}}}

Likewise, create a credential file we call ``client.cred.diag``::

    {
      "coap://[::1]/*": {
        "edhoc-oscore": {
          "suite": 2,
          "method": 3,
          "own_cred_style": "by-key-id",
          "own_cred": ...,         # replace dots with the {14:...}
                                   # from before
          "private_key_file": "client.cosekey",
          "peer_cred": ...,        # replace with {...} from server
        }
      },
    }

Finally, we have to extend the server's ``fileserver.cred.diag`` to accept this client -- we extend the last two lines to::

      },
      ":ourclient": {
        "edhoc-oscore": {
          "suite": 2,
          "method": 3,
          "peer_cred": ...,        # replace dots with own_cred from client
      }}
    }

With that in place, we can start the fileserver::

    $ ./aiocoap-fileserver --credentials fileserver.cred.diag

… and exchange data with EDHOC and OSCORE initiated by the client::

    $ aiocoap-client "coap://[::1]/" --credentials client.cred.diag
    # application/link-format content was re-formatted
    </...>; ct=40,
    </...>; ct=40,
    [...]

.. warning:: This is a rudimentary setup that is just enough to show how things work. This does not yet perform any authorization control: the file server will still let any unauthenticated client perform the same operations as the newly authenticated client.

.. note:: As EDHOC support is extended, the steps described here should be vastly simplified:

    * The credentials file format needs an overhaul: entering peers should become as easy as creating entries in an ``.ssh/authorized_keys`` file.

    * Generating local identities should be more streamlined, with less copy-pasting involved.

    * The necessity of having a kid and a subject in keys for everything to work in the first place is subject to ongoing discussion.

    * Unilateral authentication is supported by setting the peer's credentials to `{"unauthenticated": true}` -- but that needs some more explaining as to the security consequences (a rough sketch follows right below).
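For illustration, a client that deliberately forgoes authenticating the server would keep its ``client.cred.diag`` from above and only replace the ``peer_cred`` entry. This is an unvetted sketch; it gives up any protection against server impersonation, which is part of the "more explaining" alluded to in the last point above::

    {
      "coap://[::1]/*": {
        "edhoc-oscore": {
          "suite": 2,
          "method": 3,
          "own_cred_style": "by-key-id",
          "own_cred": ...,         # as generated above
          "private_key_file": "client.cosekey",
          "peer_cred": {"unauthenticated": true}
        }
      },
    }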
aiocoap-0.4.12/doc/stateofoscore.rst000066400000000000000000000116521472205122200173700ustar00rootroot00000000000000OSCORE in aiocoap
=================

Introducing OSCORE
------------------

OSCORE (RFC8613_) is an end-to-end security mechanism available for CoAP and implemented in aiocoap.

Its main advantage over lower-layer protection (IPsec, (D)TLS) is that it can leverage any CoAP transport (as well as HTTP), can traverse proxies preserving some of their features (like block-wise fragmentation and retransmission) and supports multicast and other group communication scenarios (implemented, but not covered here as it needs even more manual actions so far).

By itself, OSCORE has no key exchange protocol; it relies on other protocols to establish keys (there is ongoing work on a lightweight key exchange named EDHOC, and the ACE-OSCORE_ profile goes some way). Until those are implemented and wide-spread, OSCORE contexts can be provisioned manually to devices.

An implementation of EDHOC is :doc:`available in aiocoap <stateofedhoc>`, but is still rather experimental.

OSCORE state
------------

Unless an add-on mode (sometimes called B2 mode as it's described in OSCORE's Appendix B.2_) is used, some run-time information needs to be stored along with an OSCORE key.

This allows instantaneous trusted requests with just a single round-trip (ie. a client can shut down, wake up with a different network address, and still the first UDP packet it sends to the server can be relied and acted upon immediately). In this mode, there is no need for the device to have a reliable source of entropy.

In practice, this means that OSCORE keys need to reside in writable directories, and are occasionally written to (the mechanisms of Appendix B.1 ensure that writes are rare: they happen at startup, at shutdown, and only occasionally at runtime).

.. warning:: This also means that stored OSCORE contexts must never be copied, only moved (or have the original deleted right after a copy).

    Where copies are unavoidable (eg. as part of a system backup), they must not be used unless it can be proven that the original was not written to at all after the backup was taken. When that can not be proven, the context must be deemed lost and reestablished by different means.

OSCORE credentials
------------------

As an experimental format, aiocoap uses JSON based credentials files that describe OSCORE or (D)TLS credentials. For clients, they indicate which URIs should be accessed using which OSCORE context. For servers, they indicate the available OSCORE contexts clients could use, and provide labels for them.

The keys and IDs themselves are stored in a directory referenced by the credentials file; this allows the state writes to be performed independently.

.. _RFC8613: https://tools.ietf.org/html/rfc8613
.. _EDHOC: https://tools.ietf.org/html/draft-selander-lake-edhoc-01
.. _ACE-OSCORE: https://tools.ietf.org/html/draft-ietf-ace-oscore-profile-11
.. _B.2: https://tools.ietf.org/html/rfc8613#appendix-B.2

OSCORE example
--------------

This example sets up encrypted access to the file server demo from the generic command line client.

.. note:: Manual provisioning of OSCORE contexts is not expected to be a long-term solution, and meant primarily for initial experimentation.

    Do not expect the security contexts set up here to be usable indefinitely, as the credentials and security context format used by aiocoap is still in flux. Moreover, the example will change over time to reflect the best use of OSCORE possible with the current implementation.
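The secret used in the files below is a well-known demonstration passphrase; before setting anything up for real use, generate a fresh one. A minimal sketch using only the Python standard library (any other source of sufficient randomness does just as well)::

    $ python3 -c "import secrets; print(secrets.token_hex(16))"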
First, create a pair of security contexts:

``client1/for-fileserver/settings.json``::

    {
      "sender-id_hex": "01",
      "recipient-id_ascii": "file",

      "secret_ascii": "Correct Horse Battery Staple"
    }

``server/from-client1/settings.json``::

    {
      "recipient-id_hex": "01",
      "sender-id_ascii": "file",

      "secret_ascii": "Correct Horse Battery Staple"
    }

A single secret must only be used once -- please use something more unique than the `standard passphrase`_.

With each of those goes a credentials map:

``client1.json``::

    {
      "coap://localhost/*": {
        "oscore": {
          "basedir": "client1/for-fileserver/"
        }
      }
    }

``server.json``::

    {
      ":client1": {
        "oscore": {
          "basedir": "server/from-client1/"
        }
      }
    }

Then, the server can be started::

    $ ./aiocoap-fileserver data-to-be-served/ --credentials server.json

And queried using the client::

    $ ./aiocoap-client coap://localhost/ --credentials client1.json
    </...>; ct="40",
    </...>; ct="40",
    [...]

Note that just passing in those credentials does not on its own make the server require encrypted communication, let alone authorization. Requests without credentials still work, and in this very example it'd need a network sniffer (or increased verbosity) to even be sure *that* the request was protected.

Ways of implementing access control and mandatory encryption are being explored -- as are extensions that simplify the setup process.

.. _`standard passphrase`: https://xkcd.com/936/
aiocoap-0.4.12/doc/tools.rst000066400000000000000000000056331472205122200156530ustar00rootroot00000000000000CoAP tools
==========

As opposed to the :doc:`examples`, programs listed here are not tuned to show the use of aiocoap, but are tools for everyday work with CoAP implemented in aiocoap. Still, they can serve as examples of how to deal with user-provided addresses (as opposed to the fixed addresses in the examples), or of integration in a bigger project in general.

.. toctree::
    :maxdepth: 1

    aiocoap-client: A CoAP client that supports observations and proxying <module/aiocoap.cli.client>
    aiocoap-proxy: A command-line configurable forward and reverse proxy <module/aiocoap.cli.proxy>
    aiocoap-rd: A standalone resource directory server <module/aiocoap.cli.rd>
    aiocoap-fileserver: A simple read-only file server with directory listings <module/aiocoap.cli.fileserver>
    aiocoap-keygen: A tool for generating keys for EDHOC <module/aiocoap.cli.keygen>

Those utilities are installed by `setup.py` at the usual executable locations; during development or when working from a git checkout of the project, wrapper scripts are available in the root directory. In some instances, it might be practical to access their functionality from within Python; see the :mod:`aiocoap.cli` module documentation for details.

All tools provide details on their invocation and arguments when called with the ``--help`` option.

contrib
-------

Tools in the ``contrib/`` folder are somewhere in between :doc:`examples` and the tools above; the rough idea is that they should be generally useful but not necessarily production tools, and simple enough to be useful as an inspiration for writing other tools; none of this is set in stone, though, so that area can serve as a noncommittal playground.

These tools are currently present:

* ``aiocoap-widgets``: Graphical software implementations of example CoAP devices as servers (eg. light bulb, switch). They should become an example of how CoRE interfaces and dynlinks can be used to discover and connect servers, and additionally serve as a playground for a more suitable Resource implementation.
The GUI is implemented in Gtk3 using the gbulb_ asyncio loop. * ``aiocoap-kivy-widget``: A similar (and smaller) widget implemented in Kivy_. * ``oscore-plugtest``: Server and client for the interoperability tests conducted during the development of OSCORE. The programs in there are also used as part of the test suite. * ``rd-relay``: An experiment of how much a host must implement if it is to be discovered during a Resource Directory discovery process, but does not serve as the full resource directory itself and redirects the client there. * ``*.ipynb``: Jupyter notebooks in which aiocoap is run in a web browser and accesses the larger network through WebSockets. These files can be uploaded to `a live version of Jupyter Lite`_. .. _gbulb: https://github.com/nathan-hoad/gbulb .. _Kivy: https://kivy.org/ .. _`a live version of Jupyter Lite`: https://jupyter.org/try-jupyter/lab/ aiocoap-0.4.12/pyproject.toml000066400000000000000000000065471472205122200161360ustar00rootroot00000000000000[build-system] requires = [ "setuptools >= 66.0" ] build-backend = "setuptools.build_meta" [project] name = "aiocoap" description = "Python CoAP library" readme = "README.rst" authors = [ { name = "Christian Amsüss", email = "chrysn@fsfe.org" }, { name = "the aiocoap contributors" }, ] license = { text = "MIT" } keywords = [ "coap", "asyncio", "iot" ] classifiers= [ "Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Internet", "Topic :: Security", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Networking", ] dynamic = [ "version" ] # When changing this, also look into doc/faq.rst and README.rst requires-python = ">=3.10" [project.urls] homepage = "https://christian.amsuess.com/tools/aiocoap/" repository = "https://codeberg.org/aiocoap/aiocoap" documentation = "https://aiocoap.readthedocs.org/" changelog = "https://codeberg.org/aiocoap/aiocoap/src/branch/main/NEWS.rst" [project.scripts] aiocoap-client = "aiocoap.cli.client:sync_main" aiocoap-proxy = "aiocoap.cli.proxy:sync_main" aiocoap-rd = "aiocoap.cli.rd:sync_main" aiocoap-fileserver = "aiocoap.cli.fileserver:FileServerProgram.sync_main" aiocoap-keygen = "aiocoap.cli.keygen:main" [project.optional-dependencies] # Extra is still present for compatibility, but its dependency has been # vendored in. linkheader = [] # ge25519 is a workaround for # ; being pure python it's # light enough to not warrant a dedicated group-oscore extra. oscore = [ "cbor2", "cryptography (>= 2.5)", "filelock", "ge25519", "lakers-python == 0.4.1" ] tinydtls = [ "DTLSSocket >= 0.1.18" ] # technically websockets is not needed when running on pyodide, but that's hard # to express here ws = [ "websockets >= 13, < 14" ] prettyprint = [ "cbor2", "pygments >= 2.1", "cbor-diag" ] docs = [ "sphinx >= 5", "sphinx-argparse" ] # manually kept up to date to include everything except docs # # when updating this, also check what is needed to build in .readthedocs.yaml and .woodpecker.yml all = [ "cbor2", "cryptography (>= 2.5)", "filelock", "ge25519", "lakers-python == 0.4.1", "DTLSSocket >= 0.1.18", "websockets >= 13, < 14", "cbor2", "pygments", "cbor-diag", ] [tool.setuptools.dynamic] version = { attr = "aiocoap.meta.version" } [tool.setuptools.packages.find] where = [ "." 
]
# It's unclear why, but in some environments, including aiocoap also included
# its submodules; in others it didn't. Listing both is safe either way.
include = [ "aiocoap", "aiocoap.*" ]

[[tool.mypy.overrides]]
module = [
    # Tracked in https://github.com/chrysn/cbor-diag-py/issues/2
    "cbor_diag",
    # Tracked in https://github.com/openwsn-berkeley/lakers/issues/282
    "lakers",
    # Typing checks currently not tracked upstream
    "ge25519",
    "fe25519",
    "DTLSSocket",
    # Not regularly available on development machines anyway
    "js",
    "pyodide.*",
]
# Required because not only will many developers not have rare modules, they
# will even never all be together at the same time (pyodide only exists in the
# browser, DTLSSocket might not even build there)
ignore_missing_imports = true
aiocoap-0.4.12/server.py000077500000000000000000000127021472205122200150730ustar00rootroot00000000000000#!/usr/bin/env python3

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""This is a usage example of aiocoap that demonstrates how to implement a
simple server. See the "Usage Examples" section in the aiocoap documentation
for some more information."""

import datetime
import logging

import asyncio

import aiocoap.resource as resource
from aiocoap.numbers.contentformat import ContentFormat
import aiocoap


class Welcome(resource.Resource):
    representations = {
        ContentFormat.TEXT: b"Welcome to the demo server",
        ContentFormat.LINKFORMAT: b"</>;ct=40",
        # ad-hoc for application/xhtml+xml;charset=utf-8
        ContentFormat(65000): b'<html xmlns="http://www.w3.org/1999/xhtml">'
        b"<head><title>aiocoap demo</title></head>"
        b"<body><h1>Welcome to the aiocoap demo server!</h1>"
        b"</body></html>",
    }

    default_representation = ContentFormat.TEXT

    async def render_get(self, request):
        cf = (
            self.default_representation
            if request.opt.accept is None
            else request.opt.accept
        )
        try:
            return aiocoap.Message(payload=self.representations[cf], content_format=cf)
        except KeyError:
            raise aiocoap.error.UnsupportedContentFormat


class BlockResource(resource.Resource):
    """Example resource which supports the GET and PUT methods. It sends large
    responses, which trigger blockwise transfer."""

    def __init__(self):
        super().__init__()
        self.set_content(
            b"This is the resource's default content. It is padded "
            b"with numbers to be large enough to trigger blockwise "
            b"transfer.\n"
        )

    def set_content(self, content):
        self.content = content
        while len(self.content) <= 1024:
            self.content = self.content + b"0123456789\n"

    async def render_get(self, request):
        return aiocoap.Message(payload=self.content)

    async def render_put(self, request):
        print("PUT payload: %s" % request.payload)
        self.set_content(request.payload)
        return aiocoap.Message(code=aiocoap.CHANGED, payload=self.content)


class SeparateLargeResource(resource.Resource):
    """Example resource which supports the GET method. It uses asyncio.sleep to
    simulate a long-running operation, and thus forces the protocol to send
    empty ACK first."""

    def get_link_description(self):
        # Publish additional data in .well-known/core
        return dict(**super().get_link_description(), title="A large resource")

    async def render_get(self, request):
        await asyncio.sleep(3)

        payload = (
            "Three rings for the elven kings under the sky, seven rings "
            "for dwarven lords in their halls of stone, nine rings for "
            "mortal men doomed to die, one ring for the dark lord on his "
            "dark throne.".encode("ascii")
        )
        return aiocoap.Message(payload=payload)


class TimeResource(resource.ObservableResource):
    """Example resource that can be observed. The `notify` method keeps
    scheduling itself, and calls `updated_state` to trigger sending
    notifications."""

    def __init__(self):
        super().__init__()
        self.handle = None

    def notify(self):
        self.updated_state()
        self.reschedule()

    def reschedule(self):
        self.handle = asyncio.get_event_loop().call_later(5, self.notify)

    def update_observation_count(self, count):
        if count and self.handle is None:
            print("Starting the clock")
            self.reschedule()
        if count == 0 and self.handle:
            print("Stopping the clock")
            self.handle.cancel()
            self.handle = None

    async def render_get(self, request):
        payload = datetime.datetime.now().strftime("%Y-%m-%d %H:%M").encode("ascii")
        return aiocoap.Message(payload=payload)


class WhoAmI(resource.Resource):
    async def render_get(self, request):
        text = ["Used protocol: %s." % request.remote.scheme]

        text.append("Request came from %s." % request.remote.hostinfo)
        text.append("The server address used %s." % request.remote.hostinfo_local)

        claims = list(request.remote.authenticated_claims)
        if claims:
            text.append(
                "Authenticated claims of the client: %s."
% ", ".join(repr(c) for c in claims) ) else: text.append("No claims authenticated.") return aiocoap.Message(content_format=0, payload="\n".join(text).encode("utf8")) # logging setup logging.basicConfig(level=logging.INFO) logging.getLogger("coap-server").setLevel(logging.DEBUG) async def main(): # Resource tree creation root = resource.Site() root.add_resource( [".well-known", "core"], resource.WKCResource(root.get_resources_as_linkheader) ) root.add_resource([], Welcome()) root.add_resource(["time"], TimeResource()) root.add_resource(["other", "block"], BlockResource()) root.add_resource(["other", "separate"], SeparateLargeResource()) root.add_resource(["whoami"], WhoAmI()) await aiocoap.Context.create_server_context(root) # Run forever await asyncio.get_running_loop().create_future() if __name__ == "__main__": asyncio.run(main()) aiocoap-0.4.12/setup.py000077500000000000000000000020421472205122200147210ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT from setuptools import setup, Command class Cite(Command): description = """Print how to cite aiocoap in a publication""" user_options = [("bibtex", None, "Output citation data as bibtex")] boolean_options = ["bibtex"] def initialize_options(self): self.bibtex = False def finalize_options(self): pass def run(self): if self.bibtex: print(self.bibtex_text) else: print(self.plain_text) plain_text = """Amsüss, Christian and the aiocoap contributors. aiocoap: Python CoAP Library. 2013–. https://christian.amsuess.com/tools/aiocoap/""" bibtex_text = """@Misc{, author = {Christian Amsüss and aiocoap contributors}, title = {{aiocoap}: Python CoAP Library}, year = {2013--}, url = {https://christian.amsuess.com/tools/aiocoap/}, }""" setup( cmdclass={ "cite": Cite, }, ) aiocoap-0.4.12/tests/000077500000000000000000000000001472205122200143505ustar00rootroot00000000000000aiocoap-0.4.12/tests/__init__.py000066400000000000000000000004741472205122200164660ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Module that contains the various test scenarios. Can be used most easily from setup.py as `./setup.py test` (which installs als test dependencies), or `python3 -m unittest` (which just runs the tests).""" aiocoap-0.4.12/tests/common.py000066400000000000000000000115201472205122200162110ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Non-fixture utilities shared between tests""" import sys import os import asyncio import aiocoap.defaults if os.environ.get("AIOCOAP_TESTS_LOOP", None) == "uvloop": import asyncio import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) elif os.environ.get("AIOCOAP_TESTS_LOOP", None) == "gbulb": import gbulb gbulb.install() elif os.environ.get("AIOCOAP_TESTS_LOOP", None) == "glib": from gi.events import GLibEventLoopPolicy policy = GLibEventLoopPolicy() asyncio.set_event_loop_policy(policy) # All test servers are bound to loopback; if for any reason one'd want to run # with particular transports, just set them explicitly. 
os.environ["AIOCOAP_DTLSSERVER_ENABLED"] = "1" if "coverage" in sys.modules: PYTHON_PREFIX = [sys.executable, "-m", "coverage", "run", "--parallel-mode"] else: PYTHON_PREFIX = [sys.executable] def _find_loopbacknames(): """Try the lookup results of common 'localhost' names and variations to return, in order, a name that resolves to 127.0.0.1, one that resolves to ::1 and one that can be resolved to both. If there is no result for any of the categories, None is returned in that place.""" import socket candidates = [ # the obvious choice "localhost", # seen on debian; ip6-localhost is present on debian too "ip6-localhost", "ip6-loopback", ] v4 = [] v6 = [] for c in candidates: try: results = socket.getaddrinfo(c, 1234, family=socket.AF_INET) except socket.gaierror: pass else: if results and all(x[4] == ("127.0.0.1", 1234) for x in results): v4.append(c) try: # Not probing for AF_INET6 because Windows applies its regular # resolution rules (that appear to be "give out V6 addresses to # unspecified families only when a V6 route is available") even to # 'ip6-loopback' (while 'localhost' is exempt and returns both). # # If we probed AF_INET6 here, on win32 we'd see ip6-localhost as a # usable address, but when the simple6 client transport later asks # getaddrinf unspecified (because it's really generic, despite its # name), that would come up empty when no route is availasble. # # (An alternative here would be to query V6 in the first place, # check `all` instead of `any` againt com and before appending do # another check on whether it still returns something to an # unspecified query) results = socket.getaddrinfo(c, 1234) except socket.gaierror: pass else: if results and any(x[4][:2] == ("::1", 1234) for x in results): v6.append(c) v4only = [c for c in v4 if c not in v6] v6only = [c for c in v6 if c not in v4] v46 = [c for c in v4 if c in v6] return ( v4only[0] if v4only else None, v6only[0] if v6only else None, v46[0] if v46 else None, ) loopbackname_v4, loopbackname_v6, loopbackname_v46 = _find_loopbacknames() using_simple6 = "simple6" in list(aiocoap.defaults.get_default_clienttransports()) tcp_disabled = "tcp" not in os.environ.get("AIOCOAP_SERVER_TRANSPORT", "tcp is default") ws_disabled = "ws" not in os.environ.get("AIOCOAP_SERVER_TRANSPORT", "ws is default") dtls_disabled = "dtls" not in os.environ.get( "AIOCOAP_SERVER_TRANSPORT", "dtls is default" ) class CapturingSubprocess(asyncio.SubprocessProtocol): """This protocol just captures stdout and stderr into properties of the same name. Unlike using communicate() on a create_subprocess_exec product, this does not discard any output that was collected when the task is cancelled, and thus allows cleanup. 
class CapturingSubprocess(asyncio.SubprocessProtocol):
    """This protocol just captures stdout and stderr into properties of the
    same name.

    Unlike using communicate() on a create_subprocess_exec product, this does
    not discard any output that was collected when the task is cancelled, and
    thus allows cleanup.

    No way of passing data into the process is implemented, as it is not
    needed here."""

    def __init__(self):
        self.stdout = b""
        self.stderr = b""
        self.read_more = asyncio.get_running_loop().create_future()

    def pipe_data_received(self, fd, data):
        self.read_more.set_result(None)
        self.read_more = asyncio.get_running_loop().create_future()
        if fd == 1:
            self.stdout += data
        elif fd == 2:
            self.stderr += data
        else:
            raise ValueError("Data on unexpected fileno")

    def process_exited(self):
        self.read_more.set_result(None)


if __name__ == "__main__":
    print("Python prefix:", PYTHON_PREFIX)
    print(
        "Loopback names:\n  %s (IPv4)\n  %s (IPv6),\n  %s (IPv4+IPv6)"
        % (loopbackname_v4, loopbackname_v6, loopbackname_v46)
    )
    print("simple6 transport in use:", using_simple6)
    print("TCP disabled:", tcp_disabled)
aiocoap-0.4.12/tests/fixtures.py000066400000000000000000000326101472205122200165750ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Test fixtures and decorators that are not test specific"""

import asyncio
import functools
import gc
import inspect
import logging
import os
import pprint
import sys
import unittest
import warnings
import weakref

# time granted to asyncio to receive datagrams sent via loopback, and to close
# connections. if tearDown checks fail erratically, tune this up -- but it
# causes per-fixture delays.
CLEANUPTIME = 0.01

# This is chosen quite loosely to avoid false positives -- but having a timeout
# prevents any test runner engine (like gitlab runners) from triggering its
# timeout. Thus, the rest of the suite has a chance of running, and we get the
# debug log from the fixture rather than losing the logs to a brutal
# termination.
#
# Tests under system load have shown that TestOSCOREPlugtest.test_005 can
# indeed take quite a while to complete; until I know why, this gives it a
# chance to complete even on occupied systems.
ASYNCTEST_TIMEOUT = 3 * 60


def is_test_successful(testcase):
    """Return true if a current TestCase instance completed so far without
    raising errors.

    This is supposed to be used in tearDown handlers on self when additional
    debug information can be shown that would otherwise be discarded, or to
    skip tests during teardown that are bound to fail."""
    return testcase._outcome.success


def asynctest(method):
    """Decorator for async WithAsyncLoop fixture methods that runs them from
    the fixture's loop with a static timeout"""

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        task = asyncio.ensure_future(method(self, *args, **kwargs), loop=self.loop)
        for f in asyncio.as_completed([task], timeout=ASYNCTEST_TIMEOUT):
            try:
                return self.loop.run_until_complete(f)
            except asyncio.TimeoutError:
                task.cancel()
                # give the task a chance to run finally handlers
                self.loop.run_until_complete(task)
                raise

    return wrapper


def no_warnings(function, expected_warnings=None):
    if inspect.iscoroutinefunction(function):
        raise Exception(
            "no_warnings decorates functions, not coroutines. Put it over @asynctest."
        )
    expected_warnings = expected_warnings or []

    def wrapped(self, *args, function=function):
        # assertLogs does not work as assertDoesntLog anyway without major
        # tricking, and it interacts badly with WithLogMonitoring as they both
        # try to change the root logger's level.
startcount = len(self.handler.list) result = function(self, *args) messages = [ m.getMessage() for m in self.handler.list[startcount:] if m.levelno >= logging.WARNING and "There is no current event loop" not in m.getMessage() # Tests are not generally run with precisely known load conditions, # and unless in normal operations where this would be an occasional # warning, this would trip up our whole test. and m.orig_msg != "Executing %s took %.3f seconds" ] if len(expected_warnings) != len(messages) or not all( e == m or (e.endswith("...") and m.startswith(e[:-3])) for (e, m) in zip(expected_warnings, messages) ): self.assertEqual( messages, expected_warnings, "Function %s had unexpected warnings" % function.__name__, ) return result wrapped.__name__ = function.__name__ wrapped.__doc__ = function.__doc__ return wrapped def precise_warnings(expected_warnings): """Expect that the expected_warnings list are the very warnings shown (no_warnings is a special case with []). "precise" is a bit of a misnomer here; the expected warnings may end with "..." indicating that the rest of the line may be arbitrary.""" return functools.partial(no_warnings, expected_warnings=expected_warnings) class WithLogMonitoring(unittest.TestCase): def setUp(self): self.handler = self.ListHandler() logging.root.setLevel(0) logging.root.addHandler(self.handler) logging.captureWarnings(True) warnings.simplefilter("always") super(WithLogMonitoring, self).setUp() def tearDown(self): super(WithLogMonitoring, self).tearDown() logging.root.removeHandler(self.handler) complete_log = " Complete log:\n" + "\n".join( x.preformatted for x in self.handler.list if x.name != "asyncio" ) if "AIOCOAP_TESTS_SHOWLOG" in os.environ: print(complete_log, file=sys.stderr) complete_log = "was just printed unconditionally anyway" class ListHandler(logging.Handler): """Handler that catches log records into a list for later evaluation The log records are formatted right away into a .preformatted attribute and have their args and exc_info stripped out. This approach retains the ability to later filter the messages by logger name or level, but drops any references the record might hold to stack frames or other passed arguments, as to not interfere with _del_to_be_sure. (In regular handlers, these references are not an issue because in regular logging the decision whether or not to log is taken right away, and maybe then formatting happens, and either way the rest is dropped).. """ def __init__(self): super().__init__() self.list = [] self.preformatter = logging.Formatter( fmt="%(asctime)s:%(levelname)s:%(name)s:%(message)s" ) def emit(self, record): record.preformatted = self.preformatter.format(record) if record.args and not hasattr(record, "style"): # Several of the precise_warnings and simiiar uses rely on the # ability to match on the message as shown. This mechanism # predates the preformatted messages, and is kept as a middle # ground. (Matching on something like "Aborting connection: No # CSM received" is impossible when the arguments are already # thrown away for then it'd be "Aboting connection: %s", but # matching into the fully preformatted log record is not ideal # either as it effectively means parsing by the above colon # format (yes, sub-string matching is probably good enough, but # why take chances). # # The original message is retained for use with cases when it # is easier that way. 
record.orig_msg = record.msg record.msg = record.msg % record.args record.args = None record.exc_info = None # The websockets module puts a self-reference into the records # through an extra, stripping that to make GC work in # _del_to_be_sure if hasattr(record, "websocket"): del record.websocket self.list.append(record) def __iter__(self): return self.list.__iter__() def assertWarned(self, message): """Assert that there was a warning with the given message. This function also removes the warning from the log, so an enclosing @no_warnings (or @precise_warnings) can succed.""" for entry in self.handler.list: if entry.msg == message and entry.levelno == logging.WARNING: self.handler.list.remove(entry) break else: raise AssertionError("Warning not logged: %r" % message) class WithAsyncLoop(unittest.TestCase): def setUp(self): super(WithAsyncLoop, self).setUp() self.loop = asyncio.get_event_loop() class Destructing(WithLogMonitoring): # Errors produced by this can be quite large, but the truncated version # that gets printed when they exceed maxDiff is not useful, so removing any # printing limits. maxDiff = None def _del_to_be_sure(self, attribute): if isinstance(attribute, str): getter = lambda self, attribute=attribute: getattr(self, attribute) deleter = lambda self, attribute=attribute: delattr(self, attribute) label = attribute else: getter = attribute["get"] deleter = attribute["del"] label = attribute["label"] weaksurvivor = weakref.ref(getter(self)) deleter(self) if not is_test_successful(self): # An error was already logged, and that error's backtrace usually # creates references that make any attempt to detect lingering # references fuitile. It'll show an error anyway, no use in # polluting the logs. return # let everything that gets async-triggered by close() happen self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME)) gc.collect() def snapshot(): # This object is created locally and held by the same referrers # that also hold the now-recreated survivor. # # By comparing its referrers to the surviver's referrers, we can # filter out this tool's entry in the already hard to read list of # objects that kept the survivor alive. canary = object() survivor = weaksurvivor() if survivor is None: return None all_referrers = gc.get_referrers(survivor) canary_referrers = gc.get_referrers(canary) if canary_referrers: referrers = [r for r in all_referrers if r not in canary_referrers] assert len(all_referrers) == len(referrers) + 1, ( "Canary to filter out the debugging tool's reference did not work.\nReferrers:\n%s\ncanary_referrers:\n%s" % (pprint.pformat(all_referrers), pprint.pformat(canary_referrers)) ) else: # There is probably an optimization around that makes the # current locals not show up as referrers. It is hoped (and # least with the current Python it works) that this also works # for the survivor, so it's already not in the list. referrers = all_referrers def _format_any(frame, survivor_id): if str(type(frame)) == "": return _format_frame(frame, survivor_id) if isinstance(frame, dict): # If it's a __dict__, it'd be really handy to know whose dict that is framerefs = gc.get_referrers(frame) owners = [ o for o in framerefs if getattr(o, "__dict__", None) is frame ] if owners: return pprint.pformat( frame ) + "\n ... 
which is the __dict__ of %s" % (owners,) return pprint.pformat(frame) def _format_frame(frame, survivor_id): return "%s as %s in %s" % ( frame, " / ".join( k for (k, v) in frame.f_locals.items() if id(v) == survivor_id ), frame.f_code, ) # can't use survivor in list comprehension, or it would be moved # into a builtins.cell rather than a frame, and that won't spew out # the details _format_frame can extract survivor_id = id(survivor) referrer_strings = [_format_any(x, survivor_id) for x in referrers] formatted_survivor = pprint.pformat(vars(survivor)) return ( "Survivor found: %r\nReferrers of the survivor:\n*" " %s\n\nSurvivor properties: %s" % (survivor, "\n* ".join(referrer_strings), formatted_survivor) ) s = snapshot() if s is not None: original_s = s if False: # enable this if you think that a longer timeout would help # this helped finding that timer cancellations don't free the # callback, but in general, expect to modify this code if you # have to read it; this will need adjustment to your current # debugging situation logging.root.info("Starting extended grace period") for i in range(10): self.loop.run_until_complete(asyncio.sleep(1)) gc.collect() s = snapshot() if s is None: logging.root.info( "Survivor vanished after %r iterations", i + 1 ) break snapshotsmessage = ( "Before extended grace period:\n" + original_s + "\n\nAfter extended grace period:\n" + ("the same" if s == original_s else s) ) else: snapshotsmessage = s errormessage = ( "Protocol %s was not garbage collected.\n\n" % label + snapshotsmessage ) self.fail(errormessage) aiocoap-0.4.12/tests/test_blockwise.py000066400000000000000000000117601472205122200177500ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """This tests advanced cases of blockwise transfer; simple sequential transfers are covered in test_server.TestServer.test_replacing_resource.""" import asyncio import unittest import aiocoap import aiocoap.defaults from .test_server import ( WithTestServer, WithClient, no_warnings, asynctest, BigResource, BasicTestingSite, ) class BigChunkyResource(BigResource): async def render_get(self, request): request.remote.maximum_block_size_exp = 3 return await super().render_get(request) class ChunkyTestingSite(BasicTestingSite): def __init__(self): super().__init__() self.add_resource(["big", "chunky"], BigChunkyResource()) class WithChunkyTestServer(WithTestServer): TestingSite = ChunkyTestingSite class TestBlockwise(WithChunkyTestServer, WithClient): # tracked as https://github.com/chrysn/aiocoap/issues/58; behavior can be successful more or less by chance @unittest.skip @no_warnings @asynctest async def test_sequential(self): """Test whether the client serializes simultaneous block requests""" pattern1 = b"01234 first pattern" + b"01" * 1024 pattern2 = b"01234 second pattern" + b"02" * 1024 request1 = aiocoap.Message( uri="coap://" + self.servernetloc + "/replacing/one", code=aiocoap.POST, payload=pattern1, ) request2 = aiocoap.Message( uri="coap://" + self.servernetloc + "/replacing/one", code=aiocoap.POST, payload=pattern2, ) responses = [] for response in asyncio.as_completed( [self.client.request(r).response for r in [request1, request2]] ): response = await response self.assertTrue( response.code.is_successful(), "Simultaneous blockwise requests caused error.", ) responses.append(response.payload) self.assertSetEqual( set(responses), set(x.replace(b"0", b"O") for x in (pattern1, pattern2)) ) @no_warnings @asynctest async def 
test_client_hints(self): """Test whether a handle_blockwise=True request takes a block2 option set in it as a hint to start requesting with a low size right away That way of hinting at size requests is not documented, but understood and occasionally used; the test primarily serves to identify problems the server has with initially low block sizes.""" resp = await self.client.request( aiocoap.Message( uri="coap://" + self.servernetloc + "/big", block2=(0, 0, 3), code=aiocoap.GET, ) ).response self.assertEqual(resp.code, aiocoap.CONTENT, "Request was unsuccessful") self.assertEqual( self._count_received_messages(), (len(resp.payload) + 127) // 128, "Response not chunked into 128 bytes", ) @no_warnings @asynctest async def test_server_hints(self): """Test whether the needs_blockwise server mechanism considers size exponent limits of the remote. Mutating the remote as it is done in the BigChunkyResource is not documented, but helps verify that the information in maximum_block_size_exp is generally used.""" resp = await self.client.request( aiocoap.Message( uri="coap://" + self.servernetloc + "/big/chunky", code=aiocoap.GET, ) ).response self.assertEqual(resp.code, aiocoap.CONTENT, "Request was unsuccessful") self.assertEqual( self._count_received_messages(), (len(resp.payload) + 127) // 128, "Response not chunked into 128 bytes", ) @no_warnings @asynctest async def test_client_hints_block1(self): """Test whether a client can successfully indicate per-remote sizes even for the block1 phase.""" # same as BigResource's content payload = b"0123456789----------" * 512 reqmsg = aiocoap.Message( uri="coap://" + self.servernetloc + "/replacing/one", code=aiocoap.PUT, payload=payload, ) reqmsg.remote.maximum_block_size_exp = 3 resp = await self.client.request(reqmsg).response self.assertEqual(resp.code, aiocoap.CHANGED, "Request was unsuccessful") self.assertEqual( self._count_received_messages(), (len(payload) + 127) // 128, "Response not chunked into 128 bytes", ) _received_logmsg = "Incoming message len(encoded): return messages, encoded messages.append(aiocoap.transports.tcp._decode_message(encoded[:size])) encoded = encoded[size:] async def should_abort_early(self, request: bytes): """Send request bytes, expect that the server closes the connection after having sent possibly a CSM and an abort""" self.mock_w.write(request) r = ( await self.mock_r.read() ) # timing out would be a typical failure case here too parsed, trail = self._read_as_messages(r) self.assertEqual(trail, b"", "Leftover data after closing message") if parsed[0].code == aiocoap.CSM: # don't discard the CSM unconditionallly: the server might have # read the request data before sending its own initial CSM. parsed.pop(0) self.assertEqual( len(parsed), 1, "Not exactly one (presumably abort) message received" ) self.assertEqual( parsed[0].code, aiocoap.ABORT, "Received message is not an abort message" ) async def should_idle(self, request: bytes, timeout=0.1): """Send request bytes, expect that the server sends CSM and does not close the connection, awaiting more from the client. 
Returns all messages received until the timeout."""
        self.mock_w.write(request)

        triggered_eof = False

        async def kill_read():
            """After a timeout, synthesize an end-of-file condition into the
            reader, hoping this doesn't break too much."""
            nonlocal triggered_eof
            await asyncio.sleep(timeout)
            triggered_eof = True
            self.mock_r.feed_eof()

        self.loop.create_task(kill_read())
        r = (
            await self.mock_r.read()
        )  # timing out would be a typical failure case here too
        self.assertEqual(triggered_eof, True, "Server closed connection prematurely")

        parsed, trail = self._read_as_messages(r)
        # if this happens, the server is either sending garbage (announcing
        # something long and not following up), or the timeout should be
        # increased
        self.assertEqual(trail, b"", "Leftover data after reading timeout")

        if parsed[0].code == aiocoap.CSM:
            # don't discard the CSM unconditionally: the server might have
            # read the request data before sending its own initial CSM.
            parsed.pop(0)

        return parsed

    async def should_idle_quietly(self, request: bytes, timeout=0.1):
        """should_idle, but assert that no messages were returned"""

        messages = await self.should_idle(request, timeout)
        # it's not a per-spec wrong thing to do, but highly unusual
        self.assertEqual(messages, [], "Server sent messages on its own")

    @precise_warnings(["Aborting connection: Failed to parse message"])
    @asynctest
    async def test_http_get(self):
        await self.should_abort_early(b"GET /.well-known/core HTTP/1.0")

    @precise_warnings(["Aborting connection: No CSM received"])
    @asynctest
    async def test_early_get(self):
        await self.should_abort_early(b"\0\x01")

    @no_warnings
    @asynctest
    async def test_incomplete_small(self):
        await self.should_idle_quietly(b"\0")

    @no_warnings
    @asynctest
    async def test_incomplete_large1(self):
        # announcing but not sending the 1-byte extlen
        await self.should_idle_quietly(b"\xd0")

    @no_warnings
    @asynctest
    async def test_incomplete_large2(self):
        # sending one out of four bytes extlen
        # a server could in theory reject this on grounds of "no matter what
        # you say next, my buffer ain't large enough"
        await self.should_idle_quietly(b"\xf0\0")

    @no_warnings
    @asynctest
    async def test_incomplete_large3(self):
        # announcing a 269 byte long message, but not even sending the code
        await self.should_idle_quietly(b"\xe0\0\0")

    @precise_warnings(["Aborting connection: Overly large message announced"])
    @asynctest
    async def test_incomplete_large4(self):
        # announcing the longest possible message, this should exceed
        # everyone's max-message-size.
        #
        # blocking to read more would be acceptable behavior as well.
        await self.should_abort_early(b"\xf0\xff\xff\xff\xff")

    @precise_warnings(["Aborting connection: Failed to parse message"])
    @asynctest
    async def test_wrong_tkl(self):
        # send an unspecified token length of 15.
        # the rest of the message is an empty CSM, so if the server were to
        # extrapolate from the meaning of tkl 0..8, it'd read it as OK.
await self.should_abort_early(b"\x0fxxxxxxxxxxxxxxx\xe1") # Fun inside the CSM @no_warnings @asynctest async def test_exotic_elective_csm_option(self): # send option number something-even (something-odd plus 269) as an empty option await self.should_idle_quietly(b"\x30\xe1\xe0\xf1\xf1") @precise_warnings(["Aborting connection: Option not supported"]) @asynctest async def test_exotic_compulsory_csm_option(self): # send option number something-odd (something-even plus 269) as an empty option await self.should_abort_early(b"\x30\xe1\xe0\xf2\xf2") @precise_warnings(["Aborting connection: Option not supported"]) @asynctest async def test_exotic_compulsory_csm_option_late(self): # send an empty CSM, and after that the one from compulsory_csm_option await self.should_abort_early(b"\0\xe1\x30\xe1\xe0\xf2\xf2") aiocoap-0.4.12/tests/test_observe.py000066400000000000000000000355521472205122200174400ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT """Tests for resource observation Note that cancellation of observations is checked in neither way; that's largely because the implementation has fallen behind on the drafts anyway and needs to be updated.""" import asyncio import aiocoap from aiocoap.numbers import ContentFormat import gc import unittest from aiocoap.resource import ObservableResource, WKCResource from .test_server import ( WithTestServer, WithClient, no_warnings, ReplacingResource, MultiRepresentationResource, run_fixture_as_standalone_server, asynctest, ) class ObservableCounter(ObservableResource): def __init__(self, number_formatter=lambda x: x): super(ObservableCounter, self).__init__() self.count = 0 self.number_formatter = number_formatter async def render_delete(self, request): self.count = 0 self.updated_state() return aiocoap.Message(code=aiocoap.CHANGED) async def render_post(self, request): if request.payload == b"double": # Calling updated_state() twice without yielding inbetween is key # here; this ensures that Futures are not treated carelessly. self.count += 1 # Triggering with explicit value because if left empty, the value # would be synthesized after the next yielding anyway, and no # ill-effects would be visible. 
self.updated_state( aiocoap.Message( code=aiocoap.CONTENT, payload=str(self.number_formatter(self.count)).encode("ascii"), ) ) self.count += 1 self.updated_state() return aiocoap.Message(code=aiocoap.CHANGED) async def render_get(self, request): return aiocoap.Message( code=aiocoap.CONTENT, payload=str(self.number_formatter(self.count)).encode("ascii"), ) async def render_fetch(self, request): return aiocoap.Message( code=aiocoap.CONTENT, payload=( "%s; request had length %s" % (self.number_formatter(self.count), len(request.payload)) ).encode("ascii"), ) class ObservableReplacingResource(ReplacingResource, ObservableResource): async def render_put(self, request): result = await super(ObservableReplacingResource, self).render_put(request) self.updated_state() return result class ObserveLateUnbloomer(ObservableResource): """A resource that accepts the server observation at first but at rendering time decides it can't do it""" def __init__(self): super().__init__() self._cancel_right_away = [] async def add_observation(self, request, serverobservation): self._cancel_right_away.append( lambda: serverobservation.deregister("Changed my mind at render time") ) serverobservation.accept(lambda: None) async def render_get(self, request): while self._cancel_right_away: self._cancel_right_away.pop(0)() return aiocoap.Message() class ObservableFailure(ObservableResource): async def render_get(self, request): return aiocoap.Message(code=aiocoap.UNAUTHORIZED) class ObserveTestingSite(aiocoap.resource.Site): prefix = [] def __init__(self): super(ObserveTestingSite, self).__init__() self.add_resource( self.prefix + ["unobservable"], MultiRepresentationResource( { ContentFormat.TEXT: b"", } ), ) self.add_resource(self.prefix + ["count"], ObservableCounter()) self.add_resource(self.prefix + ["echo"], ObservableReplacingResource()) self.add_resource(self.prefix + ["notreally"], ObserveLateUnbloomer()) self.add_resource(self.prefix + ["failure"], ObservableFailure()) self.add_resource( self.prefix + ["large"], ObservableCounter(lambda x: (" %3d" % x) * 400) ) class NestedSite(aiocoap.resource.Site): def __init__(self): super().__init__() # Not part of the test suite, but handy when running standalone self.add_resource( [".well-known", "core"], WKCResource(self.get_resources_as_linkheader) ) self.subsite = ObserveTestingSite() self.add_resource(["deep"], self.subsite) class UnnestedSite(ObserveTestingSite): prefix = ["deep"] class WithObserveTestServer(WithTestServer): def create_testing_site(self): self.testingsite = NestedSite() # use this when you suspect that things go wrong due to nesting; # usually not running this because it has no way to fail without nested # failing too # self.testingsite = UnnestedSite() return self.testingsite class TestObserve(WithObserveTestServer, WithClient): @no_warnings @asynctest async def test_normal_get(self): request = aiocoap.Message(code=aiocoap.GET) request.opt.uri_path = ["deep", "count"] request.unresolved_remote = self.servernetloc response = await self.client.request(request).response self.assertEqual( response.code, aiocoap.CONTENT, "Normal request did not succede" ) self.assertEqual( response.payload, b"0", "Normal request gave unexpected result" ) def build_observer(self, path, baserequest=None): if baserequest is not None: request = baserequest else: request = aiocoap.Message(code=aiocoap.GET) request.unresolved_remote = self.servernetloc request.opt.uri_path = path request.opt.observe = 0 requester = self.client.request(request) observation_results = [] async 
def task(): try: async for obs in requester.observation: observation_results.append(obs.payload) # This is how non-observability is indicated: The loop just terminates observation_results.append("done") except Exception as e: observation_results.append(e) running_task = asyncio.create_task(task()) return requester, observation_results, running_task.cancel @no_warnings @asynctest async def test_unobservable(self): requester, observation_results, notinterested = self.build_observer( ["deep", "unobservable"] ) response = await requester.response self.assertEqual( response.code, aiocoap.CONTENT, "Unobservable base request did not succede" ) self.assertEqual( response.payload, b"", "Unobservable base request gave unexpected result" ) await asyncio.sleep(0.1) self.assertEqual(observation_results, ["done"]) async def _change_counter(self, method, payload, path=["deep", "count"]): request = aiocoap.Message(code=method, uri_path=path, payload=payload) request.unresolved_remote = self.servernetloc await self.client.request(request).response_raising @asynctest async def _test_counter( self, baserequest, formatter, postpayload=b"", path=["deep", "count"] ): """Run a counter test with requests built from baserequest. Expect response payloads to be equal to the formatter(n) for n being the counter value""" await self._change_counter(aiocoap.DELETE, b"", path=path) requester, observation_results, notinterested = self.build_observer( path, baserequest=baserequest ) response = await requester.response self.assertEqual( response.code, aiocoap.CONTENT, "Observe base request did not succede" ) self.assertEqual( response.payload, formatter(0), "Observe base request gave unexpected result", ) await self._change_counter(aiocoap.POST, postpayload, path=path) await asyncio.sleep(0.1) self.assertEqual(observation_results, [formatter(1)]) await self._change_counter(aiocoap.POST, postpayload, path=path) await asyncio.sleep(0.1) self.assertEqual(observation_results, [formatter(1), formatter(2)]) notinterested() @no_warnings def test_counter(self): self._test_counter(None, lambda x: str(x).encode("ascii")) @no_warnings def test_counter_blockwise(self): self._test_counter( None, lambda x: str((" %3d" % x) * 400).encode("ascii"), path=["deep", "large"], ) @no_warnings def test_counter_fetch(self): self._test_counter( aiocoap.Message(code=aiocoap.FETCH, payload=b"12345"), lambda x: ("%s; request had length 5" % x).encode("ascii"), ) # Test hard disabled because not only it expects failure, but that failure # also causes protocol GC issues. 
    # @no_warnings
    # def test_counter_fetch_big(self):
    #     self._test_counter(
    #         aiocoap.Message(code=aiocoap.FETCH, payload=b'12345' * 1000),
    #         lambda x: ('%s; request had length 5000'%x).encode('ascii'))

    @no_warnings
    def test_counter_double(self):
        # see comments on b"double" in render_post
        self._test_counter(None, lambda x: str(x * 2).encode("ascii"), b"double")

    @no_warnings
    @asynctest
    async def test_echo(self):
        async def put(b):
            m = aiocoap.Message(code=aiocoap.PUT, payload=b)
            m.unresolved_remote = self.servernetloc
            m.opt.uri_path = ["deep", "echo"]
            response = await self.client.request(m).response
            self.assertEqual(response.code, aiocoap.CHANGED)

        await put(b"test data 1")

        requester, observation_results, notinterested = self.build_observer(
            ["deep", "echo"]
        )
        response = await requester.response
        self.assertEqual(
            response.code, aiocoap.CONTENT, "Observe base request did not succeed"
        )
        self.assertEqual(
            response.payload,
            b"test data 1",
            "Observe base request gave unexpected result",
        )

        await put(b"test data 2")
        await asyncio.sleep(0.1)

        self.assertEqual(observation_results, [b"test data 2"])

        notinterested()

    @unittest.expectedFailure  # regression since 82b35c1f8f, tracked as
    @no_warnings  # https://github.com/chrysn/aiocoap/issues/104
    @asynctest
    async def test_lingering(self):
        """Simulate what happens when a request is sent with an observe
        option, but the code only waits for the response and does not
        subscribe to the observation."""
        request = aiocoap.Message(code=aiocoap.GET)
        request.unresolved_remote = self.servernetloc
        request.opt.uri_path = ["deep", "count"]
        request.opt.observe = 0

        requester = self.client.request(request)

        response = await requester.response
        del requester, response
        gc.collect()

        # this needs to happen now and not in a precise_warnings because by
        # the time precise_warnings checks the messages, the context was
        # already shut down, but we want to make sure the warning is raised
        # in time.
        self.assertWarned("Observation deleted without explicit cancellation")

    @no_warnings
    @asynctest
    async def test_unknownhost(self):
        request = aiocoap.Message(
            code=aiocoap.GET, uri="coap://cant.resolve.this.example./empty", observe=0
        )
        requester = self.client.request(request)

        events = []

        async def task():
            try:
                async for obs in requester.observation:
                    events.append("Callback: %s" % obs.payload)
                events.append("Regular empty return")
            except Exception as e:
                events.append("Error %s" % type(e).__name__)

        pull_task = asyncio.create_task(task())
        response = await requester.response_nonraising
        await asyncio.sleep(0.1)
        pull_task.cancel()

        self.assertEqual(events, ["Error ResolutionError"])
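    # For orientation (this summarizes the request interface as it is used
    # throughout this file): `response` raises on transport errors,
    # `response_raising` additionally raises when the response carries an
    # unsuccessful code, and `response_nonraising` (used above) folds even
    # transport errors into an error response message.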
    @no_warnings
    @asynctest
    async def test_late_subscription_eventual_consistency(self):
        await self._change_counter(aiocoap.DELETE, b"")

        request = aiocoap.Message(code=aiocoap.GET)
        request.unresolved_remote = self.servernetloc
        request.opt.uri_path = ("deep", "count")
        request.opt.observe = 0

        requester = self.client.request(request)

        first_response = await requester.response
        self.assertEqual(
            first_response.payload, b"0", "Observe base request gave unexpected result"
        )

        await self._change_counter(aiocoap.POST, b"")
        await self._change_counter(aiocoap.POST, b"")

        last_seen = None

        async def task():
            nonlocal last_seen
            async for obs in requester.observation:
                last_seen = obs.payload

        pull_task = asyncio.create_task(task())

        # this is not required in the current implementation as it calls back
        # right from the registration, but i don't want to prescribe that.
        wait_a_moment = asyncio.get_running_loop().create_future()
        self.loop.call_soon(lambda: wait_a_moment.set_result(None))
        await wait_a_moment

        pull_task.cancel()

        self.assertEqual(last_seen, b"2")
        # only testing what was last seen because both receiving b"1" and
        # b"2" and receiving only b"2" are correct eventually consistent
        # results.

        requester.observation.cancel()

    async def _test_no_observe(self, path):
        m = aiocoap.Message(code=aiocoap.GET, observe=0)
        m.unresolved_remote = self.servernetloc
        m.opt.uri_path = path

        request = self.client.request(m)
        response = await request.response

        self.assertEqual(response.opt.observe, None)

        return request

    @no_warnings
    @asynctest
    async def test_notreally(self):
        await self._test_no_observe(["deep", "notreally"])

    @no_warnings
    @asynctest
    async def test_failure(self):
        requester = await self._test_no_observe(["deep", "failure"])

        failure = None

        async def task():
            nonlocal failure
            try:
                async for obs in requester.observation:
                    pass
            except Exception as e:
                failure = e

        pull_task = asyncio.create_task(task())
        await asyncio.sleep(0.1)

        self.assertTrue(
            failure is not None, "Errback was not called on a failed observation"
        )


if __name__ == "__main__":
    # due to the imports, you'll need to run this as `python3 -m tests.test_observe`
    run_fixture_as_standalone_server(WithObserveTestServer)


aiocoap-0.4.12/tests/test_oscore.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Tests for the aiocoap.oscore module based on the test vectors."""

import unittest

import aiocoap

oscore_modules = aiocoap.defaults.oscore_missing_modules()

if not oscore_modules:
    import aiocoap.oscore

    # shortcut definition, as this will be used all over the place with
    # copy-pasted values from the specification
    h = bytes.fromhex

    C1_KEY = h("0102030405060708090a0b0c0d0e0f10")
    C1_SALT = h("9e7ca92223786340")

    C2_KEY = h("0102030405060708090a0b0c0d0e0f10")
    C2_SALT = None

    C3_KEY = h("0102030405060708090a0b0c0d0e0f10")
    C3_SALT = h("9e7ca92223786340")
    C3_ID_CTX = h("37cbf3210017a2d3")

    default_algorithm = aiocoap.oscore.AES_CCM_16_64_128
    default_hashfun = aiocoap.oscore.hashfunctions["sha256"]

    class NonsavingSecurityContext(
        aiocoap.oscore.CanProtect,
        aiocoap.oscore.CanUnprotect,
        aiocoap.oscore.SecurityContextUtils,
    ):
        def post_seqnoincrease(self):
            # obviously, don't use this anywhere else, especially not with
            # secret keys
            pass


_skip_unless_oscore = unittest.skipIf(
    oscore_modules, "Modules missing for running OSCORE tests: %s" % (oscore_modules,)
)
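# A standalone sketch (assuming the `cryptography` and `cbor2` modules are
# available; it mirrors RFC 8613 Section 3.2.1 rather than calling into
# aiocoap) of the derivation that test_c1_1 below checks: keys fall out of
# HKDF-SHA256 with an info structure of [id, id_context, alg_aead, type, L].
def _example_derive_c1_1_sender_key():
    import cbor2
    from cryptography.hazmat.primitives.hashes import SHA256
    from cryptography.hazmat.primitives.kdf.hkdf import HKDF

    # AES-CCM-16-64-128 is COSE algorithm number 10 and uses 16-byte keys
    info = cbor2.dumps([b"", None, 10, "Key", 16])
    return HKDF(
        algorithm=SHA256(),
        length=16,
        salt=bytes.fromhex("9e7ca92223786340"),
        info=info,
    ).derive(bytes.fromhex("0102030405060708090a0b0c0d0e0f10"))
    # expected result: f0910ed7295e6ad4b54fc793154302ff (the C1.1 sender key)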
@_skip_unless_oscore
class TestOSCOAPStatic(unittest.TestCase):
    def test_c1_1(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b""
        secctx.recipient_id = b"\x01"
        secctx.id_context = None
        secctx.derive_keys(C1_SALT, C1_KEY)

        # info not compared; that would be tricky to extract and adds no value
        self.assertEqual(
            secctx.sender_key,
            h("f0910ed7295e6ad4b54fc793154302ff"),
            "Sender key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.recipient_key,
            h("ffb14e093c94c9cac9471648b4f98710"),
            "Recipient key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.common_iv,
            h("4622d4dd6d944168eefb54987c"),
            "Common IV key derivation disagrees with test vector",
        )

        sender_nonce_0 = secctx._construct_nonce(b"\0", secctx.sender_id)
        self.assertEqual(
            sender_nonce_0,
            h("4622d4dd6d944168eefb54987c"),
            "Sender nonce disagrees with test vector",
        )
        recipient_nonce_0 = secctx._construct_nonce(b"\0", secctx.recipient_id)
        self.assertEqual(
            recipient_nonce_0,
            h("4722d4dd6d944169eefb54987c"),
            "Recipient nonce disagrees with test vector",
        )

    def test_c1_2(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b"\x01"
        secctx.recipient_id = b""
        secctx.id_context = None
        secctx.derive_keys(C1_SALT, C1_KEY)

        # info not compared; that would be tricky to extract and adds no value
        self.assertEqual(
            secctx.sender_key,
            h("ffb14e093c94c9cac9471648b4f98710"),
            "Sender key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.recipient_key,
            h("f0910ed7295e6ad4b54fc793154302ff"),
            "Recipient key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.common_iv,
            h("4622d4dd6d944168eefb54987c"),
            "Common IV key derivation disagrees with test vector",
        )

        sender_nonce_0 = secctx._construct_nonce(b"\0", secctx.sender_id)
        self.assertEqual(
            sender_nonce_0,
            h("4722d4dd6d944169eefb54987c"),
            "Sender nonce disagrees with test vector",
        )
        recipient_nonce_0 = secctx._construct_nonce(b"\0", secctx.recipient_id)
        self.assertEqual(
            recipient_nonce_0,
            h("4622d4dd6d944168eefb54987c"),
            "Recipient nonce disagrees with test vector",
        )

    def test_c2_1(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b"\x00"
        secctx.recipient_id = b"\x01"
        secctx.id_context = None
        secctx.derive_keys(C2_SALT, C2_KEY)

        # info not compared; that would be tricky to extract and adds no value
        self.assertEqual(
            secctx.sender_key,
            h("321b26943253c7ffb6003b0b64d74041"),
            "Sender key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.recipient_key,
            h("e57b5635815177cd679ab4bcec9d7dda"),
            "Recipient key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.common_iv,
            h("be35ae297d2dace910c52e99f9"),
            "Common IV key derivation disagrees with test vector",
        )

        sender_nonce_0 = secctx._construct_nonce(b"\0", secctx.sender_id)
        self.assertEqual(
            sender_nonce_0,
            h("bf35ae297d2dace910c52e99f9"),
            "Sender nonce disagrees with test vector",
        )
        recipient_nonce_0 = secctx._construct_nonce(b"\0", secctx.recipient_id)
        self.assertEqual(
            recipient_nonce_0,
            h("bf35ae297d2dace810c52e99f9"),
            "Recipient nonce disagrees with test vector",
        )

    # skipping the server side for c.2.2 as it is very redundant

    def test_c3_1(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b""
        secctx.recipient_id = b"\x01"
        secctx.id_context = C3_ID_CTX
        secctx.derive_keys(C3_SALT, C3_KEY)

        # info not compared; that would be tricky to extract and adds no value
        self.assertEqual(
            secctx.sender_key,
            h("af2a1300a5e95788b356336eeecd2b92"),
            "Sender key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.recipient_key,
            h("e39a0c7c77b43f03b4b39ab9a268699f"),
            "Recipient key derivation disagrees with test vector",
        )
        self.assertEqual(
            secctx.common_iv,
            h("2ca58fb85ff1b81c0b7181b85e"),
            "Common IV key derivation disagrees with test vector",
        )

        sender_nonce_0 = secctx._construct_nonce(b"\0", secctx.sender_id)
        self.assertEqual(
            sender_nonce_0,
            h("2ca58fb85ff1b81c0b7181b85e"),
            "Sender nonce disagrees with test vector",
        )
        recipient_nonce_0 = secctx._construct_nonce(b"\0", secctx.recipient_id)
        self.assertEqual(
            recipient_nonce_0,
            h("2da58fb85ff1b81d0b7181b85e"),
            "Recipient nonce disagrees with test vector",
        )
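    # A sketch of where the nonces above come from (mirroring RFC 8613
    # Section 5.2, not aiocoap's internal helper): the Common IV is XORed
    # with the concatenation of the ID length, the left-padded ID, and the
    # left-padded Partial IV.
    @staticmethod
    def _example_nonce(common_iv, piv, the_id):
        operand = (
            bytes([len(the_id)])
            + the_id.rjust(len(common_iv) - 6, b"\0")
            + piv.rjust(5, b"\0")
        )
        return bytes(a ^ b for a, b in zip(common_iv, operand))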
    def test_c4(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b""
        secctx.recipient_id = b"\x01"
        secctx.id_context = None
        secctx.derive_keys(C1_SALT, C1_KEY)
        secctx.sender_sequence_number = 20

        unprotected = aiocoap.Message.decode(
            h("44015d1f00003974396c6f63616c686f737483747631")
        )
        outer_message, _ = secctx.protect(unprotected)
        outer_message.mid = unprotected.mid
        outer_message.token = unprotected.token
        outer_message.mtype = unprotected.mtype

        # again skipping some intermediary values that are all checked in the
        # final result as well
        encoded = outer_message.encode()
        self.assertEqual(
            encoded,
            h("44025d1f00003974396c6f63616c686f7374620914ff612f1092f1776f1c1668b3825e"),
            "Encoded message differs",
        )

    def test_c5(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b"\x00"
        secctx.recipient_id = b"\x01"
        secctx.id_context = None
        secctx.derive_keys(C2_SALT, C2_KEY)
        secctx.sender_sequence_number = 20

        unprotected = aiocoap.Message.decode(
            h("440171c30000b932396c6f63616c686f737483747631")
        )
        outer_message, _ = secctx.protect(unprotected)
        outer_message.mid = unprotected.mid
        outer_message.token = unprotected.token
        outer_message.mtype = unprotected.mtype

        # again skipping some intermediary values that are all checked in the
        # final result as well
        encoded = outer_message.encode()
        self.assertEqual(
            encoded,
            h(
                "440271c30000b932396c6f63616c686f737463091400ff4ed339a5a379b0b8bc731fffb0"
            ),
            "Encoded message differs",
        )

    def test_c6(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b""
        secctx.recipient_id = b"\x01"
        secctx.id_context = C3_ID_CTX
        secctx.derive_keys(C3_SALT, C3_KEY)
        secctx.sender_sequence_number = 20

        unprotected = aiocoap.Message.decode(
            h("44012f8eef9bbf7a396c6f63616c686f737483747631")
        )
        outer_message, _ = secctx.protect(unprotected, kid_context=True)
        outer_message.mid = unprotected.mid
        outer_message.token = unprotected.token
        outer_message.mtype = unprotected.mtype

        # again skipping some intermediary values that are all checked in the
        # final result as well
        encoded = outer_message.encode()
        # FIXME: This is composed from the expected ciphertext, not the
        # protected coap request, see
        # https://github.com/core-wg/oscoap/issues/241
        self.assertEqual(
            encoded,
            h(
                "44022f8eef9bbf7a396c6f63616c686f73746b19140837cbf3210017a2d3ff72cd7273fd331ac45cffbe55c3"
            ),
            "Encoded message differs",
        )
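    # C.7 and C.8 exercise response protection: the RequestIdentifiers handed
    # to protect() carries the request's kid and Partial IV, so the response
    # in test_c7 can reuse the request's nonce (and thus sends an empty
    # OSCORE option), while the one in test_c8 spends a Partial IV of its
    # own.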
    def test_c7(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b"\x01"
        secctx.recipient_id = b""
        secctx.id_context = None
        secctx.derive_keys(C1_SALT, C1_KEY)

        unprotected = aiocoap.Message.decode(
            h("64455d1f00003974ff48656c6c6f20576f726c6421")
        )

        request_sender_id = secctx.recipient_id
        request_piv_short = b"\x14"
        request_nonce = secctx._construct_nonce(request_piv_short, request_sender_id)

        outer_message, _ = secctx.protect(
            unprotected,
            aiocoap.oscore.RequestIdentifiers(
                request_sender_id, request_piv_short, request_nonce, True, aiocoap.POST
            ),
        )
        outer_message.mid = unprotected.mid
        outer_message.token = unprotected.token
        outer_message.mtype = unprotected.mtype

        # again skipping some intermediary values that are all checked in the
        # final result as well
        encoded = outer_message.encode()
        self.assertEqual(
            encoded,
            h("64445d1f0000397490ffdbaad1e9a7e7b2a813d3c31524378303cdafae119106"),
            "Encoded message differs",
        )

    def test_c8(self):
        secctx = NonsavingSecurityContext()
        secctx.alg_aead = default_algorithm
        secctx.hashfun = default_hashfun
        secctx.sender_id = b"\x01"
        secctx.recipient_id = b""
        secctx.id_context = None
        secctx.derive_keys(C1_SALT, C1_KEY)
        secctx.sender_sequence_number = 0

        unprotected = aiocoap.Message.decode(
            h("64455d1f00003974ff48656c6c6f20576f726c6421")
        )

        request_sender_id = secctx.recipient_id
        request_piv_short = b"\x14"
        request_nonce = secctx._construct_nonce(request_piv_short, request_sender_id)

        outer_message, _ = secctx.protect(
            unprotected,
            aiocoap.oscore.RequestIdentifiers(
                request_sender_id, request_piv_short, request_nonce, False, aiocoap.POST
            ),
        )
        outer_message.mid = unprotected.mid
        outer_message.token = unprotected.token
        outer_message.mtype = unprotected.mtype

        # again skipping some intermediary values that are all checked in the
        # final result as well
        encoded = outer_message.encode()
        self.assertEqual(
            encoded,
            h("64445d1f00003974920100ff4d4c13669384b67354b2b6175ff4b8658c666a6cf88e"),
            "Encoded message differs",
        )


@_skip_unless_oscore
class TestOSCOAAsymmetric(unittest.TestCase):
    """Test asymmetric algorithms

    As the Group OSCORE document currently has no test vectors, this is using
    values from the IETF109 hackathon."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # From https://github.com/ace-wg/Hackathon-109/blob/master/GroupKeys.md,
        # "Rikard Test 1"
        self.r1_1_d = bytes.fromhex(
            "FEA2190084748436543C5EC8E329D2AFBD7068054F595CA1F987B9E43E2205E6"
        )
        self.r1_1_y = bytes.fromhex(
            "64CE3DD128CC4EFA6DE209BE8ABD111C7272F612C2DB654057B6EC00FBFB0684"
        )
        self.r1_1_x = bytes.fromhex(
            "1ADB2AB6AF48F17C9877CF77DB4FA39DC0923FBE215E576FE6F790B1FF2CBC96"
        )
        self.r1_2_d = bytes.fromhex(
            "DA2593A6E0BCC81A5941069CB76303487816A2F4E6C0F21737B56A7C90381597"
        )
        self.r1_2_y = bytes.fromhex(
            "1897A28666FE1CC4FACEF79CC7BDECDC271F2A619A00844FCD553A12DD679A4F"
        )
        self.r1_2_x = bytes.fromhex(
            "0EB313B4D314A1001244776D321F2DD88A5A31DF06A6EEAE0A79832D39408BC1"
        )
        self.r1_3_d = bytes.fromhex(
            "BF31D3F9670A7D1342259E700F48DD9983A5F9DF80D58994C667B6EBFD23270E"
        )
        self.r1_3_y = bytes.fromhex(
            "5694315AD17A4DA5E3F69CA02F83E9C3D594712137ED8AFB748A70491598F9CD"
        )
        self.r1_3_x = bytes.fromhex(
            "FAD4312A45F45A3212810905B223800F6CED4BC8D5BACBC8D33BB60C45FC98DD"
        )

        # "Rikard Test 2"
        self.r2_csalg = -8
        self.r2_csalg_params = [[1], [1, 6]]

        self.r2_1_private = bytes.fromhex(
            "397CEB5A8D21D74A9258C20C33FC45AB152B02CF479B2E3081285F77454CF347"
        )
        self.r2_1_public = bytes.fromhex(
            "CE616F28426EF24EDB51DBCEF7A23305F886F657959D4DF889DDFC0255042159"
        )
        self.r2_2_private = bytes.fromhex(
            "70559B9EECDC578D5FC2CA37F9969630029F1592AFF3306392AB15546C6A184A"
        )
        self.r2_2_public = bytes.fromhex(
            "2668BA6CA302F14E952228DA1250A890C143FDBA4DAED27246188B9E42C94B6D"
        )
        self.r2_3_private = bytes.fromhex(
            "E550CD532B881D52AD75CE7B91171063E568F2531FBDFB32EE01D1910BCF810F"
        )
        self.r2_3_public = bytes.fromhex(
            "5394E43633CDAC96F05120EA9F21307C9355A1B66B60A834B53E9BF60B1FB7DF"
        )

        # from https://github.com/ace-wg/Hackathon-109/blob/master/GroupDerivation.md
        self.r1_shared_12 = bytes.fromhex(
            "56ede6c59e919031cfc8afa3e74a7b7615c2e7a08494cf3638c78757293adc80"
        )
        self.r1_shared_13 = bytes.fromhex(
            "f568ec5f7df45db137fc79a27595eba737b62e8ee385c7309e316dd409de6953"
        )
        # Not actually used; these are already verified in
        # tests_util_cryptography's vectors
        self.r2_shared_12 = bytes.fromhex(
            "4546babdb9482396c167af11d21953bfa49eb9f630c45de93ee4d3b9ef059576"
        )
        self.r2_shared_13 = bytes.fromhex(
            "bb11648af3dfebb35e612914a7a21fc751b001aceb0267c5536528e2b9261450"
        )
    def alg(self):
        alg_sign = aiocoap.oscore.Ed25519()
        alg_pairwise = aiocoap.oscore.Ed25519()
        self.assertEqual(alg_sign.value, self.r2_csalg)
        return (alg_sign, alg_pairwise)

    def test_publickey_derivation(self):
        alg, _ = self.alg()
        self.assertEqual(self.r2_1_public, alg.public_from_private(self.r2_1_private))
        self.assertEqual(self.r2_2_public, alg.public_from_private(self.r2_2_private))
        self.assertEqual(self.r2_3_public, alg.public_from_private(self.r2_3_private))

    def _test_keypair(self, alg_sign, alg_pairwise, private, public):
        body = b""
        aad = b""

        signature = alg_sign.sign(body, aad, private)
        alg_sign.verify(signature, body, aad, public)
        self.assertRaises(
            aiocoap.oscore.ProtectionInvalid,
            lambda: alg_sign.verify(signature, body + b"x", aad, public),
        )

    @unittest.skip
    def test_publickey_signatures(self):
        alg = self.alg()
        self._test_keypair(*alg, self.r2_1_private, self.r2_1_public)
        self._test_keypair(*alg, self.r2_2_private, self.r2_2_public)
        self._test_keypair(*alg, self.r2_3_private, self.r2_3_public)

    @unittest.skip
    def test_generation(self):
        alg = self.alg()
        for alg in aiocoap.oscore.algorithms_staticstatic.values():
            random_key = alg.generate()
            public_key = alg.public_from_private(random_key)
            self._test_keypair(*alg, random_key, public_key)

            second_random = alg.generate()
            second_public = alg.public_from_private(second_random)

            self.assertEqual(
                alg.staticstatic(random_key, second_public),
                alg.staticstatic(second_random, public_key),
            )

    def test_ecdsa_vectors(self):
        alg = aiocoap.oscore.ECDSA_SHA256_P256()

        r1_1 = alg.from_private_parts(self.r1_1_x, self.r1_1_y, self.r1_1_d)
        r1_2 = alg.from_private_parts(self.r1_2_x, self.r1_2_y, self.r1_2_d)
        r1_3 = alg.from_private_parts(self.r1_3_x, self.r1_3_y, self.r1_3_d)

        self.assertEqual(
            alg.staticstatic(r1_1, alg.public_from_private(r1_2)), self.r1_shared_12
        )
        self.assertEqual(
            alg.staticstatic(r1_3, alg.public_from_private(r1_1)), self.r1_shared_13
        )


@_skip_unless_oscore
class TestOSCORECompression(unittest.TestCase):
    def compare_uncompress(
        self, ref_option, ref_payload, ref_protected, ref_unprotected, ref_ciphertext
    ):
        message = aiocoap.Message(payload=ref_payload, oscore=ref_option)
        protected_serialized, protected, unprotected, ciphertext = (
            aiocoap.oscore.CanUnprotect._extract_encrypted0(message)
        )
        self.assertEqual(protected, ref_protected, "Protected dictionary mismatch")
        self.assertEqual(
            unprotected, ref_unprotected, "Unprotected dictionary mismatch"
        )
        self.assertEqual(ciphertext, ref_ciphertext, "Ciphertext mismatch")

    def compare_compress(
        self, ref_option, ref_payload, ref_protected, ref_unprotected, ref_ciphertext
    ):
        option, payload = aiocoap.oscore.CanProtect._compress(
            ref_protected, ref_unprotected, ref_ciphertext
        )
        self.assertEqual(option, ref_option, "Compressed option mismatch")
        self.assertEqual(payload, ref_payload, "Compressed payload mismatch")

    def compare_all(
        self, ref_option, ref_payload, ref_protected, ref_unprotected, ref_ciphertext
    ):
        self.compare_uncompress(
            ref_option, ref_payload, ref_protected, ref_unprotected, ref_ciphertext
        )
        self.compare_compress(
            ref_option, ref_payload, ref_protected, ref_unprotected, ref_ciphertext
        )

    def test_empty(self):
        self.compare_all(b"", b"1234", {}, {}, b"1234")

    def test_short(self):
        self.compare_all(
            b"\x01\x00", b"1234", {}, {aiocoap.oscore.COSE_PIV: b"\x00"}, b"1234"
        )

    def test_long(self):
        self.compare_all(
            b"\x05ABCDE", b"1234", {}, {aiocoap.oscore.COSE_PIV: b"ABCDE"}, b"1234"
        )

    def test_kid(self):
        self.compare_all(
            b"\x0bABC--------",
            b"1234",
            {},
            {aiocoap.oscore.COSE_PIV: b"ABC", aiocoap.oscore.COSE_KID: b"--------"},
            b"1234",
        )
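    # A sketch of the flag-byte layout these compression tests encode
    # (RFC 8613 Section 6.1; this helper is illustrative, not aiocoap's
    # implementation): the low three bits carry the Partial IV length, 0x08
    # announces a kid, 0x10 a length-prefixed kid context, and 0x20 is the
    # bit Group OSCORE uses for the countersignature flag.
    @staticmethod
    def _example_parse_option(option):
        flags, tail = option[0], option[1:]
        piv, tail = tail[: flags & 0x07], tail[flags & 0x07 :]
        kid_context = None
        if flags & 0x10:
            s, tail = tail[0], tail[1:]
            kid_context, tail = tail[:s], tail[s:]
        kid = tail if flags & 0x08 else None
        return piv, kid_context, kid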
    def test_idcontext(self):
        self.compare_all(
            b"\x1bABC\x03abc--------",
            b"1234",
            {},
            {
                aiocoap.oscore.COSE_PIV: b"ABC",
                aiocoap.oscore.COSE_KID: b"--------",
                aiocoap.oscore.COSE_KID_CONTEXT: b"abc",
            },
            b"1234",
        )

    def test_idcontext_nopiv(self):
        self.compare_all(
            b"\x18\x03abc--------",
            b"1234",
            {},
            {
                aiocoap.oscore.COSE_KID: b"--------",
                aiocoap.oscore.COSE_KID_CONTEXT: b"abc",
            },
            b"1234",
        )

    def test_countersignature(self):
        # This is only as correct as it gets with the interactions between
        # determining the countersignature (or its length) and uncompression:
        # The flag is registered, but its value is deferred to a later step
        # (that'd actually know the algorithm) and relies on the later
        # process to move data over from the plaintext
        self.compare_all(
            b"\x2bABC--",
            b"1234sigsigsig",
            {},
            {
                aiocoap.oscore.COSE_PIV: b"ABC",
                aiocoap.oscore.COSE_KID: b"--",
                aiocoap.oscore.COSE_COUNTERSIGNATURE0: aiocoap.oscore.PRESENT_BUT_NO_VALUE_YET,
            },
            b"1234sigsigsig",
        )

    def test_reserved(self):
        self.assertRaises(
            aiocoap.oscore.DecodeError,
            lambda: self.compare_uncompress(b"\x4bABC--", b"1234", None, None, None),
        )
        self.assertRaises(
            aiocoap.oscore.DecodeError,
            lambda: self.compare_uncompress(b"\x8bABC--", b"1234", None, None, None),
        )

    def test_unknown(self):
        self.assertRaises(
            RuntimeError, lambda: self.compare_compress(None, None, {"x": "y"}, {}, b"")
        )
        self.assertRaises(
            RuntimeError, lambda: self.compare_compress(None, None, {}, {"x": "y"}, b"")
        )


@_skip_unless_oscore
class TestAlgorithms(unittest.TestCase):
    def test_aescbc(self):
        # Test vectors generated using PyCryptodome as described at
        # https://codeigo.com/python/aes-encryption-and-decryption-in-python-64-128-256/
        # ("Implementation of AES-CBC Encryption and Decryption in Python"
        # with the key length set to 16)
        from aiocoap.oscore import A128CBC

        key = b"3\x8b\xd0\xf2\x18&\x15\nS\x14%\xec!\x16\n$"
        iv = b"7\x07\xe3TA65~\x8d%'&\x8e\x1anE"
        plain = b"This is the message to be encrypted"
        expected_ciphertext = b"\x05\xbb\xd0\xea\xc2\x97pvF-P\x9a\x9e\xa4=\x94\xac\xad\x00\xc0/\x00&\x7f\x83\xca@c\xeew\xc3\x16\x14h\xca\x023\xb6Vq\xf4\x8d\xb7\xc2S3\xfa\xae"
        aad = b""

        encrypted = A128CBC.encrypt(plain, aad, key, iv)
        self.assertEqual(encrypted, expected_ciphertext)
        decrypted = A128CBC.decrypt(encrypted, aad, key, iv)
        self.assertEqual(decrypted, plain)


aiocoap-0.4.12/tests/test_oscore_plugtest.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

"""Run the OSCORE plug test"""

import asyncio
import unittest
import tempfile
import shutil

import aiocoap
import aiocoap.defaults
from aiocoap.util import hostportjoin

from .test_server import WithAsyncLoop, WithClient, asynctest
from .fixtures import is_test_successful
from .common import PYTHON_PREFIX, CapturingSubprocess

SERVER_ADDRESS = "::1"
SERVER = PYTHON_PREFIX + [
    "./contrib/oscore-plugtest/plugtest-server",
    "--verbose",
    "--bind",
    hostportjoin(SERVER_ADDRESS, None),
]
CLIENT = PYTHON_PREFIX + ["./contrib/oscore-plugtest/plugtest-client", "--verbose"]

# those are to be expected to contain bad words -- 'Check passed: X failed'
# is legitimate output
output_whitelist = ["Check passed: "]
# explicitly whitelisted for when the server is run with increased verbosity
debug_whitelist = [
    "INFO:coap-server:Render request raised a renderable error",
    "DEBUG:oscore-site:Will encrypt message as response: ",
]
class WithAssertNofaillines(unittest.TestCase):
    def assertNoFaillines(self, text_to_check, message):
        """Assert that there are no lines that contain the phrase 'fail' or
        'WARNING'/'ERROR' in the output, unless they are a 'Check passed'
        line or other whitelisted ones.

        This is to check the output of the plugtest client, which may
        successfully report: 'Check passed: The validation failed. (Tag
        invalid)'"""

        lines = text_to_check.decode("utf8").split("\n")
        lines = (
            l
            # "failed" and "error" are always legitimate in this position as
            # they happen by design; wherever they are unexpected, they're
            # caught by the regular plug test operation
            .replace("Precondition Failed", "Precondition @@@led").replace(
                "Internal Server Error", "Internal Server @@@or"
            )
            for l in lines
        )
        lines = (
            l
            for l in lines
            if not any(l.startswith(white) for white in output_whitelist)
        )
        lines = (l for l in lines if not any(white in l for white in debug_whitelist))
        errorlines = (
            l
            for l in lines
            if "fail" in l.lower() or "warning" in l.lower() or "error" in l.lower()
        )
        self.assertEqual([], list(errorlines), message)


@unittest.skipIf(
    aiocoap.defaults.oscore_missing_modules(),
    "Module missing for running OSCORE tests: %s"
    % (aiocoap.defaults.oscore_missing_modules(),),
)
class WithPlugtestServer(WithAsyncLoop, WithAssertNofaillines):
    def setUp(self):
        super(WithPlugtestServer, self).setUp()
        ready = self.loop.create_future()
        self.__done = self.loop.create_future()

        self.contextdir = tempfile.mkdtemp(suffix="-contexts")

        self.__task = self.loop.create_task(self.run_server(ready, self.__done))
        self.__task.add_done_callback(
            lambda _: None
            if ready.done()
            else ready.set_exception(self.__task.exception())
        )

        self.loop.run_until_complete(ready)

    async def run_server(self, readiness, done):
        self.process, process_outputs = await self.loop.subprocess_exec(
            CapturingSubprocess, *self.SERVER, self.contextdir + "/server", stdin=None
        )
        try:
            while True:
                if b"Plugtest server ready.\n" in process_outputs.stdout:
                    break
                if self.process.get_returncode() is not None:
                    readiness.set_exception(
                        RuntimeError(
                            "OSCORE server process terminated during startup:\n%s\n%s"
                            % (
                                process_outputs.stdout.decode("utf8"),
                                process_outputs.stderr.decode("utf8"),
                            )
                        )
                    )
                    return
                await process_outputs.read_more
            readiness.set_result(True)

            while True:
                if self.process.get_returncode() is not None:
                    break
                await process_outputs.read_more

            done.set_result((process_outputs.stdout, process_outputs.stderr))
        finally:
            self.process.close()

    def tearDown(self):
        # Don't leave this over, even if anything is raised during teardown
        self.process.terminate()

        super().tearDown()

        out, err = self.loop.run_until_complete(self.__done)

        if not is_test_successful(self):
            if not out and not err:
                return
            self.fail(
                "Previous errors occurred."
                + (
                    "\nServer stdout was:\n    "
                    + out.decode("utf8").replace("\n", "\n    ")
                    if out
                    else ""
                )
                + (
                    "\nServer stderr was:\n    "
                    + err.decode("utf8").replace("\n", "\n    ")
                    if err
                    else ""
                )
            )
        else:
            self.assertNoFaillines(out, '"failed" showed up in plugtest server stdout')
            self.assertNoFaillines(err, '"failed" showed up in plugtest server stderr')

        # Unlike the server process termination, leaving those around can be
        # helpful and barely does any harm.
        shutil.rmtree(self.contextdir)


class TestOSCOREPlugtestBase(WithPlugtestServer, WithClient, WithAssertNofaillines):
    @asynctest
    async def _test_plugtestclient(self, x):
        proc, transport = await self.loop.subprocess_exec(
            CapturingSubprocess,
            *(
                CLIENT
                + ["[" + SERVER_ADDRESS + "]", self.contextdir + "/client", str(x)]
            ),
            stdin=None,
        )
        try:
            while True:
                if proc.get_returncode() is not None:
                    break
                await transport.read_more
        except asyncio.CancelledError:
            proc.terminate()
        else:
            proc.close()

        self.assertEqual(
            proc.get_returncode(),
            0,
            "Plugtest client returned a non-zero exit state\nOutput was:\n"
            + transport.stdout.decode("utf8")
            + "\nError output was:\n"
            + transport.stderr.decode("utf8"),
        )
        self.assertNoFaillines(
            transport.stdout, '"failed" showed up in plugtest client stdout'
        )
        self.assertNoFaillines(
            transport.stderr, '"failed" showed up in plugtest client stderr'
        )


class TestOSCOREPlugtestWithoutRecovery(TestOSCOREPlugtestBase):
    SERVER = SERVER


class TestOSCOREPlugtestWithRecovery(TestOSCOREPlugtestBase):
    SERVER = SERVER + ["--state-was-lost"]


for x in range(0, 17):
    for cls in (TestOSCOREPlugtestWithRecovery, TestOSCOREPlugtestWithoutRecovery):
        t = lambda self, x=x: self._test_plugtestclient(x)
        if x == 16:
            # That test can not succeed against a regular plugtest server
            t = unittest.expectedFailure(t)
        if x == 7:
            # That test fails because there is no proper observation
            # cancellation around yet, see
            # https://github.com/chrysn/aiocoap/issues/104
            #
            # Not making a statement on whether this is expected to work or
            # not, because it is highly irregular (it works with setup.py
            # test and fails with tox?)
            continue
        # enforcing them to sort properly is purely a readability thing, they
        # execute correctly out-of-order too.
        setattr(cls, "test_%03d" % x, t)

# Let's not leak a global that'd be picked up for testing, given these are
# already being tested
del cls


aiocoap-0.4.12/tests/test_protocol.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import unittest

from .fixtures import asynctest, no_warnings, WithAsyncLoop, WithLogMonitoring
from . import common

from aiocoap import Context


class TestProtocolSetup(WithLogMonitoring, WithAsyncLoop):
    """Tests that are only concerned with the setup of contexts, and the way
    they can be set up."""

    @no_warnings
    @asynctest
    async def test_empty_setup_shutdown(self):
        ctx = await Context.create_client_context()
        await ctx.shutdown()

    # There is not yet a way to set things up in an async context manager.
    # @no_warnings
    # @asynctest
    # async def test_empty_contextmgr(self):
    #     async with Context.create_client_context():
    #         pass

    # The following tests should be converted to context managers once
    # usable.

    @no_warnings
    @asynctest
    # Workaround for https://github.com/chrysn/aiocoap/issues/321
    @unittest.skipIf(
        hasattr(common, "gbulb"),
        reason="uvloop has unresolved issues with unused contexts",
    )
    async def test_multiple_contexts(self):
        # Not that that'd be a regular thing to do, just checking it *can* be
        # done
        c1 = await Context.create_client_context(loggername="coap-client1")
        c2 = await Context.create_client_context(loggername="coap-client2")
        # None is an acceptable site; binding to a concrete port
        # removes the worries of situations where the default
        # transports can't bind to "any".
        s1 = await Context.create_server_context(
            None, bind=("::1", None), loggername="coap-server"
        )

        await c1.shutdown()
        await s1.shutdown()
        await c2.shutdown()

    @no_warnings
    @asynctest
    async def test_serverports_no_conflict(self):
        # When different ports are used, servers should not get into
        # conflict.
        #
        # (To some extent, that this is so easy is the fault of the weird way
        # the other protocols' ports are set for lack of configurability).
        s1 = await Context.create_server_context(None, bind=("::1", 1234))
        s2 = await Context.create_server_context(None, bind=("::1", None))

        await s1.shutdown()
        await s2.shutdown()


aiocoap-0.4.12/tests/test_proxy.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import asyncio
import unittest

from . import common
from .test_server import WithAsyncLoop, Destructing, WithClient, TestServer, CLEANUPTIME
from .test_client import TestClientWithSetHost

import aiocoap.proxy.client
import aiocoap.cli.proxy
from aiocoap.util import hostportjoin


class WithProxyServer(WithAsyncLoop, Destructing):
    def setUp(self):
        super(WithProxyServer, self).setUp()

        self.forwardproxy = aiocoap.cli.proxy.Main(
            ["--forward", "--bind", hostportjoin(self.proxyhost, self.proxyport)]
        )
        self.loop.run_until_complete(self.forwardproxy.initializing)

    def tearDown(self):
        super(WithProxyServer, self).tearDown()
        self.loop.run_until_complete(self.forwardproxy.shutdown())

        self._del_to_be_sure("forwardproxy")

        self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))

    proxyport = 56839
    proxyhost = common.loopbackname_v6 or common.loopbackname_v46
    proxyaddress = "%s:%d" % (proxyhost, proxyport)


class WithProxyClient(WithClient, WithProxyServer):
    def setUp(self):
        super(WithProxyClient, self).setUp()
        original_client_log = self.client.log
        self.client = aiocoap.proxy.client.ProxyForwarder(
            self.proxyaddress, self.client
        )
        self.client.log = original_client_log

    def tearDown(self):
        self.client = self.client.context


class TestServerWithProxy(WithProxyClient, TestServer):
    def build_request(self):
        # this needs to be run differently because tests/server.py
        # doesn't exactly use the high-level apis. (and that's ok because we
        # need to test the server with simple messages too.)
        request = aiocoap.Message(code=aiocoap.GET)
        request.unresolved_remote = self.proxyaddress
        request.opt.proxy_scheme = "coap"
        request.opt.uri_host = self.serveraddress
        return request
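    # Forward proxying in CoAP needs no special request line: the
    # Proxy-Scheme and Uri-Host options set above let the proxy reassemble
    # the absolute URI of the origin server, while the message itself is
    # addressed to the proxy (RFC 7252 Section 5.10.2).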
""" import unittest import aiocoap from aiocoap.util import hostportjoin from .test_server import WithAsyncLoop, Destructing, WithClient, asynctest linkheader_modules = aiocoap.defaults.linkheader_missing_modules() _skip_unless_linkheader = unittest.skipIf( linkheader_modules, "Modules missing for running RD tests: %s" % (linkheader_modules,), ) if not linkheader_modules: from aiocoap.util.linkformat import link_header import aiocoap.cli.rd class WithResourceDirectory(WithAsyncLoop, Destructing): rd_address = "::1" rd_port = 56830 rd_netloc = "[%s]:%d" % (rd_address, rd_port) def setUp(self): super().setUp() self.rd = aiocoap.cli.rd.Main( ["--bind", hostportjoin("::1", self.rd_port)], loop=self.loop ) self.loop.run_until_complete(self.rd.initializing) def tearDown(self): self.loop.run_until_complete(self.rd.shutdown()) super().tearDown() self._del_to_be_sure("rd") class TestDiscovery(WithResourceDirectory, WithClient): @_skip_unless_linkheader @asynctest async def test_discovery(self): request = aiocoap.Message( code=aiocoap.GET, uri="coap://%s/.well-known/core?rt=core.rd*" % self.rd_netloc, ) response = await self.client.request(request).response self.assertEqual( response.code, aiocoap.CONTENT, "RD discovery did not give content" ) links = link_header.parse(response.payload.decode("utf8")) # Not checking for presence of group resources: not implemented here for rt in ("core.rd", "core.rd-lookup-ep", "core.rd-lookup-res"): self.assertEqual( len([x for x in links.links if x.rt == [rt]]), 1, "Not exactly one entry of rt=%s found" % rt, ) async def _get_endpoint(self, rt): """Return the URI for a given rt in the configured RD""" if not hasattr(self, "_endpoints"): request = aiocoap.Message( code=aiocoap.GET, uri="coap://%s/.well-known/core?rt=core.rd*" % self.rd_netloc, ) response = await self.client.request(request).response self._endpoints = { entry.rt[0]: entry.get_target(response.get_request_uri()) for entry in link_header.parse(response.payload.decode("utf8")).links } return self._endpoints[rt] @_skip_unless_linkheader @asynctest async def test_registration(self): request = aiocoap.Message( code=aiocoap.POST, uri=(await self._get_endpoint("core.rd")) + "?ep=node1", content_format=40, payload=b';ct=41;rt="temperature-c";if="sensor",;ct=41;rt="light-lux";if="sensor"', ) response = await self.client.request(request).response self.assertEqual( response.code, aiocoap.CREATED, "Registration did not result in Created" ) self.assertTrue( len(response.opt.location_path) > 0, "Registration did not result in non-empty registration resource", ) # FIXME: there are many more things to be tested here aiocoap-0.4.12/tests/test_reverseproxy.py000066400000000000000000000105401472205122200205360ustar00rootroot00000000000000# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors # # SPDX-License-Identifier: MIT import asyncio import unittest from . 
    @_skip_unless_linkheader
    @asynctest
    async def test_registration(self):
        request = aiocoap.Message(
            code=aiocoap.POST,
            uri=(await self._get_endpoint("core.rd")) + "?ep=node1",
            content_format=40,
            payload=b'</sensors/temp>;ct=41;rt="temperature-c";if="sensor",</sensors/light>;ct=41;rt="light-lux";if="sensor"',
        )
        response = await self.client.request(request).response

        self.assertEqual(
            response.code, aiocoap.CREATED, "Registration did not result in Created"
        )
        self.assertTrue(
            len(response.opt.location_path) > 0,
            "Registration did not result in non-empty registration resource",
        )

    # FIXME: there are many more things to be tested here


aiocoap-0.4.12/tests/test_reverseproxy.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import asyncio
import unittest

from . import common
from .test_server import (
    WithAsyncLoop,
    Destructing,
    WithClient,
    WithTestServer,
    CLEANUPTIME,
)

import aiocoap.proxy.client
import aiocoap.cli.proxy
from aiocoap.util import hostportjoin


class WithReverseProxy(WithAsyncLoop, Destructing):
    def setUp(self):
        super(WithReverseProxy, self).setUp()

        self.reverseproxy = aiocoap.cli.proxy.Main(
            [
                "--reverse",
                "--bind",
                hostportjoin(self.proxyhost, self.proxyport),
                "--namebased",
                "%s:%s" % (self.name_for_real_server, self.servernetloc),
                "--pathbased",
                "%s:%s" % ("/".join(self.path_for_real_server), self.servernetloc),
            ],
            loop=self.loop,
        )
        self.loop.run_until_complete(self.reverseproxy.initializing)

    def tearDown(self):
        super(WithReverseProxy, self).tearDown()
        self.loop.run_until_complete(self.reverseproxy.shutdown())

        self._del_to_be_sure("reverseproxy")

        self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))

    proxyport = 56839
    proxyhost = common.loopbackname_v6 or common.loopbackname_v46
    proxyaddress = "%s:%d" % (proxyhost, proxyport)

    name_for_real_server = "aliasedname"
    path_for_real_server = ("aliased", "name")


class TestReverseProxy(WithReverseProxy, WithClient, WithTestServer):
    @unittest.skipIf(
        common.using_simple6,
        "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)",
    )
    def test_routing(self):
        yieldfrom = lambda f: self.loop.run_until_complete(f)

        def req():
            request = aiocoap.Message(code=aiocoap.GET)
            request.unresolved_remote = self.proxyaddress
            request.opt.uri_path = ("big",)
            return request

        request = req()
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code,
            aiocoap.BAD_REQUEST,
            "GET without hostname gave resource (something like BAD_REQUEST expected)",
        )

        request = req()
        request.opt.uri_host = self.name_for_real_server
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code,
            aiocoap.CONTENT,
            "GET with hostname-based proxying was not successful",
        )

        request = req()
        request.opt.uri_path = self.path_for_real_server + request.opt.uri_path
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code,
            aiocoap.CONTENT,
            "GET with path-based proxying was not successful",
        )

    @unittest.skipIf(
        common.using_simple6,
        "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)",
    )
    def test_options(self):
        yieldfrom = lambda f: self.loop.run_until_complete(f)

        def req():
            request = aiocoap.Message(code=aiocoap.GET)
            request.unresolved_remote = self.proxyaddress
            request.opt.uri_path = ("big",)
            request.opt.uri_host = self.name_for_real_server
            return request

        request = req()
        request.opt.proxy_scheme = "coap"
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code,
            aiocoap.BAD_OPTION,
            "Reverse proxy supports proxying even though it shouldn't.",
        )

        request = req()
        request.opt.add_option(
            aiocoap.optiontypes.StringOption(2**10 + 2, "can't proxy this")
        )
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code, aiocoap.BAD_OPTION, "Proxy did not react to unsafe option."
        )

        request = req()
        request.opt.add_option(
            aiocoap.optiontypes.StringOption(2**10, "nothing to see here")
        )
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(
            response.code,
            aiocoap.CONTENT,
            "Proxy did not ignore the safe-to-forward option.",
        )
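    # For reference on the option numbers used in test_options above: in
    # CoAP (RFC 7252 Section 5.4.6), bit 0 of an option number marks it as
    # critical and bit 1 as unsafe to forward, so 2**10 + 2 is an unsafe
    # option the proxy must reject, while 2**10 is safe-to-forward and may
    # simply be passed along.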
aiocoap-0.4.12/tests/test_server.py

# SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT

import asyncio
import contextlib
import re
import aiocoap
import aiocoap.resource
from aiocoap.numbers import ContentFormat
import unittest
import logging
import os
import json

from . import common
from .fixtures import no_warnings, WithAsyncLoop, Destructing, CLEANUPTIME, asynctest


class slow_empty_ack(contextlib.ContextDecorator):
    def __enter__(self):
        # FIXME: Pushing the times here so that even on a loaded system
        # with pypy and coverage, this can complete realistically.
        #
        # This will break as soon as transport tuning becomes more
        # per-transport or even per-remote; the proper fix would be to use
        # mock time anyway.
        self.original_empty_ack_delay = aiocoap.numbers.TransportTuning.EMPTY_ACK_DELAY
        aiocoap.numbers.TransportTuning.EMPTY_ACK_DELAY = (
            0.9 * aiocoap.numbers.TransportTuning.ACK_TIMEOUT
        )
        return self

    def __exit__(self, *exc):
        aiocoap.numbers.TransportTuning.EMPTY_ACK_DELAY = self.original_empty_ack_delay
        return False


class MultiRepresentationResource(aiocoap.resource.Resource):
    def __init__(self, representations):
        self._representations = representations
        super().__init__()

    async def render_get(self, request):
        if request.opt.proxy_scheme is not None:
            # For CLI test_noproxy -- but otherwise a symptom of
            # https://github.com/chrysn/aiocoap/issues/268
            return aiocoap.Message(code=aiocoap.CONTENT, payload=b"This is no proxy")

        m = request.opt.accept or ContentFormat.TEXT

        if m in self._representations:
            response = self._representations[m]
        else:
            return aiocoap.Message(code=aiocoap.NOT_ACCEPTABLE)

        return aiocoap.Message(payload=response, content_format=request.opt.accept or 0)


class SlowResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        await asyncio.sleep(0.2)
        # Marking the response as confirmable overrides the default behavior
        # of sending NON responses to NON requests.
        #
        # This is done to aid the test_freeoncancel test, and should revert
        # to the default behavior once that has better control over the
        # environment.
        return aiocoap.Message(mtype=aiocoap.CON)


class BigResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        # 10kb
        payload = b"0123456789----------" * 512
        response = aiocoap.Message(payload=payload)
        aiocoap.resource.hashing_etag(request, response)
        return response


class SlowBigResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        # Should be 0.2s usually, but while running in slow_empty_ack mode,
        # we have to slow it down. Adds 2s to the tests, but that makes the
        # test more reliable.
        await asyncio.sleep(aiocoap.numbers.TransportTuning.EMPTY_ACK_DELAY * 1.1)

        # 1.6kb
        payload = b"0123456789----------" * 80
        return aiocoap.Message(payload=payload)


class ManualBigResource(aiocoap.resource.Resource):
    async def needs_blockwise_assembly(self, request):
        return False

    async def render_get(self, request):
        BlockwiseTuple = aiocoap.optiontypes.BlockOption.BlockwiseTuple
        block2 = request.opt.block2 or BlockwiseTuple(0, 0, 6)

        # as above
        body = b"0123456789----------" * 80

        # in a more realistic example, we wouldn't build this in memory but
        # eg. seek and read limited length
        slice = body[block2.start :]
        more = len(slice) > block2.size
        slice = slice[: block2.size]
        block2 = BlockwiseTuple(block2.block_number, more, block2.size_exponent)
        return aiocoap.Message(payload=slice, block2=block2)
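# A sketch of the more realistic variant the comment above alludes to,
# reading each block from a file instead of slicing an in-memory body. The
# `path` argument is hypothetical, and this class is not registered with the
# test site.
class FileBackedBigResource(aiocoap.resource.Resource):
    def __init__(self, path):
        super().__init__()
        self._path = path

    async def needs_blockwise_assembly(self, request):
        return False

    async def render_get(self, request):
        BlockwiseTuple = aiocoap.optiontypes.BlockOption.BlockwiseTuple
        block2 = request.opt.block2 or BlockwiseTuple(0, 0, 6)
        with open(self._path, "rb") as f:
            f.seek(block2.start)
            # reading one byte more than a block tells us whether more follows
            data = f.read(block2.size + 1)
        more = len(data) > block2.size
        return aiocoap.Message(
            payload=data[: block2.size],
            block2=BlockwiseTuple(block2.block_number, more, block2.size_exponent),
        )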
class ReplacingResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        return aiocoap.Message(payload=self.value)

    async def render_put(self, request):
        self.value = request.payload.replace(b"0", b"O")
        return aiocoap.Message()

    async def render_post(self, request):
        response = request.payload.replace(b"0", b"O")
        return aiocoap.Message(code=aiocoap.CONTENT, payload=response)


class RootResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        return aiocoap.Message(
            code=aiocoap.CONTENT, payload=b"Welcome to the test server"
        )


class GenericErrorResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        raise RuntimeError()


class PrettyErrorResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        raise self.MyError()

    class MyError(aiocoap.error.ConstructionRenderableError):
        code = aiocoap.NOT_FOUND
        message = "I'm sorry nothing is here"


class DoubleErrorResource(aiocoap.resource.Resource):
    async def render_get(self, request):
        raise self.MyError()

    class MyError(aiocoap.error.RenderableError):
        def to_message(self):
            raise RuntimeError()


class WhoAmI(aiocoap.resource.Resource):
    async def render_get(self, request):
        p = dict(
            repr=repr(request.remote),
            hostinfo=request.remote.hostinfo,
            hostinfo_local=request.remote.hostinfo_local,
            scheme=request.remote.scheme,
            urihost_option=request.opt.uri_host,
            # FIXME: The whole get_request_uri is a mess
            requested_uri=request._original_request_uri,
            claims=request.remote.authenticated_claims,
        )
        return aiocoap.Message(
            code=aiocoap.CONTENT,
            content_format=ContentFormat.JSON,
            payload=json.dumps(p).encode("utf8"),
        )


class BasicTestingSite(aiocoap.resource.Site):
    def __init__(self):
        super(BasicTestingSite, self).__init__()

        # Not part of the test suite, but handy when running standalone
        self.add_resource(
            [".well-known", "core"],
            aiocoap.resource.WKCResource(self.get_resources_as_linkheader),
        )

        self.add_resource(
            ["empty"],
            MultiRepresentationResource(
                {
                    ContentFormat.JSON: b"{}",
                    ContentFormat.LINKFORMAT: b"<>",
                    ContentFormat.TEXT: b"",
                }
            ),
        )
        self.add_resource(
            ["answer"],
            MultiRepresentationResource(
                {
                    ContentFormat.JSON: b'{"answer": 42}',
                    ContentFormat.CBOR: b"\xa1\x66\x61\x6e\x73\x77\x65\x72\x18\x2a",
                    ContentFormat.LINKFORMAT: b';rel="answer";anchor="https://en.wikipedia.org/wiki/Phrases_from_The_Hitchhiker%27s_Guide_to_the_Galaxy#Answer_to_the_Ultimate_Question_of_Life,_the_Universe,_and_Everything_(42)"',
                    ContentFormat.TEXT: b"The answer to life, the universe, and everything is 42.",
                }
            ),
        )
        self.add_resource(["slow"], SlowResource())
        self.add_resource(["big"], BigResource())
        self.add_resource(["slowbig"], SlowBigResource())
        self.add_resource(["manualbig"], ManualBigResource())
        self.add_resource(["replacing"], self.Subsite())
        self.add_resource(["error", "generic"], GenericErrorResource())
        self.add_resource(["error", "pretty"], PrettyErrorResource())
        self.add_resource(["error", "double"], DoubleErrorResource())
        self.add_resource(["whoami"], WhoAmI())
        self.add_resource([], RootResource())

    class Subsite(aiocoap.resource.Site):
        def __init__(self):
            super().__init__()
            self.add_resource(["one"], ReplacingResource())
class WithTestServer(WithAsyncLoop, Destructing):
    # to allow overriding the factory
    TestingSite = BasicTestingSite

    def create_testing_site(self):
        return self.TestingSite()

    def get_server_ssl_context(self):
        """Override hook for subclasses that want to populate _ssl_context
        at construction"""
        return None

    def setUp(self):
        super(WithTestServer, self).setUp()

        multicastif = (
            os.environ["AIOCOAP_TEST_MCIF"].split(":")
            if "AIOCOAP_TEST_MCIF" in os.environ
            else []
        )
        self.server = self.loop.run_until_complete(
            aiocoap.Context.create_server_context(
                self.create_testing_site(),
                bind=(self.serveraddress, None),
                multicast=multicastif,
                _ssl_context=self.get_server_ssl_context(),
            )
        )

    def tearDown(self):
        # let the server receive the acks we just sent
        self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))
        self.loop.run_until_complete(self.server.shutdown())
        # Nothing in the context should keep the request interfaces alive;
        # delete them first to see *which* of them is the one causing the
        # trouble
        while self.server.request_interfaces:
            self._del_to_be_sure(
                {
                    "get": (lambda self: self.server.request_interfaces[0]),
                    "del": (lambda self: self.server.request_interfaces.__delitem__(0)),
                    "label": "request_interfaces[%s]"
                    % self.server.request_interfaces[0],
                }
            )
        self._del_to_be_sure("server")

        super(WithTestServer, self).tearDown()

    serveraddress = "::1"
    servernetloc = "[%s]" % serveraddress
    servernamealias = common.loopbackname_v6 or common.loopbackname_v46


class WithClient(WithAsyncLoop, Destructing):
    def setUp(self):
        super(WithClient, self).setUp()

        self.client = self.loop.run_until_complete(
            aiocoap.Context.create_client_context()
        )

    def tearDown(self):
        self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))
        self.loop.run_until_complete(self.client.shutdown())
        # Nothing in the context should keep the request interfaces alive;
        # delete them first to see *which* of them is the one causing the
        # trouble
        while self.client.request_interfaces:
            self._del_to_be_sure(
                {
                    "get": (lambda self: self.client.request_interfaces[0]),
                    "del": (lambda self: self.client.request_interfaces.__delitem__(0)),
                    "label": "request_interfaces[%s]"
                    % self.client.request_interfaces[0],
                }
            )
        self._del_to_be_sure("client")

        super(WithClient, self).tearDown()


# test cases


class TestServerBase(WithTestServer, WithClient):
    """All the tools for building requests, but no actual tests; use this
    when working off the test server with no intention to run the full set
    of tests."""

    @no_warnings
    def build_request(self):
        request = aiocoap.Message(code=aiocoap.GET)
        request.unresolved_remote = self.servernetloc
        return request

    @asynctest
    async def fetch_response(self, request):
        return await self.client.request(request).response


class TestServer(TestServerBase):
    @no_warnings
    def test_empty_accept(self):
        request = self.build_request()
        request.opt.uri_path = ["empty"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.CONTENT, "Simple request did not succeed"
        )
        self.assertEqual(response.payload, b"", "Simple request gave unexpected result")

    @no_warnings
    def test_unacceptable_accept(self):
        request = self.build_request()
        request.opt.uri_path = ["empty"]
        request.opt.accept = 9999
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.NOT_ACCEPTABLE,
            "Unacceptable request was not not accepted",
        )

    @no_warnings
    def test_js_accept(self):
        request = self.build_request()
        request.opt.uri_path = ["empty"]
        request.opt.accept = ContentFormat.JSON
        response = self.fetch_response(request)
        self.assertEqual(response.code, aiocoap.CONTENT, "JSON request did not succeed")
        self.assertEqual(response.payload, b"{}", "JSON request gave unexpected result")
    @no_warnings
    def test_nonexisting_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["nonexisting"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.NOT_FOUND, "Nonexisting resource was not not found"
        )

    @no_warnings
    def test_spurious_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["..", "empty"]
        response = self.fetch_response(request)
        # different behavior would be ok-ish, as the .. in the request is
        # forbidden, but returning 4.04 is sane here
        self.assertEqual(
            response.code,
            aiocoap.NOT_FOUND,
            "'..' component in path did not get ignored the way it was expected",
        )

    @no_warnings
    @slow_empty_ack()
    def test_fast_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["empty"]
        response = self.fetch_response(request)
        self.assertEqual(response.code, aiocoap.CONTENT, "Fast request did not succeed")
        self.assertEqual(self._count_empty_acks(), 0, "Fast resource had an empty ack")

    @no_warnings
    def test_slow_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["slow"]
        response = self.fetch_response(request)
        self.assertEqual(response.code, aiocoap.CONTENT, "Slow request did not succeed")
        if response.requested_scheme in (None, "coap"):
            self.assertEqual(
                self._count_empty_acks(),
                1,
                "Slow resource was not handled in two exchanges",
            )

    @no_warnings
    def test_big_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["big"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.CONTENT, "Big resource request did not succeed"
        )
        self.assertEqual(
            len(response.payload), 10240, "Big resource is not as big as expected"
        )
        self.assertTrue(
            response.opt.etag != None, "Big resource does not provide an ETag"
        )

        request = self.build_request()
        request.opt.uri_path = ["big"]
        request.opt.etags = [response.opt.etag]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.VALID,
            "Big resource does not support ETag validation",
        )
        self.assertTrue(
            response.opt.etag != None, "Big resource does not send ETag for validation"
        )

    @no_warnings
    @slow_empty_ack()
    def test_slowbig_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["slowbig"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.CONTENT, "SlowBig resource request did not succeed"
        )
        self.assertEqual(
            len(response.payload), 1600, "SlowBig resource is not as big as expected"
        )
        if response.requested_scheme in (None, "coap"):
            self.assertEqual(
                self._count_empty_acks(),
                1,
                "SlowBig resource was not handled in two exchanges",
            )

    @no_warnings
    def test_manualbig_resource(self):
        request = self.build_request()
        request.opt.uri_path = ["manualbig"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.CONTENT, "ManualBig resource request did not succeed"
        )
        self.assertEqual(
            len(response.payload), 1600, "ManualBig resource is not as big as expected"
        )

    @no_warnings
    def test_replacing_resource(self):
        testpattern = b"01" * 1024

        request = self.build_request()
        request.code = aiocoap.PUT
        request.payload = testpattern
        request.opt.uri_path = ["replacing", "one"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code, aiocoap.CHANGED, "PUT did not result in CHANGED"
        )
        self.assertEqual(response.payload, b"", "PUT has unexpected payload")

        request = self.build_request()
        request.code = aiocoap.GET
        request.opt.uri_path = ["replacing", "one"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.CONTENT,
            "Replacing resource could not be GOT (GET'd?) successfully",
        )
        self.assertEqual(
            response.payload,
            testpattern.replace(b"0", b"O"),
            "Replacing resource did not replace as expected between PUT and GET",
        )

        request = self.build_request()
        request.code = aiocoap.POST
        request.payload = testpattern
        request.opt.uri_path = ["replacing", "one"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.CONTENT,
            "Replacing resource could not be POSTed to successfully",
        )
        self.assertEqual(
            response.payload,
            testpattern.replace(b"0", b"O"),
            "Replacing resource did not replace as expected when POSTed",
        )

    def test_error_resources(self):
        request = self.build_request()
        request.opt.uri_path = ["error", "generic"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.INTERNAL_SERVER_ERROR,
            "Runtime error possibly leaking information",
        )
        self.assertEqual(
            response.payload, b"", "Runtime error possibly leaking information"
        )

        request = self.build_request()
        request.opt.uri_path = ["error", "pretty"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.NOT_FOUND,
            "Runtime error possibly leaking information",
        )
        self.assertTrue(
            response.payload.decode("ascii").startswith("I'm sorry"),
            "Runtime error possibly leaking information",
        )

        request = self.build_request()
        request.opt.uri_path = ["error", "double"]
        response = self.fetch_response(request)
        self.assertEqual(
            response.code,
            aiocoap.INTERNAL_SERVER_ERROR,
            "Runtime double error possibly leaking information",
        )
        self.assertEqual(
            response.payload, b"", "Runtime double error possibly leaking information"
        )

    @no_warnings
    def test_root_resource(self):
        request = self.build_request()
        request.opt.uri_path = []
        response = self.fetch_response(request)
        self.assertEqual(response.code, aiocoap.CONTENT, "Root resource was not found")

    _empty_ack_logmsg = re.compile("^Incoming message


aiocoap-0.4.12/tox.ini

= 3.49
extras = all
setenv =
    AIOCOAP_TESTS_LOOP=glib

; Not running the -simple versions everywhere to save some CI time.
;
; For the CPython part we're using the uvloop runner, because AFAIR there were
; some extra workarounds. uvloop doesn't currently work with pypy, which gives
; us the opportunity there to run it on some Python also without uvloop.

[testenv:py312-uvloop-simpleudp]
description = Running on uvloop with simple* UDP transports
deps =
    coverage
    pytest
    uvloop
extras = all
; client-/server-transport substitute the default udp6 with the non-udp6
; alternative. that test could just as well be done on the default main loop,
; but AFAIR uvloop required occasional workarounds
setenv =
    AIOCOAP_DEFAULTS_EXPECT_ALL=1
    AIOCOAP_TESTS_LOOP=uvloop
    AIOCOAP_CLIENT_TRANSPORT=oscore:tinydtls:tcpclient:tlsclient:ws:simple6
    AIOCOAP_SERVER_TRANSPORT=oscore:tinydtls_server:tinydtls:tcpserver:tcpclient:tlsserver:tlsclient:ws:simplesocketserver

[testenv:pypy3-simpleudp]
description = Running pypy3 with simple UDP transports
deps =
    coverage
    pytest
extras = all
setenv =
    AIOCOAP_DEFAULTS_EXPECT_ALL=1
    AIOCOAP_CLIENT_TRANSPORT=oscore:tinydtls:tcpclient:tlsclient:ws:simple6
    AIOCOAP_SERVER_TRANSPORT=oscore:tinydtls_server:tinydtls:tcpserver:tcpclient:tlsserver:tlsclient:ws:simplesocketserver