--- ollama-python-0.4.8/.github/dependabot.yml ---
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: daily
  - package-ecosystem: pip
    directory: /
    schedule:
      interval: daily

--- ollama-python-0.4.8/.github/workflows/publish.yaml ---
name: publish

on:
  release:
    types:
      - created

jobs:
  publish:
    runs-on: ubuntu-latest
    environment: release
    permissions:
      id-token: write
      contents: write
    steps:
      - uses: actions/checkout@v4
      - run: pipx install poetry
      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
          cache: poetry
      - run: |
          poetry version ${GITHUB_REF_NAME#v}
          poetry build
      - uses: pypa/gh-action-pypi-publish@release/v1
      - run: gh release upload $GITHUB_REF_NAME dist/*
        env:
          GH_TOKEN: ${{ github.token }}

--- ollama-python-0.4.8/.github/workflows/test.yaml ---
name: test

on:
  push:
    branches:
      - main
  pull_request:

jobs:
  test:
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: pipx install poetry
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
      - run: poetry install --with=dev
      - run: poetry run pytest . --junitxml=junit/test-results-${{ matrix.python-version }}.xml --cov=ollama --cov-report=xml --cov-report=html
      - uses: actions/upload-artifact@v4
        with:
          name: pytest-results-${{ matrix.python-version }}
          path: junit/test-results-${{ matrix.python-version }}.xml
        if: ${{ always() }}

  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: pipx install poetry
      - uses: actions/setup-python@v5
        with:
          python-version: "3.13"
          cache: poetry
      - run: poetry install --with=dev
      - run: poetry run ruff check --output-format=github .
      - run: poetry run ruff format --check .
      - name: check poetry.lock is up-to-date
        run: poetry check --lock
      - name: check requirements.txt is up-to-date
        run: |
          poetry export >requirements.txt
          git diff --exit-code requirements.txt

--- ollama-python-0.4.8/.gitignore ---
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#  For a library or package, you might want to ignore these files since the code is
#  intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#  According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#  However, in case of collaboration, if having platform-specific dependencies or dependencies
#  having no cross-platform support, pipenv may install dependencies that don't work, or not
#  install all needed dependencies.
#Pipfile.lock

# poetry
#  Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#  This is especially recommended for binary packages to ensure reproducibility, and is more
#  commonly ignored for libraries.
#  https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#  Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#  pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#  in version control.
#  https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file. For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

--- ollama-python-0.4.8/LICENSE ---
MIT License

Copyright (c) Ollama

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--- ollama-python-0.4.8/README.md ---
# Ollama Python Library

The Ollama Python library provides the easiest way to integrate Python 3.8+ projects with [Ollama](https://github.com/ollama/ollama).

## Prerequisites

- [Ollama](https://ollama.com/download) should be installed and running
- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
  - See [Ollama.com](https://ollama.com/search) for more information on the models available.

## Install

```sh
pip install ollama
```

## Usage

```python
from ollama import chat
from ollama import ChatResponse

response: ChatResponse = chat(model='llama3.2', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
])
print(response['message']['content'])
# or access fields directly from the response object
print(response.message.content)
```

See [_types.py](ollama/_types.py) for more information on the response types.

## Streaming responses

Response streaming can be enabled by setting `stream=True`.

```python
from ollama import chat

stream = chat(
    model='llama3.2',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)

for chunk in stream:
  print(chunk['message']['content'], end='', flush=True)
```

## Custom client

A custom client can be created by instantiating `Client` or `AsyncClient` from `ollama`.

All extra keyword arguments are passed into the [`httpx.Client`](https://www.python-httpx.org/api/#client).

```python
from ollama import Client

client = Client(
  host='http://localhost:11434',
  headers={'x-some-header': 'some-value'}
)
response = client.chat(model='llama3.2', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
])
```

## Async client

The `AsyncClient` class is used to make asynchronous requests. It can be configured with the same fields as the `Client` class.
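For example, a custom host and extra headers can be passed to `AsyncClient` exactly as for `Client` (a minimal sketch; the host and header values here are placeholders):

```python
from ollama import AsyncClient

client = AsyncClient(
  host='http://localhost:11434',
  headers={'x-some-header': 'some-value'}
)
```

A basic asynchronous request then looks like this: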
```python
import asyncio
from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
  response = await AsyncClient().chat(model='llama3.2', messages=[message])
  print(response.message.content)

asyncio.run(chat())
```

Setting `stream=True` modifies functions to return a Python asynchronous generator:

```python
import asyncio
from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
  async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
    print(part['message']['content'], end='', flush=True)

asyncio.run(chat())
```

## API

The Ollama Python library's API is designed around the [Ollama REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).

### Chat

```python
ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
```

### Generate

```python
ollama.generate(model='llama3.2', prompt='Why is the sky blue?')
```

### List

```python
ollama.list()
```

### Show

```python
ollama.show('llama3.2')
```

### Create

```python
ollama.create(model='example', from_='llama3.2', system="You are Mario from Super Mario Bros.")
```

### Copy

```python
ollama.copy('llama3.2', 'user/llama3.2')
```

### Delete

```python
ollama.delete('llama3.2')
```

### Pull

```python
ollama.pull('llama3.2')
```

### Push

```python
ollama.push('user/llama3.2')
```

### Embed

```python
ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh scattering')
```

### Embed (batch)

```python
ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
```

### Ps

```python
ollama.ps()
```

## Errors

Errors are raised if requests return an error status or if an error is detected while streaming.

```python
import ollama

model = 'does-not-yet-exist'

try:
  ollama.chat(model)
except ollama.ResponseError as e:
  print('Error:', e.error)
  if e.status_code == 404:
    ollama.pull(model)
```

--- ollama-python-0.4.8/SECURITY.md ---
# Security

The Ollama maintainer team takes security seriously and will actively work to resolve security issues.

## Reporting a vulnerability

If you discover a security vulnerability, please do not open a public issue. Instead, please report it by emailing hello@ollama.com. We ask that you give us sufficient time to investigate and address the vulnerability before disclosing it publicly.
Please include the following details in your report:

- A description of the vulnerability
- Steps to reproduce the issue
- Your assessment of the potential impact
- Any possible mitigations

## Security best practices

While the maintainer team does their best to secure Ollama, users are encouraged to implement their own security best practices, such as:

- Regularly updating to the latest version of Ollama
- Securing access to hosted instances of Ollama
- Monitoring systems for unusual activity

## Contact

For any other questions or concerns related to security, please contact us at hello@ollama.com

--- ollama-python-0.4.8/examples/README.md ---
# Running Examples

Run the examples in this directory with:

```sh
# Run example
python3 examples/<example>.py
```

See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md) for full API documentation.

### Chat - Chat with a model
- [chat.py](chat.py)
- [async-chat.py](async-chat.py)
- [chat-stream.py](chat-stream.py) - Streamed outputs
- [chat-with-history.py](chat-with-history.py) - Chat with model and maintain history of the conversation

### Generate - Generate text with a model
- [generate.py](generate.py)
- [async-generate.py](async-generate.py)
- [generate-stream.py](generate-stream.py) - Streamed outputs
- [fill-in-middle.py](fill-in-middle.py) - Given a prefix and suffix, fill in the middle

### Tools/Function Calling - Call a function with a model
- [tools.py](tools.py) - Simple example of Tools/Function Calling
- [async-tools.py](async-tools.py)

### Multimodal with Images - Chat with a multimodal (image chat) model
- [multimodal-chat.py](multimodal-chat.py)
- [multimodal-generate.py](multimodal-generate.py)

### Structured Outputs - Generate structured outputs with a model
- [structured-outputs.py](structured-outputs.py)
- [async-structured-outputs.py](async-structured-outputs.py)
- [structured-outputs-image.py](structured-outputs-image.py)

### Ollama List - List all downloaded models and their properties
- [list.py](list.py)

### Ollama ps - Show model status with CPU/GPU usage
- [ps.py](ps.py)

### Ollama Pull - Pull a model from Ollama
Requirement: `pip install tqdm`
- [pull.py](pull.py)

### Ollama Create - Create a model from a Modelfile
- [create.py](create.py)

### Ollama Embed - Generate embeddings with a model
- [embed.py](embed.py)

--- ollama-python-0.4.8/examples/async-chat.py ---
import asyncio

from ollama import AsyncClient


async def main():
  messages = [
    {
      'role': 'user',
      'content': 'Why is the sky blue?',
    },
  ]

  client = AsyncClient()
  response = await client.chat('llama3.2', messages=messages)
  print(response['message']['content'])


if __name__ == '__main__':
  asyncio.run(main())

--- ollama-python-0.4.8/examples/async-generate.py ---
import asyncio

import ollama


async def main():
  client = ollama.AsyncClient()
  response = await client.generate('llama3.2', 'Why is the sky blue?')
  print(response['response'])


if __name__ == '__main__':
  try:
    asyncio.run(main())
  except KeyboardInterrupt:
    print('\nGoodbye!')

--- ollama-python-0.4.8/examples/async-structured-outputs.py ---
import asyncio

from pydantic import BaseModel

from ollama import AsyncClient

# Define the schema for the response
class FriendInfo(BaseModel):
  name: str
  age: int
  is_available: bool


class FriendList(BaseModel):
  friends: list[FriendInfo]


async def main():
  client = AsyncClient()
  response = await client.chat(
    model='llama3.1:8b',
    messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. Return a list of friends in JSON format'}],
    format=FriendList.model_json_schema(),  # Use Pydantic to generate the schema
    options={'temperature': 0},  # Make responses more deterministic
  )

  # Use Pydantic to validate the response
  friends_response = FriendList.model_validate_json(response.message.content)
  print(friends_response)


if __name__ == '__main__':
  asyncio.run(main())

--- ollama-python-0.4.8/examples/async-tools.py ---
import asyncio

import ollama
from ollama import ChatResponse


def add_two_numbers(a: int, b: int) -> int:
  """
  Add two numbers

  Args:
    a (int): The first number
    b (int): The second number

  Returns:
    int: The sum of the two numbers
  """
  return a + b


def subtract_two_numbers(a: int, b: int) -> int:
  """
  Subtract two numbers
  """
  return a - b


# Tools can still be manually defined and passed into chat
subtract_two_numbers_tool = {
  'type': 'function',
  'function': {
    'name': 'subtract_two_numbers',
    'description': 'Subtract two numbers',
    'parameters': {
      'type': 'object',
      'required': ['a', 'b'],
      'properties': {
        'a': {'type': 'integer', 'description': 'The first number'},
        'b': {'type': 'integer', 'description': 'The second number'},
      },
    },
  },
}

messages = [{'role': 'user', 'content': 'What is three plus one?'}]
print('Prompt:', messages[0]['content'])

available_functions = {
  'add_two_numbers': add_two_numbers,
  'subtract_two_numbers': subtract_two_numbers,
}


async def main():
  client = ollama.AsyncClient()

  response: ChatResponse = await client.chat(
    'llama3.1',
    messages=messages,
    tools=[add_two_numbers, subtract_two_numbers_tool],
  )

  if response.message.tool_calls:
    # There may be multiple tool calls in the response
    for tool in response.message.tool_calls:
      # Ensure the function is available, and then call it
      if function_to_call := available_functions.get(tool.function.name):
        print('Calling function:', tool.function.name)
        print('Arguments:', tool.function.arguments)
        output = function_to_call(**tool.function.arguments)
        print('Function output:', output)
      else:
        print('Function', tool.function.name, 'not found')

  # Only needed to chat with the model using the tool call results
  if response.message.tool_calls:
    # Add the function response to messages for the model to use
    messages.append(response.message)
    messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})

    # Get final response from model with function outputs
    final_response = await client.chat('llama3.1', messages=messages)
    print('Final response:', final_response.message.content)

  else:
    print('No tool calls returned from model')


if __name__ == '__main__':
  try:
    asyncio.run(main())
  except KeyboardInterrupt:
    print('\nGoodbye!')

--- ollama-python-0.4.8/examples/chat-stream.py ---
from ollama import chat

messages = [
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
]

for part in chat('llama3.2', messages=messages, stream=True):
  print(part['message']['content'], end='', flush=True)

print()
--- ollama-python-0.4.8/examples/chat-with-history.py ---
from ollama import chat

messages = [
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
  {
    'role': 'assistant',
    'content': "The sky is blue because of the way the Earth's atmosphere scatters sunlight.",
  },
  {
    'role': 'user',
    'content': 'What is the weather in Tokyo?',
  },
  {
    'role': 'assistant',
    'content': 'The weather in Tokyo is typically warm and humid during the summer months, with temperatures often exceeding 30°C (86°F). The city experiences a rainy season from June to September, with heavy rainfall and occasional typhoons. Winter is mild, with temperatures rarely dropping below freezing. The city is known for its high-tech and vibrant culture, with many popular tourist attractions such as the Tokyo Tower, Senso-ji Temple, and the bustling Shibuya district.',
  },
]

while True:
  user_input = input('Chat with history: ')
  response = chat(
    'llama3.2',
    messages=messages
    + [
      {'role': 'user', 'content': user_input},
    ],
  )

  # Add the response to the messages to maintain the history
  messages += [
    {'role': 'user', 'content': user_input},
    {'role': 'assistant', 'content': response.message.content},
  ]
  print(response.message.content + '\n')

--- ollama-python-0.4.8/examples/chat.py ---
from ollama import chat

messages = [
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
]

response = chat('llama3.2', messages=messages)
print(response['message']['content'])

--- ollama-python-0.4.8/examples/create.py ---
from ollama import Client

client = Client()
response = client.create(
  model='my-assistant',
  from_='llama3.2',
  system='You are mario from Super Mario Bros.',
  stream=False,
)
print(response.status)

--- ollama-python-0.4.8/examples/embed.py ---
from ollama import embed

response = embed(model='llama3.2', input='Hello, world!')
print(response['embeddings'])

--- ollama-python-0.4.8/examples/fill-in-middle.py ---
from ollama import generate

prompt = '''def remove_non_ascii(s: str) -> str:
    """ '''

suffix = """
    return result
"""

response = generate(
  model='codellama:7b-code',
  prompt=prompt,
  suffix=suffix,
  options={
    'num_predict': 128,
    'temperature': 0,
    'top_p': 0.9,
    'stop': ['<EOT>'],
  },
)

print(response['response'])

--- ollama-python-0.4.8/examples/generate-stream.py ---
from ollama import generate

for part in generate('llama3.2', 'Why is the sky blue?', stream=True):
  print(part['response'], end='', flush=True)

--- ollama-python-0.4.8/examples/generate.py ---
from ollama import generate

response = generate('llama3.2', 'Why is the sky blue?')
print(response['response'])

--- ollama-python-0.4.8/examples/list.py ---
from ollama import ListResponse, list

response: ListResponse = list()

for model in response.models:
  print('Name:', model.model)
  print('  Size (MB):', f'{(model.size.real / 1024 / 1024):.2f}')
  if model.details:
    print('  Format:', model.details.format)
    print('  Family:', model.details.family)
    print('  Parameter Size:', model.details.parameter_size)
    print('  Quantization Level:', model.details.quantization_level)
  print('\n')

--- ollama-python-0.4.8/examples/multimodal-chat.py ---
from ollama import chat

# from pathlib import Path

# Pass in the path to the image
path = input('Please enter the path to the image: ')

# You can also pass in base64 encoded image data
# img = base64.b64encode(Path(path).read_bytes()).decode()
# or the raw bytes
# img = Path(path).read_bytes()

response = chat(
  model='llama3.2-vision',
  messages=[
    {
      'role': 'user',
      'content': 'What is in this image? Be concise.',
      'images': [path],
    }
  ],
)

print(response.message.content)

--- ollama-python-0.4.8/examples/multimodal-generate.py ---
import random
import sys

import httpx

from ollama import generate

latest = httpx.get('https://xkcd.com/info.0.json')
latest.raise_for_status()

if len(sys.argv) > 1:
  num = int(sys.argv[1])
else:
  num = random.randint(1, latest.json().get('num'))

comic = httpx.get(f'https://xkcd.com/{num}/info.0.json')
comic.raise_for_status()

print(f'xkcd #{comic.json().get("num")}: {comic.json().get("alt")}')
print(f'link: https://xkcd.com/{num}')
print('---')

raw = httpx.get(comic.json().get('img'))
raw.raise_for_status()

for response in generate('llava', 'explain this comic:', images=[raw.content], stream=True):
  print(response['response'], end='', flush=True)

print()

--- ollama-python-0.4.8/examples/ps.py ---
from ollama import ProcessResponse, chat, ps, pull

# Ensure at least one model is loaded
response = pull('llama3.2', stream=True)
progress_states = set()
for progress in response:
  if progress.get('status') in progress_states:
    continue
  progress_states.add(progress.get('status'))
  print(progress.get('status'))

print('\n')

print('Waiting for model to load...\n')
chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])

response: ProcessResponse = ps()
for model in response.models:
  print('Model: ', model.model)
  print('  Digest: ', model.digest)
  print('  Expires at: ', model.expires_at)
  print('  Size: ', model.size)
  print('  Size vram: ', model.size_vram)
  print('  Details: ', model.details)
  print('\n')

--- ollama-python-0.4.8/examples/pull.py ---
from tqdm import tqdm

from ollama import pull

current_digest, bars = '', {}
for progress in pull('llama3.2', stream=True):
  digest = progress.get('digest', '')
  if digest != current_digest and current_digest in bars:
    bars[current_digest].close()

  if not digest:
    print(progress.get('status'))
    continue

  if digest not in bars and (total := progress.get('total')):
    bars[digest] = tqdm(total=total, desc=f'pulling {digest[7:19]}', unit='B', unit_scale=True)

  if completed := progress.get('completed'):
    bars[digest].update(completed - bars[digest].n)

  current_digest = digest

--- ollama-python-0.4.8/examples/structured-outputs-image.py ---
from pathlib import Path
from typing import Literal

from pydantic import BaseModel

from ollama import chat


# Define the schema for image objects
class Object(BaseModel):
  name: str
  confidence: float
  attributes: str


class ImageDescription(BaseModel):
  summary: str
  objects: list[Object]
  scene: str
  colors: list[str]
  time_of_day: Literal['Morning', 'Afternoon', 'Evening', 'Night']
  setting: Literal['Indoor', 'Outdoor', 'Unknown']
  text_content: str | None = None


# Get path from user input
path = input('Enter the path to your image: ')
path = Path(path)

# Verify the file exists
if not path.exists():
  raise FileNotFoundError(f'Image not found at: {path}')

# Set up chat as usual
response = chat(
  model='llama3.2-vision',
  format=ImageDescription.model_json_schema(),  # Pass in the schema for the response
  messages=[
    {
      'role': 'user',
      'content': 'Analyze this image and return a detailed JSON description including objects, scene, colors and any text detected. If you cannot determine certain details, leave those fields empty.',
      'images': [path],
    },
  ],
  options={'temperature': 0},  # Set temperature to 0 for more deterministic output
)

# Convert received content to the schema
image_analysis = ImageDescription.model_validate_json(response.message.content)
print(image_analysis)

--- ollama-python-0.4.8/examples/structured-outputs.py ---
from pydantic import BaseModel

from ollama import chat


# Define the schema for the response
class FriendInfo(BaseModel):
  name: str
  age: int
  is_available: bool


class FriendList(BaseModel):
  friends: list[FriendInfo]


# schema = {'type': 'object', 'properties': {'friends': {'type': 'array', 'items': {'type': 'object', 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}, 'is_available': {'type': 'boolean'}}, 'required': ['name', 'age', 'is_available']}}}, 'required': ['friends']}
response = chat(
  model='llama3.1:8b',
  messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. Return a list of friends in JSON format'}],
  format=FriendList.model_json_schema(),  # Use Pydantic to generate the schema or format=schema
  options={'temperature': 0},  # Make responses more deterministic
)

# Use Pydantic to validate the response
friends_response = FriendList.model_validate_json(response.message.content)
print(friends_response)

--- ollama-python-0.4.8/examples/tools.py ---
from ollama import ChatResponse, chat


def add_two_numbers(a: int, b: int) -> int:
  """
  Add two numbers

  Args:
    a (int): The first number
    b (int): The second number

  Returns:
    int: The sum of the two numbers
  """
  # The cast is necessary as returned tool call arguments don't always conform exactly to schema
  # E.g. this would prevent "what is 30 + 12" to produce '3012' instead of 42
  return int(a) + int(b)


def subtract_two_numbers(a: int, b: int) -> int:
  """
  Subtract two numbers
  """
  # The cast is necessary as returned tool call arguments don't always conform exactly to schema
  return int(a) - int(b)


# Tools can still be manually defined and passed into chat
subtract_two_numbers_tool = {
  'type': 'function',
  'function': {
    'name': 'subtract_two_numbers',
    'description': 'Subtract two numbers',
    'parameters': {
      'type': 'object',
      'required': ['a', 'b'],
      'properties': {
        'a': {'type': 'integer', 'description': 'The first number'},
        'b': {'type': 'integer', 'description': 'The second number'},
      },
    },
  },
}

messages = [{'role': 'user', 'content': 'What is three plus one?'}]
print('Prompt:', messages[0]['content'])

available_functions = {
  'add_two_numbers': add_two_numbers,
  'subtract_two_numbers': subtract_two_numbers,
}

response: ChatResponse = chat(
  'llama3.1',
  messages=messages,
  tools=[add_two_numbers, subtract_two_numbers_tool],
)

if response.message.tool_calls:
  # There may be multiple tool calls in the response
  for tool in response.message.tool_calls:
    # Ensure the function is available, and then call it
    if function_to_call := available_functions.get(tool.function.name):
      print('Calling function:', tool.function.name)
      print('Arguments:', tool.function.arguments)
      output = function_to_call(**tool.function.arguments)
      print('Function output:', output)
    else:
      print('Function', tool.function.name, 'not found')

# Only needed to chat with the model using the tool call results
if response.message.tool_calls:
  # Add the function response to messages for the model to use
  messages.append(response.message)
  messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})

  # Get final response from model with function outputs
  final_response = chat('llama3.1', messages=messages)
  print('Final response:', final_response.message.content)

else:
  print('No tool calls returned from model')

--- ollama-python-0.4.8/ollama/__init__.py ---
from ollama._client import AsyncClient, Client
from ollama._types import (
  ChatResponse,
  EmbeddingsResponse,
  EmbedResponse,
  GenerateResponse,
  Image,
  ListResponse,
  Message,
  Options,
  ProcessResponse,
  ProgressResponse,
  RequestError,
  ResponseError,
  ShowResponse,
  StatusResponse,
  Tool,
)

__all__ = [
  'AsyncClient',
  'ChatResponse',
  'Client',
  'EmbedResponse',
  'EmbeddingsResponse',
  'GenerateResponse',
  'Image',
  'ListResponse',
  'Message',
  'Options',
  'ProcessResponse',
  'ProgressResponse',
  'RequestError',
  'ResponseError',
  'ShowResponse',
  'StatusResponse',
  'Tool',
]
_client = Client()

generate = _client.generate
chat = _client.chat
embed = _client.embed
embeddings = _client.embeddings
pull = _client.pull
push = _client.push
create = _client.create
delete = _client.delete
list = _client.list
copy = _client.copy
show = _client.show
ps = _client.ps

--- ollama-python-0.4.8/ollama/_client.py ---
import ipaddress
import json
import os
import platform
import sys
import urllib.parse
from hashlib import sha256
from os import PathLike
from pathlib import Path
from typing import (
  Any,
  Callable,
  Dict,
  List,
  Literal,
  Mapping,
  Optional,
  Sequence,
  Type,
  TypeVar,
  Union,
  overload,
)

from pydantic.json_schema import JsonSchemaValue

from ollama._utils import convert_function_to_tool

if sys.version_info < (3, 9):
  from typing import AsyncIterator, Iterator
else:
  from collections.abc import AsyncIterator, Iterator

from importlib import metadata

try:
  __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
  __version__ = '0.0.0'

import httpx

from ollama._types import (
  ChatRequest,
  ChatResponse,
  CopyRequest,
  CreateRequest,
  DeleteRequest,
  EmbeddingsRequest,
  EmbeddingsResponse,
  EmbedRequest,
  EmbedResponse,
  GenerateRequest,
  GenerateResponse,
  Image,
  ListResponse,
  Message,
  Options,
  ProcessResponse,
  ProgressResponse,
  PullRequest,
  PushRequest,
  ResponseError,
  ShowRequest,
  ShowResponse,
  StatusResponse,
  Tool,
)

T = TypeVar('T')


class BaseClient:
  def __init__(
    self,
    client,
    host: Optional[str] = None,
    follow_redirects: bool = True,
    timeout: Any = None,
    headers: Optional[Mapping[str, str]] = None,
    **kwargs,
  ) -> None:
    """
    Creates a httpx client. Default parameters are the same as those defined in httpx
    except for the following:
    - `follow_redirects`: True
    - `timeout`: None
    `kwargs` are passed to the httpx client.
    """
    self._client = client(
      base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
      follow_redirects=follow_redirects,
      timeout=timeout,
      # Lowercase all headers to ensure override
      headers={
        k.lower(): v
        for k, v in {
          **(headers or {}),
          'Content-Type': 'application/json',
          'Accept': 'application/json',
          'User-Agent': f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}',
        }.items()
      },
      **kwargs,
    )


CONNECTION_ERROR_MESSAGE = 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download'


class Client(BaseClient):
  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.Client, host, **kwargs)

  def _request_raw(self, *args, **kwargs):
    try:
      r = self._client.request(*args, **kwargs)
      r.raise_for_status()
      return r
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None
    except httpx.ConnectError:
      raise ConnectionError(CONNECTION_ERROR_MESSAGE) from None

  @overload
  def _request(
    self,
    cls: Type[T],
    *args,
    stream: Literal[False] = False,
    **kwargs,
  ) -> T: ...

  @overload
  def _request(
    self,
    cls: Type[T],
    *args,
    stream: Literal[True] = True,
    **kwargs,
  ) -> Iterator[T]: ...

  @overload
  def _request(
    self,
    cls: Type[T],
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[T, Iterator[T]]: ...
  def _request(
    self,
    cls: Type[T],
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[T, Iterator[T]]:
    if stream:

      def inner():
        with self._client.stream(*args, **kwargs) as r:
          try:
            r.raise_for_status()
          except httpx.HTTPStatusError as e:
            e.response.read()
            raise ResponseError(e.response.text, e.response.status_code) from None

          for line in r.iter_lines():
            part = json.loads(line)
            if err := part.get('error'):
              raise ResponseError(err)
            yield cls(**part)

      return inner()

    return cls(**self._request_raw(*args, **kwargs).json())

  @overload
  def generate(
    self,
    model: str = '',
    prompt: str = '',
    suffix: str = '',
    *,
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: Literal[False] = False,
    raw: bool = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    images: Optional[Sequence[Union[str, bytes, Image]]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> GenerateResponse: ...

  @overload
  def generate(
    self,
    model: str = '',
    prompt: str = '',
    suffix: str = '',
    *,
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: Literal[True] = True,
    raw: bool = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    images: Optional[Sequence[Union[str, bytes, Image]]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Iterator[GenerateResponse]: ...

  def generate(
    self,
    model: str = '',
    prompt: Optional[str] = None,
    suffix: Optional[str] = None,
    *,
    system: Optional[str] = None,
    template: Optional[str] = None,
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: Optional[bool] = None,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    images: Optional[Sequence[Union[str, bytes, Image]]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
    """
    return self._request(
      GenerateResponse,
      'POST',
      '/api/generate',
      json=GenerateRequest(
        model=model,
        prompt=prompt,
        suffix=suffix,
        system=system,
        template=template,
        context=context,
        stream=stream,
        raw=raw,
        format=format,
        images=[image for image in _copy_images(images)] if images else None,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  @overload
  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: Literal[False] = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> ChatResponse: ...

  @overload
  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: Literal[True] = True,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Iterator[ChatResponse]: ...
  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: bool = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[ChatResponse, Iterator[ChatResponse]]:
    """
    Create a chat response using the requested model.

    Args:
      tools:
        A JSON schema as a dict, an Ollama Tool or a Python Function.
        Python functions need to follow Google style docstrings to be converted to an Ollama Tool.
        For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings
      stream: Whether to stream the response.
      format: The format of the response.

    Example:
      def add_two_numbers(a: int, b: int) -> int:
        '''
        Add two numbers together.

        Args:
          a: First number to add
          b: Second number to add

        Returns:
          int: The sum of a and b
        '''
        return a + b

      client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...])

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
    """
    return self._request(
      ChatResponse,
      'POST',
      '/api/chat',
      json=ChatRequest(
        model=model,
        messages=[message for message in _copy_messages(messages)],
        tools=[tool for tool in _copy_tools(tools)],
        stream=stream,
        format=format,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  def embed(
    self,
    model: str = '',
    input: Union[str, Sequence[str]] = '',
    truncate: Optional[bool] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> EmbedResponse:
    return self._request(
      EmbedResponse,
      'POST',
      '/api/embed',
      json=EmbedRequest(
        model=model,
        input=input,
        truncate=truncate,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
    )

  def embeddings(
    self,
    model: str = '',
    prompt: Optional[str] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> EmbeddingsResponse:
    """
    Deprecated in favor of `embed`.
    """
    return self._request(
      EmbeddingsResponse,
      'POST',
      '/api/embeddings',
      json=EmbeddingsRequest(
        model=model,
        prompt=prompt,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
    )

  @overload
  def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[False] = False,
  ) -> ProgressResponse: ...

  @overload
  def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[True] = True,
  ) -> Iterator[ProgressResponse]: ...

  def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[ProgressResponse, Iterator[ProgressResponse]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request(
      ProgressResponse,
      'POST',
      '/api/pull',
      json=PullRequest(
        model=model,
        insecure=insecure,
        stream=stream,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  @overload
  def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[False] = False,
  ) -> ProgressResponse: ...

  @overload
  def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[True] = True,
  ) -> Iterator[ProgressResponse]: ...
  def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[ProgressResponse, Iterator[ProgressResponse]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request(
      ProgressResponse,
      'POST',
      '/api/push',
      json=PushRequest(
        model=model,
        insecure=insecure,
        stream=stream,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  @overload
  def create(
    self,
    model: str,
    quantize: Optional[str] = None,
    from_: Optional[str] = None,
    files: Optional[Dict[str, str]] = None,
    adapters: Optional[Dict[str, str]] = None,
    template: Optional[str] = None,
    license: Optional[Union[str, List[str]]] = None,
    system: Optional[str] = None,
    parameters: Optional[Union[Mapping[str, Any], Options]] = None,
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    stream: Literal[False] = False,
  ) -> ProgressResponse: ...

  @overload
  def create(
    self,
    model: str,
    quantize: Optional[str] = None,
    from_: Optional[str] = None,
    files: Optional[Dict[str, str]] = None,
    adapters: Optional[Dict[str, str]] = None,
    template: Optional[str] = None,
    license: Optional[Union[str, List[str]]] = None,
    system: Optional[str] = None,
    parameters: Optional[Union[Mapping[str, Any], Options]] = None,
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    stream: Literal[True] = True,
  ) -> Iterator[ProgressResponse]: ...

  def create(
    self,
    model: str,
    quantize: Optional[str] = None,
    from_: Optional[str] = None,
    files: Optional[Dict[str, str]] = None,
    adapters: Optional[Dict[str, str]] = None,
    template: Optional[str] = None,
    license: Optional[Union[str, List[str]]] = None,
    system: Optional[str] = None,
    parameters: Optional[Union[Mapping[str, Any], Options]] = None,
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    stream: bool = False,
  ) -> Union[ProgressResponse, Iterator[ProgressResponse]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return self._request( ProgressResponse, 'POST', '/api/create', json=CreateRequest( model=model, stream=stream, quantize=quantize, from_=from_, files=files, adapters=adapters, license=license, template=template, system=system, parameters=parameters, messages=messages, ).model_dump(exclude_none=True), stream=stream, ) def create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' with open(path, 'rb') as r: self._request_raw('POST', f'/api/blobs/{digest}', content=r) return digest def list(self) -> ListResponse: return self._request( ListResponse, 'GET', '/api/tags', ) def delete(self, model: str) -> StatusResponse: r = self._request_raw( 'DELETE', '/api/delete', json=DeleteRequest( model=model, ).model_dump(exclude_none=True), ) return StatusResponse( status='success' if r.status_code == 200 else 'error', ) def copy(self, source: str, destination: str) -> StatusResponse: r = self._request_raw( 'POST', '/api/copy', json=CopyRequest( source=source, destination=destination, ).model_dump(exclude_none=True), ) return StatusResponse( status='success' if r.status_code == 200 else 'error', ) def show(self, model: str) -> ShowResponse: return self._request( ShowResponse, 'POST', '/api/show', json=ShowRequest( model=model, ).model_dump(exclude_none=True), ) def ps(self) -> ProcessResponse: return self._request( ProcessResponse, 'GET', '/api/ps', ) class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request_raw(self, *args, **kwargs): try: r = await self._client.request(*args, **kwargs) r.raise_for_status() return r except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None except httpx.ConnectError: raise ConnectionError(CONNECTION_ERROR_MESSAGE) from None @overload async def _request( self, cls: Type[T], *args, stream: Literal[False] = False, **kwargs, ) -> T: ... @overload async def _request( self, cls: Type[T], *args, stream: Literal[True] = True, **kwargs, ) -> AsyncIterator[T]: ... @overload async def _request( self, cls: Type[T], *args, stream: bool = False, **kwargs, ) -> Union[T, AsyncIterator[T]]: ... async def _request( self, cls: Type[T], *args, stream: bool = False, **kwargs, ) -> Union[T, AsyncIterator[T]]: if stream: async def inner(): async with self._client.stream(*args, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: await e.response.aread() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): part = json.loads(line) if err := part.get('error'): raise ResponseError(err) yield cls(**part) return inner() return cls(**(await self._request_raw(*args, **kwargs)).json()) @overload async def generate( self, model: str = '', prompt: str = '', suffix: str = '', *, system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: Literal[False] = False, raw: bool = False, format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, images: Optional[Sequence[Union[str, bytes, Image]]] = None, options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, ) -> GenerateResponse: ... 
  @overload
  async def generate(
    self,
    model: str = '',
    prompt: str = '',
    suffix: str = '',
    *,
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: Literal[True] = True,
    raw: bool = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    images: Optional[Sequence[Union[str, bytes, Image]]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> AsyncIterator[GenerateResponse]: ...

  async def generate(
    self,
    model: str = '',
    prompt: Optional[str] = None,
    suffix: Optional[str] = None,
    *,
    system: Optional[str] = None,
    template: Optional[str] = None,
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: Optional[bool] = None,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    images: Optional[Sequence[Union[str, bytes, Image]]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[GenerateResponse, AsyncIterator[GenerateResponse]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
    """
    return await self._request(
      GenerateResponse,
      'POST',
      '/api/generate',
      json=GenerateRequest(
        model=model,
        prompt=prompt,
        suffix=suffix,
        system=system,
        template=template,
        context=context,
        stream=stream,
        raw=raw,
        format=format,
        images=[image for image in _copy_images(images)] if images else None,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  @overload
  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: Literal[False] = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> ChatResponse: ...

  @overload
  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: Literal[True] = True,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> AsyncIterator[ChatResponse]: ...

  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
    stream: bool = False,
    format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[ChatResponse, AsyncIterator[ChatResponse]]:
    """
    Create a chat response using the requested model.

    Args:
      tools:
        A JSON schema as a dict, an Ollama Tool or a Python Function.
        Python functions need to follow Google style docstrings to be converted to an Ollama Tool.
        For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings
      stream: Whether to stream the response.
      format: The format of the response.

    Example:
      def add_two_numbers(a: int, b: int) -> int:
        '''
        Add two numbers together.
        Args:
          a: First number to add
          b: Second number to add

        Returns:
          int: The sum of a and b
        '''
        return a + b

      await client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...])

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
    """
    return await self._request(
      ChatResponse,
      'POST',
      '/api/chat',
      json=ChatRequest(
        model=model,
        messages=[message for message in _copy_messages(messages)],
        tools=[tool for tool in _copy_tools(tools)],
        stream=stream,
        format=format,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  async def embed(
    self,
    model: str = '',
    input: Union[str, Sequence[str]] = '',
    truncate: Optional[bool] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> EmbedResponse:
    return await self._request(
      EmbedResponse,
      'POST',
      '/api/embed',
      json=EmbedRequest(
        model=model,
        input=input,
        truncate=truncate,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
    )

  async def embeddings(
    self,
    model: str = '',
    prompt: Optional[str] = None,
    options: Optional[Union[Mapping[str, Any], Options]] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> EmbeddingsResponse:
    """
    Deprecated in favor of `embed`.
    """
    return await self._request(
      EmbeddingsResponse,
      'POST',
      '/api/embeddings',
      json=EmbeddingsRequest(
        model=model,
        prompt=prompt,
        options=options,
        keep_alive=keep_alive,
      ).model_dump(exclude_none=True),
    )

  @overload
  async def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[False] = False,
  ) -> ProgressResponse: ...

  @overload
  async def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[True] = True,
  ) -> AsyncIterator[ProgressResponse]: ...

  async def pull(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request(
      ProgressResponse,
      'POST',
      '/api/pull',
      json=PullRequest(
        model=model,
        insecure=insecure,
        stream=stream,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  @overload
  async def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[False] = False,
  ) -> ProgressResponse: ...

  @overload
  async def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: Literal[True] = True,
  ) -> AsyncIterator[ProgressResponse]: ...

  async def push(
    self,
    model: str,
    *,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request( ProgressResponse, 'POST', '/api/push', json=PushRequest( model=model, insecure=insecure, stream=stream, ).model_dump(exclude_none=True), stream=stream, ) @overload async def create( self, model: str, quantize: Optional[str] = None, from_: Optional[str] = None, files: Optional[Dict[str, str]] = None, adapters: Optional[Dict[str, str]] = None, template: Optional[str] = None, license: Optional[Union[str, List[str]]] = None, system: Optional[str] = None, parameters: Optional[Union[Mapping[str, Any], Options]] = None, messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, *, stream: Literal[True] = True, ) -> ProgressResponse: ... @overload async def create( self, model: str, quantize: Optional[str] = None, from_: Optional[str] = None, files: Optional[Dict[str, str]] = None, adapters: Optional[Dict[str, str]] = None, template: Optional[str] = None, license: Optional[Union[str, List[str]]] = None, system: Optional[str] = None, parameters: Optional[Union[Mapping[str, Any], Options]] = None, messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, *, stream: Literal[True] = True, ) -> AsyncIterator[ProgressResponse]: ... async def create( self, model: str, quantize: Optional[str] = None, from_: Optional[str] = None, files: Optional[Dict[str, str]] = None, adapters: Optional[Dict[str, str]] = None, template: Optional[str] = None, license: Optional[Union[str, List[str]]] = None, system: Optional[str] = None, parameters: Optional[Union[Mapping[str, Any], Options]] = None, messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, *, stream: bool = False, ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request( ProgressResponse, 'POST', '/api/create', json=CreateRequest( model=model, stream=stream, quantize=quantize, from_=from_, files=files, adapters=adapters, license=license, template=template, system=system, parameters=parameters, messages=messages, ).model_dump(exclude_none=True), stream=stream, ) async def create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request_raw('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def list(self) -> ListResponse: return await self._request( ListResponse, 'GET', '/api/tags', ) async def delete(self, model: str) -> StatusResponse: r = await self._request_raw( 'DELETE', '/api/delete', json=DeleteRequest( model=model, ).model_dump(exclude_none=True), ) return StatusResponse( status='success' if r.status_code == 200 else 'error', ) async def copy(self, source: str, destination: str) -> StatusResponse: r = await self._request_raw( 'POST', '/api/copy', json=CopyRequest( source=source, destination=destination, ).model_dump(exclude_none=True), ) return StatusResponse( status='success' if r.status_code == 200 else 'error', ) async def show(self, model: str) -> ShowResponse: return await self._request( ShowResponse, 'POST', '/api/show', json=ShowRequest( model=model, ).model_dump(exclude_none=True), ) async def ps(self) -> ProcessResponse: return await self._request( ProcessResponse, 'GET', '/api/ps', ) def _copy_images(images: Optional[Sequence[Union[Image, Any]]]) -> Iterator[Image]: for image in images or []: yield image if isinstance(image, Image) else Image(value=image) def _copy_messages(messages: Optional[Sequence[Union[Mapping[str, Any], Message]]]) -> Iterator[Message]: for message in messages or []: yield Message.model_validate( {k: [image for image in _copy_images(v)] if k == 'images' else v for k, v in dict(message).items() if v}, ) def _copy_tools(tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None) -> Iterator[Tool]: for unprocessed_tool in tools or []: yield convert_function_to_tool(unprocessed_tool) if callable(unprocessed_tool) else Tool.model_validate(unprocessed_tool) def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
  return None


def _parse_host(host: Optional[str]) -> str:
  """
  >>> _parse_host(None)
  'http://127.0.0.1:11434'
  >>> _parse_host('')
  'http://127.0.0.1:11434'
  >>> _parse_host('1.2.3.4')
  'http://1.2.3.4:11434'
  >>> _parse_host(':56789')
  'http://127.0.0.1:56789'
  >>> _parse_host('1.2.3.4:56789')
  'http://1.2.3.4:56789'
  >>> _parse_host('http://1.2.3.4')
  'http://1.2.3.4:80'
  >>> _parse_host('https://1.2.3.4')
  'https://1.2.3.4:443'
  >>> _parse_host('https://1.2.3.4:56789')
  'https://1.2.3.4:56789'
  >>> _parse_host('example.com')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789')
  'http://example.com:56789'
  >>> _parse_host('http://example.com')
  'http://example.com:80'
  >>> _parse_host('https://example.com')
  'https://example.com:443'
  >>> _parse_host('https://example.com:56789')
  'https://example.com:56789'
  >>> _parse_host('example.com/')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789/')
  'http://example.com:56789'
  >>> _parse_host('example.com/path')
  'http://example.com:11434/path'
  >>> _parse_host('example.com:56789/path')
  'http://example.com:56789/path'
  >>> _parse_host('https://example.com:56789/path')
  'https://example.com:56789/path'
  >>> _parse_host('example.com:56789/path/')
  'http://example.com:56789/path'
  >>> _parse_host('[0001:002:003:0004::1]')
  'http://[0001:002:003:0004::1]:11434'
  >>> _parse_host('[0001:002:003:0004::1]:56789')
  'http://[0001:002:003:0004::1]:56789'
  >>> _parse_host('http://[0001:002:003:0004::1]')
  'http://[0001:002:003:0004::1]:80'
  >>> _parse_host('https://[0001:002:003:0004::1]')
  'https://[0001:002:003:0004::1]:443'
  >>> _parse_host('https://[0001:002:003:0004::1]:56789')
  'https://[0001:002:003:0004::1]:56789'
  >>> _parse_host('[0001:002:003:0004::1]/')
  'http://[0001:002:003:0004::1]:11434'
  >>> _parse_host('[0001:002:003:0004::1]:56789/')
  'http://[0001:002:003:0004::1]:56789'
  >>> _parse_host('[0001:002:003:0004::1]/path')
  'http://[0001:002:003:0004::1]:11434/path'
  >>> _parse_host('[0001:002:003:0004::1]:56789/path')
  'http://[0001:002:003:0004::1]:56789/path'
  >>> _parse_host('https://[0001:002:003:0004::1]:56789/path')
  'https://[0001:002:003:0004::1]:56789/path'
  >>> _parse_host('[0001:002:003:0004::1]:56789/path/')
  'http://[0001:002:003:0004::1]:56789/path'
  """
  host, port = host or '', 11434
  scheme, _, hostport = host.partition('://')
  if not hostport:
    scheme, hostport = 'http', host
  elif scheme == 'http':
    port = 80
  elif scheme == 'https':
    port = 443

  split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
  host = split.hostname or '127.0.0.1'
  port = split.port or port

  try:
    if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address):
      # Fix missing square brackets for IPv6 from urlsplit
      host = f'[{host}]'
  except ValueError:
    ...
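  # Re-attach any path component from the original value, stripped of surrounding slashes.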
  if path := split.path.strip('/'):
    return f'{scheme}://{host}:{port}/{path}'

  return f'{scheme}://{host}:{port}'
ollama-python-0.4.8/ollama/_types.py000066400000000000000000000340521500001205700174230ustar00rootroot00000000000000import json
from base64 import b64decode, b64encode
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union

from pydantic import (
  BaseModel,
  ByteSize,
  ConfigDict,
  Field,
  model_serializer,
)
from pydantic.json_schema import JsonSchemaValue
from typing_extensions import Annotated, Literal


class SubscriptableBaseModel(BaseModel):
  def __getitem__(self, key: str) -> Any:
    """
    >>> msg = Message(role='user')
    >>> msg['role']
    'user'
    >>> msg = Message(role='user')
    >>> msg['nonexistent']
    Traceback (most recent call last):
    KeyError: 'nonexistent'
    """
    if key in self:
      return getattr(self, key)

    raise KeyError(key)

  def __setitem__(self, key: str, value: Any) -> None:
    """
    >>> msg = Message(role='user')
    >>> msg['role'] = 'assistant'
    >>> msg['role']
    'assistant'
    >>> tool_call = Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))
    >>> msg = Message(role='user', content='hello')
    >>> msg['tool_calls'] = [tool_call]
    >>> msg['tool_calls'][0]['function']['name']
    'foo'
    """
    setattr(self, key, value)

  def __contains__(self, key: str) -> bool:
    """
    >>> msg = Message(role='user')
    >>> 'nonexistent' in msg
    False
    >>> 'role' in msg
    True
    >>> 'content' in msg
    False
    >>> msg.content = 'hello!'
    >>> 'content' in msg
    True
    >>> msg = Message(role='user', content='hello!')
    >>> 'content' in msg
    True
    >>> 'tool_calls' in msg
    False
    >>> msg['tool_calls'] = []
    >>> 'tool_calls' in msg
    True
    >>> msg['tool_calls'] = [Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))]
    >>> 'tool_calls' in msg
    True
    >>> msg['tool_calls'] = None
    >>> 'tool_calls' in msg
    True
    >>> tool = Tool()
    >>> 'type' in tool
    True
    """
    if key in self.model_fields_set:
      return True

    if key in self.model_fields:
      return self.model_fields[key].default is not None

    return False

  def get(self, key: str, default: Any = None) -> Any:
    """
    >>> msg = Message(role='user')
    >>> msg.get('role')
    'user'
    >>> msg = Message(role='user')
    >>> msg.get('nonexistent')
    >>> msg = Message(role='user')
    >>> msg.get('nonexistent', 'default')
    'default'
    >>> msg = Message(role='user', tool_calls=[Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))])
    >>> msg.get('tool_calls')[0]['function']['name']
    'foo'
    """
    return self[key] if key in self else default


class Options(SubscriptableBaseModel):
  # load time options
  numa: Optional[bool] = None
  num_ctx: Optional[int] = None
  num_batch: Optional[int] = None
  num_gpu: Optional[int] = None
  main_gpu: Optional[int] = None
  low_vram: Optional[bool] = None
  f16_kv: Optional[bool] = None
  logits_all: Optional[bool] = None
  vocab_only: Optional[bool] = None
  use_mmap: Optional[bool] = None
  use_mlock: Optional[bool] = None
  embedding_only: Optional[bool] = None
  num_thread: Optional[int] = None

  # runtime options
  num_keep: Optional[int] = None
  seed: Optional[int] = None
  num_predict: Optional[int] = None
  top_k: Optional[int] = None
  top_p: Optional[float] = None
  tfs_z: Optional[float] = None
  typical_p: Optional[float] = None
  repeat_last_n: Optional[int] = None
  temperature: Optional[float] = None
  repeat_penalty: Optional[float] = None
  presence_penalty: Optional[float] = None
  frequency_penalty: Optional[float] = None
  mirostat: Optional[int] = None
  mirostat_tau: Optional[float] = None
  mirostat_eta: Optional[float] = None
  penalize_newline: Optional[bool] = None
  stop: Optional[Sequence[str]] = None


class BaseRequest(SubscriptableBaseModel):
  model: Annotated[str, Field(min_length=1)]
  'Model to use for the request.'


class BaseStreamableRequest(BaseRequest):
  stream: Optional[bool] = None
  'Stream response.'


class BaseGenerateRequest(BaseStreamableRequest):
  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None
  'Format of the response.'

  keep_alive: Optional[Union[float, str]] = None
  'Keep model alive for the specified duration.'


class Image(BaseModel):
  value: Union[str, bytes, Path]

  @model_serializer
  def serialize_model(self):
    if isinstance(self.value, (Path, bytes)):
      return b64encode(self.value.read_bytes() if isinstance(self.value, Path) else self.value).decode()

    if isinstance(self.value, str):
      try:
        if Path(self.value).exists():
          return b64encode(Path(self.value).read_bytes()).decode()
      except Exception:
        # Long base64 string can't be wrapped in Path, so try to treat as base64 string
        pass

      # String might be a file path, but might not exist
      if self.value.split('.')[-1] in ('png', 'jpg', 'jpeg', 'webp'):
        raise ValueError(f'File {self.value} does not exist')

      try:
        # Try to decode to check if it's already base64
        b64decode(self.value)
        return self.value
      except Exception as e:
        raise ValueError('Invalid image data, expected base64 string or path to image file') from e


class GenerateRequest(BaseGenerateRequest):
  prompt: Optional[str] = None
  'Prompt to generate response from.'

  suffix: Optional[str] = None
  'Suffix to append to the response.'

  system: Optional[str] = None
  'System prompt to prepend to the prompt.'

  template: Optional[str] = None
  'Template to use for the response.'

  context: Optional[Sequence[int]] = None
  'Tokenized history to use for the response.'

  raw: Optional[bool] = None

  images: Optional[Sequence[Image]] = None
  'Image data for multimodal models.'


class BaseGenerateResponse(SubscriptableBaseModel):
  model: Optional[str] = None
  'Model used to generate response.'

  created_at: Optional[str] = None
  'Time when the request was created.'

  done: Optional[bool] = None
  'True if response is complete, otherwise False. Useful for streaming to detect the final response.'

  done_reason: Optional[str] = None
  'Reason for completion. Only present when done is True.'

  total_duration: Optional[int] = None
  'Total duration in nanoseconds.'

  load_duration: Optional[int] = None
  'Load duration in nanoseconds.'

  prompt_eval_count: Optional[int] = None
  'Number of tokens evaluated in the prompt.'

  prompt_eval_duration: Optional[int] = None
  'Duration of evaluating the prompt in nanoseconds.'

  eval_count: Optional[int] = None
  'Number of tokens evaluated in inference.'

  eval_duration: Optional[int] = None
  'Duration of evaluating inference in nanoseconds.'


class GenerateResponse(BaseGenerateResponse):
  """
  Response returned by generate requests.
  """

  response: str
  'Response content. When streaming, this contains a fragment of the response.'

  context: Optional[Sequence[int]] = None
  'Tokenized history up to the point of the response.'


class Message(SubscriptableBaseModel):
  """
  Chat message.
  """

  role: str
  "Assumed role of the message. Response messages have role 'assistant' or 'tool'."

  content: Optional[str] = None
  'Content of the message. Response messages contain message fragments when streaming.'

  images: Optional[Sequence[Image]] = None
  """
  Optional list of image data for multimodal models.

  Valid input types are:

  - `str` or path-like object: path to image file
  - `bytes` or bytes-like object: raw image data

  Valid image formats depend on the model. See the model card for more information.
  """

  class ToolCall(SubscriptableBaseModel):
    """
    Model tool calls.
    """

    class Function(SubscriptableBaseModel):
      """
      Tool call function.
      """

      name: str
      'Name of the function.'

      arguments: Mapping[str, Any]
      'Arguments of the function.'

    function: Function
    'Function to be called.'

  tool_calls: Optional[Sequence[ToolCall]] = None
  """
  Tool calls to be made by the model.
  """


class Tool(SubscriptableBaseModel):
  type: Optional[Literal['function']] = 'function'

  class Function(SubscriptableBaseModel):
    name: Optional[str] = None
    description: Optional[str] = None

    class Parameters(SubscriptableBaseModel):
      model_config = ConfigDict(populate_by_name=True)
      type: Optional[Literal['object']] = 'object'
      defs: Optional[Any] = Field(None, alias='$defs')
      items: Optional[Any] = None
      required: Optional[Sequence[str]] = None

      class Property(SubscriptableBaseModel):
        model_config = ConfigDict(arbitrary_types_allowed=True)

        type: Optional[Union[str, Sequence[str]]] = None
        items: Optional[Any] = None
        description: Optional[str] = None
        enum: Optional[Sequence[Any]] = None

      properties: Optional[Mapping[str, Property]] = None

    parameters: Optional[Parameters] = None

  function: Optional[Function] = None


class ChatRequest(BaseGenerateRequest):
  @model_serializer(mode='wrap')
  def serialize_model(self, nxt):
    output = nxt(self)
    if 'tools' in output and output['tools']:
      for tool in output['tools']:
        if 'function' in tool and 'parameters' in tool['function'] and 'defs' in tool['function']['parameters']:
          tool['function']['parameters']['$defs'] = tool['function']['parameters'].pop('defs')
    return output

  messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None
  'Messages to chat with.'

  tools: Optional[Sequence[Tool]] = None
  'Tools to use for the chat.'


class ChatResponse(BaseGenerateResponse):
  """
  Response returned by chat requests.
  """

  message: Message
  'Response message.'


class EmbedRequest(BaseRequest):
  input: Union[str, Sequence[str]]
  'Input text to embed.'

  truncate: Optional[bool] = None
  'Truncate the input to the maximum token length.'

  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  keep_alive: Optional[Union[float, str]] = None


class EmbedResponse(BaseGenerateResponse):
  """
  Response returned by embed requests.
  """

  embeddings: Sequence[Sequence[float]]
  'Embeddings of the inputs.'


class EmbeddingsRequest(BaseRequest):
  prompt: Optional[str] = None
  'Prompt to generate embeddings from.'

  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  keep_alive: Optional[Union[float, str]] = None


class EmbeddingsResponse(SubscriptableBaseModel):
  """
  Response returned by embeddings requests.
  """

  embedding: Sequence[float]
  'Embedding of the prompt.'


class PullRequest(BaseStreamableRequest):
  """
  Request to pull the model.
  """

  insecure: Optional[bool] = None
  'Allow insecure (HTTP) connections.'


class PushRequest(BaseStreamableRequest):
  """
  Request to push the model.
  """

  insecure: Optional[bool] = None
  'Allow insecure (HTTP) connections.'


class CreateRequest(BaseStreamableRequest):
  @model_serializer(mode='wrap')
  def serialize_model(self, nxt):
    output = nxt(self)
    if 'from_' in output:
      output['from'] = output.pop('from_')
    return output

  """
  Request to create a new model.
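
  Since `from` is a reserved keyword in Python, this request exposes the field as
  `from_` and serializes it as `from` (see `serialize_model` above).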
""" quantize: Optional[str] = None from_: Optional[str] = None files: Optional[Dict[str, str]] = None adapters: Optional[Dict[str, str]] = None template: Optional[str] = None license: Optional[Union[str, List[str]]] = None system: Optional[str] = None parameters: Optional[Union[Mapping[str, Any], Options]] = None messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None class ModelDetails(SubscriptableBaseModel): parent_model: Optional[str] = None format: Optional[str] = None family: Optional[str] = None families: Optional[Sequence[str]] = None parameter_size: Optional[str] = None quantization_level: Optional[str] = None class ListResponse(SubscriptableBaseModel): class Model(SubscriptableBaseModel): model: Optional[str] = None modified_at: Optional[datetime] = None digest: Optional[str] = None size: Optional[ByteSize] = None details: Optional[ModelDetails] = None models: Sequence[Model] 'List of models.' class DeleteRequest(BaseRequest): """ Request to delete a model. """ class CopyRequest(BaseModel): """ Request to copy a model. """ source: str 'Source model to copy.' destination: str 'Destination model to copy to.' class StatusResponse(SubscriptableBaseModel): status: Optional[str] = None class ProgressResponse(StatusResponse): completed: Optional[int] = None total: Optional[int] = None digest: Optional[str] = None class ShowRequest(BaseRequest): """ Request to show model information. """ class ShowResponse(SubscriptableBaseModel): modified_at: Optional[datetime] = None template: Optional[str] = None modelfile: Optional[str] = None license: Optional[str] = None details: Optional[ModelDetails] = None modelinfo: Optional[Mapping[str, Any]] = Field(alias='model_info') parameters: Optional[str] = None class ProcessResponse(SubscriptableBaseModel): class Model(SubscriptableBaseModel): model: Optional[str] = None name: Optional[str] = None digest: Optional[str] = None expires_at: Optional[datetime] = None size: Optional[ByteSize] = None size_vram: Optional[ByteSize] = None details: Optional[ModelDetails] = None models: Sequence[Model] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' 
  def __str__(self) -> str:
    return f'{self.error} (status code: {self.status_code})'
ollama-python-0.4.8/ollama/_utils.py000066400000000000000000000053001500001205700174110ustar00rootroot00000000000000from __future__ import annotations

import inspect
import re
from collections import defaultdict
from typing import Callable, Union

import pydantic

from ollama._types import Tool


def _parse_docstring(doc_string: Union[str, None]) -> dict[str, str]:
  parsed_docstring = defaultdict(str)
  if not doc_string:
    return parsed_docstring

  key = hash(doc_string)
  for line in doc_string.splitlines():
    lowered_line = line.lower().strip()
    if lowered_line.startswith('args:'):
      key = 'args'
    elif lowered_line.startswith('returns:') or lowered_line.startswith('yields:') or lowered_line.startswith('raises:'):
      key = '_'
    else:
      # maybe change to a list and join later
      parsed_docstring[key] += f'{line.strip()}\n'

  last_key = None
  for line in parsed_docstring['args'].splitlines():
    line = line.strip()
    if ':' in line:
      # Split the line on either:
      # 1. A parenthetical expression like (integer) - captured in group 1
      # 2. A colon :
      # Followed by optional whitespace. Only split on first occurrence.
      parts = re.split(r'(?:\(([^)]*)\)|:)\s*', line, maxsplit=1)

      arg_name = parts[0].strip()
      last_key = arg_name

      # Get the description - will be in parts[1] if parenthetical or parts[-1] if after colon
      arg_description = parts[-1].strip()
      if len(parts) > 2 and parts[1]:  # Has parenthetical content
        arg_description = parts[-1].split(':', 1)[-1].strip()

      parsed_docstring[last_key] = arg_description

    elif last_key and line:
      parsed_docstring[last_key] += ' ' + line

  return parsed_docstring


def convert_function_to_tool(func: Callable) -> Tool:
  doc_string_hash = hash(inspect.getdoc(func))
  parsed_docstring = _parse_docstring(inspect.getdoc(func))
  schema = type(
    func.__name__,
    (pydantic.BaseModel,),
    {
      '__annotations__': {k: v.annotation if v.annotation != inspect._empty else str for k, v in inspect.signature(func).parameters.items()},
      '__signature__': inspect.signature(func),
      '__doc__': parsed_docstring[doc_string_hash],
    },
  ).model_json_schema()

  for k, v in schema.get('properties', {}).items():
    # If type is missing, the default is string
    types = {t.get('type', 'string') for t in v.get('anyOf')} if 'anyOf' in v else {v.get('type', 'string')}
    if 'null' in types:
      schema['required'].remove(k)
      types.discard('null')

    schema['properties'][k] = {
      'description': parsed_docstring[k],
      'type': ', '.join(types),
    }

  tool = Tool(
    function=Tool.Function(
      name=func.__name__,
      description=schema.get('description', ''),
      parameters=Tool.Function.Parameters(**schema),
    )
  )

  return Tool.model_validate(tool)
ollama-python-0.4.8/ollama/py.typed000066400000000000000000000000001500001205700172270ustar00rootroot00000000000000ollama-python-0.4.8/poetry.lock000066400000000000000000001610451500001205700165000ustar00rootroot00000000000000# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]] name = "annotated-types" version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.26.1)"] [[package]] name = "certifi" version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" groups = ["main"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["dev"] markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "coverage" version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = 
"sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", 
hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "exceptiongroup" version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", "dev"] markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] [[package]] name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] [[package]] name = "httpcore" version = "1.0.8" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"}, {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"}, ] [package.dependencies] certifi = "*" h11 = ">=0.13,<0.15" [package.extras] asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] [[package]] name = "iniconfig" version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, ] [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" groups = ["dev"] files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, {file = 
"MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] name = "packaging" version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] name = "pluggy" version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] [package.dependencies] annotated-types = ">=0.6.0" pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, {file = 
"pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = 
"sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pytest" version = "8.3.5" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=1.5,<2" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" version = "0.24.0" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"}, {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"}, ] [package.dependencies] pytest = ">=8.2,<9" [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "pytest-cov" version = "5.0.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, ] [package.dependencies] coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-httpserver" version = "1.1.1" description = "pytest-httpserver is a httpserver for pytest" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pytest_httpserver-1.1.1-py3-none-any.whl", hash = "sha256:aadc744bfac773a2ea93d05c2ef51fa23c087e3cc5dace3ea9d45cdd4bfe1fe8"}, {file = "pytest_httpserver-1.1.1.tar.gz", hash = "sha256:e5c46c62c0aa65e5d4331228cb2cb7db846c36e429c3e74ca806f284806bf7c6"}, ] [package.dependencies] Werkzeug = ">=2.0.0" [[package]] name = "ruff" version = "0.9.10" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" groups = ["dev"] files = [ {file = "ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d"}, {file = "ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d"}, {file = "ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1"}, {file = "ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c"}, {file = "ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43"}, {file = "ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c"}, {file = "ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5"}, {file = "ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8"}, {file = "ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029"}, {file = "ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1"}, {file = "ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69"}, {file = "ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7"}, ] [[package]] name = "sniffio" version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] [[package]] name = "tomli" version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["dev"] markers = "python_full_version <= \"3.11.0a6\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] [[package]] name = "typing-extensions" version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" groups = ["main"] files = [ {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] [[package]] name = "werkzeug" version = "3.0.6" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, ] [package.dependencies] MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] [metadata] lock-version = "2.1" python-versions = "^3.8" content-hash = "ff70c562172e38058111703d8219e18a0ed367e8e3ff647d7eff457d3bf3204e" ollama-python-0.4.8/pyproject.toml000066400000000000000000000020531500001205700172110ustar00rootroot00000000000000[tool.poetry] name = "ollama" version = "0.0.0" description = "The official Python client for Ollama." 
authors = ["Ollama "] license = "MIT" readme = "README.md" homepage = "https://ollama.com" repository = "https://github.com/ollama/ollama-python" [tool.poetry.dependencies] python = "^3.8" httpx = ">=0.27,<0.29" pydantic = "^2.9.0" [tool.poetry.requires-plugins] poetry-plugin-export = ">=1.8" [tool.poetry.group.dev.dependencies] pytest = ">=7.4.3,<9.0.0" pytest-asyncio = ">=0.23.2,<0.25.0" pytest-cov = ">=4.1,<6.0" pytest-httpserver = "^1.0.8" ruff = ">=0.9.1,<0.10.0" [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.ruff] line-length = 999 indent-width = 2 [tool.ruff.format] quote-style = "single" indent-style = "space" [tool.ruff.lint] select = [ "E", # pycodestyle errors "F", # pyflakes "B", # bugbear (likely bugs) "I", # sort imports "RUF022", # sort __all__ ] ignore = [ "E501", # line too long ] [tool.pytest.ini_options] addopts = '--doctest-modules --ignore examples' ollama-python-0.4.8/requirements.txt000066400000000000000000000257211500001205700175700ustar00rootroot00000000000000annotated-types==0.7.0 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 anyio==4.5.2 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b \ --hash=sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f certifi==2025.1.31 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651 \ --hash=sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe exceptiongroup==1.2.2 ; python_version >= "3.8" and python_version < "3.11" \ --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc h11==0.14.0 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 httpcore==1.0.8 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be \ --hash=sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad httpx==0.28.1 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc \ --hash=sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad idna==3.10 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 pydantic-core==2.27.2 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278 \ --hash=sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50 \ --hash=sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9 \ --hash=sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f \ --hash=sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6 \ --hash=sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc \ --hash=sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54 \ 
--hash=sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630 \ --hash=sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9 \ --hash=sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236 \ --hash=sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7 \ --hash=sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee \ --hash=sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b \ --hash=sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048 \ --hash=sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc \ --hash=sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130 \ --hash=sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4 \ --hash=sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd \ --hash=sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4 \ --hash=sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7 \ --hash=sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7 \ --hash=sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4 \ --hash=sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e \ --hash=sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa \ --hash=sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6 \ --hash=sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962 \ --hash=sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b \ --hash=sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f \ --hash=sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474 \ --hash=sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5 \ --hash=sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459 \ --hash=sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf \ --hash=sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a \ --hash=sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c \ --hash=sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76 \ --hash=sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362 \ --hash=sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4 \ --hash=sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934 \ --hash=sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320 \ --hash=sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118 \ --hash=sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96 \ --hash=sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306 \ --hash=sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046 \ --hash=sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3 \ --hash=sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2 \ --hash=sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af \ --hash=sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9 \ --hash=sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67 \ --hash=sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a \ --hash=sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27 \ 
--hash=sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35 \ --hash=sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b \ --hash=sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151 \ --hash=sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b \ --hash=sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154 \ --hash=sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133 \ --hash=sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef \ --hash=sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145 \ --hash=sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15 \ --hash=sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4 \ --hash=sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc \ --hash=sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee \ --hash=sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c \ --hash=sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0 \ --hash=sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5 \ --hash=sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57 \ --hash=sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b \ --hash=sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8 \ --hash=sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1 \ --hash=sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da \ --hash=sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e \ --hash=sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc \ --hash=sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993 \ --hash=sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656 \ --hash=sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4 \ --hash=sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c \ --hash=sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb \ --hash=sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d \ --hash=sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9 \ --hash=sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e \ --hash=sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1 \ --hash=sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc \ --hash=sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a \ --hash=sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9 \ --hash=sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506 \ --hash=sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b \ --hash=sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1 \ --hash=sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d \ --hash=sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99 \ --hash=sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3 \ --hash=sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31 \ --hash=sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c \ --hash=sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39 \ 
--hash=sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a \ --hash=sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308 \ --hash=sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2 \ --hash=sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228 \ --hash=sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b \ --hash=sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9 \ --hash=sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad pydantic==2.10.6 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584 \ --hash=sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236 sniffio==1.3.1 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc typing-extensions==4.13.2 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c \ --hash=sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef ollama-python-0.4.8/tests/000077500000000000000000000000001500001205700154375ustar00rootroot00000000000000ollama-python-0.4.8/tests/test_client.py000066400000000000000000001116251500001205700203340ustar00rootroot00000000000000import base64 import json import os import re import tempfile from pathlib import Path from typing import Any import pytest from httpx import Response as httpxResponse from pydantic import BaseModel, ValidationError from pytest_httpserver import HTTPServer, URIPattern from werkzeug.wrappers import Request, Response from ollama._client import CONNECTION_ERROR_MESSAGE, AsyncClient, Client, _copy_tools from ollama._types import Image, Message PNG_BASE64 = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGNgYGAAAAAEAAH2FzhVAAAAAElFTkSuQmCC' PNG_BYTES = base64.b64decode(PNG_BASE64) class PrefixPattern(URIPattern): def __init__(self, prefix: str): self.prefix = prefix def match(self, uri): return uri.startswith(self.prefix) def test_client_chat(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': "I don't know.", }, } ) client = Client(httpserver.url_for('/')) response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == "I don't know." 
def test_client_chat_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): for message in ['I ', "don't ", 'know.']: yield ( json.dumps( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': message, }, } ) + '\n' ) return Response(generate()) httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': True, }, ).respond_with_handler(stream_handler) client = Client(httpserver.url_for('/')) response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], stream=True) it = iter(['I ', "don't ", 'know.']) for part in response: assert part['message']['role'] in 'assistant' assert part['message']['content'] == next(it) @pytest.mark.parametrize('message_format', ('dict', 'pydantic_model')) @pytest.mark.parametrize('file_style', ('path', 'bytes')) def test_client_chat_images(httpserver: HTTPServer, message_format: str, file_style: str, tmp_path): from ollama._types import Image, Message httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [ { 'role': 'user', 'content': 'Why is the sky blue?', 'images': [PNG_BASE64], }, ], 'tools': [], 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': "I don't know.", }, } ) client = Client(httpserver.url_for('/')) if file_style == 'bytes': image_content = PNG_BYTES elif file_style == 'path': image_path = tmp_path / 'transparent.png' image_path.write_bytes(PNG_BYTES) image_content = str(image_path) if message_format == 'pydantic_model': messages = [Message(role='user', content='Why is the sky blue?', images=[Image(value=image_content)])] elif message_format == 'dict': messages = [{'role': 'user', 'content': 'Why is the sky blue?', 'images': [image_content]}] else: raise ValueError(f'Invalid message format: {message_format}') response = client.chat('dummy', messages=messages) assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == "I don't know." 
def test_client_chat_format_json(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'format': 'json', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': '{"answer": "Because of Rayleigh scattering"}', }, } ) client = Client(httpserver.url_for('/')) response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format='json') assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering"}' def test_client_chat_format_pydantic(httpserver: HTTPServer): class ResponseFormat(BaseModel): answer: str confidence: float httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', }, } ) client = Client(httpserver.url_for('/')) response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format=ResponseFormat.model_json_schema()) assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' @pytest.mark.asyncio async def test_async_client_chat_format_json(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'format': 'json', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': '{"answer": "Because of Rayleigh scattering"}', }, } ) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format='json') assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering"}' @pytest.mark.asyncio async def test_async_client_chat_format_pydantic(httpserver: HTTPServer): class ResponseFormat(BaseModel): answer: str confidence: float httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', }, } ) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format=ResponseFormat.model_json_schema()) assert response['model'] == 'dummy' assert 
response['message']['role'] == 'assistant' assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' def test_client_generate(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': 'Because it is.', } ) client = Client(httpserver.url_for('/')) response = client.generate('dummy', 'Why is the sky blue?') assert response['model'] == 'dummy' assert response['response'] == 'Because it is.' def test_client_generate_with_image_type(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'What is in this image?', 'stream': False, 'images': [PNG_BASE64], }, ).respond_with_json( { 'model': 'dummy', 'response': 'A blue sky.', } ) client = Client(httpserver.url_for('/')) response = client.generate('dummy', 'What is in this image?', images=[Image(value=PNG_BASE64)]) assert response['model'] == 'dummy' assert response['response'] == 'A blue sky.' def test_client_generate_with_invalid_image(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'What is in this image?', 'stream': False, 'images': ['invalid_base64'], }, ).respond_with_json({'error': 'Invalid image data'}, status=400) client = Client(httpserver.url_for('/')) with pytest.raises(ValueError): client.generate('dummy', 'What is in this image?', images=[Image(value='invalid_base64')]) def test_client_generate_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): for message in ['Because ', 'it ', 'is.']: yield ( json.dumps( { 'model': 'dummy', 'response': message, } ) + '\n' ) return Response(generate()) httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': True, }, ).respond_with_handler(stream_handler) client = Client(httpserver.url_for('/')) response = client.generate('dummy', 'Why is the sky blue?', stream=True) it = iter(['Because ', 'it ', 'is.']) for part in response: assert part['model'] == 'dummy' assert part['response'] == next(it) def test_client_generate_images(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': False, 'images': [PNG_BASE64], }, ).respond_with_json( { 'model': 'dummy', 'response': 'Because it is.', } ) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as temp: temp.write(PNG_BYTES) temp.flush() response = client.generate('dummy', 'Why is the sky blue?', images=[temp.name]) assert response['model'] == 'dummy' assert response['response'] == 'Because it is.' 
def test_client_generate_format_json(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'format': 'json', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': '{"answer": "Because of Rayleigh scattering"}', } ) client = Client(httpserver.url_for('/')) response = client.generate('dummy', 'Why is the sky blue?', format='json') assert response['model'] == 'dummy' assert response['response'] == '{"answer": "Because of Rayleigh scattering"}' def test_client_generate_format_pydantic(httpserver: HTTPServer): class ResponseFormat(BaseModel): answer: str confidence: float httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', } ) client = Client(httpserver.url_for('/')) response = client.generate('dummy', 'Why is the sky blue?', format=ResponseFormat.model_json_schema()) assert response['model'] == 'dummy' assert response['response'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' @pytest.mark.asyncio async def test_async_client_generate_format_json(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'format': 'json', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': '{"answer": "Because of Rayleigh scattering"}', } ) client = AsyncClient(httpserver.url_for('/')) response = await client.generate('dummy', 'Why is the sky blue?', format='json') assert response['model'] == 'dummy' assert response['response'] == '{"answer": "Because of Rayleigh scattering"}' @pytest.mark.asyncio async def test_async_client_generate_format_pydantic(httpserver: HTTPServer): class ResponseFormat(BaseModel): answer: str confidence: float httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', } ) client = AsyncClient(httpserver.url_for('/')) response = await client.generate('dummy', 'Why is the sky blue?', format=ResponseFormat.model_json_schema()) assert response['model'] == 'dummy' assert response['response'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' def test_client_pull(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/pull', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.pull('dummy') assert response['status'] == 'success' def test_client_pull_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): yield json.dumps({'status': 'pulling manifest'}) + '\n' yield json.dumps({'status': 'verifying sha256 digest'}) + '\n' yield 
json.dumps({'status': 'writing manifest'}) + '\n' yield json.dumps({'status': 'removing any unused layers'}) + '\n' yield json.dumps({'status': 'success'}) + '\n' return Response(generate()) httpserver.expect_ordered_request( '/api/pull', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': True, }, ).respond_with_handler(stream_handler) client = Client(httpserver.url_for('/')) response = client.pull('dummy', stream=True) it = iter(['pulling manifest', 'verifying sha256 digest', 'writing manifest', 'removing any unused layers', 'success']) for part in response: assert part['status'] == next(it) def test_client_push(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/push', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.push('dummy') assert response['status'] == 'success' def test_client_push_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): yield json.dumps({'status': 'retrieving manifest'}) + '\n' yield json.dumps({'status': 'pushing manifest'}) + '\n' yield json.dumps({'status': 'success'}) + '\n' return Response(generate()) httpserver.expect_ordered_request( '/api/push', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': True, }, ).respond_with_handler(stream_handler) client = Client(httpserver.url_for('/')) response = client.push('dummy', stream=True) it = iter(['retrieving manifest', 'pushing manifest', 'success']) for part in response: assert part['status'] == next(it) @pytest.fixture def userhomedir(): with tempfile.TemporaryDirectory() as temp: home = os.getenv('HOME', '') os.environ['HOME'] = temp yield Path(temp) os.environ['HOME'] = home def test_client_create_with_blob(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'files': {'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile(): response = client.create('dummy', files={'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}) assert response['status'] == 'success' def test_client_create_with_parameters_roundtrip(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'quantize': 'q4_k_m', 'from': 'mymodel', 'adapters': {'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 'template': '[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 'license': 'this is my license', 'system': '\nUse\nmultiline\nstrings.\n', 'parameters': {'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 'messages': [{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 'stream': False, }, ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile(): response = client.create( 'dummy', quantize='q4_k_m', from_='mymodel', adapters={'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, template='[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', license='this is my license', system='\nUse\nmultiline\nstrings.\n', parameters={'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, messages=[{'role': 'user', 'content': 'Hello there!'}, 
{'role': 'assistant', 'content': 'Hello there yourself!'}], stream=False, ) assert response['status'] == 'success' def test_client_create_from_library(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'from': 'llama2', 'stream': False, }, ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.create('dummy', from_='llama2') assert response['status'] == 'success' def test_client_create_blob(httpserver: HTTPServer): httpserver.expect_ordered_request(re.compile('^/api/blobs/sha256[:-][0-9a-fA-F]{64}$'), method='POST').respond_with_response(Response(status=201)) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = client.create_blob(blob.name) assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' def test_client_create_blob_exists(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = client.create_blob(blob.name) assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' def test_client_delete(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) client = Client(httpserver.url_for('/api/delete')) response = client.delete('dummy') assert response['status'] == 'success' def test_client_copy(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) client = Client(httpserver.url_for('/api/copy')) response = client.copy('dum', 'dummer') assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_chat(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': "I don't know.", }, } ) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == "I don't know." 
@pytest.mark.asyncio async def test_async_client_chat_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): for message in ['I ', "don't ", 'know.']: yield ( json.dumps( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': message, }, } ) + '\n' ) return Response(generate()) httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': True, }, ).respond_with_handler(stream_handler) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], stream=True) it = iter(['I ', "don't ", 'know.']) async for part in response: assert part['message']['role'] == 'assistant' assert part['message']['content'] == next(it) @pytest.mark.asyncio async def test_async_client_chat_images(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/chat', method='POST', json={ 'model': 'dummy', 'messages': [ { 'role': 'user', 'content': 'Why is the sky blue?', 'images': [PNG_BASE64], }, ], 'tools': [], 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'message': { 'role': 'assistant', 'content': "I don't know.", }, } ) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?', 'images': [PNG_BYTES]}]) assert response['model'] == 'dummy' assert response['message']['role'] == 'assistant' assert response['message']['content'] == "I don't know." @pytest.mark.asyncio async def test_async_client_generate(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': False, }, ).respond_with_json( { 'model': 'dummy', 'response': 'Because it is.', } ) client = AsyncClient(httpserver.url_for('/')) response = await client.generate('dummy', 'Why is the sky blue?') assert response['model'] == 'dummy' assert response['response'] == 'Because it is.' @pytest.mark.asyncio async def test_async_client_generate_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): for message in ['Because ', 'it ', 'is.']: yield ( json.dumps( { 'model': 'dummy', 'response': message, } ) + '\n' ) return Response(generate()) httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': True, }, ).respond_with_handler(stream_handler) client = AsyncClient(httpserver.url_for('/')) response = await client.generate('dummy', 'Why is the sky blue?', stream=True) it = iter(['Because ', 'it ', 'is.']) async for part in response: assert part['model'] == 'dummy' assert part['response'] == next(it) @pytest.mark.asyncio async def test_async_client_generate_images(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/generate', method='POST', json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', 'stream': False, 'images': [PNG_BASE64], }, ).respond_with_json( { 'model': 'dummy', 'response': 'Because it is.', } ) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as temp: temp.write(PNG_BYTES) temp.flush() response = await client.generate('dummy', 'Why is the sky blue?', images=[temp.name]) assert response['model'] == 'dummy' assert response['response'] == 'Because it is.' 
@pytest.mark.asyncio async def test_async_client_pull(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/pull', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.pull('dummy') assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_pull_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): yield json.dumps({'status': 'pulling manifest'}) + '\n' yield json.dumps({'status': 'verifying sha256 digest'}) + '\n' yield json.dumps({'status': 'writing manifest'}) + '\n' yield json.dumps({'status': 'removing any unused layers'}) + '\n' yield json.dumps({'status': 'success'}) + '\n' return Response(generate()) httpserver.expect_ordered_request( '/api/pull', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': True, }, ).respond_with_handler(stream_handler) client = AsyncClient(httpserver.url_for('/')) response = await client.pull('dummy', stream=True) it = iter(['pulling manifest', 'verifying sha256 digest', 'writing manifest', 'removing any unused layers', 'success']) async for part in response: assert part['status'] == next(it) @pytest.mark.asyncio async def test_async_client_push(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/push', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.push('dummy') assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_push_stream(httpserver: HTTPServer): def stream_handler(_: Request): def generate(): yield json.dumps({'status': 'retrieving manifest'}) + '\n' yield json.dumps({'status': 'pushing manifest'}) + '\n' yield json.dumps({'status': 'success'}) + '\n' return Response(generate()) httpserver.expect_ordered_request( '/api/push', method='POST', json={ 'model': 'dummy', 'insecure': False, 'stream': True, }, ).respond_with_handler(stream_handler) client = AsyncClient(httpserver.url_for('/')) response = await client.push('dummy', stream=True) it = iter(['retrieving manifest', 'pushing manifest', 'success']) async for part in response: assert part['status'] == next(it) @pytest.mark.asyncio async def test_async_client_create_with_blob(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'files': {'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 'stream': False, }, ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile(): response = await client.create('dummy', files={'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}) assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_with_parameters_roundtrip(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'quantize': 'q4_k_m', 'from': 'mymodel', 'adapters': {'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 'template': '[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 'license': 'this is my license', 'system': '\nUse\nmultiline\nstrings.\n', 'parameters': {'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 'messages': [{'role': 'user', 'content': 'Hello 
there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 'stream': False, }, ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile(): response = await client.create( 'dummy', quantize='q4_k_m', from_='mymodel', adapters={'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, template='[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', license='this is my license', system='\nUse\nmultiline\nstrings.\n', parameters={'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, messages=[{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], stream=False, ) assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_from_library(httpserver: HTTPServer): httpserver.expect_ordered_request( '/api/create', method='POST', json={ 'model': 'dummy', 'from': 'llama2', 'stream': False, }, ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.create('dummy', from_='llama2') assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_blob(httpserver: HTTPServer): httpserver.expect_ordered_request(re.compile('^/api/blobs/sha256[:-][0-9a-fA-F]{64}$'), method='POST').respond_with_response(Response(status=201)) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = await client.create_blob(blob.name) assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' @pytest.mark.asyncio async def test_async_client_create_blob_exists(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = await client.create_blob(blob.name) assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' @pytest.mark.asyncio async def test_async_client_delete(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) client = AsyncClient(httpserver.url_for('/api/delete')) response = await client.delete('dummy') assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_copy(httpserver: HTTPServer): httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) client = AsyncClient(httpserver.url_for('/api/copy')) response = await client.copy('dum', 'dummer') assert response['status'] == 'success' def test_headers(): client = Client() assert client._client.headers['content-type'] == 'application/json' assert client._client.headers['accept'] == 'application/json' assert client._client.headers['user-agent'].startswith('ollama-python/') client = Client( headers={ 'X-Custom': 'value', 'Content-Type': 'text/plain', } ) assert client._client.headers['x-custom'] == 'value' assert client._client.headers['content-type'] == 'application/json' def test_copy_tools(): def func1(x: int) -> str: """Simple function 1. Args: x (integer): A number """ pass def func2(y: str) -> int: """Simple function 2. 
@pytest.mark.asyncio
async def test_async_client_delete(httpserver: HTTPServer):
  httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200))

  client = AsyncClient(httpserver.url_for('/api/delete'))

  response = await client.delete('dummy')
  assert response['status'] == 'success'


@pytest.mark.asyncio
async def test_async_client_copy(httpserver: HTTPServer):
  httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200))

  client = AsyncClient(httpserver.url_for('/api/copy'))

  response = await client.copy('dum', 'dummer')
  assert response['status'] == 'success'


def test_headers():
  client = Client()
  assert client._client.headers['content-type'] == 'application/json'
  assert client._client.headers['accept'] == 'application/json'
  assert client._client.headers['user-agent'].startswith('ollama-python/')

  client = Client(
    headers={
      'X-Custom': 'value',
      'Content-Type': 'text/plain',
    }
  )
  assert client._client.headers['x-custom'] == 'value'
  assert client._client.headers['content-type'] == 'application/json'


def test_copy_tools():
  def func1(x: int) -> str:
    """Simple function 1.

    Args:
      x (integer): A number
    """
    pass

  def func2(y: str) -> int:
    """Simple function 2.

    Args:
      y (string): A string
    """
    pass

  # Test with list of functions
  tools = list(_copy_tools([func1, func2]))
  assert len(tools) == 2
  assert tools[0].function.name == 'func1'
  assert tools[1].function.name == 'func2'

  # Test with empty input
  assert list(_copy_tools()) == []
  assert list(_copy_tools(None)) == []
  assert list(_copy_tools([])) == []

  # Test with mix of functions and tool dicts
  tool_dict = {
    'type': 'function',
    'function': {
      'name': 'test',
      'description': 'Test function',
      'parameters': {
        'type': 'object',
        'properties': {'x': {'type': 'string', 'description': 'A string', 'enum': ['a', 'b', 'c']}, 'y': {'type': ['integer', 'number'], 'description': 'An integer'}},
        'required': ['x'],
      },
    },
  }

  tools = list(_copy_tools([func1, tool_dict]))
  assert len(tools) == 2
  assert tools[0].function.name == 'func1'
  assert tools[1].function.name == 'test'


def test_tool_validation():
  # _copy_tools is a generator, so the ValidationError is only raised when it
  # is consumed; hence the list() call inside pytest.raises.
  with pytest.raises(ValidationError):
    invalid_tool = {'type': 'invalid_type', 'function': {'name': 'test'}}
    list(_copy_tools([invalid_tool]))
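# Illustrative sketch, not part of the original test suite: _copy_tools is the
# machinery that lets chat() accept plain Python functions as tools. A typical
# dispatch loop, assuming a live server and a tool-capable example model, might
# look like this; it is never invoked by the tests.
def _example_tool_dispatch() -> None:
  from ollama import Client  # local import: example only

  def add_two_numbers(x: int, y: int) -> int:
    """Add two numbers.

    Args:
      x (integer): The first number
      y (integer): The second number
    """
    return x + y

  client = Client()
  response = client.chat(
    model='llama3.1',  # example model name
    messages=[{'role': 'user', 'content': 'What is 2 + 3?'}],
    tools=[add_two_numbers],  # converted internally, as the tests above exercise
  )
  # Dispatch any tool calls the model requested back to the Python function.
  for call in response.message.tool_calls or []:
    if call.function.name == 'add_two_numbers':
      print(add_two_numbers(**call.function.arguments))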
def test_client_connection_error():
  client = Client('http://localhost:1234')

  with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE):
    client.chat('model', messages=[{'role': 'user', 'content': 'prompt'}])
  with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE):
    client.generate('model', 'prompt')
  with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE):
    client.show('model')


@pytest.mark.asyncio
async def test_async_client_connection_error():
  client = AsyncClient('http://localhost:1234')

  with pytest.raises(ConnectionError) as exc_info:
    await client.chat('model', messages=[{'role': 'user', 'content': 'prompt'}])
  assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download'

  with pytest.raises(ConnectionError) as exc_info:
    await client.generate('model', 'prompt')
  assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download'

  with pytest.raises(ConnectionError) as exc_info:
    await client.show('model')
  assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download'


def test_arbitrary_roles_accepted_in_message():
  _ = Message(role='somerandomrole', content="I'm ok with you adding any role message now!")


def _mock_request(*args: Any, **kwargs: Any) -> httpxResponse:
  return httpxResponse(status_code=200, content="{'response': 'Hello world!'}")


def test_arbitrary_roles_accepted_in_message_request(monkeypatch: pytest.MonkeyPatch):
  monkeypatch.setattr(Client, '_request', _mock_request)

  client = Client()

  client.chat(model='llama3.1', messages=[{'role': 'somerandomrole', 'content': "I'm ok with you adding any role message now!"}, {'role': 'user', 'content': 'Hello world!'}])


async def _mock_request_async(*args: Any, **kwargs: Any) -> httpxResponse:
  return httpxResponse(status_code=200, content="{'response': 'Hello world!'}")


@pytest.mark.asyncio
async def test_arbitrary_roles_accepted_in_message_request_async(monkeypatch: pytest.MonkeyPatch):
  monkeypatch.setattr(AsyncClient, '_request', _mock_request_async)

  client = AsyncClient()

  await client.chat(model='llama3.1', messages=[{'role': 'somerandomrole', 'content': "I'm ok with you adding any role message now!"}, {'role': 'user', 'content': 'Hello world!'}])
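# Illustrative sketch, not part of the original test suite: as the connection
# error tests above show, every client call raises the built-in ConnectionError
# with a download hint when no server is reachable, so application code can
# catch it explicitly. The model name is an example.
def _example_handle_connection_error() -> None:
  from ollama import Client  # local import: example only

  client = Client()
  try:
    client.show('llama3.1')  # example model name
  except ConnectionError as e:
    print(f'Ollama is not running: {e}')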
ollama-python-0.4.8/tests/test_type_serialization.py

import tempfile
from base64 import b64encode
from pathlib import Path

import pytest

from ollama._types import CreateRequest, Image


def test_image_serialization_bytes():
  image_bytes = b'test image bytes'
  encoded_string = b64encode(image_bytes).decode()
  img = Image(value=image_bytes)
  assert img.model_dump() == encoded_string


def test_image_serialization_base64_string():
  b64_str = 'dGVzdCBiYXNlNjQgc3RyaW5n'
  img = Image(value=b64_str)
  assert img.model_dump() == b64_str  # Should return as-is if valid base64


def test_image_serialization_long_base64_string():
  b64_str = 'dGVzdCBiYXNlNjQgc3RyaW5n' * 1000
  img = Image(value=b64_str)
  assert img.model_dump() == b64_str  # Should return as-is if valid base64


def test_image_serialization_plain_string():
  img = Image(value='not a path or base64')
  assert img.model_dump() == 'not a path or base64'  # Should return as-is


def test_image_serialization_path():
  with tempfile.NamedTemporaryFile() as temp_file:
    temp_file.write(b'test file content')
    temp_file.flush()
    img = Image(value=Path(temp_file.name))
    assert img.model_dump() == b64encode(b'test file content').decode()


def test_image_serialization_string_path():
  with tempfile.NamedTemporaryFile() as temp_file:
    temp_file.write(b'test file content')
    temp_file.flush()
    img = Image(value=temp_file.name)
    assert img.model_dump() == b64encode(b'test file content').decode()

  with pytest.raises(ValueError):
    img = Image(value='some_path/that/does/not/exist.png')
    img.model_dump()

  with pytest.raises(ValueError):
    img = Image(value='not an image')
    img.model_dump()


def test_create_request_serialization():
  request = CreateRequest(model='test-model', from_='base-model', quantize='q4_0', files={'file1': 'content1'}, adapters={'adapter1': 'content1'}, template='test template', license='MIT', system='test system', parameters={'param1': 'value1'})

  serialized = request.model_dump()
  assert serialized['from'] == 'base-model'
  assert 'from_' not in serialized
  assert serialized['quantize'] == 'q4_0'
  assert serialized['files'] == {'file1': 'content1'}
  assert serialized['adapters'] == {'adapter1': 'content1'}
  assert serialized['template'] == 'test template'
  assert serialized['license'] == 'MIT'
  assert serialized['system'] == 'test system'
  assert serialized['parameters'] == {'param1': 'value1'}


def test_create_request_serialization_exclude_none_true():
  request = CreateRequest(model='test-model', from_=None, quantize=None)
  serialized = request.model_dump(exclude_none=True)
  assert serialized == {'model': 'test-model'}
  assert 'from' not in serialized
  assert 'from_' not in serialized
  assert 'quantize' not in serialized


def test_create_request_serialization_exclude_none_false():
  request = CreateRequest(model='test-model', from_=None, quantize=None)
  serialized = request.model_dump(exclude_none=False)
  assert 'from' in serialized
  assert 'quantize' in serialized
  assert 'adapters' in serialized
  assert 'from_' not in serialized


def test_create_request_serialization_license_list():
  request = CreateRequest(model='test-model', license=['MIT', 'Apache-2.0'])
  serialized = request.model_dump()
  assert serialized['license'] == ['MIT', 'Apache-2.0']
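# Illustrative sketch, not part of the original test suite: the serialization
# rules tested above mean a chat message can carry an image as a file path, raw
# bytes, or a base64 string, and the client encodes it on the wire. Assumes a
# live server, a vision-capable example model, and a hypothetical image path.
def _example_chat_with_image(path: str = 'cat.png') -> None:
  from ollama import Client  # local import: example only

  client = Client()
  response = client.chat(
    model='llava',  # example vision model
    messages=[{'role': 'user', 'content': 'Describe this image.', 'images': [path]}],
  )
  print(response.message.content)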
ollama-python-0.4.8/tests/test_utils.py

import json
import sys
from typing import Dict, List, Mapping, Sequence, Set, Tuple, Union

from ollama._utils import convert_function_to_tool


def test_function_to_tool_conversion():
  def add_numbers(x: int, y: Union[int, None] = None) -> int:
    """Add two numbers together.

    args:
      x (integer): The first number
      y (integer, optional): The second number

    Returns:
      integer: The sum of x and y
    """
    return x + y

  tool = convert_function_to_tool(add_numbers).model_dump()
  assert tool['type'] == 'function'
  assert tool['function']['name'] == 'add_numbers'
  assert tool['function']['description'] == 'Add two numbers together.'
  assert tool['function']['parameters']['type'] == 'object'
  assert tool['function']['parameters']['properties']['x']['type'] == 'integer'
  assert tool['function']['parameters']['properties']['x']['description'] == 'The first number'
  assert tool['function']['parameters']['required'] == ['x']


def test_function_with_no_args():
  def simple_func():
    """
    A simple function with no arguments.

    Args:
      None

    Returns:
      None
    """
    pass

  tool = convert_function_to_tool(simple_func).model_dump()
  assert tool['function']['name'] == 'simple_func'
  assert tool['function']['description'] == 'A simple function with no arguments.'
  assert tool['function']['parameters']['properties'] == {}


def test_function_with_all_types():
  if sys.version_info >= (3, 10):

    def all_types(
      x: int,
      y: str,
      z: list[int],
      w: dict[str, int],
      v: int | str | None,
    ) -> int | dict[str, int] | str | list[int] | None:
      """
      A function with all types.

      Args:
        x (integer): The first number
        y (string): The second number
        z (array): The third number
        w (object): The fourth number
        v (integer | string | None): The fifth number
      """
      pass
  else:

    def all_types(
      x: int,
      y: str,
      z: Sequence,
      w: Mapping[str, int],
      d: Dict[str, int],
      s: Set[int],
      t: Tuple[int, str],
      l: List[int],  # noqa: E741
      o: Union[int, None],
    ) -> Union[Mapping[str, int], str, None]:
      """
      A function with all types.

      Args:
        x (integer): The first number
        y (string): The second number
        z (array): The third number
        w (object): The fourth number
        d (object): The fifth number
        s (array): The sixth number
        t (array): The seventh number
        l (array): The eighth number
        o (integer | None): The ninth number
      """
      pass

  tool_json = convert_function_to_tool(all_types).model_dump_json()
  tool = json.loads(tool_json)

  assert tool['function']['parameters']['properties']['x']['type'] == 'integer'
  assert tool['function']['parameters']['properties']['y']['type'] == 'string'

  if sys.version_info >= (3, 10):
    assert tool['function']['parameters']['properties']['z']['type'] == 'array'
    assert tool['function']['parameters']['properties']['w']['type'] == 'object'
    assert set(x.strip().strip("'") for x in tool['function']['parameters']['properties']['v']['type'].removeprefix('[').removesuffix(']').split(',')) == {'string', 'integer'}
    assert tool['function']['parameters']['properties']['v']['type'] != 'null'
    assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w']
  else:
    assert tool['function']['parameters']['properties']['z']['type'] == 'array'
    assert tool['function']['parameters']['properties']['w']['type'] == 'object'
    assert tool['function']['parameters']['properties']['d']['type'] == 'object'
    assert tool['function']['parameters']['properties']['s']['type'] == 'array'
    assert tool['function']['parameters']['properties']['t']['type'] == 'array'
    assert tool['function']['parameters']['properties']['l']['type'] == 'array'
    assert tool['function']['parameters']['properties']['o']['type'] == 'integer'
    assert tool['function']['parameters']['properties']['o']['type'] != 'null'
    assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w', 'd', 's', 't', 'l']


def test_function_docstring_parsing():
  from typing import Any, Dict, List

  def func_with_complex_docs(x: int, y: List[str]) -> Dict[str, Any]:
    """
    Test function with complex docstring.

    Args:
      x (integer): A number
        with multiple lines
      y (array of string): A list
        with multiple lines

    Returns:
      object: A dictionary
        with multiple lines
    """
    pass

  tool = convert_function_to_tool(func_with_complex_docs).model_dump()
  assert tool['function']['description'] == 'Test function with complex docstring.'
  assert tool['function']['parameters']['properties']['x']['description'] == 'A number with multiple lines'
  assert tool['function']['parameters']['properties']['y']['description'] == 'A list with multiple lines'


def test_skewed_docstring_parsing():
  def add_two_numbers(x: int, y: int) -> int:
    """
    Add two numbers together.

    Args:
      x (integer): : The first number
      y (integer ): The second number

    Returns:
      integer: The sum of x and y
    """
    pass

  tool = convert_function_to_tool(add_two_numbers).model_dump()
  assert tool['function']['parameters']['properties']['x']['description'] == ': The first number'
  assert tool['function']['parameters']['properties']['y']['description'] == 'The second number'


def test_function_with_no_docstring():
  def no_docstring():
    pass

  def no_docstring_with_args(x: int, y: int):
    pass

  tool = convert_function_to_tool(no_docstring).model_dump()
  assert tool['function']['description'] == ''

  tool = convert_function_to_tool(no_docstring_with_args).model_dump()
  assert tool['function']['description'] == ''
  assert tool['function']['parameters']['properties']['x']['description'] == ''
  assert tool['function']['parameters']['properties']['y']['description'] == ''
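# Illustrative sketch, not part of the original test suite: the parsing tests
# above reward a conventional Google-style docstring. A function destined to be
# a tool converts most cleanly when each parameter has a '(type): description'
# entry, as in this hypothetical example, which is never invoked by the tests.
def _example_well_documented_tool(city: str, units: str = 'celsius') -> str:
  """Get the current weather for a city.

  Args:
    city (string): The city to look up
    units (string, optional): Either 'celsius' or 'fahrenheit'
  """
  return f'Weather for {city} in {units} is not implemented here.'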
""" pass tool = convert_function_to_tool(only_description).model_dump() assert tool['function']['description'] == 'A function with only a description.' assert tool['function']['parameters'] == {'type': 'object', 'defs': None, 'items': None, 'required': None, 'properties': {}} def only_description_with_args(x: int, y: int): """ A function with only a description. """ pass tool = convert_function_to_tool(only_description_with_args).model_dump() assert tool['function']['description'] == 'A function with only a description.' assert tool['function']['parameters'] == { 'type': 'object', 'defs': None, 'items': None, 'properties': { 'x': {'type': 'integer', 'description': '', 'enum': None, 'items': None}, 'y': {'type': 'integer', 'description': '', 'enum': None, 'items': None}, }, 'required': ['x', 'y'], } def test_function_with_yields(): def function_with_yields(x: int, y: int): """ A function with yields section. Args: x: the first number y: the second number Yields: The sum of x and y """ pass tool = convert_function_to_tool(function_with_yields).model_dump() assert tool['function']['description'] == 'A function with yields section.' assert tool['function']['parameters']['properties']['x']['description'] == 'the first number' assert tool['function']['parameters']['properties']['y']['description'] == 'the second number' def test_function_with_no_types(): def no_types(a, b): """ A function with no types. """ pass tool = convert_function_to_tool(no_types).model_dump() assert tool['function']['parameters']['properties']['a']['type'] == 'string' assert tool['function']['parameters']['properties']['b']['type'] == 'string' def test_function_with_parentheses(): def func_with_parentheses(a: int, b: int) -> int: """ A function with parentheses. Args: a: First (:thing) number to add b: Second number to add Returns: int: The sum of a and b """ pass def func_with_parentheses_and_args(a: int, b: int): """ A function with parentheses and args. Args: a(integer) : First (:thing) number to add b(integer) :Second number to add """ pass tool = convert_function_to_tool(func_with_parentheses).model_dump() assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add' tool = convert_function_to_tool(func_with_parentheses_and_args).model_dump() assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add'