diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 9ba0ad9e..00000000 --- a/.coveragerc +++ /dev/null @@ -1,6 +0,0 @@ -[run] -omit = - */site-packages/* - */distutils/* - tests/* - .tox/* diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000..ff261bad --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,9 @@ +ARG VARIANT="3.9" +FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} + +USER vscode + +RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.44.0" RYE_INSTALL_OPTION="--yes" bash +ENV PATH=/home/vscode/.rye/shims:$PATH + +RUN echo "[[ -d .venv ]] && source .venv/bin/activate || export PATH=\$PATH" >> /home/vscode/.bashrc diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..c17fdc16 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,43 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/debian +{ + "name": "Debian", + "build": { + "dockerfile": "Dockerfile", + "context": ".." + }, + + "postStartCommand": "rye sync --all-features", + + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python" + ], + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "python.pythonPath": ".venv/bin/python", + "python.defaultInterpreterPath": ".venv/bin/python", + "python.typeChecking": "basic", + "terminal.integrated.env.linux": { + "PATH": "/home/vscode/.rye/shims:${env:PATH}" + } + } + } + }, + "features": { + "ghcr.io/devcontainers/features/node:1": {} + } + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
+ // "remoteUser": "root" +} diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index d265f239..00000000 --- a/.editorconfig +++ /dev/null @@ -1,17 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -indent_style = space -indent_size = 4 -insert_final_newline = true -trim_trailing_whitespace = true -end_of_line = lf -charset = utf-8 - -[*.{cfg, ini, json, toml, yml}] -indent_size = 2 - -[Makefile] -indent_style = tab diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..76587286 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,98 @@ +name: CI +on: + push: + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' + +jobs: + lint: + timeout-minutes: 10 + name: lint + runs-on: ${{ github.repository == 'stainless-sdks/imagekit-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run lints + run: ./scripts/lint + + build: + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + timeout-minutes: 10 + name: build + permissions: + contents: read + id-token: write + runs-on: ${{ github.repository == 'stainless-sdks/imagekit-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run build + run: rye build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/imagekit-python' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/imagekit-python' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh + + test: + timeout-minutes: 10 + name: test + runs-on: ${{ github.repository == 'stainless-sdks/imagekit-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 00000000..08adafc8 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,31 @@ +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
+# You can run this workflow by navigating to https://www.github.com/imagekit-developer/imagekit-python/actions/workflows/publish-pypi.yml +name: Publish PyPI +on: + workflow_dispatch: + + release: + types: [published] + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.IMAGE_KIT_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml deleted file mode 100644 index eb50c37b..00000000 --- a/.github/workflows/publish.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Upload Python Package - -on: - release: - types: [created] - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: "3.x" - - name: Install Tox and any other packages - run: | - python -m pip install --upgrade pip - pip install -r requirements/test.txt - - name: Run Tox - run: tox -e py - - name: Install build dependencies - run: | - pip install setuptools wheel twine - - name: Build and publish - env: - TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} - TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} - run: | - python setup.py sdist bdist_wheel - twine upload dist/* diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml new file mode 100644 index 00000000..c36a89df --- /dev/null +++ b/.github/workflows/release-doctor.yml @@ -0,0 +1,21 @@ +name: Release Doctor +on: + pull_request: + branches: + - master + workflow_dispatch: + +jobs: + release_doctor: + name: release doctor + runs-on: ubuntu-latest + if: github.repository == 'imagekit-developer/imagekit-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + + steps: + - uses: actions/checkout@v4 + + - name: Check release environment + run: | + bash ./bin/check-release-environment + env: + PYPI_TOKEN: ${{ secrets.IMAGE_KIT_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index aced653c..00000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Python CI - -on: [push] - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - python: [3.7, 3.8] - - steps: - - uses: actions/checkout@v1 - - name: Setup Python - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python }} - - name: Install Tox and any other packages - run: pip install -r requirements/test.txt - - name: Run Tox - run: tox -e py - - name: Upload Coverage to codecov - run: bash <(curl -s https://codecov.io/bash) diff --git a/.gitignore b/.gitignore index d26187f6..f382096f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,57 +1,17 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class +.prism.log +_dev -# C extensions -*.so +__pycache__ +.mypy_cache -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST +dist -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache 
-nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# pyenv -.python-version +.venv +.idea -# Environments .env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ +.envrc +codegen.log +Brewfile.lock.json -# Visual Studio Code -.vscode/ -htmlcov/ +examples/temp \ No newline at end of file diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..43077b24 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.9.18 diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000..8e76abb5 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "5.0.0" +} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml new file mode 100644 index 00000000..333dfb4f --- /dev/null +++ b/.stats.yml @@ -0,0 +1,4 @@ +configured_endpoints: 43 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/imagekit-inc%2Fimagekit-9d184cb502ab32a85db2889c796cdfebe812f2a55a604df79c85dd4b5e7e2add.yml +openapi_spec_hash: a9aa620376fce66532c84f9364209b0b +config_hash: 71cab8223bb5610c6c7ca6e9c4cc1f89 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..c3a26d68 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "python.analysis.importFormat": "relative", + "python.analysis.typeCheckingMode": "basic" +} diff --git a/Brewfile b/Brewfile new file mode 100644 index 00000000..492ca37b --- /dev/null +++ b/Brewfile @@ -0,0 +1,2 @@ +brew "rye" + diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..e015abb4 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,57 @@ +# Changelog + +## 5.0.0 (2025-12-13) + +Full Changelog: [v0.0.1...v5.0.0](https://github.com/imagekit-developer/imagekit-python/compare/v0.0.1...v5.0.0) + +### Features + +* add bulk delete options ([c1c4d32](https://github.com/imagekit-developer/imagekit-python/commit/c1c4d3206b06594ba77a8a1c4dab7d0c5b74de9a)) +* add file related functionalities ([681677b](https://github.com/imagekit-developer/imagekit-python/commit/681677bc60a207f433b4bc242c41e37f2d4c05a1)) +* add sdk version to url ([9c3e67d](https://github.com/imagekit-developer/imagekit-python/commit/9c3e67d20f78b799e974889420ead23f457b5cfa)) +* add url class for url genration ([5e615ed](https://github.com/imagekit-developer/imagekit-python/commit/5e615ed34386e3231c5c7963ff37ceb28ab7d2f1)) +* **api:** python publish true ([8072dfd](https://github.com/imagekit-developer/imagekit-python/commit/8072dfd2eee562f98ac79fb5b11afe700e0dd6a3)) +* implement client with all func. 
([67dd4b2](https://github.com/imagekit-developer/imagekit-python/commit/67dd4b28822086009278e4ab3f85d52690e6e9b7)) +* implement get_remote_url_metadata ([1272740](https://github.com/imagekit-developer/imagekit-python/commit/12727400dc5bc6678f6769c5143c11962f58eea4)) +* **webhooks:** allow key parameter to accept bytes in unwrap method ([09ae375](https://github.com/imagekit-developer/imagekit-python/commit/09ae37575b6b1eba57f67c6b1dea3d59e10d270d)) + + +### Bug Fixes + +* binary file upload ([23c9c46](https://github.com/imagekit-developer/imagekit-python/commit/23c9c46f37a5b32144f86700227254e6f05bf491)) +* change ubuntu latest to ubuntu-20.04 in test.yml ([1e4b551](https://github.com/imagekit-developer/imagekit-python/commit/1e4b55192d08ebf1aa436fa56832322477605942)) +* Changes for CI/CD ([0bd2ac3](https://github.com/imagekit-developer/imagekit-python/commit/0bd2ac3e9b11e8269a2eacb2424d49ef58e37c5f)) +* fix issue [#35](https://github.com/imagekit-developer/imagekit-python/issues/35),[#37](https://github.com/imagekit-developer/imagekit-python/issues/37),[#41](https://github.com/imagekit-developer/imagekit-python/issues/41),[#44](https://github.com/imagekit-developer/imagekit-python/issues/44) ([1f913c8](https://github.com/imagekit-developer/imagekit-python/commit/1f913c8e34a06afbffa93adbbc79e8a174a02dac)) +* fix query params implementation ([2b7e6d4](https://github.com/imagekit-developer/imagekit-python/commit/2b7e6d4a148b6d94b52532846bd950d4eeeefac4)) +* make ik-attachment option handle True boolean value ([6eb9cd0](https://github.com/imagekit-developer/imagekit-python/commit/6eb9cd099021a1fd9bcc9dfeb080ec610d4bcfbd)) +* move the workflow to correct folder ([d9f933a](https://github.com/imagekit-developer/imagekit-python/commit/d9f933a8e78c61b8a61df1d74a28859f9e889378)) +* request toolbelt to 0.10.1 in requirements/test/txt ([c22ed89](https://github.com/imagekit-developer/imagekit-python/commit/c22ed89208f69f7d8fb21cc777049d72dad40093)) +* **serialization:** adjust custom_metadata type check for serialization ([6e3f209](https://github.com/imagekit-developer/imagekit-python/commit/6e3f2092cad4b2c3ed7d1f3086c7bfb2a9a51b08)) + + +### Chores + +* add func alias ([d7ce593](https://github.com/imagekit-developer/imagekit-python/commit/d7ce593318b24f33ba828b65042e16e892690b80)) +* add init file ([0cbbd27](https://github.com/imagekit-developer/imagekit-python/commit/0cbbd27f00ac3fe36d3fbc0bf6fa2b015308576c)) +* add publish github workflow script ([a275172](https://github.com/imagekit-developer/imagekit-python/commit/a275172c3e7096b7390665102bae4d95c718db9d)) +* add required constants ([48de1c0](https://github.com/imagekit-developer/imagekit-python/commit/48de1c02295fb42d522f8ee930c16ee763d7b93d)) +* add requirements files ([e8d3d9d](https://github.com/imagekit-developer/imagekit-python/commit/e8d3d9d60e946b036b3f8e37a9dbf1e68be5482d)) +* add sample file for devs ([65d1a3f](https://github.com/imagekit-developer/imagekit-python/commit/65d1a3f77eaa5a5c9dba5202a75dee3c70aa64a0)) +* add sample of get file metadata ([6d11584](https://github.com/imagekit-developer/imagekit-python/commit/6d115841c341df0f7a9d4d9bd0c33c1cf386d9c7)) +* change pacakge name & fix import ([2c1734a](https://github.com/imagekit-developer/imagekit-python/commit/2c1734a6e12c935bc80f72ec6b8cdd5a971e5a47)) +* fix package name ([c0c939d](https://github.com/imagekit-developer/imagekit-python/commit/c0c939d86fa5738855a0d6b606e33249ecd5a47a)) +* fix package name 
([4bc8041](https://github.com/imagekit-developer/imagekit-python/commit/4bc8041e22c6333710645ddc95446c9c348eea5b)) +* fix sample ([2188038](https://github.com/imagekit-developer/imagekit-python/commit/2188038436aabfce68a3c1d7bb198ffda203dc72)) +* init ([febccef](https://github.com/imagekit-developer/imagekit-python/commit/febccef19d6ca6ae2b6c4272d44ae1625c9f3391)) +* remove unecessary workflow file ([97f19eb](https://github.com/imagekit-developer/imagekit-python/commit/97f19eb8284c5edfe164f98ad296ea1e69b21bf8)) +* remove unused dummy methods from API documentation ([4727908](https://github.com/imagekit-developer/imagekit-python/commit/472790845ef7009aa3695fc084ef8c5d1d63f2ab)) +* sync repo ([c6afd44](https://github.com/imagekit-developer/imagekit-python/commit/c6afd449e74ebb20ebc8d3390355219fccaf2178)) +* unused import removed ([22774ff](https://github.com/imagekit-developer/imagekit-python/commit/22774fff1ac08c0573efc06ab10f3fe31e6d3f69)) +* update SDK settings ([81f0de9](https://github.com/imagekit-developer/imagekit-python/commit/81f0de954a0d531c6b98354386462f4186a58aba)) + + +### Build System + +* add url and requirements ([211228e](https://github.com/imagekit-developer/imagekit-python/commit/211228ef91fe29b83507c89f3bf22cfb6b1c8184)) +* add url and requirements ([683ad01](https://github.com/imagekit-developer/imagekit-python/commit/683ad016099d4e4614b6f369bff69d9a7422029e)) +* add url and requirements ([#2](https://github.com/imagekit-developer/imagekit-python/issues/2)) ([211228e](https://github.com/imagekit-developer/imagekit-python/commit/211228ef91fe29b83507c89f3bf22cfb6b1c8184)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..a6c5c7c1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,128 @@ +## Setting up the environment + +### With Rye + +We use [Rye](https://rye.astral.sh/) to manage dependencies because it will automatically provision a Python environment with the expected Python version. To set it up, run: + +```sh +$ ./scripts/bootstrap +``` + +Or [install Rye manually](https://rye.astral.sh/guide/installation/) and run: + +```sh +$ rye sync --all-features +``` + +You can then run scripts using `rye run python script.py` or by activating the virtual environment: + +```sh +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work +$ source .venv/bin/activate + +# now you can omit the `rye run` prefix +$ python script.py +``` + +### Without Rye + +Alternatively if you don't want to install `Rye`, you can stick with the standard `pip` setup by ensuring you have the Python version specified in `.python-version`, create a virtual environment however you desire and then install dependencies using this command: + +```sh +$ pip install -r requirements-dev.lock +``` + +## Modifying/Adding code + +Most of the SDK is generated code. Modifications to code will be persisted between generations, but may +result in merge conflicts between manual patches and changes from the generator. The generator will never +modify the contents of the `src/imagekitio/lib/` and `examples/` directories. + +## Adding and running examples + +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. 
+ +```py +# add an example to examples/.py + +#!/usr/bin/env -S rye run python +… +``` + +```sh +$ chmod +x examples/.py +# run the example against your api +$ ./examples/.py +``` + +## Using the repository from source + +If you’d like to use the repository from source, you can either install from git or link to a cloned repository: + +To install via git: + +```sh +$ pip install git+ssh://git@github.com/imagekit-developer/imagekit-python#master.git +``` + +Alternatively, you can build from source and install the wheel file: + +Building this package will create two files in the `dist/` directory, a `.tar.gz` containing the source files and a `.whl` that can be used to install the package efficiently. + +To create a distributable version of the library, all you have to do is run this command: + +```sh +$ rye build +# or +$ python -m build +``` + +Then to install: + +```sh +$ pip install ./path-to-wheel-file.whl +``` + +## Running tests + +Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. + +```sh +# you will need npm installed +$ npx prism mock path/to/your/openapi.yml +``` + +```sh +$ ./scripts/test +``` + +## Linting and formatting + +This repository uses [ruff](https://github.com/astral-sh/ruff) and +[black](https://github.com/psf/black) to format the code in the repository. + +To lint: + +```sh +$ ./scripts/lint +``` + +To format and fix all ruff issues automatically: + +```sh +$ ./scripts/format +``` + +## Publishing and releases + +Changes made to this repository via the automated release PR pipeline should publish to PyPI automatically. If +the changes aren't made through the automated pipeline, you may want to make releases manually. + +### Publish with a GitHub workflow + +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/imagekit-developer/imagekit-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. + +### Publish manually + +If you need to manually release a package, you can run the `bin/publish-pypi` script with a `PYPI_TOKEN` set on +the environment. diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md deleted file mode 100644 index 2f1ff7b2..00000000 --- a/DEVELOPMENT.md +++ /dev/null @@ -1,22 +0,0 @@ -# Development Guide - -**1. Setup dependencies** - -```shell -pip install -r requirements/requirements.txt -``` - -**2. Run test cases** - -```shell -pip install -r requirements/test.txt -tox -e py -``` - -**3. Running the sample app** - -```shell -pip install -r sample/requirements.txt -cd sample -python sample.py -``` diff --git a/LICENSE b/LICENSE index ec2f41ca..e7a4d160 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,201 @@ -MIT License - -Copyright (c) 2019 Imagekit - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Image Kit + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index 717d8080..6ea03a48 100644 --- a/README.md +++ b/README.md @@ -1,428 +1,857 @@ -[ImageKit.io](https://imagekit.io) # ImageKit.io Python SDK -[![Python CI]()](https://github.com/imagekit-developer/imagekit-python/) -[![imagekitio]()](https://pypi.org/project/imagekitio) -[![codecov](https://codecov.io/gh/imagekit-developer/imagekit-python/branch/master/graph/badge.svg?token=CwKWqBIlCu)](https://codecov.io/gh/imagekit-developer/imagekit-python) -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -[![Twitter Follow](https://img.shields.io/twitter/follow/imagekitio?label=Follow&style=social)](https://twitter.com/ImagekitIo) + +[![PyPI version](https://img.shields.io/pypi/v/imagekitio.svg?label=pypi%20(stable))](https://pypi.org/project/imagekitio/) + +The ImageKit Python SDK provides convenient access to the ImageKit REST API from any Python 3.9+ application. It offers powerful tools for URL generation and transformation, signed URLs for secure content delivery, webhook verification, file uploads, and more. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). + +The REST API documentation can be found on [imagekit.io](https://imagekit.io/docs/api-reference). The full API of this library can be found in [api.md](api.md). 
+ +## Table of Contents + +- [Installation](#installation) +- [Requirements](#requirements) +- [Usage](#usage) + - [Using types](#using-types) + - [Nested params](#nested-params) + - [Async usage](#async-usage) +- [URL generation](#url-generation) + - [Basic URL generation](#basic-url-generation) + - [URL generation with transformations](#url-generation-with-transformations) + - [URL generation with image overlay](#url-generation-with-image-overlay) + - [URL generation with text overlay](#url-generation-with-text-overlay) + - [URL generation with multiple overlays](#url-generation-with-multiple-overlays) + - [Signed URLs for secure delivery](#signed-urls-for-secure-delivery) + - [Using Raw transformations for undocumented features](#using-raw-transformations-for-undocumented-features) +- [Authentication parameters for client-side uploads](#authentication-parameters-for-client-side-uploads) +- [Webhook verification](#webhook-verification) +- [Advanced Usage](#advanced-usage) + - [File uploads](#file-uploads) + - [Handling errors](#handling-errors) + - [Retries](#retries) + - [Timeouts](#timeouts) + - [Logging](#logging) + - [Accessing raw response data](#accessing-raw-response-data-eg-headers) + - [Making custom/undocumented requests](#making-customundocumented-requests) + - [Configuring the HTTP client](#configuring-the-http-client) + - [Managing HTTP resources](#managing-http-resources) +- [Versioning](#versioning) +- [Contributing](#contributing) + +## Installation + +```sh +# install from PyPI +pip install imagekitio +``` -ImageKit Python SDK allows you to use [image resizing](https://docs.imagekit.io/features/image-transformations), [optimization](https://docs.imagekit.io/features/image-optimization), [file uploading](https://docs.imagekit.io/api-reference/upload-file-api) and other [ImageKit APIs](https://docs.imagekit.io/api-reference/api-introduction) from applications written in the Python language. +## Usage -Supported Python Versions: >=3.6 +The full API of this library can be found in [api.md](api.md). -Table of contents - - * [Installation](#Installation) - * [Initialization](#Initialization) - * [URL Generation](#URL-generation) - * [File Upload](#File-Upload) - * [File Management](#File-Management) - * [Utility Functions](#Utility-functions) - * [Support](#Support) - * [Links](#Links) +```python +import os +from imagekitio import ImageKit +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), # This is the default and can be omitted +) - ## Installation - Go to your terminal and type the following command -```bash -pip install imagekitio -``` +# Upload a file +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() -## Initialization -```python -from imagekitio import ImageKit -imagekit = ImageKit( - private_key='your private_key', - public_key='your public_key', - url_endpoint = 'your url_endpoint' +response = client.files.upload( + file=file_data, + file_name="uploaded-image.jpg", ) +print(response.file_id) +print(response.url) ``` -## Usage +While you can provide a `private_key` keyword argument, +we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) +to add `IMAGEKIT_PRIVATE_KEY="My Private Key"` to your `.env` file +so that your Private Key is not stored in source control. -You can use this Python SDK for 3 different kinds of methods - URL generation, file upload, and file management. -The usage of the SDK has been explained below. -## URL generation +### Using types -**1. 
Using Image path and image hostname or endpoint** +Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: -This method allows you to create a URL using the path where the image exists and the URL -endpoint(url_endpoint) you want to use to access the image. You can refer to the documentation -[here](https://docs.imagekit.io/integration/url-endpoints) to read more about URL endpoints -in ImageKit and the section about [image origins](https://docs.imagekit.io/integration/configure-origin) to understand -about paths with different kinds of origins. +- Serializing back into JSON, `model.to_json()` +- Converting to a dictionary, `model.to_dict()` +Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`. + +### Nested params + +Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -imagekit_url = imagekit.url({ - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - } +from imagekitio import ImageKit + +client = ImageKit() + +# Read file into memory and upload +with open("/path/to/file.jpg", "rb") as f: + file_data = f.read() + +response = client.files.upload( + file=file_data, + file_name="fileName", + transformation={ + "post": [ + { + "type": "thumbnail", + "value": "w-150,h-150", + }, + { + "protocol": "dash", + "type": "abs", + "value": "sr-240_360_480_720_1080", + }, + ] + }, ) +print(response.file_id) ``` -The result in a URL like +### Async usage + +Simply import `AsyncImageKit` instead of `ImageKit` and use `await` with each API call: + +```python +import os +import asyncio +from imagekitio import AsyncImageKit + +client = AsyncImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), # This is the default and can be omitted +) + + +async def main() -> None: + # Read file into memory and upload + with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + + response = await client.files.upload( + file=file_data, + file_name="file-name.jpg", + ) + print(response.file_id) + print(response.url) + + +asyncio.run(main()) ``` -https://ik.imagekit.io/your_imagekit_id/endpoint/tr:h-300,w-400/default-image.jpg + +Functionality between the synchronous and asynchronous clients is otherwise identical. + +#### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. + +You can enable this by installing `aiohttp`: + +```sh +# install from PyPI +pip install imagekitio[aiohttp] ``` -**2.Using full image URL** -This method allows you to add transformation parameters to an absolute URL using `src` parameter. This method should be used if you have the complete image URL stored in your database. 
+Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python -image_url = imagekit.url({ - "src": "https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg", - "transformation" : [{ - "height": "300", - "width": "400" - }] -}) +import os +import asyncio +from imagekitio import DefaultAioHttpClient +from imagekitio import AsyncImageKit + + +async def main() -> None: + async with AsyncImageKit( + private_key=os.environ.get( + "IMAGEKIT_PRIVATE_KEY" + ), # This is the default and can be omitted + http_client=DefaultAioHttpClient(), + ) as client: + # Read file into memory and upload + with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + + response = await client.files.upload( + file=file_data, + file_name="file-name.jpg", + ) + print(response.file_id) + print(response.url) + + +asyncio.run(main()) ``` -The results in a URL like +## URL generation + +The ImageKit SDK provides a powerful `helper.build_url()` method for generating optimized image and video URLs with transformations. Here are examples ranging from simple URLs to complex transformations with overlays and signed URLs. +### Basic URL generation + +Generate a simple URL without any transformations: + +```python +import os +from imagekitio import ImageKit + +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) + +# Basic URL without transformations +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/image.jpg", +) +print(url) +# Result: https://ik.imagekit.io/your_imagekit_id/path/to/image.jpg ``` -https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg?tr=h-300%2Cw-400 -``` +### URL generation with transformations + +Apply common transformations like resizing, cropping, and format conversion: + +```python +import os +from imagekitio import ImageKit -The ```.url()``` method accepts the following parameters. +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) -| Option | Description | -| :---------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| url_endpoint | Optional. The base URL to be appended before the path of the image. If not specified, the URL Endpoint specified at the time of SDK initialization is used. For example, https://ik.imagekit.io/your_imagekit_id/endpoint/ | -| path | Conditional. This is the path at which the image exists. For example, `/path/to/image.jpg`. Either the `path` or `src` parameter needs to be specified for URL generation. | -| src | Conditional. This is the complete URL of an image already mapped to ImageKit. For example, `https://ik.imagekit.io/your_imagekit_id/endpoint/path/to/image.jpg`. Either the `path` or `src` parameter needs to be specified for URL generation. | -| transformation | Optional. An array of objects specifying the transformation to be applied in the URL. The transformation name and the value should be specified as a key-value pair in the object. 
Different steps of a [chained transformation](https://docs.imagekit.io/features/image-transformations/chained-transformations) can be specified as different objects of the array. The complete list of supported transformations in the SDK and some examples of using them are given later. If you use a transformation name that is not specified in the SDK, it gets applied as it is in the URL. | -| transformation_position | Optional. The default value is `path` that places the transformation string as a path parameter in the URL. It can also be specified as `query`, which adds the transformation string as the query parameter `tr` in the URL. If you use the `src` parameter to create the URL, then the transformation string is always added as a query parameter. | -| query_parameters | Optional. These are the other query parameters that you want to add to the final URL. These can be any query parameters and not necessarily related to ImageKit. Especially useful if you want to add some versioning parameter to your URLs. | -| signed | Optional. Boolean. Default is `false`. If set to `true`, the SDK generates a signed image URL adding the image signature to the image URL. This can only be used if you are creating the URL with the `url_endpoint` and `path` parameters and not with the `src` parameter. | -| expire_seconds | Optional. Integer. Meant to be used along with the `signed` parameter to specify the time in seconds from now when the URL should expire. If specified, the URL contains the expiry timestamp in the URL, and the image signature is modified accordingly. | +# URL with basic transformations +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/image.jpg", + transformation=[ + { + "width": 400, + "height": 300, + "crop": "maintain_ratio", + "quality": 80, + "format": "webp", + } + ], +) +print(url) +# Result: https://ik.imagekit.io/your_imagekit_id/path/to/image.jpg?tr=w-400,h-300,c-maintain_ratio,q-80,f-webp +``` +### URL generation with image overlay -## Examples of generating URLs -**1. Chained Transformations as a query parameter** +Add image overlays to your base image: ```python - image_url = imagekit.url({ - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{ - "height": "300", - "width": "400" +import os +from imagekitio import ImageKit + +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) + +# URL with image overlay +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/base-image.jpg", + transformation=[ + { + "width": 500, + "height": 400, + "overlay": { + "type": "image", + "input": "/path/to/overlay-logo.png", + "position": { + "x": 10, + "y": 10, }, - { - "rotation": 90 - }], - "transformation_position ": "query" - }) -``` -Sample Result URL - -``` -https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg?tr=h-300%2Cw-400%3Art-90 + "transformation": [ + { + "width": 100, + "height": 50, + } + ], + }, + } + ], +) +print(url) +# Result: URL with image overlay positioned at x:10, y:10 ``` +### URL generation with text overlay +Add customized text overlays: -**2. 
Sharpening and contrast transforms and a progressive JPG image** +```python +import os +from imagekitio import ImageKit -There are some transforms like [Sharpening](https://docs.imagekit.io/features/image-transformations/image-enhancement-and-color-manipulation) -that can be added to the URL with or without any other value. To use such transforms without specifying a value, specify -the value as "-" in the transformation object. Otherwise, specify the value that you want to be added to this transformation. +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) +# URL with text overlay +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/base-image.jpg", + transformation=[ + { + "width": 600, + "height": 400, + "overlay": { + "type": "text", + "text": "Sample Text Overlay", + "position": { + "x": 50, + "y": 50, + "focus": "center", + }, + "transformation": [ + { + "font_size": 40, + "font_family": "Arial", + "font_color": "FFFFFF", + "typography": "b", # bold + } + ], + }, + } + ], +) +print(url) +# Result: URL with bold white Arial text overlay at center position +``` + +### URL generation with multiple overlays + +Combine multiple overlays for complex compositions: ```python - image_url = imagekit.url({ - "src": "https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg", - "transformation": [{ - "format": "jpg", - "progressive": "true", - "effect_sharpen": "-", - "effect_contrast": "1" - }] - }) -``` +import os +from imagekitio import ImageKit +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) + +# URL with multiple overlays (text + image) +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/base-image.jpg", + transformation=[ + { + "width": 800, + "height": 600, + "overlay": { + "type": "text", + "text": "Header Text", + "position": { + "x": 20, + "y": 20, + }, + "transformation": [ + { + "font_size": 30, + "font_color": "000000", + } + ], + }, + }, + { + "overlay": { + "type": "image", + "input": "/watermark.png", + "position": { + "focus": "bottom_right", + }, + "transformation": [ + { + "width": 100, + "opacity": 70, + } + ], + }, + }, + ], +) +print(url) +# Result: URL with text overlay at top-left and semi-transparent watermark at bottom-right ``` -//Note that because `src` parameter was used, the transformation string gets added as a query parameter `tr` -https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg?tr=f-jpg%2Cpr-true%2Ce-sharpen%2Ce-contrast-1 + +### Signed URLs for secure delivery + +Generate signed URLs that expire after a specified time for secure content delivery: + +```python +import os +from imagekitio import ImageKit + +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) + +# Generate a signed URL that expires in 1 hour (3600 seconds) +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/private/secure-image.jpg", + transformation=[ + { + "width": 400, + "height": 300, + "quality": 90, + } + ], + signed=True, + expires_in=3600, # URL expires in 1 hour +) +print(url) +# Result: URL with signature parameters (?ik-t=timestamp&ik-s=signature) + +# Generate a signed URL that doesn't expire +permanent_signed_url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/private/secure-image.jpg", + signed=True, + # No expires_in means the URL won't expire +) +print(permanent_signed_url) +# Result: 
URL with signature parameter (?ik-s=signature) ``` -**3. Signed URL that expires in 300 seconds with the default URL endpoint and other query parameters** +### Using Raw transformations for undocumented features + +ImageKit frequently adds new transformation parameters that might not yet be documented in the SDK. You can use the `raw` parameter to access these features or create custom transformation strings: ```python - image_url = imagekit.url({ - "path": "/default-image", - "query_parameters": { - "p1": "123", - "p2": "345" +import os +from imagekitio import ImageKit + +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), +) + +# Using Raw transformation for undocumented or new parameters +url = client.helper.build_url( + url_endpoint="https://ik.imagekit.io/your_imagekit_id", + src="/path/to/image.jpg", + transformation=[ + { + # Combine documented transformations with raw parameters + "width": 400, + "height": 300, }, - "transformation": [{ - "height": "300", - "width": "400" - }], - "signed": True, - "expire_seconds": 300 - }) -``` -**Sample Result URL** -``` -https://ik.imagekit.io/your_imagekit_id/tr:h-300,w-400/default-image.jpg?v=123&ik-t=1567358667&ik-s=f2c7cdacbe7707b71a83d49cf1c6110e3d701054 + { + # Use raw for undocumented transformations or complex parameters + "raw": "something-new", + }, + ], +) +print(url) +# Result: https://ik.imagekit.io/your_imagekit_id/path/to/image.jpg?tr=w-400,h-300:something-new ``` -**List of transformations** - -The complete list of transformations supported and their usage in ImageKit can be found [here](https://docs.imagekit.io/features/image-transformations/resize-crop-and-other-transformations). -The SDK gives a name to each transformation parameter, making the code simpler, making the code simpler, and readable. -If a transformation is supported in ImageKit, but a name for it cannot be found in the table below, then use the -transformation code from ImageKit docs as the name when using the `url` function. 
- -| Supported Transformation Name | Translates to parameter | -| ----------------------------- | ----------------------- | -| height | h| -| width | w| -| aspect_ratio | ar| -| quality | q| -| crop | c| -| crop_mode | cm| -| x | x| -| y | y| -| focus | fo| -| format | f| -| radius | r| -| background | bg| -| border | b| -| rotation | rt| -| blur | bl| -| named | n| -| overlay_image | oi| -| overlay_image_aspect_ratio | oiar| -| overlay_image_background | oibg| -| overlay_image_border | oib| -| overlay_image_dpr | oidpr| -| overlay_image_quality | oiq| -| overlay_image_cropping | oic| -| overlay_image_trim | oit| -| overlay_x | ox| -| overlay_y | oy| -| overlay_focus | ofo| -| overlay_height | oh| -| overlay_width | ow| -| overlay_text | ot| -| overlay_text_font_size | ots| -| overlay_text_font_family | otf| -| overlay_text_color | otc| -| overlay_text_transparency | oa| -| overlay_alpha | oa| -| overlay_text_typography | ott| -| overlay_background | obg| -| overlay_image_trim | oit| -| overlay_text_encoded | ote| -| overlay_text_width | otw| -| overlay_text_background | otbg| -| overlay_text_padding | otp| -| overlay_text_inner_alignment | otia| -| overlay_radius | or| -| progressive | pr| -| lossless | lo| -| trim | t| -| metadata | md| -| color_profile | cp| -| default_image | di| -| dpr | dpr| -| effect_sharpen | e-sharpen| -| effect_usm | e-usm| -| effect_contrast | e-contrast| -| effect_gray | e-grayscale| -| original | orig| - -## File Upload - -The SDK provides a simple interface using the `.upload_file()` method to upload files to the ImageKit Media library. It -accepts all the parameters supported by the [ImageKit Upload API](https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload). - -The `upload_file()` method requires at least the `file` and the `file_name` parameter to upload a file and returns a Dict with error or success data. Use `options` parameter to pass other parameters supported by the [ImageKit Upload API](https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload). Use the same parameter name as specified in the upload API documentation. - -Simple usage +## Authentication parameters for client-side uploads + +Generate authentication parameters for secure client-side file uploads: ```python -imagekit.upload_file( - file= "", # required - file_name= "my_file_name.jpg", # required - options= { - "folder" : "/example-folder/", - "tags": ["sample-tag"], - "is_private_file": False, - "use_unique_file_name": True, - "response_fields": ["is_private_file", "tags"], - } +import os +from imagekitio import ImageKit + +client = ImageKit( + private_key=os.environ.get("IMAGEKIT_PRIVATE_KEY"), ) +# Generate authentication parameters for client-side uploads +auth_params = client.helper.get_authentication_parameters() +print(auth_params) +# Result: {'expire': , 'signature': '', 'token': ''} + +# Generate with custom token and expiry +custom_auth_params = client.helper.get_authentication_parameters( + token="my-custom-token", + expire=1800 +) +print(custom_auth_params) +# Result: {'expire': 1800, 'signature': '', 'token': 'my-custom-token'} ``` -If the upload succeeds, `error` will be `null,` and the `result` will be the same as what is received from ImageKit's servers. -If the upload fails, `error` will be the same as what is received from ImageKit's servers, and the `result` will be null. Learn more from the sample app in this repository. 
+These authentication parameters can be used in client-side upload forms to securely upload files without exposing your private API key. + +## Webhook verification + +The ImageKit SDK provides utilities to verify webhook signatures for secure event handling. This ensures that webhook requests are actually coming from ImageKit and haven't been tampered with. + +For detailed information about webhook setup, signature verification, and handling different webhook events, refer to the [ImageKit webhook documentation](https://imagekit.io/docs/webhooks#verify-webhook-signature). -## File Management +## Advanced Usage -The SDK provides a simple interface for all the [media APIs mentioned here](https://docs.imagekit.io/api-reference/media-api) -to manage your files. This also returns `error` and `result`. The error will be `None` if API succeeds. +### File uploads -**1. List & Search Files** +Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, an `IO[bytes]` file object, or a tuple of `(filename, contents, media type)`. -Accepts an object specifying the parameters to be used to list and search files. All parameters specified -in the [documentation here](https://docs.imagekit.io/api-reference/media-api/list-and-search-files#list-and-search-file-api) can be passed as it is with the correct values to get the results. +Here are common file upload patterns: ```python -imagekit.list_files({ - "skip": 10, - "limit": 10, -}) +from pathlib import Path +from imagekitio import ImageKit +import io + +client = ImageKit() + +# Method 1: Upload from bytes +# Read file into memory first, then upload +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + +response = client.files.upload( + file=file_data, + file_name="uploaded-image.jpg", +) + +# Method 2: Upload from file stream (for large files) +# Pass file object directly - SDK reads it +with open("/path/to/your/image.jpg", "rb") as file_stream: + response = client.files.upload( + file=file_stream, + file_name="uploaded-image.jpg", + ) + +# Method 3: Upload using Path object (SDK reads automatically) +response = client.files.upload( + file=Path("/path/to/file.jpg"), + file_name="fileName.jpg", +) + +# Method 4: Upload from BytesIO (for programmatically generated content) +content = b"your binary data" +bytes_io = io.BytesIO(content) +response = client.files.upload( + file=bytes_io, + file_name="binary-upload.jpg", +) + +# Method 5: Upload with custom content type using tuple format +image_data = b"your binary data" +response = client.files.upload( + file=("custom.jpg", image_data, "image/jpeg"), + file_name="custom-upload.jpg", +) ``` -**2. Get File Details** -Accepts the file ID and fetches the details as per the [API documentation here](https://docs.imagekit.io/api-reference/media-api/get-file-details) + +The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically. + +**Note:** URL strings (e.g., `"https://example.com/image.jpg"`) are not supported by the Python SDK. 
To upload from a URL, download the content first: ```python -imagekit.get_file_details(file_id) +import urllib.request + +# Download from URL and upload to ImageKit +url = "https://example.com/image.jpg" +with urllib.request.urlopen(url) as response: + url_content = response.read() + +# Upload the downloaded content +upload_response = client.files.upload( + file=url_content, + file_name="downloaded-image.jpg", +) ``` -**3. Get File Metadata** -Accepts the file ID and fetches the metadata as per the [API documentation here](https://docs.imagekit.io/api-reference/metadata-api/get-image-metadata-for-uploaded-media-files) +### Handling errors + +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `imagekitio.APIConnectionError` is raised. + +When the API returns a non-success status code (that is, 4xx or 5xx +response), a subclass of `imagekitio.APIStatusError` is raised, containing `status_code` and `response` properties. + +All errors inherit from `imagekitio.APIError`. + ```python -imagekit.get_file_metadata(file_id) +import imagekitio +from imagekitio import ImageKit + +client = ImageKit() + +try: + # Read file into memory and upload + with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + + response = client.files.upload( + file=file_data, + file_name="file-name.jpg", + ) +except imagekitio.APIConnectionError as e: + print("The server could not be reached") + print(e.__cause__) # an underlying Exception, likely raised within httpx. +except imagekitio.RateLimitError as e: + print("A 429 status code was received; we should back off a bit.") +except imagekitio.APIStatusError as e: + print("Another non-200-range status code was received") + print(e.status_code) + print(e.response) ``` +Error codes are as follows: + +| Status Code | Error Type | +| ----------- | -------------------------- | +| 400 | `BadRequestError` | +| 401 | `AuthenticationError` | +| 403 | `PermissionDeniedError` | +| 404 | `NotFoundError` | +| 422 | `UnprocessableEntityError` | +| 429 | `RateLimitError` | +| >=500 | `InternalServerError` | +| N/A | `APIConnectionError` | -**3. Get File Metadata from remote url** -Accepts the remote file url and fetches the metadata as per the [API documentation here](https://docs.imagekit.io/api-reference/metadata-api/get-image-metadata-from-remote-url) +### Retries + +Certain errors are automatically retried 2 times by default, with a short exponential backoff. +Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict, +429 Rate Limit, and >=500 Internal errors are all retried by default. + +You can use the `max_retries` option to configure or disable retry settings: ```python -imagekit.get_remote_file_url_metadata(remote_file_url) +from imagekitio import ImageKit + +# Configure the default for all requests: +client = ImageKit( + # default is 2 + max_retries=0, +) + +# Or, configure per-request: +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + +client.with_options(max_retries=5).files.upload( + file=file_data, + file_name="file-name.jpg", +) ``` -**4. Update File Details** -Update parameters associated with the file as per the [API documentation here](https://docs.imagekit.io/api-reference/media-api/update-file-details). -The first argument to the `update_field_details` method is the file ID, and a second argument is an object with the -parameters to be updated. 
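+Retries and the error types above compose: an exception such as `RateLimitError` is only raised once the automatic retries have been exhausted. A small sketch combining the two (the file path is a placeholder):
+
+```python
+import imagekitio
+from imagekitio import ImageKit
+
+client = ImageKit()
+
+with open("/path/to/your/image.jpg", "rb") as f:
+    file_data = f.read()
+
+try:
+    # Allow a couple of extra retries for this request only;
+    # the client-wide default (2) is left untouched.
+    response = client.with_options(max_retries=4).files.upload(
+        file=file_data,
+        file_name="file-name.jpg",
+    )
+except imagekitio.RateLimitError:
+    # Raised only after the automatic retries (with exponential backoff) are used up.
+    print("Still rate limited after retrying; try again later.")
+```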
+### Timeouts + +By default requests time out after 1 minute. You can configure this with a `timeout` option, +which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python -imagekit.update_file_details("file_id", { - "tags": ["image_tag"], - "custom_coordinates": "10,10,100, 100" -}) +from imagekitio import ImageKit + +# Configure the default for all requests: +client = ImageKit( + # 20 seconds (default is 1 minute) + timeout=20.0, +) + +# More granular control: +client = ImageKit( + timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0), +) + +# Override per-request: +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + +client.with_options(timeout=5.0).files.upload( + file=file_data, + file_name="file-name.jpg", +) ``` -**6. Delete File** -Delete a file as per the [API documentation here](https://docs.imagekit.io/api-reference/media-api/delete-file). The method accepts the file ID of the file that has to be -deleted. +On timeout, an `APITimeoutError` is thrown. -```python -imagekit.delete_file(file_id) +Note that requests that time out are [retried twice by default](#retries). + +### Logging + +We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module. + +You can enable logging by setting the environment variable `IMAGE_KIT_LOG` to `info`. + +```shell +$ export IMAGE_KIT_LOG=info ``` -**6. Bulk File Delete by IDs** -Delete a file as per the [API documentation here](https://docs.imagekit.io/api-reference/media-api/delete-files-bulk). The method accepts list of file IDs of files that has to be deleted. +Or to `debug` for more verbose logging. -```python -imagekit.bulk_file_delete(["file_id1", "file_id2"]) +### How to tell whether `None` means `null` or missing + +In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`: + +```py +if response.my_field is None: + if 'my_field' not in response.model_fields_set: + print('Got json like {}, without a "my_field" key present at all.') + else: + print('Got json like {"my_field": null}.') ``` -**6. Purge Cache** -Programmatically issue a cache clear request as per the [API documentation here](https://docs.imagekit.io/api-reference/media-api/purge-cache). -Accepts the full URL of the file for which the cache has to be cleared. -```python -imagekit.purge_file_cache(full_url) +### Accessing raw response data (e.g. headers) + +The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g., + +```py +from imagekitio import ImageKit + +client = ImageKit() + +# Read file into memory and upload +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + +response = client.files.with_raw_response.upload( + file=file_data, + file_name="file-name.jpg", +) +print(response.headers.get('X-My-Header')) + +file = response.parse() # get the object that `files.upload()` would have returned +print(file.file_id) ``` -**7. Purge Cache Status** -Get the purge cache request status using the request ID returned when a purge cache request gets submitted as pet the -[API documentation here](https://docs.imagekit.io/api-reference/media-api/purge-cache-status) +These methods return an [`APIResponse`](https://github.com/imagekit-developer/imagekit-python/tree/master/src/imagekitio/_response.py) object. 
+ +The async client returns an [`AsyncAPIResponse`](https://github.com/imagekit-developer/imagekit-python/tree/master/src/imagekitio/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. + +#### `.with_streaming_response` + +The above interface eagerly reads the full response body when you make the request, which may not always be what you want. + +To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. ```python -imagekit.get_purge_file_cache_status(cache_request_id) +# Read file into memory and upload +with open("/path/to/your/image.jpg", "rb") as f: + file_data = f.read() + +with client.files.with_streaming_response.upload( + file=file_data, + file_name="file-name.jpg", +) as response: + print(response.headers.get("X-My-Header")) + + for line in response.iter_lines(): + print(line) ``` +The context manager is required so that the response will reliably be closed. -## Utility functions +### Making custom/undocumented requests -We have included the following commonly used utility functions in this package. +This library is typed for convenient access to the documented API. -**Authentication parameter generation** +If you need to access undocumented endpoints, params, or response properties, the library can still be used. -In case you are looking to implement client-side file upload, you are going to need a token, expiry timestamp -, and a valid signature for that upload. The SDK provides a simple method that you can use in your code to generate these -authentication parameters for you. +#### Undocumented endpoints -Note: The Private API Key should never be exposed in any client-side code. You must always generate these authentications parameters on the server-side +To make requests to undocumented endpoints, you can make requests using `client.get`, `client.post`, and other +http verbs. Options on the client will be respected (such as retries) when making this request. -authentication +```py +import httpx -`authentication_parameters = imagekit.get_authentication_parameters(token, expire)` +response = client.post( + "/foo", + cast_to=httpx.Response, + body={"my_param": True}, +) -Returns -```python -{ - "token": "unique_token", - "expire": "valid_expiry_timestamp", - "signature": "generated_signature" -} +print(response.headers.get("x-foo")) ``` -Both the `token` and `expire` parameters are optional. If not specified, the SDK uses the UUID to generate a random token and also generates a valid expiry timestamp internally. The value of the token and expire used to generate the signature are always returned in the response, no matter if they are provided as an input to this method or not. +#### Undocumented request params -**Distance calculation between two pHash values** +If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` request +options. -Perceptual hashing allows you to construct a has value that uniquely identifies an input image based on the contents -of an image. [imagekit.io metadata API](https://docs.imagekit.io/api-reference/metadata-api) returns the pHash -value of an image in the response. 
You can use this value to [find a duplicate or similar image](https://docs.imagekit.io/api-reference/metadata-api#using-phash-to-find-similar-or-duplicate-images) by calculating the distance between the two images. +#### Undocumented response properties +To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You +can also get all the extra fields on the Pydantic model as a dict with +[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra). -This SDK exposes phash_distance function to calculate the distance between two pHash value. It accepts two pHash hexadecimal -strings and returns a numeric value indicative of the level of difference between the two images. +### Configuring the HTTP client -```python -def calculate_distance(): - # fetch metadata of two uploaded image files - ... - # extract pHash strings from both: say 'first_hash' and 'second_hash' - ... - # calculate the distance between them: +You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including: - distance = imagekit.phash_distance(first_hash, second_hash) - return distance +- Support for [proxies](https://www.python-httpx.org/advanced/proxies/) +- Custom [transports](https://www.python-httpx.org/advanced/transports/) +- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality +```python +import httpx +from imagekitio import ImageKit, DefaultHttpxClient + +client = ImageKit( + # Or use the `IMAGE_KIT_BASE_URL` env var + base_url="http://my.test.server.example.com:8083", + http_client=DefaultHttpxClient( + proxy="http://my.test.proxy.example.com", + transport=httpx.HTTPTransport(local_address="0.0.0.0"), + ), +) ``` -**Distance calculation examples** +You can also customize the client on a per-request basis by using `with_options()`: + ```python -imagekit.phash_distance('f06830ca9f1e3e90', 'f06830ca9f1e3e90') -# output: 0 (ame image) +client.with_options(http_client=DefaultHttpxClient(...)) +``` -imagekit.phash_distance('2d5ad3936d2e015b', '2d6ed293db36a4fb') -# output: 17 (similar images) +### Managing HTTP resources -imagekit.phash_distance('a4a65595ac94518b', '7838873e791f8400') -# output: 37 (dissimilar images) -``` +By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. -### Sample Code Instruction -To run `sample` code go to the sample directory and run -```python -python sample.py +```py +from imagekitio import ImageKit + +with ImageKit() as client: + # make requests here + ... + +# HTTP client is now closed ``` -## Support -For any feedback or to report any issues or general implementation support, please reach out to [support@imagekit.io]() +## Versioning + +This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: + +1. Changes that only affect static types, without breaking runtime behavior. +2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_ +3. Changes that we do not expect to impact the vast majority of users in practice. 
-## Links +We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -* [Documentation](https://docs.imagekit.io/) +We are keen for your feedback; please open an [issue](https://www.github.com/imagekit-developer/imagekit-python/issues) with questions, bugs, or suggestions. + +### Determining the installed version + +If you've upgraded to the latest version but aren't seeing any new features you were expecting then your python environment is likely still using an older version. + +You can determine the version that is being used at runtime with: + +```py +import imagekitio +print(imagekitio.__version__) +``` -* [Main Website](https://imagekit.io/) +## Requirements +Python 3.9 or higher. -## License -Released under the MIT license. +## Contributing +See [the contributing documentation](./CONTRIBUTING.md). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..8e64327a --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,27 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainless.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by Image Kit, please follow the respective company's security reporting guidelines. + +### Image Kit Terms and Policies + +Please contact developer@imagekit.io for any questions or concerns regarding the security of our services. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. 
diff --git a/api.md b/api.md new file mode 100644 index 00000000..b617936d --- /dev/null +++ b/api.md @@ -0,0 +1,260 @@ +# Shared Types + +```python +from imagekitio.types import ( + BaseOverlay, + Extensions, + GetImageAttributesOptions, + ImageOverlay, + Overlay, + OverlayPosition, + OverlayTiming, + ResponsiveImageAttributes, + SolidColorOverlay, + SolidColorOverlayTransformation, + SrcOptions, + StreamingResolution, + SubtitleOverlay, + SubtitleOverlayTransformation, + TextOverlay, + TextOverlayTransformation, + Transformation, + TransformationPosition, + VideoOverlay, +) +``` + +# CustomMetadataFields + +Types: + +```python +from imagekitio.types import ( + CustomMetadataField, + CustomMetadataFieldListResponse, + CustomMetadataFieldDeleteResponse, +) +``` + +Methods: + +- client.custom_metadata_fields.create(\*\*params) -> CustomMetadataField +- client.custom_metadata_fields.update(id, \*\*params) -> CustomMetadataField +- client.custom_metadata_fields.list(\*\*params) -> CustomMetadataFieldListResponse +- client.custom_metadata_fields.delete(id) -> CustomMetadataFieldDeleteResponse + +# Files + +Types: + +```python +from imagekitio.types import ( + File, + Folder, + Metadata, + UpdateFileRequest, + FileUpdateResponse, + FileCopyResponse, + FileMoveResponse, + FileRenameResponse, + FileUploadResponse, +) +``` + +Methods: + +- client.files.update(file_id, \*\*params) -> FileUpdateResponse +- client.files.delete(file_id) -> None +- client.files.copy(\*\*params) -> FileCopyResponse +- client.files.get(file_id) -> File +- client.files.move(\*\*params) -> FileMoveResponse +- client.files.rename(\*\*params) -> FileRenameResponse +- client.files.upload(\*\*params) -> FileUploadResponse + +## Bulk + +Types: + +```python +from imagekitio.types.files import ( + BulkDeleteResponse, + BulkAddTagsResponse, + BulkRemoveAITagsResponse, + BulkRemoveTagsResponse, +) +``` + +Methods: + +- client.files.bulk.delete(\*\*params) -> BulkDeleteResponse +- client.files.bulk.add_tags(\*\*params) -> BulkAddTagsResponse +- client.files.bulk.remove_ai_tags(\*\*params) -> BulkRemoveAITagsResponse +- client.files.bulk.remove_tags(\*\*params) -> BulkRemoveTagsResponse + +## Versions + +Types: + +```python +from imagekitio.types.files import VersionListResponse, VersionDeleteResponse +``` + +Methods: + +- client.files.versions.list(file_id) -> VersionListResponse +- client.files.versions.delete(version_id, \*, file_id) -> VersionDeleteResponse +- client.files.versions.get(version_id, \*, file_id) -> File +- client.files.versions.restore(version_id, \*, file_id) -> File + +## Metadata + +Methods: + +- client.files.metadata.get(file_id) -> Metadata +- client.files.metadata.get_from_url(\*\*params) -> Metadata + +# Assets + +Types: + +```python +from imagekitio.types import AssetListResponse +``` + +Methods: + +- client.assets.list(\*\*params) -> AssetListResponse + +# Cache + +## Invalidation + +Types: + +```python +from imagekitio.types.cache import InvalidationCreateResponse, InvalidationGetResponse +``` + +Methods: + +- client.cache.invalidation.create(\*\*params) -> InvalidationCreateResponse +- client.cache.invalidation.get(request_id) -> InvalidationGetResponse + +# Folders + +Types: + +```python +from imagekitio.types import ( + FolderCreateResponse, + FolderDeleteResponse, + FolderCopyResponse, + FolderMoveResponse, + FolderRenameResponse, +) +``` + +Methods: + +- client.folders.create(\*\*params) -> FolderCreateResponse +- client.folders.delete(\*\*params) -> FolderDeleteResponse +- 
client.folders.copy(\*\*params) -> FolderCopyResponse +- client.folders.move(\*\*params) -> FolderMoveResponse +- client.folders.rename(\*\*params) -> FolderRenameResponse + +## Job + +Types: + +```python +from imagekitio.types.folders import JobGetResponse +``` + +Methods: + +- client.folders.job.get(job_id) -> JobGetResponse + +# Accounts + +## Usage + +Types: + +```python +from imagekitio.types.accounts import UsageGetResponse +``` + +Methods: + +- client.accounts.usage.get(\*\*params) -> UsageGetResponse + +## Origins + +Types: + +```python +from imagekitio.types.accounts import OriginRequest, OriginResponse, OriginListResponse +``` + +Methods: + +- client.accounts.origins.create(\*\*params) -> OriginResponse +- client.accounts.origins.update(id, \*\*params) -> OriginResponse +- client.accounts.origins.list() -> OriginListResponse +- client.accounts.origins.delete(id) -> None +- client.accounts.origins.get(id) -> OriginResponse + +## URLEndpoints + +Types: + +```python +from imagekitio.types.accounts import ( + URLEndpointRequest, + URLEndpointResponse, + URLEndpointListResponse, +) +``` + +Methods: + +- client.accounts.url_endpoints.create(\*\*params) -> URLEndpointResponse +- client.accounts.url_endpoints.update(id, \*\*params) -> URLEndpointResponse +- client.accounts.url_endpoints.list() -> URLEndpointListResponse +- client.accounts.url_endpoints.delete(id) -> None +- client.accounts.url_endpoints.get(id) -> URLEndpointResponse + +# Beta + +## V2 + +### Files + +Types: + +```python +from imagekitio.types.beta.v2 import FileUploadResponse +``` + +Methods: + +- client.beta.v2.files.upload(\*\*params) -> FileUploadResponse + +# Webhooks + +Types: + +```python +from imagekitio.types import ( + BaseWebhookEvent, + UploadPostTransformErrorEvent, + UploadPostTransformSuccessEvent, + UploadPreTransformErrorEvent, + UploadPreTransformSuccessEvent, + VideoTransformationAcceptedEvent, + VideoTransformationErrorEvent, + VideoTransformationReadyEvent, + UnsafeUnwrapWebhookEvent, + UnwrapWebhookEvent, +) +``` diff --git a/bin/check-release-environment b/bin/check-release-environment new file mode 100644 index 00000000..b845b0f4 --- /dev/null +++ b/bin/check-release-environment @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +errors=() + +if [ -z "${PYPI_TOKEN}" ]; then + errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") +fi + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" diff --git a/bin/publish-pypi b/bin/publish-pypi new file mode 100644 index 00000000..826054e9 --- /dev/null +++ b/bin/publish-pypi @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux +mkdir -p dist +rye build --clean +rye publish --yes --token=$PYPI_TOKEN diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 00000000..d8c73e93 --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/imagekitio/__init__.py b/imagekitio/__init__.py deleted file mode 100644 index ba7a5020..00000000 --- a/imagekitio/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .client import ImageKit diff --git a/imagekitio/client.py b/imagekitio/client.py deleted file mode 100644 index 8e984197..00000000 --- a/imagekitio/client.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import Any, Dict - -from .constants.errors import ERRORS -from .file import File -from .resource import ImageKitRequest -from .url import Url -from .utils.calculation import get_authenticated_params, hamming_distance - - -class ImageKit(object): - """ - Main Class What user will use by creating - instance - """ - - def __init__( - self, - public_key=None, - private_key=None, - url_endpoint=None, - transformation_position=None, - options=None, - ): - self.ik_request = ImageKitRequest( - private_key, public_key, url_endpoint, transformation_position, options - ) - self.file = File(self.ik_request) - self.url_obj = Url(self.ik_request) - - def upload(self, file=None, file_name=None, options=None) -> Dict[str, Any]: - """Provides upload functionality - """ - return self.file.upload(file, file_name, options) - - def upload_file(self, file=None, file_name=None, options=None) -> Dict[str, Any]: - """Provides upload functionality - """ - return self.file.upload(file, file_name, options) - - def list_files(self, options: Dict) -> Dict: - """Get list(filtered if given param) of images of client - """ - return self.file.list(options) - - def get_file_details(self, file_identifier: str = None) -> Dict: - """Get file_detail by file_id or file_url - """ - return self.file.details(file_identifier) - - def update_file_details(self, file_id: str, options: dict = None) -> Dict: - """Update file detail by file id and options - """ - return self.file.update_file_details(file_id, options) - - def delete_file(self, file_id: str = None) -> Dict[str, Any]: - """Delete file by file_id - """ - return self.file.delete(file_id) - - def bulk_delete(self, file_ids: list = None): - """Delete files in bulk by provided list of ids - """ - return self.file.batch_delete(file_ids) - - def bulk_file_delete(self, file_ids: list = None): - """Delete files in bulk by provided list of ids - """ - return self.file.batch_delete(file_ids) - - def purge_cache(self, file_url: str = None) -> Dict[str, Any]: - """Purge Cache from server by file url - """ - return self.file.purge_cache(file_url) - - def purge_file_cache(self, file_url: str = None) -> Dict[str, Any]: - """Purge Cache from server by file url - """ - return self.file.purge_cache(file_url) - - def get_purge_cache_status(self, purge_cache_id: str = "") -> Dict[str, Any]: - """Get Purge Cache status by purge cache request_id - """ - return self.file.get_purge_cache_status(str(purge_cache_id)) - - def get_purge_file_cache_status(self, purge_cache_id: str = "") -> Dict[str, Any]: - """Get Purge Cache status by purge cache request_id - """ - return self.file.get_purge_cache_status(str(purge_cache_id)) - - def get_metadata(self, file_id: str = None) -> Dict[str, Any]: - """Get Meta Data of a file by file id - """ - return self.file.get_metadata(str(file_id)) - - def get_file_metadata(self, file_id: str = None) -> Dict[str, Any]: - """Get Meta Data of a file by file id - """ - return self.file.get_metadata(str(file_id)) - - def get_remote_url_metadata(self, remote_file_url: str = ""): - return self.file.get_metadata_from_remote_url(remote_file_url) - - def 
get_remote_file_url_metadata(self, remote_file_url: str = ""): - return self.file.get_metadata_from_remote_url(remote_file_url) - - def url(self, options: Dict[str, Any]) -> str: - """Get generated Url from options parameter - """ - return self.url_obj.generate_url(options) - - @staticmethod - def phash_distance(first, second): - """Get hamming distance between two phash(to check similarity) - """ - if not (first and second): - raise TypeError(ERRORS.MISSING_PHASH_VALUE.value) - return hamming_distance(first, second) - - def get_authentication_parameters(self, token="", expire=0): - """Get Authentication parameters - """ - return get_authenticated_params(token, expire, self.ik_request.private_key) diff --git a/imagekitio/constants/__init__.py b/imagekitio/constants/__init__.py deleted file mode 100644 index e1209380..00000000 --- a/imagekitio/constants/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .errors import ERRORS diff --git a/imagekitio/constants/defaults.py b/imagekitio/constants/defaults.py deleted file mode 100644 index f0fed926..00000000 --- a/imagekitio/constants/defaults.py +++ /dev/null @@ -1,19 +0,0 @@ -import enum - - -class Default(enum.Enum): - DEFAULT_TRANSFORMATION_POSITION = "path" - QUERY_TRANSFORMATION_POSITION = "query" - VALID_TRANSFORMATION_POSITION = [ - DEFAULT_TRANSFORMATION_POSITION, - QUERY_TRANSFORMATION_POSITION, - ] - DEFAULT_TIMESTAMP = 9999999999 - SDK_VERSION_PARAMETER = "ik-sdk-version" - SDK_VERSION = "python-2.2.8" - TRANSFORMATION_PARAMETER = "tr" - CHAIN_TRANSFORM_DELIMITER = ":" - TRANSFORM_DELIMITER = "," - TRANSFORM_KEY_VALUE_DELIMITER = "-" - SIGNATURE_PARAMETER = "ik-s" - TIMESTAMP_PARAMETER = "ik-t" diff --git a/imagekitio/constants/errors.py b/imagekitio/constants/errors.py deleted file mode 100644 index 1524a092..00000000 --- a/imagekitio/constants/errors.py +++ /dev/null @@ -1,76 +0,0 @@ -import enum - - -class ERRORS(enum.Enum): - MANDATORY_INITIALIZATION_MISSING = { - "message": "Missing public_key or private_key or url_endpoint during ImageKit initialization", - help: "", - } - INVALID_TRANSFORMATION_POSITION = { - "message": "Invalid transformationPosition parameter", - help: "", - } - MANDATORY_SRC_OR_PATH = { - "message": "Pass one of the mandatory parameter path or src" - } - INVALID_URL_GENERATION_PARAMETER = {"message": "Invalid url parameter", help: ""} - INVALID_TRANSFORMATION_OPTIONS = { - "message": "Invalid transformation parameter options", - help: "", - } - CACHE_PURGE_URL_MISSING = { - "message": "Missing URL parameter for this request", - help: "", - } - CACHE_PURGE_STATUS_ID_MISSING = { - "message": "Missing Request ID parameter for this request", - help: "", - } - FILE_ID_MISSING = { - "message": "Missing File ID parameter for this request", - help: "", - } - UPDATE_DATA_MISSING = { - "message": "Missing file update data for this request", - help: "", - } - UPDATE_DATA_TAGS_INVALID = { - "message": "Invalid tags parameter for this request", - help: "tags should be passed as null or an array like ['tag1', 'tag2']", - } - UPDATE_DATA_COORDS_INVALID = ( - { - "message": "Invalid custom_coordinates parameter for this request", - help: "custom_coordinates should be passed as null or a string like 'x,y,width,height'", - }, - ) - - LIST_FILES_INPUT_MISSING = { - "message": "Missing options for list files", - help: "If you do not want to pass any parameter for listing, pass an empty object", - } - MISSING_FILE_URL = {"message": "Missing file_url for purge_cache", help: ""} - MISSING_UPLOAD_DATA = {"message": "Missing data for 
upload", help: ""} - MISSING_UPLOAD_FILE_PARAMETER = { - "message": "Missing file parameter for upload", - help: "", - } - MISSING_UPLOAD_FILENAME_PARAMETER = { - "message": "Missing fileName parameter for upload", - help: "", - } - - INVALID_PHASH_VALUE = ( - { - "message": "Invalid pHash value", - help: "Both pHash strings must be valid hexadecimal numbers", - }, - ) - MISSING_PHASH_VALUE = { - "message": "Missing pHash value", - help: "Please pass two pHash values", - } - UNEQUAL_STRING_LENGTH = { - "message": "Unequal pHash string length", - help: "For distance calculation, the two pHash strings must have equal length", - } diff --git a/imagekitio/constants/files.py b/imagekitio/constants/files.py deleted file mode 100644 index 94d115b0..00000000 --- a/imagekitio/constants/files.py +++ /dev/null @@ -1,23 +0,0 @@ -VALID_FILE_OPTIONS = [ - "path", - "fileType", - "tags", - "includeFolder", - "name", - "limit", - "skip", -] - -VALID_FILE_DETAIL_OPTIONS = ["fileID"] - -VALID_UPLOAD_OPTIONS = [ - "file", - "file_name", - "use_unique_file_name", - "tags", - "folder", - "is_private_file", - "custom_coordinates", - "response_fields", - "metadata", -] diff --git a/imagekitio/constants/supported_transform.py b/imagekitio/constants/supported_transform.py deleted file mode 100644 index 5f99402d..00000000 --- a/imagekitio/constants/supported_transform.py +++ /dev/null @@ -1,58 +0,0 @@ -SUPPORTED_TRANS = { - "height": "h", - "width": "w", - "aspect_ratio": "ar", - "quality": "q", - "crop": "c", - "crop_mode": "cm", - "x": "x", - "y": "y", - "focus": "fo", - "format": "f", - "radius": "r", - "background": "bg", - "border": "b", - "rotation": "rt", - "blur": "bl", - "named": "n", - "overlay_image": "oi", - "overlay_image_aspect_ratio": "oiar", - "overlay_image_background": "oibg", - "overlay_image_border": "oib", - "overlay_image_dpr": "oidpr", - "overlay_image_quality": "oiq", - "overlay_image_cropping": "oic", - "overlay_image_trim": "oit", - "overlay_x": "ox", - "overlay_y": "oy", - "overlay_focus": "ofo", - "overlay_height": "oh", - "overlay_width": "ow", - "overlay_text": "ot", - "overlay_text_font_size": "ots", - "overlay_text_font_family": "otf", - "overlay_text_color": "otc", - "overlay_text_transparency": "oa", - "overlay_alpha": "oa", - "overlay_text_typography": "ott", - "overlay_background": "obg", - "overlay_image_trim": "oit", - "overlay_text_encoded": "ote", - "overlay_text_width": "otw", - "overlay_text_background": "otbg", - "overlay_text_padding": "otp", - "overlay_text_inner_alignment": "otia", - "overlay_radius": "or", - "progressive": "pr", - "lossless": "lo", - "trim": "t", - "metadata": "md", - "color_profile": "cp", - "default_image": "di", - "dpr": "dpr", - "effect_sharpen": "e-sharpen", - "effect_usm": "e-usm", - "effect_contrast": "e-contrast", - "effect_gray": "e-grayscale", - "original": "orig", -} diff --git a/imagekitio/constants/url.py b/imagekitio/constants/url.py deleted file mode 100644 index 932df023..00000000 --- a/imagekitio/constants/url.py +++ /dev/null @@ -1,9 +0,0 @@ -from enum import Enum - - -class URL(Enum): - BASE_URL = "https://api.imagekit.io/v1/files" - PURGE_CACHE = "/purge" - UPLOAD_URL = "https://upload.imagekit.io/api/v1/files/upload" - BULK_FILE_DELETE = "/batch/deleteByFileIds" - REMOTE_METADATA_FULL_URL = "https://api.imagekit.io/v1/metadata" diff --git a/imagekitio/file.py b/imagekitio/file.py deleted file mode 100644 index a18f1bb7..00000000 --- a/imagekitio/file.py +++ /dev/null @@ -1,294 +0,0 @@ -from json import dumps -from typing import 
Any, Dict - -from .constants.errors import ERRORS -from .constants.files import VALID_FILE_OPTIONS, VALID_UPLOAD_OPTIONS -from .constants.url import URL -from .utils.formatter import ( - camel_dict_to_snake_dict, - request_formatter, - snake_to_lower_camel, -) - -try: - from simplejson.errors import JSONDecodeError -except ImportError: - from json import JSONDecodeError - -class File(object): - def __init__(self, request_obj): - self.request = request_obj - - def upload(self, file, file_name, options) -> Dict: - """Upload file to server using local image or url - :param file: either local file path or network file path - :param file_name: intended file name - :param options: intended options - :return: json response from server - """ - if not file: - raise TypeError(ERRORS.MISSING_UPLOAD_FILE_PARAMETER.value) - if not file_name: - raise TypeError(ERRORS.MISSING_UPLOAD_FILENAME_PARAMETER.value) - url = URL.UPLOAD_URL.value - headers = self.request.create_headers() - - files = { - "file": file, - "fileName": (None, file_name), - } - - if not options: - options = dict() - else: - options = self.validate_upload(options) - if options is False: - raise ValueError("Invalid upload options") - if isinstance(file, str) or isinstance(file, bytes): - files.update({"file": (None, file)}) - resp = self.request.request( - "Post", url=url, files=files, data=options, headers=headers - ) - - if resp.status_code > 200: - try: - error = resp.json() - except JSONDecodeError: - error = resp.text - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - return response - - def list(self, options: dict) -> Dict: - """Returns list files on ImageKit Server - :param: options dictionary of options - :return: list of the response - """ - - formatted_options = request_formatter(options) - if not self.is_valid_list_options(formatted_options): - raise ValueError("Invalid option for list_files") - url = URL.BASE_URL.value - headers = self.request.create_headers() - - resp = self.request.request( - method="GET", url=url, headers=headers, params=options - ) - if resp.status_code > 200: - error = resp.json() - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - return response - - def details(self, file_identifier: str = None) -> Dict: - """returns file detail - """ - if not file_identifier: - raise TypeError(ERRORS.FILE_ID_MISSING.value) - url = "{}/{}/details".format(URL.BASE_URL.value, file_identifier) - resp = self.request.request( - method="GET", url=url, headers=self.request.create_headers(), - ) - if resp.status_code > 200: - error = resp.json() - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - return response - - def update_file_details(self, file_id: str, options: dict): - """Update detail of a file(like tags, coordinates) - update details identified by file_id and options, - which is already uploaded - """ - if not file_id: - raise TypeError(ERRORS.FILE_ID_MISSING.value) - url = "{}/{}/details/".format(URL.BASE_URL.value, file_id) - headers = {"Content-Type": "application/json"} - headers.update(self.request.get_auth_headers()) - data = dumps(request_formatter(options)) - resp = self.request.request(method="Patch", url=url, headers=headers, data=data) - if resp.status_code > 200: - error = resp.json() - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - 
return response - - def delete(self, file_id: str = None) -> Dict: - """Delete file by file_id - deletes file from imagekit server - """ - if not file_id: - raise TypeError(ERRORS.FILE_ID_MISSING.value) - url = "{}/{}".format(URL.BASE_URL.value, file_id) - resp = self.request.request( - method="Delete", url=url, headers=self.request.create_headers() - ) - if resp.status_code > 204: - error = resp.text - response = None - else: - error = None - response = None - response = {"error": error, "response": response} - return response - - def batch_delete(self, file_ids: list = None): - """Delete bulk files - Delete files by batch ids - """ - if not file_ids: - raise ValueError("Need to pass ids in list") - url = URL.BASE_URL.value + URL.BULK_FILE_DELETE.value - resp = self.request.request( - method="POST", - url=url, - headers=self.request.create_headers(), - data={"fileIds": file_ids}, - ) - - if resp.status_code > 204: - error = resp.text - response = None - else: - error = None - response = resp.json() - - response = {"error": error, "response": response} - return response - - def purge_cache(self, file_url: str = None) -> Dict[str, Any]: - """Use from child class to purge cache - """ - if not file_url: - raise TypeError(ERRORS.MISSING_FILE_URL.value) - url = URL.BASE_URL.value + URL.PURGE_CACHE.value - headers = {"Content-Type": "application/json"} - headers.update(self.request.get_auth_headers()) - body = {"url": file_url} - resp = self.request.request( - "Post", headers=headers, url=url, data=dumps(body) - ) - formatted_resp = camel_dict_to_snake_dict(resp.json()) - if resp.status_code > 204: - error = formatted_resp - response = None - else: - error = None - response = formatted_resp - response = {"error": error, "response": response} - return response - - def get_purge_cache_status(self, cache_request_id: str = None) -> Dict[str, Any]: - """Get purge cache status by cache_request_id - :return: cache_request_id - """ - if not cache_request_id: - raise TypeError(ERRORS.CACHE_PURGE_STATUS_ID_MISSING.value) - - url = "{}/purge/{}".format(URL.BASE_URL.value, cache_request_id) - headers = self.request.create_headers() - resp = self.request.request("GET", url, headers=headers) - formatted_resp = camel_dict_to_snake_dict(resp.json()) - - if resp.status_code > 200: - error = formatted_resp - response = None - else: - error = None - response = formatted_resp - response = {"error": error, "response": response} - return response - - def get_metadata(self, file_id: str = None): - """Get metadata by file_id - """ - if not file_id: - raise TypeError(ERRORS.FILE_ID_MISSING.value) - - url = "{}/{}/metadata".format(URL.BASE_URL.value, file_id) - resp = self.request.request("GET", url, headers=self.request.create_headers()) - formatted_resp = camel_dict_to_snake_dict(resp.json()) - if resp.status_code > 200: - error = resp.json() - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - return response - - def get_metadata_from_remote_url(self, remote_file_url: str): - if not remote_file_url: - raise ValueError("You must provide remote url") - url = URL.REMOTE_METADATA_FULL_URL.value - param = {"url": remote_file_url} - resp = self.request.request( - "GET", url, headers=self.request.create_headers(), params=param - ) - - if resp.status_code > 204: - error = resp.json() - response = None - else: - error = None - response = resp.json() - response = {"error": error, "response": response} - return response - - def is_valid_list_options(self, 
options: Dict[str, Any]) -> bool: - """Returns if options are valid - """ - valid_values = self.get_valid_list_values() - for key in options: - if key not in valid_values: - return False - return True - - @staticmethod - def get_valid_list_values(): - """Returns valid options for list files - """ - return VALID_FILE_OPTIONS - - @staticmethod - def validate_upload(options): - """ - Validates upload value, checks if params are valid, - changes snake to camel case - """ - response_list = [] - for key, val in options.items(): - if key not in VALID_UPLOAD_OPTIONS: - return False - if key == "response_fields": - for i, j in enumerate(options[key]): - if j not in VALID_UPLOAD_OPTIONS: - return False - response_list.append(snake_to_lower_camel(j)) - val = ",".join(response_list) - if val: - options[key] = ",".join(response_list) - continue - if isinstance(val, list): - val = ",".join(val) - if val: - options[key] = val - continue - # imagekit server accepts 'true/false' - elif isinstance(val, bool): - val = str(val).lower() - if val: - options[key] = val - return request_formatter(options) diff --git a/imagekitio/resource.py b/imagekitio/resource.py deleted file mode 100644 index 4767b706..00000000 --- a/imagekitio/resource.py +++ /dev/null @@ -1,92 +0,0 @@ -import base64 -from datetime import datetime as dt -from typing import Dict - -import requests -from requests import Response - -from .constants.defaults import Default -from .constants.errors import ERRORS - - -class ImageKitRequest(object): - """ - ImageKitRequest is holds the methods and attributes about server - communications and communicates to server, used by Internal classes - """ - - def __init__( - self, private_key, public_key, url_endpoint, transformation_position, options - ): - self.private_key = private_key - self.public_key = public_key - self.url_endpoint = url_endpoint - self.transformation_position = ( - transformation_position or Default.DEFAULT_TRANSFORMATION_POSITION.value - ) - self.options = options or {} - - if not (self.private_key and self.public_key and self.url_endpoint): - raise ValueError(ERRORS.MANDATORY_INITIALIZATION_MISSING.value) - - def create_headers(self): - """Create headers dict and sets Authorization header - """ - headers = {"Accept-Encoding": "gzip, deflate"} - headers.update(self.get_auth_headers()) - return headers - - def get_auth_headers(self): - """Create dictionary with encoded private key - The out put is used in request header as authorization header - - :return: dictionary of encoded private key - """ - encoded_private_key = base64.b64encode((self.private_key + ":").encode()).decode( - "utf-8" - ) - return {"Authorization": "Basic {}".format(encoded_private_key)} - - @staticmethod - def request(method, url, headers, params=None, files=None, data=None) -> Response: - """Requests from ImageKit server used,by internal methods - """ - resp = requests.request( - method=method, - url=url, - params=params, - files=files, - data=data, - headers=headers, - ) - - return resp - - def extend_url_options(self, options: Dict) -> Dict: - """ - adds data to the options from the object, so that - required data can be used by url builder - """ - attr_dict = { - "public_key": self.public_key, - "private_key": self.private_key, - "url_endpoint": self.url_endpoint, - "transformation_position": self.transformation_position, - } - - extended_options = {**self.options, **attr_dict, **options} - return extended_options - - @staticmethod - def get_signature_timestamp(seconds: int = None) -> int: - """ - Returns 
either default time stamp - or current unix time and expiry seconds to get - signature time stamp - """ - - if not seconds: - return Default.DEFAULT_TIMESTAMP.value - current_timestamp = int(dt.now().timestamp()) - - return current_timestamp + seconds diff --git a/imagekitio/url.py b/imagekitio/url.py deleted file mode 100644 index 43636ab1..00000000 --- a/imagekitio/url.py +++ /dev/null @@ -1,198 +0,0 @@ -import hashlib -import hmac -import sys -from datetime import datetime as dt -from typing import Any, Dict, List -from urllib.parse import ParseResult, urlparse, urlunparse, parse_qsl, urlencode - -from imagekitio.constants.defaults import Default -from imagekitio.constants.supported_transform import SUPPORTED_TRANS -from imagekitio.utils.formatter import camel_dict_to_snake_dict, flatten_dict - -from .constants import ERRORS - - -class Url: - """ - Url class holds the request and related methods - to generate url(signed and unsigned) - """ - - def __init__(self, request_obj): - self.request = request_obj - - def generate_url(self, options: Dict = None) -> str: - options = camel_dict_to_snake_dict(options) - extended_options = self.request.extend_url_options(options) - return self.build_url(extended_options) - - def build_url(self, options: dict) -> str: - """ - builds url for from all options, - """ - - # important to strip the trailing slashes. later logic assumes no trailing slashes. - path = options.get("path", "").strip("/") - src = options.get("src", "").strip("/") - url_endpoint = options.get("url_endpoint", "").strip("/") - transformation_str = self.transformation_to_str(options.get("transformation")) - transformation_position = options.get("transformation_position", Default.DEFAULT_TRANSFORMATION_POSITION.value) - - if transformation_position not in Default.VALID_TRANSFORMATION_POSITION.value: - raise ValueError(ERRORS.INVALID_TRANSFORMATION_POSITION.value) - - if (path == "" and src == ""): - return "" - - # if path is present then it is given priority over src parameter - if path: - if transformation_position == "path" and len(transformation_str) != 0: - temp_url = "{}/{}:{}/{}".format( - url_endpoint, - Default.TRANSFORMATION_PARAMETER.value, - transformation_str.strip("/"), - path - ) - else: - temp_url = "{}/{}".format( - url_endpoint, - path - ) - else: - temp_url = src - # if src parameter is used, then we force transformation position in query - transformation_position = Default.QUERY_TRANSFORMATION_POSITION.value - - url_object = urlparse(temp_url) - - query_params = dict(parse_qsl(url_object.query)) - query_params.update(options.get("query_parameters", {})) - if transformation_position == Default.QUERY_TRANSFORMATION_POSITION.value and len(transformation_str) != 0: - query_params.update({Default.TRANSFORMATION_PARAMETER.value: transformation_str}) - query_params.update({Default.SDK_VERSION_PARAMETER.value: Default.SDK_VERSION.value}) - - # Update query params in the url - url_object = url_object._replace(query=urlencode(query_params)) - - if options.get("signed"): - expire_seconds = options.get("expire_seconds") - private_key = options.get("private_key") - expiry_timestamp = self.get_signature_timestamp(expire_seconds) - url_signature = self.get_signature( - private_key=private_key, - url=url_object.geturl(), - url_endpoint=url_endpoint, - expiry_timestamp=expiry_timestamp, - ) - - """ - If the expire_seconds parameter is specified then the output URL contains - ik-t parameter (unix timestamp seconds when the URL expires) and - the signature contains the timestamp for 
computation. - - If not present, then no ik-t parameter and the value 9999999999 is used. - """ - if expire_seconds: - query_params.update({Default.TIMESTAMP_PARAMETER.value: expiry_timestamp, Default.SIGNATURE_PARAMETER.value: url_signature}) - else: - query_params.update({Default.SIGNATURE_PARAMETER.value: url_signature}) - - # Update signature related query params - url_object = url_object._replace(query=urlencode(query_params)) - - return url_object.geturl() - - @staticmethod - def get_signature_timestamp(expiry_seconds: int = None) -> int: - """ - this function returns the signature timestamp to be used - with the generated url. - If expiry_seconds is provided, it returns expiry_seconds added - to the current unix time, otherwise the default time stamp - is returned. - """ - if not expiry_seconds: - return Default.DEFAULT_TIMESTAMP.value - current_timestamp = int(dt.now().timestamp()) - - return current_timestamp + expiry_seconds - - @staticmethod - def get_signature(private_key, url, url_endpoint, expiry_timestamp : int) -> str: - """" - create signature(hashed hex key) from - private_key, url, url_endpoint and expiry_timestamp - """ - # ensure url_endpoint has a trailing slash - if url_endpoint[-1] != '/': - url_endpoint += '/' - - if expiry_timestamp < 1: - expiry_timestamp = Default.DEFAULT_TIMESTAMP.value - - replaced_url = url.replace(url_endpoint, "") + str(expiry_timestamp) - - signature = hmac.new( - key=private_key.encode(), msg=replaced_url.encode(), digestmod=hashlib.sha1 - ) - return signature.hexdigest() - - @staticmethod - def is_valid_trans_options(options: Dict[str, Any]) -> bool: - """ - check if transformation options parameter provided by user is valid - so that ValueError exception can be raised with appropriate error - message in the ImageKitRequest Class - """ - supported_trans_keys = SUPPORTED_TRANS.keys() - # flattening to dict from list of dict to check key validation - transformation_dict = flatten_dict(options.get("transformation", [])) - for key in transformation_dict: - if key not in supported_trans_keys: - return False - return True - - @staticmethod - def is_valid_transformation_pos(trans_pos: str) -> bool: - """ - Returns if transformation position is valid as per Server Documentation - """ - return trans_pos in Default.VALID_TRANSFORMATION_POSITION.value - - @staticmethod - def transformation_to_str(transformation): - """ - creates transformation_position string for url from - transformation_position dictionary - """ - if not isinstance(transformation, list): - return "" - parsed_transforms = [] - for i in range(len(transformation)): - parsed_transform_step = [] - for key in transformation[i]: - transform_key = SUPPORTED_TRANS.get(key, "") - if not transform_key: - transform_key = key - - if transformation[i][key] == "-": - parsed_transform_step.append(transform_key) - else: - value = transformation[i][key] - if isinstance(value, bool): - value = str(value).lower() - if transform_key == "oi" or transform_key == "di": - value = value.strip("/") - value = value.replace("/","@@") - parsed_transform_step.append( - "{}{}{}".format( - transform_key, - Default.TRANSFORM_KEY_VALUE_DELIMITER.value, - value, - ) - ) - - parsed_transforms.append( - Default.TRANSFORM_DELIMITER.value.join(parsed_transform_step)) - - return Default.CHAIN_TRANSFORM_DELIMITER.value.join(parsed_transforms) diff --git a/imagekitio/utils/calculation.py b/imagekitio/utils/calculation.py deleted file mode 100644 index 0948096d..00000000 --- a/imagekitio/utils/calculation.py +++ /dev/null 
@@ -1,41 +0,0 @@ -import hashlib -import hmac -import uuid -from datetime import datetime as dt - -from imagekitio.constants import ERRORS - -DEFAULT_TIME_DIFF = 60 * 30 - - -def hamming_distance(first: str, second: str) -> int: - """Calculate Hamming Distance between to hex string - """ - try: - a = bin(int(first, 16))[2:].zfill(64) - b = bin(int(second, 16))[2:].zfill(64) - except TypeError: - raise TypeError(ERRORS.INVALID_PHASH_VALUE.value) - - return len(list(filter(lambda x: ord(x[0]) ^ ord(x[1]), zip(a, b)))) - - -def get_authenticated_params(token, expire, private_key): - default_expire = int(dt.now().timestamp()) + DEFAULT_TIME_DIFF - token = token or str(uuid.uuid4()) - expire = expire or default_expire - auth_params = {"token": token, "expire": expire, "signature": ""} - - if not private_key: - return - signature = hmac.new( - key=private_key.encode(), - msg=(token + str(expire)).encode(), - digestmod=hashlib.sha1, - ).hexdigest() - - auth_params["token"] = token - auth_params["expire"] = expire - auth_params["signature"] = signature - - return auth_params diff --git a/imagekitio/utils/formatter.py b/imagekitio/utils/formatter.py deleted file mode 100644 index 56cfc8b8..00000000 --- a/imagekitio/utils/formatter.py +++ /dev/null @@ -1,47 +0,0 @@ -import re -from collections import ChainMap, OrderedDict -from typing import Dict, List - - -def camel_to_snake(name): - """ - converts camelCase to snake_case for python - """ - s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() - - -def snake_to_lower_camel(word): - """ - changes word snake to lower camelCase example: my_plan -> MyPlan - :return camelCaseWord - """ - word_list = word.split("_") - if word_list: - return word_list[0] + "".join(x.title() for x in word_list[1:]) - return word - - -def request_formatter(data: dict) -> dict: - """Converts all keys to camelCase format required for ImageKit server - :param data: dict() - :return: converted_dict -> dict() - """ - return {snake_to_lower_camel(key): val for key, val in data.items()} - - -def camel_dict_to_snake_dict(data: dict) -> dict: - """Convert the keys of dictionary from camel case to snake case - """ - return {camel_to_snake(key): val for key, val in data.items()} - - -def flatten_dict(dict_list: List[Dict]) -> OrderedDict: - """Convert list of dictionary to flatten dict - :param dict_list: list of dictionary - :return: flatten_dict - """ - flat_dict = OrderedDict() - for dict_var in dict_list: - flat_dict.update(dict_var) - return flat_dict diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 00000000..53bca7ff --- /dev/null +++ b/noxfile.py @@ -0,0 +1,9 @@ +import nox + + +@nox.session(reuse_venv=True, name="test-pydantic-v1") +def test_pydantic_v1(session: nox.Session) -> None: + session.install("-r", "requirements-dev.lock") + session.install("pydantic<2") + + session.run("pytest", "--showlocals", "--ignore=tests/functional", *session.posargs) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..9902514a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,270 @@ +[project] +name = "imagekitio" +version = "5.0.0" +description = "The official Python library for the ImageKit API" +dynamic = ["readme"] +license = "Apache-2.0" +authors = [ +{ name = "Image Kit", email = "developer@imagekit.io" }, +] + +dependencies = [ + "httpx>=0.23.0, <1", + "pydantic>=1.9.0, <3", + "typing-extensions>=4.10, <5", + "anyio>=3.5.0, <5", + "distro>=1.7.0, <2", + "sniffio", +] + +requires-python = ">= 
3.9" +classifiers = [ + "Typing :: Typed", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Operating System :: OS Independent", + "Operating System :: POSIX", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Topic :: Software Development :: Libraries :: Python Modules", + "License :: OSI Approved :: Apache Software License" +] + +[project.urls] +Homepage = "https://github.com/imagekit-developer/imagekit-python" +Repository = "https://github.com/imagekit-developer/imagekit-python" + +[project.optional-dependencies] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"] +webhooks = ["standardwebhooks"] + +[tool.rye] +managed = true +# version pins are in requirements-dev.lock +dev-dependencies = [ + "pyright==1.1.399", + "mypy==1.17", + "respx", + "pytest", + "pytest-asyncio", + "ruff", + "time-machine", + "nox", + "dirty-equals>=0.6.0", + "importlib-metadata>=6.7.0", + "rich>=13.7.1", + "pytest-xdist>=3.6.1", +] + +[tool.rye.scripts] +format = { chain = [ + "format:ruff", + "format:docs", + "fix:ruff", + # run formatting again to fix any inconsistencies when imports are stripped + "format:ruff", +]} +"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md" +"format:ruff" = "ruff format" + +"lint" = { chain = [ + "check:ruff", + "typecheck", + "check:importable", +]} +"check:ruff" = "ruff check ." +"fix:ruff" = "ruff check --fix ." + +"check:importable" = "python -c 'import imagekitio'" + +typecheck = { chain = [ + "typecheck:pyright", + "typecheck:mypy" +]} +"typecheck:pyright" = "pyright" +"typecheck:verify-types" = "pyright --verifytypes imagekitio --ignoreexternal" +"typecheck:mypy" = "mypy ." + +[build-system] +requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] +build-backend = "hatchling.build" + +[tool.hatch.build] +include = [ + "src/*" +] + +[tool.hatch.build.targets.wheel] +packages = ["src/imagekitio"] + +[tool.hatch.build.targets.sdist] +# Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) +include = [ + "/*.toml", + "/*.json", + "/*.lock", + "/*.md", + "/mypy.ini", + "/noxfile.py", + "bin/*", + "examples/*", + "src/*", + "tests/*", +] + +[tool.hatch.metadata.hooks.fancy-pypi-readme] +content-type = "text/markdown" + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]] +path = "README.md" + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] +# replace relative links with absolute links +pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' +replacement = '[\1](https://github.com/imagekit-developer/imagekit-python/tree/master/\g<2>)' + +[tool.pytest.ini_options] +testpaths = ["tests"] +addopts = "--tb=short -n auto" +xfail_strict = true +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "session" +filterwarnings = [ + "error" +] + +[tool.pyright] +# this enables practically every flag given by pyright. +# there are a couple of flags that are still disabled by +# default in strict mode as they are experimental and niche. 
+typeCheckingMode = "strict" +pythonVersion = "3.9" + +exclude = [ + "_dev", + ".venv", + ".nox", + ".git", +] + +reportImplicitOverride = true +reportOverlappingOverload = false + +reportImportCycles = false +reportPrivateUsage = false + +[tool.mypy] +pretty = true +show_error_codes = true + +# Exclude _files.py because mypy isn't smart enough to apply +# the correct type narrowing and as this is an internal module +# it's fine to just use Pyright. +# +# We also exclude our `tests` as mypy doesn't always infer +# types correctly and Pyright will still catch any type errors. +exclude = ['src/imagekitio/_files.py', '_dev/.*.py', 'tests/.*'] + +strict_equality = true +implicit_reexport = true +check_untyped_defs = true +no_implicit_optional = true + +warn_return_any = true +warn_unreachable = true +warn_unused_configs = true + +# Turn these options off as it could cause conflicts +# with the Pyright options. +warn_unused_ignores = false +warn_redundant_casts = false + +disallow_any_generics = true +disallow_untyped_defs = true +disallow_untyped_calls = true +disallow_subclassing_any = true +disallow_incomplete_defs = true +disallow_untyped_decorators = true +cache_fine_grained = true + +# By default, mypy reports an error if you assign a value to the result +# of a function call that doesn't return anything. We do this in our test +# cases: +# ``` +# result = ... +# assert result is None +# ``` +# Changing this codegen to make mypy happy would increase complexity +# and would not be worth it. +disable_error_code = "func-returns-value,overload-cannot-match" + +# https://github.com/python/mypy/issues/12162 +[[tool.mypy.overrides]] +module = "black.files.*" +ignore_errors = true +ignore_missing_imports = true + + +[tool.ruff] +line-length = 120 +output-format = "grouped" +target-version = "py38" + +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] +select = [ + # isort + "I", + # bugbear rules + "B", + # remove unused imports + "F401", + # check for missing future annotations + "FA102", + # bare except statements + "E722", + # unused arguments + "ARG", + # print statements + "T201", + "T203", + # misuse of typing.TYPE_CHECKING + "TC004", + # import rules + "TID251", +] +ignore = [ + # mutable defaults + "B006", +] +unfixable = [ + # disable auto fix for print statements + "T201", + "T203", +] + +extend-safe-fixes = ["FA102"] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" + +[tool.ruff.lint.isort] +length-sort = true +length-sort-straight = true +combine-as-imports = true +extra-standard-library = ["typing_extensions"] +known-first-party = ["imagekitio", "tests"] + +[tool.ruff.lint.per-file-ignores] +"bin/**.py" = ["T201", "T203"] +"scripts/**.py" = ["T201", "T203"] +"tests/**.py" = ["T201", "T203"] +"examples/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..cd36a977 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,66 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + 
"pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "python", + "extra-files": [ + "src/imagekitio/_version.py" + ] +} \ No newline at end of file diff --git a/requirements-dev.lock b/requirements-dev.lock new file mode 100644 index 00000000..c34becba --- /dev/null +++ b/requirements-dev.lock @@ -0,0 +1,162 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.13.2 + # via httpx-aiohttp + # via imagekitio +aiosignal==1.4.0 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.12.0 + # via httpx + # via imagekitio +argcomplete==3.6.3 + # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.4.0 + # via aiohttp + # via nox + # via standardwebhooks +backports-asyncio-runner==1.2.0 + # via pytest-asyncio +certifi==2025.11.12 + # via httpcore + # via httpx +colorlog==6.10.1 + # via nox +dependency-groups==1.3.1 + # via nox +deprecated==1.3.1 + # via standardwebhooks +dirty-equals==0.11 +distlib==0.4.0 + # via virtualenv +distro==1.9.0 + # via imagekitio +exceptiongroup==1.3.1 + # via anyio + # via pytest +execnet==2.1.2 + # via pytest-xdist +filelock==3.19.1 + # via virtualenv +frozenlist==1.8.0 + # via aiohttp + # via aiosignal +h11==0.16.0 + # via httpcore +httpcore==1.0.9 + # via httpx +httpx==0.28.1 + # via httpx-aiohttp + # via imagekitio + # via respx + # via standardwebhooks +httpx-aiohttp==0.1.9 + # via imagekitio +humanize==4.13.0 + # via nox +idna==3.11 + # via anyio + # via httpx + # via yarl +importlib-metadata==8.7.0 +iniconfig==2.1.0 + # via pytest +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +multidict==6.7.0 + # via aiohttp + # via yarl +mypy==1.17.0 +mypy-extensions==1.1.0 + # via mypy +nodeenv==1.9.1 + # via pyright +nox==2025.11.12 +packaging==25.0 + # via dependency-groups + # via nox + # via pytest +pathspec==0.12.1 + # via mypy +platformdirs==4.4.0 + # via virtualenv +pluggy==1.6.0 + # via pytest +propcache==0.4.1 + # via aiohttp + # via yarl +pydantic==2.12.5 + # via imagekitio +pydantic-core==2.41.5 + # via pydantic +pygments==2.19.2 + # via pytest + # via rich +pyright==1.1.399 +pytest==8.4.2 + # via pytest-asyncio + # via pytest-xdist +pytest-asyncio==1.2.0 +pytest-xdist==3.8.0 +python-dateutil==2.9.0.post0 + # via standardwebhooks + # via time-machine +respx==0.22.0 +rich==14.2.0 +ruff==0.14.7 +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via imagekitio +standardwebhooks==1.0.0 + # via imagekitio +time-machine==2.19.0 +tomli==2.3.0 + # via dependency-groups + # via mypy + # via nox + # via pytest +types-deprecated==1.3.1.20251101 + # via standardwebhooks 
+types-python-dateutil==2.9.0.20251115 + # via standardwebhooks +typing-extensions==4.15.0 + # via aiosignal + # via anyio + # via exceptiongroup + # via imagekitio + # via multidict + # via mypy + # via pydantic + # via pydantic-core + # via pyright + # via pytest-asyncio + # via typing-inspection + # via virtualenv +typing-inspection==0.4.2 + # via pydantic +virtualenv==20.35.4 + # via nox +wrapt==2.0.1 + # via deprecated +yarl==1.22.0 + # via aiohttp +zipp==3.23.0 + # via importlib-metadata diff --git a/requirements.lock b/requirements.lock new file mode 100644 index 00000000..acc82f8d --- /dev/null +++ b/requirements.lock @@ -0,0 +1,92 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.13.2 + # via httpx-aiohttp + # via imagekitio +aiosignal==1.4.0 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.12.0 + # via httpx + # via imagekitio +async-timeout==5.0.1 + # via aiohttp +attrs==25.4.0 + # via aiohttp + # via standardwebhooks +certifi==2025.11.12 + # via httpcore + # via httpx +deprecated==1.3.1 + # via standardwebhooks +distro==1.9.0 + # via imagekitio +exceptiongroup==1.3.1 + # via anyio +frozenlist==1.8.0 + # via aiohttp + # via aiosignal +h11==0.16.0 + # via httpcore +httpcore==1.0.9 + # via httpx +httpx==0.28.1 + # via httpx-aiohttp + # via imagekitio + # via standardwebhooks +httpx-aiohttp==0.1.9 + # via imagekitio +idna==3.11 + # via anyio + # via httpx + # via yarl +multidict==6.7.0 + # via aiohttp + # via yarl +propcache==0.4.1 + # via aiohttp + # via yarl +pydantic==2.12.5 + # via imagekitio +pydantic-core==2.41.5 + # via pydantic +python-dateutil==2.9.0.post0 + # via standardwebhooks +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via imagekitio +standardwebhooks==1.0.0 + # via imagekitio +types-deprecated==1.3.1.20251101 + # via standardwebhooks +types-python-dateutil==2.9.0.20251115 + # via standardwebhooks +typing-extensions==4.15.0 + # via aiosignal + # via anyio + # via exceptiongroup + # via imagekitio + # via multidict + # via pydantic + # via pydantic-core + # via typing-inspection +typing-inspection==0.4.2 + # via pydantic +wrapt==2.0.1 + # via deprecated +yarl==1.22.0 + # via aiohttp diff --git a/requirements/requirements.txt b/requirements/requirements.txt deleted file mode 100644 index d5c2bc9c..00000000 --- a/requirements/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -requests>=2.22.0 diff --git a/requirements/test.txt b/requirements/test.txt deleted file mode 100644 index f24fbcf0..00000000 --- a/requirements/test.txt +++ /dev/null @@ -1,4 +0,0 @@ -requests>=2.22.0 -black==19.10b0 -coverage==4.5.4 -tox==3.14.2 diff --git a/sample/__init__.py b/sample/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/sample/requirements.txt b/sample/requirements.txt deleted file mode 100644 index 566083cb..00000000 --- a/sample/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -requests==2.22.0 diff --git a/sample/sample.jpg b/sample/sample.jpg deleted file mode 100644 index 1db39e04..00000000 Binary files a/sample/sample.jpg and /dev/null differ diff --git a/sample/sample.py b/sample/sample.py deleted file mode 100644 index 854d6efa..00000000 --- a/sample/sample.py +++ /dev/null @@ -1,242 +0,0 @@ -import base64 -import sys - -sys.path.append("..") - -# #### set your private_key 
public_key, url_endpoint, url ### ## -private_key = "your_public_api_key" -public_key = "your_private_api_key" -url_endpoint = "https://ik.imagekit.io/your_imagekit_id/" -# dummy image url -url = "https://file-examples.com/wp-content/uploads/2017/10/file_example_JPG_100kB.jpg" - -if __name__ == "__main__": - from imagekitio.client import ImageKit - - imagekit = ImageKit( - private_key=private_key, public_key=public_key, url_endpoint=url_endpoint, - ) - - ### The signed url generated for this file doesn't work using the Python SDK - upload = imagekit.upload_file( - file=open("sample.jpg", "rb"), - file_name="testing_upload_binary_signed_private.jpg", - options={ - "response_fields": ["is_private_file", "tags"], - "is_private_file": False, - "folder" : "/testing-python-folder/", - "tags": ["abc", "def"] - }, - ) - - print("-------------------------------------") - print("Upload with binary") - print("-------------------------------------") - print(upload, end="\n\n") - - image_url = imagekit.url( - { - "path": upload['response']['filePath'], - "query_parameters": {"v": "123"}, - "transformation": [{"height": "300", "width": "400"}], - "signed": True, - "expire_seconds": 3000, - } - ) - - print("-------------------------------------") - print("Signed url") - print("-------------------------------------") - print(image_url, end="\n\n") - - - # URL generation using image path and image hostname - image_url = imagekit.url( - { - "path": "default-image.jpg", - "url_endpoint": url_endpoint, - "transformation": [{"height": "300", "width": "400"}], - } - ) - - print("-------------------------------------") - print("Url using image path") - print("-------------------------------------") - print(image_url, end="\n\n") - - # 2 Using full image URL - image_url = imagekit.url( - { - "src": url_endpoint.rstrip("/") + "/default-image.jpg", - "transformation": [{"height": "300", "width": "400"}], - } - ) - - print("-------------------------------------") - print("Url using src") - print("-------------------------------------") - print(image_url, end="\n\n") - - image_url = imagekit.url( - { - "path": "/default-image.jpg", - "url_endpoint": "https://www.example.com", - "transformation": [{"height": "300", "width": "400"}, {"rotation": 90}], - "transformation_position": "query", - } - ) - - print("-------------------------------------") - print("Chained transformation") - print("-------------------------------------") - print(image_url, end="\n\n") - - image_url = imagekit.url( - { - "src": url_endpoint.rstrip("/") + "/default-image.jpg", - "transformation": [ - { - "format": "jpg", - "progressive": "true", - "effect_sharpen": "-", - "effect_contrast": "1", - } - ], - } - ) - - print("-------------------------------------") - print("Sharpening and contrast transformation") - print("-------------------------------------") - print(image_url, end="\n\n") - - list_files = imagekit.list_files({"skip": 0, "limit": 5}) - bulk_ids = [ - list_files["response"][3]["fileId"], - list_files["response"][4]["fileId"], - ] - - print("-------------------------------------") - print("List files") - print("-------------------------------------") - print(list_files, end="\n\n") - - upload = imagekit.upload_file( - file=open("sample.jpg", "rb"), - file_name="testing-binary.jpg", - options={ - "response_fields": ["is_private_file", "tags"], - "tags": ["abc", "def"], - "use_unique_file_name": False, - }, - ) - - print("-------------------------------------") - print("Upload with binary") - 
print("-------------------------------------") - print(upload, end="\n\n") - - file_id = upload["response"]["fileId"] - - upload = imagekit.upload_file( - file=url, - file_name="testing-url.jpg", - options={ - "response_fields": ["is_private_file"], - "is_private_file": False, - "tags": ["abc", "def"], - }, - ) - image_url = upload["response"]["url"] - - print("-------------------------------------") - print("Upload with url") - print("-------------------------------------") - print(upload, end="\n\n") - - with open("sample.jpg", mode="rb") as img: - imgstr = base64.b64encode(img.read()) - - upload_base64 = imagekit.upload_file( - file=imgstr, - file_name="testing-base64.jpg", - options={ - "response_fields": ["is_private_file", "metadata", "tags"], - "is_private_file": False, - "tags": ["abc", "def"], - }, - ) - - - print("-------------------------------------") - print("Upload with base64") - print("-------------------------------------") - print(upload_base64, end="\n\n") - - updated_detail = imagekit.update_file_details( - list_files["response"][0]["fileId"], - {"tags": None, "custom_coordinates": "10,10,100,100"}, - ) - - print("-------------------------------------") - print("Update file details") - print("-------------------------------------") - print(updated_detail, end="\n\n") - - details = imagekit.get_file_details(list_files["response"][0]["fileId"]) - print("-------------------------------------") - print("Get file details") - print("-------------------------------------") - print(details, end="\n\n") - - file_metadata = imagekit.get_file_metadata(list_files["response"][0]["fileId"]) - print("-------------------------------------") - print("File metadata") - print("-------------------------------------") - print(file_metadata, end="\n\n") - - - delete = imagekit.delete_file(list_files["response"][1]["fileId"]) - print("-------------------------------------") - print("Delete file") - print("-------------------------------------") - print(delete, end="\n\n") - - - purge_cache = imagekit.purge_file_cache(file_url=image_url) - print("-------------------------------------") - print("Purge cache") - print("-------------------------------------") - print(purge_cache, end="\n\n") - - request_id = purge_cache["response"]["request_id"] - purge_cache_status = imagekit.get_purge_file_cache_status(request_id) - - print("-------------------------------------") - print("Cache status") - print("-------------------------------------") - print(purge_cache_status, end="\n\n") - - auth_params = imagekit.get_authentication_parameters() - print("-------------------------------------") - print("Auth params") - print("-------------------------------------") - print(auth_params, end="\n\n") - - print("-------------------------------------") - print("Phash distance") - print("-------------------------------------") - print(imagekit.phash_distance("f06830ca9f1e3e90", "f06830ca9f1e3e90"), end="\n\n") - - - - print("-------------------------------------") - print("Bulk file delete") - print("-------------------------------------") - print(imagekit.bulk_file_delete(bulk_ids), end="\n\n") - - remote_file_url = upload["response"]["url"] - print("-------------------------------------") - print("Get metatdata via url") - print("-------------------------------------") - print(imagekit.get_remote_file_url_metadata(remote_file_url)) diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 00000000..b430fee3 --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e 
+ +cd "$(dirname "$0")/.." + +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then + brew bundle check >/dev/null 2>&1 || { + echo -n "==> Install Homebrew dependencies? (y/N): " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + brew bundle + ;; + *) + ;; + esac + echo + } +fi + +echo "==> Installing Python dependencies…" + +# experimental uv support makes installations significantly faster +rye config --set-bool behavior.use-uv=true + +rye sync --all-features diff --git a/scripts/format b/scripts/format new file mode 100755 index 00000000..667ec2d7 --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running formatters" +rye run format diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 00000000..eb9a4dda --- /dev/null +++ b/scripts/lint @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running lints" +rye run lint + +echo "==> Making sure it imports" +rye run python -c 'import imagekitio' diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 00000000..0b28f6ea --- /dev/null +++ b/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [[ -n "$1" && "$1" != '--'* ]]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +echo "==> Starting mock server with URL ${URL}" + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & + + # Wait for server to come online + echo -n "Waiting for server" + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" +fi diff --git a/scripts/test b/scripts/test new file mode 100755 index 00000000..dbeda2d2 --- /dev/null +++ b/scripts/test @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+  echo
+  echo -e "To run the server, pass in the path or url of your OpenAPI"
+  echo -e "spec to the prism command:"
+  echo
+  echo -e "  \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+  echo
+
+  exit 1
+else
+  echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+  echo
+fi
+
+export DEFER_PYDANTIC_BUILD=false
+
+echo "==> Running tests"
+rye run pytest "$@"
+
+echo "==> Running Pydantic v1 tests"
+rye run nox -s test-pydantic-v1 -- "$@" diff --git a/scripts/utils/ruffen-docs.py b/scripts/utils/ruffen-docs.py new file mode 100644 index 00000000..0cf2bd2f --- /dev/null +++ b/scripts/utils/ruffen-docs.py @@ -0,0 +1,167 @@
+# fork of https://github.com/asottile/blacken-docs adapted for ruff
+from __future__ import annotations
+
+import re
+import sys
+import argparse
+import textwrap
+import contextlib
+import subprocess
+from typing import Match, Optional, Sequence, Generator, NamedTuple, cast
+
+MD_RE = re.compile(
+    r"(?P<before>^(?P<indent> *)```\s*python\n)" r"(?P<code>.*?)" r"(?P<after>^(?P=indent)```\s*$)",
+    re.DOTALL | re.MULTILINE,
+)
+MD_PYCON_RE = re.compile(
+    r"(?P<before>^(?P<indent> *)```\s*pycon\n)" r"(?P<code>.*?)" r"(?P<after>^(?P=indent)```.*$)",
+    re.DOTALL | re.MULTILINE,
+)
+PYCON_PREFIX = ">>> "
+PYCON_CONTINUATION_PREFIX = "..."
+PYCON_CONTINUATION_RE = re.compile(
+    rf"^{re.escape(PYCON_CONTINUATION_PREFIX)}( |$)",
+)
+DEFAULT_LINE_LENGTH = 100
+
+
+class CodeBlockError(NamedTuple):
+    offset: int
+    exc: Exception
+
+
+def format_str(
+    src: str,
+) -> tuple[str, Sequence[CodeBlockError]]:
+    errors: list[CodeBlockError] = []
+
+    @contextlib.contextmanager
+    def _collect_error(match: Match[str]) -> Generator[None, None, None]:
+        try:
+            yield
+        except Exception as e:
+            errors.append(CodeBlockError(match.start(), e))
+
+    def _md_match(match: Match[str]) -> str:
+        code = textwrap.dedent(match["code"])
+        with _collect_error(match):
+            code = format_code_block(code)
+        code = textwrap.indent(code, match["indent"])
+        return f"{match['before']}{code}{match['after']}"
+
+    def _pycon_match(match: Match[str]) -> str:
+        code = ""
+        fragment = cast(Optional[str], None)
+
+        def finish_fragment() -> None:
+            nonlocal code
+            nonlocal fragment
+
+            if fragment is not None:
+                with _collect_error(match):
+                    fragment = format_code_block(fragment)
+                fragment_lines = fragment.splitlines()
+                code += f"{PYCON_PREFIX}{fragment_lines[0]}\n"
+                for line in fragment_lines[1:]:
+                    # Skip blank lines to handle Black adding a blank above
+                    # functions within blocks. A blank line would end the REPL
+                    # continuation prompt.
+                    #
+                    # >>> if True:
+                    # ...     def f():
+                    # ...         pass
+                    # ...
+ if line: + code += f"{PYCON_CONTINUATION_PREFIX} {line}\n" + if fragment_lines[-1].startswith(" "): + code += f"{PYCON_CONTINUATION_PREFIX}\n" + fragment = None + + indentation = None + for line in match["code"].splitlines(): + orig_line, line = line, line.lstrip() + if indentation is None and line: + indentation = len(orig_line) - len(line) + continuation_match = PYCON_CONTINUATION_RE.match(line) + if continuation_match and fragment is not None: + fragment += line[continuation_match.end() :] + "\n" + else: + finish_fragment() + if line.startswith(PYCON_PREFIX): + fragment = line[len(PYCON_PREFIX) :] + "\n" + else: + code += orig_line[indentation:] + "\n" + finish_fragment() + return code + + def _md_pycon_match(match: Match[str]) -> str: + code = _pycon_match(match) + code = textwrap.indent(code, match["indent"]) + return f"{match['before']}{code}{match['after']}" + + src = MD_RE.sub(_md_match, src) + src = MD_PYCON_RE.sub(_md_pycon_match, src) + return src, errors + + +def format_code_block(code: str) -> str: + return subprocess.check_output( + [ + sys.executable, + "-m", + "ruff", + "format", + "--stdin-filename=script.py", + f"--line-length={DEFAULT_LINE_LENGTH}", + ], + encoding="utf-8", + input=code, + ) + + +def format_file( + filename: str, + skip_errors: bool, +) -> int: + with open(filename, encoding="UTF-8") as f: + contents = f.read() + new_contents, errors = format_str(contents) + for error in errors: + lineno = contents[: error.offset].count("\n") + 1 + print(f"{filename}:{lineno}: code block parse error {error.exc}") + if errors and not skip_errors: + return 1 + if contents != new_contents: + print(f"{filename}: Rewriting...") + with open(filename, "w", encoding="UTF-8") as f: + f.write(new_contents) + return 0 + else: + return 0 + + +def main(argv: Sequence[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--line-length", + type=int, + default=DEFAULT_LINE_LENGTH, + ) + parser.add_argument( + "-S", + "--skip-string-normalization", + action="store_true", + ) + parser.add_argument("-E", "--skip-errors", action="store_true") + parser.add_argument("filenames", nargs="*") + args = parser.parse_args(argv) + + retv = 0 + for filename in args.filenames: + retv |= format_file(filename, skip_errors=args.skip_errors) + return retv + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 00000000..ace7ffbf --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -exuo pipefail + +FILENAME=$(basename dist/*.whl) + +RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(curl -v -X PUT \ + -H "Content-Type: binary/octet-stream" \ + --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/imagekit-python/$SHA/$FILENAME'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/setup.py b/setup.py deleted file mode 100644 index 5ad5ff27..00000000 --- a/setup.py +++ /dev/null @@ -1,24 +0,0 @@ -import setuptools - -with 
open("README.md", "r") as fh: - long_description = fh.read() - -with open("requirements/requirements.txt") as f: - install_requires = f.read().splitlines() - -setuptools.setup( - name="imagekitio", - version="2.2.8", - description="Python wrapper for the ImageKit API", - long_description=long_description, - long_description_content_type="text/markdown", - install_requires=install_requires, - url="https://github.com/imagekit-developer/imagekit-python", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - python_requires=">=3.6", -) diff --git a/src/imagekit/lib/.keep b/src/imagekit/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/imagekit/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/imagekitio/__init__.py b/src/imagekitio/__init__.py new file mode 100644 index 00000000..90416321 --- /dev/null +++ b/src/imagekitio/__init__.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import typing as _t + +from . import types +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given +from ._utils import file_from_path +from ._client import ( + Client, + Stream, + Timeout, + ImageKit, + Transport, + AsyncClient, + AsyncStream, + AsyncImageKit, + RequestOptions, +) +from ._models import BaseModel +from ._version import __title__, __version__ +from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse +from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS +from ._exceptions import ( + APIError, + ConflictError, + ImageKitError, + NotFoundError, + APIStatusError, + RateLimitError, + APITimeoutError, + BadRequestError, + APIConnectionError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, + APIWebhookValidationError, + APIResponseValidationError, +) +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient +from ._utils._logs import setup_logging as _setup_logging + +__all__ = [ + "types", + "__version__", + "__title__", + "NoneType", + "Transport", + "ProxiesTypes", + "NotGiven", + "NOT_GIVEN", + "not_given", + "Omit", + "omit", + "ImageKitError", + "APIError", + "APIStatusError", + "APITimeoutError", + "APIConnectionError", + "APIResponseValidationError", + "APIWebhookValidationError", + "BadRequestError", + "AuthenticationError", + "PermissionDeniedError", + "NotFoundError", + "ConflictError", + "UnprocessableEntityError", + "RateLimitError", + "InternalServerError", + "Timeout", + "RequestOptions", + "Client", + "AsyncClient", + "Stream", + "AsyncStream", + "ImageKit", + "AsyncImageKit", + "file_from_path", + "BaseModel", + "DEFAULT_TIMEOUT", + "DEFAULT_MAX_RETRIES", + "DEFAULT_CONNECTION_LIMITS", + "DefaultHttpxClient", + "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", +] + +if not _t.TYPE_CHECKING: + from ._utils._resources_proxy import resources as resources + +_setup_logging() + +# Update the __module__ attribute for exported symbols so that +# error messages point to this module instead of the module +# it was originally defined in, e.g. 
+# imagekitio._exceptions.NotFoundError -> imagekitio.NotFoundError +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + try: + __locals[__name].__module__ = "imagekitio" + except (TypeError, AttributeError): + # Some of our exported symbols are builtins which we can't set attributes for. + pass diff --git a/src/imagekitio/_base_client.py b/src/imagekitio/_base_client.py new file mode 100644 index 00000000..384e7c0a --- /dev/null +++ b/src/imagekitio/_base_client.py @@ -0,0 +1,1995 @@ +from __future__ import annotations + +import sys +import json +import time +import uuid +import email +import asyncio +import inspect +import logging +import platform +import email.utils +from types import TracebackType +from random import random +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Type, + Union, + Generic, + Mapping, + TypeVar, + Iterable, + Iterator, + Optional, + Generator, + AsyncIterator, + cast, + overload, +) +from typing_extensions import Literal, override, get_origin + +import anyio +import httpx +import distro +import pydantic +from httpx import URL +from pydantic import PrivateAttr + +from . import _exceptions +from ._qs import Querystring +from ._files import to_httpx_files, async_to_httpx_files +from ._types import ( + Body, + Omit, + Query, + Headers, + Timeout, + NotGiven, + ResponseT, + AnyMapping, + PostParser, + RequestFiles, + HttpxSendArgs, + RequestOptions, + HttpxRequestFiles, + ModelBuilderProtocol, + not_given, +) +from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping +from ._compat import PYDANTIC_V1, model_copy, model_dump +from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type +from ._response import ( + APIResponse, + BaseAPIResponse, + AsyncAPIResponse, + extract_response_type, +) +from ._constants import ( + DEFAULT_TIMEOUT, + MAX_RETRY_DELAY, + DEFAULT_MAX_RETRIES, + INITIAL_RETRY_DELAY, + RAW_RESPONSE_HEADER, + OVERRIDE_CAST_TO_HEADER, + DEFAULT_CONNECTION_LIMITS, +) +from ._streaming import Stream, SSEDecoder, AsyncStream, SSEBytesDecoder +from ._exceptions import ( + APIStatusError, + APITimeoutError, + APIConnectionError, + APIResponseValidationError, +) + +log: logging.Logger = logging.getLogger(__name__) + +# TODO: make base page type vars covariant +SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") +AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]") + + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +_StreamT = TypeVar("_StreamT", bound=Stream[Any]) +_AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any]) + +if TYPE_CHECKING: + from httpx._config import ( + DEFAULT_TIMEOUT_CONFIG, # pyright: ignore[reportPrivateImportUsage] + ) + + HTTPX_DEFAULT_TIMEOUT = DEFAULT_TIMEOUT_CONFIG +else: + try: + from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT + except ImportError: + # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366 + HTTPX_DEFAULT_TIMEOUT = Timeout(5.0) + + +class PageInfo: + """Stores the necessary information to build the request to retrieve the next page. + + Either `url` or `params` must be set. + """ + + url: URL | NotGiven + params: Query | NotGiven + json: Body | NotGiven + + @overload + def __init__( + self, + *, + url: URL, + ) -> None: ... + + @overload + def __init__( + self, + *, + params: Query, + ) -> None: ... + + @overload + def __init__( + self, + *, + json: Body, + ) -> None: ... 
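+    # (Illustration only, not SDK API: a concrete page class might return
+    # PageInfo(params={"skip": 40}) or PageInfo(url=next_url) from its
+    # next_page_info() so the client knows how to request the following page.)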
+ + def __init__( + self, + *, + url: URL | NotGiven = not_given, + json: Body | NotGiven = not_given, + params: Query | NotGiven = not_given, + ) -> None: + self.url = url + self.json = json + self.params = params + + @override + def __repr__(self) -> str: + if self.url: + return f"{self.__class__.__name__}(url={self.url})" + if self.json: + return f"{self.__class__.__name__}(json={self.json})" + return f"{self.__class__.__name__}(params={self.params})" + + +class BasePage(GenericModel, Generic[_T]): + """ + Defines the core interface for pagination. + + Type Args: + ModelT: The pydantic model that represents an item in the response. + + Methods: + has_next_page(): Check if there is another page available + next_page_info(): Get the necessary information to make a request for the next page + """ + + _options: FinalRequestOptions = PrivateAttr() + _model: Type[_T] = PrivateAttr() + + def has_next_page(self) -> bool: + items = self._get_page_items() + if not items: + return False + return self.next_page_info() is not None + + def next_page_info(self) -> Optional[PageInfo]: ... + + def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] + ... + + def _params_from_url(self, url: URL) -> httpx.QueryParams: + # TODO: do we have to preprocess params here? + return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params) + + def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: + options = model_copy(self._options) + options._strip_raw_response_header() + + if not isinstance(info.params, NotGiven): + options.params = {**options.params, **info.params} + return options + + if not isinstance(info.url, NotGiven): + params = self._params_from_url(info.url) + url = info.url.copy_with(params=params) + options.params = dict(url.params) + options.url = str(url) + return options + + if not isinstance(info.json, NotGiven): + if not is_mapping(info.json): + raise TypeError("Pagination is only supported with mappings") + + if not options.json_data: + options.json_data = {**info.json} + else: + if not is_mapping(options.json_data): + raise TypeError("Pagination is only supported with mappings") + + options.json_data = {**options.json_data, **info.json} + return options + + raise ValueError("Unexpected PageInfo state") + + +class BaseSyncPage(BasePage[_T], Generic[_T]): + _client: SyncAPIClient = pydantic.PrivateAttr() + + def _set_private_attributes( + self, + client: SyncAPIClient, + model: Type[_T], + options: FinalRequestOptions, + ) -> None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + + self._model = model + self._client = client + self._options = options + + # Pydantic uses a custom `__iter__` method to support casting BaseModels + # to dictionaries. e.g. dict(model). + # As we want to support `for item in page`, this is inherently incompatible + # with the default pydantic behaviour. It is not possible to support both + # use cases at once. Fortunately, this is not a big deal as all other pydantic + # methods should continue to work as expected as there is an alternative method + # to cast a model to a dictionary, model.dict(), which is used internally + # by pydantic. 
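+    #
+    # Rough illustration of the iteration this enables (resource and field
+    # names here are hypothetical, not part of this SDK's actual surface):
+    #
+    #     for item in client.some_resource.list():
+    #         ...  # yields items from every page, fetching pages lazily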
+ def __iter__(self) -> Iterator[_T]: # type: ignore + for page in self.iter_pages(): + for item in page._get_page_items(): + yield item + + def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: + page = self + while True: + yield page + if page.has_next_page(): + page = page.get_next_page() + else: + return + + def get_next_page(self: SyncPageT) -> SyncPageT: + info = self.next_page_info() + if not info: + raise RuntimeError( + "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." + ) + + options = self._info_to_options(info) + return self._client._request_api_list(self._model, page=self.__class__, options=options) + + +class AsyncPaginator(Generic[_T, AsyncPageT]): + def __init__( + self, + client: AsyncAPIClient, + options: FinalRequestOptions, + page_cls: Type[AsyncPageT], + model: Type[_T], + ) -> None: + self._model = model + self._client = client + self._options = options + self._page_cls = page_cls + + def __await__(self) -> Generator[Any, None, AsyncPageT]: + return self._get_page().__await__() + + async def _get_page(self) -> AsyncPageT: + def _parser(resp: AsyncPageT) -> AsyncPageT: + resp._set_private_attributes( + model=self._model, + options=self._options, + client=self._client, + ) + return resp + + self._options.post_parser = _parser + + return await self._client.request(self._page_cls, self._options) + + async def __aiter__(self) -> AsyncIterator[_T]: + # https://github.com/microsoft/pyright/issues/3464 + page = cast( + AsyncPageT, + await self, # type: ignore + ) + async for item in page: + yield item + + +class BaseAsyncPage(BasePage[_T], Generic[_T]): + _client: AsyncAPIClient = pydantic.PrivateAttr() + + def _set_private_attributes( + self, + model: Type[_T], + client: AsyncAPIClient, + options: FinalRequestOptions, + ) -> None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + + self._model = model + self._client = client + self._options = options + + async def __aiter__(self) -> AsyncIterator[_T]: + async for page in self.iter_pages(): + for item in page._get_page_items(): + yield item + + async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]: + page = self + while True: + yield page + if page.has_next_page(): + page = await page.get_next_page() + else: + return + + async def get_next_page(self: AsyncPageT) -> AsyncPageT: + info = self.next_page_info() + if not info: + raise RuntimeError( + "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." 
+ ) + + options = self._info_to_options(info) + return await self._client._request_api_list(self._model, page=self.__class__, options=options) + + +_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient]) +_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]]) + + +class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): + _client: _HttpxClientT + _version: str + _base_url: URL + max_retries: int + timeout: Union[float, Timeout, None] + _strict_response_validation: bool + _idempotency_header: str | None + _default_stream_cls: type[_DefaultStreamT] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + _strict_response_validation: bool, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None = DEFAULT_TIMEOUT, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + ) -> None: + self._version = version + self._base_url = self._enforce_trailing_slash(URL(base_url)) + self.max_retries = max_retries + self.timeout = timeout + self._custom_headers = custom_headers or {} + self._custom_query = custom_query or {} + self._strict_response_validation = _strict_response_validation + self._idempotency_header = None + self._platform: Platform | None = None + + if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] + raise TypeError( + "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `imagekitio.DEFAULT_MAX_RETRIES`" + ) + + def _enforce_trailing_slash(self, url: URL) -> URL: + if url.raw_path.endswith(b"/"): + return url + return url.copy_with(raw_path=url.raw_path + b"/") + + def _make_status_error_from_response( + self, + response: httpx.Response, + ) -> APIStatusError: + if response.is_closed and not response.is_stream_consumed: + # We can't read the response body as it has been closed + # before it was read. This can happen if an event hook + # raises a status error. + body = None + err_msg = f"Error code: {response.status_code}" + else: + err_text = response.text.strip() + body = err_text + + try: + body = json.loads(err_text) + err_msg = f"Error code: {response.status_code} - {body}" + except Exception: + err_msg = err_text or f"Error code: {response.status_code}" + + return self._make_status_error(err_msg, body=body, response=response) + + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> _exceptions.APIStatusError: + raise NotImplementedError() + + def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers: + custom_headers = options.headers or {} + headers_dict = _merge_mappings(self.default_headers, custom_headers) + self._validate_headers(headers_dict, custom_headers) + + # headers are case-insensitive while dictionaries are not. + headers = httpx.Headers(headers_dict) + + idempotency_header = self._idempotency_header + if idempotency_header and options.idempotency_key and idempotency_header not in headers: + headers[idempotency_header] = options.idempotency_key + + # Don't set these headers if they were already set or removed by the caller. We check + # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case. 
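+        # (Illustration: if the caller removed one of these headers, e.g. by passing
+        # Omit() for "x-stainless-retry-count", it must not be silently re-added below.)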
+ lower_custom_headers = [header.lower() for header in custom_headers] + if "x-stainless-retry-count" not in lower_custom_headers: + headers["x-stainless-retry-count"] = str(retries_taken) + if "x-stainless-read-timeout" not in lower_custom_headers: + timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout + if isinstance(timeout, Timeout): + timeout = timeout.read + if timeout is not None: + headers["x-stainless-read-timeout"] = str(timeout) + + return headers + + def _prepare_url(self, url: str) -> URL: + """ + Merge a URL argument together with any 'base_url' on the client, + to create the URL used for the outgoing request. + """ + # Copied from httpx's `_merge_url` method. + merge_url = URL(url) + if merge_url.is_relative_url: + merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/") + return self.base_url.copy_with(raw_path=merge_raw_path) + + return merge_url + + def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder: + return SSEDecoder() + + def _build_request( + self, + options: FinalRequestOptions, + *, + retries_taken: int = 0, + ) -> httpx.Request: + if log.isEnabledFor(logging.DEBUG): + log.debug("Request options: %s", model_dump(options, exclude_unset=True)) + + kwargs: dict[str, Any] = {} + + json_data = options.json_data + if options.extra_json is not None: + if json_data is None: + json_data = cast(Body, options.extra_json) + elif is_mapping(json_data): + json_data = _merge_mappings(json_data, options.extra_json) + else: + raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") + + headers = self._build_headers(options, retries_taken=retries_taken) + params = _merge_mappings(self.default_query, options.params) + content_type = headers.get("Content-Type") + files = options.files + + # If the given Content-Type header is multipart/form-data then it + # has to be removed so that httpx can generate the header with + # additional information for us as it has to be in this form + # for the server to be able to correctly parse the request: + # multipart/form-data; boundary=---abc-- + if content_type is not None and content_type.startswith("multipart/form-data"): + if "boundary" not in content_type: + # only remove the header if the boundary hasn't been explicitly set + # as the caller doesn't want httpx to come up with their own boundary + headers.pop("Content-Type") + + # As we are now sending multipart/form-data instead of application/json + # we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding + if json_data: + if not is_dict(json_data): + raise TypeError( + f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead." + ) + kwargs["data"] = self._serialize_multipartform(json_data) + + # httpx determines whether or not to send a "multipart/form-data" + # request based on the truthiness of the "files" argument. + # This gets around that issue by generating a dict value that + # evaluates to true. 
+ # + # https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186 + if not files: + files = cast(HttpxRequestFiles, ForceMultipartDict()) + + prepared_url = self._prepare_url(options.url) + if "_" in prepared_url.host: + # work around https://github.com/encode/httpx/discussions/2880 + kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + + # TODO: report this error to httpx + return self._client.build_request( # pyright: ignore[reportUnknownMemberType] + headers=headers, + timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, + method=options.method, + url=prepared_url, + # the `Query` type that we use is incompatible with qs' + # `Params` type as it needs to be typed as `Mapping[str, object]` + # so that passing a `TypedDict` doesn't cause an error. + # https://github.com/microsoft/pyright/issues/3526#event-6715453066 + params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, + **kwargs, + ) + + def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: + items = self.qs.stringify_items( + # TODO: type ignore is required as stringify_items is well typed but we can't be + # well typed without heavy validation. + data, # type: ignore + array_format="brackets", + ) + serialized: dict[str, object] = {} + for key, value in items: + existing = serialized.get(key) + + if not existing: + serialized[key] = value + continue + + # If a value has already been set for this key then that + # means we're sending data like `array[]=[1, 2, 3]` and we + # need to tell httpx that we want to send multiple values with + # the same key which is done by using a list or a tuple. + # + # Note: 2d arrays should never result in the same key at both + # levels so it's safe to assume that if the value is a list, + # it was because we changed it to be a list. 
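+            #
+            # Rough illustration (assuming the brackets array format used above):
+            # {"tags": ["a", "b"]} stringifies to [("tags[]", "a"), ("tags[]", "b")],
+            # so the second occurrence turns serialized["tags[]"] into ["a", "b"].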
+ if is_list(existing): + existing.append(value) + else: + serialized[key] = [existing, value] + + return serialized + + def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]: + if not is_given(options.headers): + return cast_to + + # make a copy of the headers so we don't mutate user-input + headers = dict(options.headers) + + # we internally support defining a temporary header to override the + # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response` + # see _response.py for implementation details + override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given) + if is_given(override_cast_to): + options.headers = headers + return cast(Type[ResponseT], override_cast_to) + + return cast_to + + def _should_stream_response_body(self, request: httpx.Request) -> bool: + return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return] + + def _process_response_data( + self, + *, + data: object, + cast_to: type[ResponseT], + response: httpx.Response, + ) -> ResponseT: + if data is None: + return cast(ResponseT, None) + + if cast_to is object: + return cast(ResponseT, data) + + try: + if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol): + return cast(ResponseT, cast_to.build(response=response, data=data)) + + if self._strict_response_validation: + return cast(ResponseT, validate_type(type_=cast_to, value=data)) + + return cast(ResponseT, construct_type(type_=cast_to, value=data)) + except pydantic.ValidationError as err: + raise APIResponseValidationError(response=response, body=data) from err + + @property + def qs(self) -> Querystring: + return Querystring() + + @property + def custom_auth(self) -> httpx.Auth | None: + return None + + @property + def auth_headers(self) -> dict[str, str]: + return {} + + @property + def default_headers(self) -> dict[str, str | Omit]: + return { + "Accept": "application/json", + "Content-Type": "application/json", + "User-Agent": self.user_agent, + **self.platform_headers(), + **self.auth_headers, + **self._custom_headers, + } + + @property + def default_query(self) -> dict[str, object]: + return { + **self._custom_query, + } + + def _validate_headers( + self, + headers: Headers, # noqa: ARG002 + custom_headers: Headers, # noqa: ARG002 + ) -> None: + """Validate the given default headers and custom headers. + + Does nothing by default. + """ + return + + @property + def user_agent(self) -> str: + return f"{self.__class__.__name__}/Python {self._version}" + + @property + def base_url(self) -> URL: + return self._base_url + + @base_url.setter + def base_url(self, url: URL | str) -> None: + self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url)) + + def platform_headers(self) -> Dict[str, str]: + # the actual implementation is in a separate `lru_cache` decorated + # function because adding `lru_cache` to methods will leak memory + # https://github.com/python/cpython/issues/88476 + return platform_headers(self._version, platform=self._platform) + + def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None: + """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. 
+ + About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After + See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax + """ + if response_headers is None: + return None + + # First, try the non-standard `retry-after-ms` header for milliseconds, + # which is more precise than integer-seconds `retry-after` + try: + retry_ms_header = response_headers.get("retry-after-ms", None) + return float(retry_ms_header) / 1000 + except (TypeError, ValueError): + pass + + # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats). + retry_header = response_headers.get("retry-after") + try: + # note: the spec indicates that this should only ever be an integer + # but if someone sends a float there's no reason for us to not respect it + return float(retry_header) + except (TypeError, ValueError): + pass + + # Last, try parsing `retry-after` as a date. + retry_date_tuple = email.utils.parsedate_tz(retry_header) + if retry_date_tuple is None: + return None + + retry_date = email.utils.mktime_tz(retry_date_tuple) + return float(retry_date - time.time()) + + def _calculate_retry_timeout( + self, + remaining_retries: int, + options: FinalRequestOptions, + response_headers: Optional[httpx.Headers] = None, + ) -> float: + max_retries = options.get_max_retries(self.max_retries) + + # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + retry_after = self._parse_retry_after_header(response_headers) + if retry_after is not None and 0 < retry_after <= 60: + return retry_after + + # Also cap retry count to 1000 to avoid any potential overflows with `pow` + nb_retries = min(max_retries - remaining_retries, 1000) + + # Apply exponential backoff, but not more than the max. + sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) + + # Apply some jitter, plus-or-minus half a second. + jitter = 1 - 0.25 * random() + timeout = sleep_seconds * jitter + return timeout if timeout >= 0 else 0 + + def _should_retry(self, response: httpx.Response) -> bool: + # Note: this is not a standard header + should_retry_header = response.headers.get("x-should-retry") + + # If the server explicitly says whether or not to retry, obey. + if should_retry_header == "true": + log.debug("Retrying as header `x-should-retry` is set to `true`") + return True + if should_retry_header == "false": + log.debug("Not retrying as header `x-should-retry` is set to `false`") + return False + + # Retry on request timeouts. + if response.status_code == 408: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on lock timeouts. + if response.status_code == 409: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on rate limits. + if response.status_code == 429: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry internal errors. 
+ if response.status_code >= 500: + log.debug("Retrying due to status code %i", response.status_code) + return True + + log.debug("Not retrying") + return False + + def _idempotency_key(self) -> str: + return f"stainless-python-retry-{uuid.uuid4()}" + + +class _DefaultHttpxClient(httpx.Client): + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + super().__init__(**kwargs) + + +if TYPE_CHECKING: + DefaultHttpxClient = httpx.Client + """An alias to `httpx.Client` that provides the same defaults that this SDK + uses internally. + + This is useful because overriding the `http_client` with your own instance of + `httpx.Client` will result in httpx's defaults being used, not ours. + """ +else: + DefaultHttpxClient = _DefaultHttpxClient + + +class SyncHttpxClientWrapper(DefaultHttpxClient): + def __del__(self) -> None: + if self.is_closed: + return + + try: + self.close() + except Exception: + pass + + +class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]): + _client: httpx.Client + _default_stream_cls: type[Stream[Any]] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None | NotGiven = not_given, + http_client: httpx.Client | None = None, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + _strict_response_validation: bool, + ) -> None: + if not is_given(timeout): + # if the user passed in a custom http client with a non-default + # timeout set then we use that timeout. + # + # note: there is an edge case here where the user passes in a client + # where they've explicitly set the timeout to match the default timeout + # as this check is structural, meaning that we'll think they didn't + # pass in a timeout and will ignore it + if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: + timeout = http_client.timeout + else: + timeout = DEFAULT_TIMEOUT + + if http_client is not None and not isinstance(http_client, httpx.Client): # pyright: ignore[reportUnnecessaryIsInstance] + raise TypeError( + f"Invalid `http_client` argument; Expected an instance of `httpx.Client` but got {type(http_client)}" + ) + + super().__init__( + version=version, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + base_url=base_url, + max_retries=max_retries, + custom_query=custom_query, + custom_headers=custom_headers, + _strict_response_validation=_strict_response_validation, + ) + self._client = http_client or SyncHttpxClientWrapper( + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + ) + + def is_closed(self) -> bool: + return self._client.is_closed + + def close(self) -> None: + """Close the underlying HTTPX client. + + The client will *not* be usable after this. 
+ """ + # If an error is thrown while constructing a client, self._client + # may not be present + if hasattr(self, "_client"): + self._client.close() + + def __enter__(self: _T) -> _T: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def _prepare_options( + self, + options: FinalRequestOptions, # noqa: ARG002 + ) -> FinalRequestOptions: + """Hook for mutating the given options""" + return options + + def _prepare_request( + self, + request: httpx.Request, # noqa: ARG002 + ) -> None: + """This method is used as a callback for mutating the `Request` object + after it has been constructed. + This is useful for cases where you want to add certain headers based off of + the request properties, e.g. `url`, `method` etc. + """ + return None + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[True], + stream_cls: Type[_StreamT], + ) -> _StreamT: ... + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: Type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... + + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + cast_to = self._maybe_override_cast_to(cast_to, options) + + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + if input_options.idempotency_key is None and input_options.method.lower() != "get": + # ensure the idempotency key is reused between requests + input_options.idempotency_key = self._idempotency_key() + + response: httpx.Response | None = None + max_retries = input_options.get_max_retries(self.max_retries) + + retries_taken = 0 + for retries_taken in range(max_retries + 1): + options = model_copy(input_options) + options = self._prepare_options(options) + + remaining_retries = max_retries - retries_taken + request = self._build_request(options, retries_taken=retries_taken) + self._prepare_request(request) + + kwargs: HttpxSendArgs = {} + if self.custom_auth is not None: + kwargs["auth"] = self.custom_auth + + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + + log.debug("Sending HTTP Request: %s %s", request.method, request.url) + + response = None + try: + response = self._client.send( + request, + stream=stream or self._should_stream_response_body(request=request), + **kwargs, + ) + except httpx.TimeoutException as err: + log.debug("Encountered httpx.TimeoutException", exc_info=True) + + if remaining_retries > 0: + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising timeout error") + raise APITimeoutError(request=request) from err + except Exception as err: + log.debug("Encountered Exception", exc_info=True) + + if remaining_retries > 0: + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising connection error") + 
raise APIConnectionError(request=request) from err + + log.debug( + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + response.headers, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code + log.debug("Encountered httpx.HTTPStatusError", exc_info=True) + + if remaining_retries > 0 and self._should_retry(err.response): + err.response.close() + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=response, + ) + continue + + # If the response is streamed then we need to explicitly read the response + # to completion before attempting to access the response text. + if not err.response.is_closed: + err.response.read() + + log.debug("Re-raising status error") + raise self._make_status_error_from_response(err.response) from None + + break + + assert response is not None, "could not resolve response (should never happen)" + return self._process_response( + cast_to=cast_to, + options=options, + response=response, + stream=stream, + stream_cls=stream_cls, + retries_taken=retries_taken, + ) + + def _sleep_for_retry( + self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None + ) -> None: + remaining_retries = max_retries - retries_taken + if remaining_retries == 1: + log.debug("1 retry left") + else: + log.debug("%i retries left", remaining_retries) + + timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None) + log.info("Retrying request to %s in %f seconds", options.url, timeout) + + time.sleep(timeout) + + def _process_response( + self, + *, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + response: httpx.Response, + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, + ) -> ResponseT: + origin = get_origin(cast_to) or cast_to + + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): + if not issubclass(origin, APIResponse): + raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") + + response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) + return cast( + ResponseT, + response_cls( + raw=response, + client=self, + cast_to=extract_response_type(response_cls), + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ), + ) + + if cast_to == httpx.Response: + return cast(ResponseT, response) + + api_response = APIResponse( + raw=response, + client=self, + cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ) + if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): + return cast(ResponseT, api_response) + + return api_response.parse() + + def _request_api_list( + self, + model: Type[object], + page: Type[SyncPageT], + options: FinalRequestOptions, + ) -> SyncPageT: + def _parser(resp: SyncPageT) -> SyncPageT: + resp._set_private_attributes( + client=self, + model=model, + options=options, + ) + return resp 
+ + options.post_parser = _parser + + return self.request(page, options, stream=False) + + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_StreamT], + ) -> _StreamT: ... + + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... + + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + opts = FinalRequestOptions.construct(method="get", url=path, **options) + # cast is required because mypy complains about returning Any even though + # it understands the type variables + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: Literal[True], + stream_cls: type[_StreamT], + ) -> _StreamT: ... + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: bool, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... 
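+    # The overloads above only refine typing: with the default stream=False the parsed
+    # `cast_to` model is returned, while stream=True together with a `stream_cls` yields
+    # that stream wrapper instead. Illustrative call (path and model are placeholders,
+    # not part of this SDK's surface): client.post("/v1/example", cast_to=ExampleModel, body={...}).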
+ + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + opts = FinalRequestOptions.construct( + method="post", url=path, json_data=body, files=to_httpx_files(files), **options + ) + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + + def patch( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + return self.request(cast_to, opts) + + def put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct( + method="put", url=path, json_data=body, files=to_httpx_files(files), **options + ) + return self.request(cast_to, opts) + + def delete( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + return self.request(cast_to, opts) + + def get_api_list( + self, + path: str, + *, + model: Type[object], + page: Type[SyncPageT], + body: Body | None = None, + options: RequestOptions = {}, + method: str = "get", + ) -> SyncPageT: + opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) + return self._request_api_list(model, page, opts) + + +class _DefaultAsyncHttpxClient(httpx.AsyncClient): + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + super().__init__(**kwargs) + + +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + +if TYPE_CHECKING: + DefaultAsyncHttpxClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK + uses internally. + + This is useful because overriding the `http_client` with your own instance of + `httpx.AsyncClient` will result in httpx's defaults being used, not ours. 
+ """ + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" +else: + DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient + + +class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): + def __del__(self) -> None: + if self.is_closed: + return + + try: + # TODO(someday): support non asyncio runtimes here + asyncio.get_running_loop().create_task(self.aclose()) + except Exception: + pass + + +class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]): + _client: httpx.AsyncClient + _default_stream_cls: type[AsyncStream[Any]] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + _strict_response_validation: bool, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None | NotGiven = not_given, + http_client: httpx.AsyncClient | None = None, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + ) -> None: + if not is_given(timeout): + # if the user passed in a custom http client with a non-default + # timeout set then we use that timeout. + # + # note: there is an edge case here where the user passes in a client + # where they've explicitly set the timeout to match the default timeout + # as this check is structural, meaning that we'll think they didn't + # pass in a timeout and will ignore it + if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: + timeout = http_client.timeout + else: + timeout = DEFAULT_TIMEOUT + + if http_client is not None and not isinstance(http_client, httpx.AsyncClient): # pyright: ignore[reportUnnecessaryIsInstance] + raise TypeError( + f"Invalid `http_client` argument; Expected an instance of `httpx.AsyncClient` but got {type(http_client)}" + ) + + super().__init__( + version=version, + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + max_retries=max_retries, + custom_query=custom_query, + custom_headers=custom_headers, + _strict_response_validation=_strict_response_validation, + ) + self._client = http_client or AsyncHttpxClientWrapper( + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + ) + + def is_closed(self) -> bool: + return self._client.is_closed + + async def close(self) -> None: + """Close the underlying HTTPX client. + + The client will *not* be usable after this. + """ + await self._client.aclose() + + async def __aenter__(self: _T) -> _T: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.close() + + async def _prepare_options( + self, + options: FinalRequestOptions, # noqa: ARG002 + ) -> FinalRequestOptions: + """Hook for mutating the given options""" + return options + + async def _prepare_request( + self, + request: httpx.Request, # noqa: ARG002 + ) -> None: + """This method is used as a callback for mutating the `Request` object + after it has been constructed. + This is useful for cases where you want to add certain headers based off of + the request properties, e.g. `url`, `method` etc. + """ + return None + + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[False] = False, + ) -> ResponseT: ... 
+ + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... + + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + if self._platform is None: + # `get_platform` can make blocking IO calls so we + # execute it earlier while we are in an async context + self._platform = await asyncify(get_platform)() + + cast_to = self._maybe_override_cast_to(cast_to, options) + + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + if input_options.idempotency_key is None and input_options.method.lower() != "get": + # ensure the idempotency key is reused between requests + input_options.idempotency_key = self._idempotency_key() + + response: httpx.Response | None = None + max_retries = input_options.get_max_retries(self.max_retries) + + retries_taken = 0 + for retries_taken in range(max_retries + 1): + options = model_copy(input_options) + options = await self._prepare_options(options) + + remaining_retries = max_retries - retries_taken + request = self._build_request(options, retries_taken=retries_taken) + await self._prepare_request(request) + + kwargs: HttpxSendArgs = {} + if self.custom_auth is not None: + kwargs["auth"] = self.custom_auth + + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + + log.debug("Sending HTTP Request: %s %s", request.method, request.url) + + response = None + try: + response = await self._client.send( + request, + stream=stream or self._should_stream_response_body(request=request), + **kwargs, + ) + except httpx.TimeoutException as err: + log.debug("Encountered httpx.TimeoutException", exc_info=True) + + if remaining_retries > 0: + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising timeout error") + raise APITimeoutError(request=request) from err + except Exception as err: + log.debug("Encountered Exception", exc_info=True) + + if remaining_retries > 0: + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising connection error") + raise APIConnectionError(request=request) from err + + log.debug( + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + response.headers, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code + log.debug("Encountered httpx.HTTPStatusError", exc_info=True) + + if remaining_retries > 0 and self._should_retry(err.response): + await err.response.aclose() + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=response, + ) + continue + + # If the response is streamed then we need to explicitly read the response + # to completion before attempting to access the 
response text. + if not err.response.is_closed: + await err.response.aread() + + log.debug("Re-raising status error") + raise self._make_status_error_from_response(err.response) from None + + break + + assert response is not None, "could not resolve response (should never happen)" + return await self._process_response( + cast_to=cast_to, + options=options, + response=response, + stream=stream, + stream_cls=stream_cls, + retries_taken=retries_taken, + ) + + async def _sleep_for_retry( + self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None + ) -> None: + remaining_retries = max_retries - retries_taken + if remaining_retries == 1: + log.debug("1 retry left") + else: + log.debug("%i retries left", remaining_retries) + + timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None) + log.info("Retrying request to %s in %f seconds", options.url, timeout) + + await anyio.sleep(timeout) + + async def _process_response( + self, + *, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + response: httpx.Response, + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, + ) -> ResponseT: + origin = get_origin(cast_to) or cast_to + + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): + if not issubclass(origin, AsyncAPIResponse): + raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") + + response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) + return cast( + "ResponseT", + response_cls( + raw=response, + client=self, + cast_to=extract_response_type(response_cls), + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ), + ) + + if cast_to == httpx.Response: + return cast(ResponseT, response) + + api_response = AsyncAPIResponse( + raw=response, + client=self, + cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ) + if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): + return cast(ResponseT, api_response) + + return await api_response.parse() + + def _request_api_list( + self, + model: Type[_T], + page: Type[AsyncPageT], + options: FinalRequestOptions, + ) -> AsyncPaginator[_T, AsyncPageT]: + return AsyncPaginator(client=self, options=options, page_cls=page, model=model) + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... 
+ + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + opts = FinalRequestOptions.construct(method="get", url=path, **options) + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... + + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + opts = FinalRequestOptions.construct( + method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options + ) + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + + async def patch( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + return await self.request(cast_to, opts) + + async def put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct( + method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options + ) + return await self.request(cast_to, opts) + + async def delete( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + return await self.request(cast_to, opts) + + def get_api_list( + self, + path: str, + *, + model: Type[_T], + page: Type[AsyncPageT], + body: Body | None = None, + options: RequestOptions = {}, + method: str = "get", + ) -> AsyncPaginator[_T, AsyncPageT]: + opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) + return self._request_api_list(model, page, opts) + + +def make_request_options( + *, + query: Query | None = None, + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + idempotency_key: str | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + post_parser: PostParser | NotGiven = not_given, +) -> RequestOptions: + """Create a dict of type RequestOptions without keys of NotGiven values.""" + options: RequestOptions = {} + if extra_headers is not None: + options["headers"] = extra_headers + + if extra_body is not None: + options["extra_json"] = 
cast(AnyMapping, extra_body) + + if query is not None: + options["params"] = query + + if extra_query is not None: + options["params"] = {**options.get("params", {}), **extra_query} + + if not isinstance(timeout, NotGiven): + options["timeout"] = timeout + + if idempotency_key is not None: + options["idempotency_key"] = idempotency_key + + if is_given(post_parser): + # internal + options["post_parser"] = post_parser # type: ignore + + return options + + +class ForceMultipartDict(Dict[str, None]): + def __bool__(self) -> bool: + return True + + +class OtherPlatform: + def __init__(self, name: str) -> None: + self.name = name + + @override + def __str__(self) -> str: + return f"Other:{self.name}" + + +Platform = Union[ + OtherPlatform, + Literal[ + "MacOS", + "Linux", + "Windows", + "FreeBSD", + "OpenBSD", + "iOS", + "Android", + "Unknown", + ], +] + + +def get_platform() -> Platform: + try: + system = platform.system().lower() + platform_name = platform.platform().lower() + except Exception: + return "Unknown" + + if "iphone" in platform_name or "ipad" in platform_name: + # Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7 + # system is Darwin and platform_name is a string like: + # - Darwin-21.6.0-iPhone12,1-64bit + # - Darwin-21.6.0-iPad7,11-64bit + return "iOS" + + if system == "darwin": + return "MacOS" + + if system == "windows": + return "Windows" + + if "android" in platform_name: + # Tested using Pydroid 3 + # system is Linux and platform_name is a string like 'Linux-5.10.81-android12-9-00001-geba40aecb3b7-ab8534902-aarch64-with-libc' + return "Android" + + if system == "linux": + # https://distro.readthedocs.io/en/latest/#distro.id + distro_id = distro.id() + if distro_id == "freebsd": + return "FreeBSD" + + if distro_id == "openbsd": + return "OpenBSD" + + return "Linux" + + if platform_name: + return OtherPlatform(platform_name) + + return "Unknown" + + +@lru_cache(maxsize=None) +def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]: + return { + "X-Stainless-Lang": "python", + "X-Stainless-Package-Version": version, + "X-Stainless-OS": str(platform or get_platform()), + "X-Stainless-Arch": str(get_architecture()), + "X-Stainless-Runtime": get_python_runtime(), + "X-Stainless-Runtime-Version": get_python_version(), + } + + +class OtherArch: + def __init__(self, name: str) -> None: + self.name = name + + @override + def __str__(self) -> str: + return f"other:{self.name}" + + +Arch = Union[OtherArch, Literal["x32", "x64", "arm", "arm64", "unknown"]] + + +def get_python_runtime() -> str: + try: + return platform.python_implementation() + except Exception: + return "unknown" + + +def get_python_version() -> str: + try: + return platform.python_version() + except Exception: + return "unknown" + + +def get_architecture() -> Arch: + try: + machine = platform.machine().lower() + except Exception: + return "unknown" + + if machine in ("arm64", "aarch64"): + return "arm64" + + # TODO: untested + if machine == "arm": + return "arm" + + if machine == "x86_64": + return "x64" + + # TODO: untested + if sys.maxsize <= 2**32: + return "x32" + + if machine: + return OtherArch(machine) + + return "unknown" + + +def _merge_mappings( + obj1: Mapping[_T_co, Union[_T, Omit]], + obj2: Mapping[_T_co, Union[_T, Omit]], +) -> Dict[_T_co, _T]: + """Merge two mappings of the same type, removing any values that are instances of `Omit`. + + In cases with duplicate keys the second mapping takes precedence. 
+ """ + merged = {**obj1, **obj2} + return {key: value for key, value in merged.items() if not isinstance(value, Omit)} diff --git a/src/imagekitio/_client.py b/src/imagekitio/_client.py new file mode 100644 index 00000000..3b9f4aec --- /dev/null +++ b/src/imagekitio/_client.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +import base64 +from typing import Any, Mapping +from typing_extensions import Self, override + +import httpx + +from . import _exceptions +from ._qs import Querystring +from .lib import helper +from ._types import ( + Omit, + Headers, + Timeout, + NotGiven, + Transport, + ProxiesTypes, + RequestOptions, + not_given, +) +from ._utils import is_given, get_async_library +from ._version import __version__ +from .resources import dummy, assets, webhooks, custom_metadata_fields +from ._streaming import Stream as Stream, AsyncStream as AsyncStream +from ._exceptions import ImageKitError, APIStatusError +from ._base_client import ( + DEFAULT_MAX_RETRIES, + SyncAPIClient, + AsyncAPIClient, +) +from .resources.beta import beta +from .resources.cache import cache +from .resources.files import files +from .resources.folders import folders +from .resources.accounts import accounts + +__all__ = [ + "Timeout", + "Transport", + "ProxiesTypes", + "RequestOptions", + "ImageKit", + "AsyncImageKit", + "Client", + "AsyncClient", +] + + +class ImageKit(SyncAPIClient): + dummy: dummy.DummyResource + custom_metadata_fields: custom_metadata_fields.CustomMetadataFieldsResource + files: files.FilesResource + assets: assets.AssetsResource + cache: cache.CacheResource + folders: folders.FoldersResource + accounts: accounts.AccountsResource + beta: beta.BetaResource + webhooks: webhooks.WebhooksResource + helper: helper.HelperResource + with_raw_response: ImageKitWithRawResponse + with_streaming_response: ImageKitWithStreamedResponse + + # client options + private_key: str + password: str | None + webhook_secret: str | None + + def __init__( + self, + *, + private_key: str | None = None, + password: str | None = None, + webhook_secret: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = not_given, + max_retries: int = DEFAULT_MAX_RETRIES, + default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + # Configure a custom httpx client. + # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`. + # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details. + http_client: httpx.Client | None = None, + # Enable or disable schema validation for data returned by the API. + # When enabled an error APIResponseValidationError is raised + # if the API responds with invalid data for the expected schema. + # + # This parameter may be removed or changed in the future. + # If you rely on this feature, please open a GitHub issue + # outlining your use-case to help us decide if it should be + # part of our public interface in the future. + _strict_response_validation: bool = False, + ) -> None: + """Construct a new synchronous ImageKit client instance. 
+ + This automatically infers the following arguments from their corresponding environment variables if they are not provided: + - `private_key` from `IMAGEKIT_PRIVATE_KEY` + - `password` from `OPTIONAL_IMAGEKIT_IGNORES_THIS` + - `webhook_secret` from `IMAGEKIT_WEBHOOK_SECRET` + """ + if private_key is None: + private_key = os.environ.get("IMAGEKIT_PRIVATE_KEY") + if private_key is None: + raise ImageKitError( + "The private_key client option must be set either by passing private_key to the client or by setting the IMAGEKIT_PRIVATE_KEY environment variable" + ) + self.private_key = private_key + + if password is None: + password = os.environ.get("OPTIONAL_IMAGEKIT_IGNORES_THIS") or "do_not_set" + self.password = password + + if webhook_secret is None: + webhook_secret = os.environ.get("IMAGEKIT_WEBHOOK_SECRET") + self.webhook_secret = webhook_secret + + if base_url is None: + base_url = os.environ.get("IMAGE_KIT_BASE_URL") + self._base_url_overridden = base_url is not None + if base_url is None: + base_url = f"https://api.imagekit.io" + + super().__init__( + version=__version__, + base_url=base_url, + max_retries=max_retries, + timeout=timeout, + http_client=http_client, + custom_headers=default_headers, + custom_query=default_query, + _strict_response_validation=_strict_response_validation, + ) + + self.dummy = dummy.DummyResource(self) + self.custom_metadata_fields = custom_metadata_fields.CustomMetadataFieldsResource(self) + self.files = files.FilesResource(self) + self.assets = assets.AssetsResource(self) + self.cache = cache.CacheResource(self) + self.folders = folders.FoldersResource(self) + self.accounts = accounts.AccountsResource(self) + self.beta = beta.BetaResource(self) + self.webhooks = webhooks.WebhooksResource(self) + self.helper = helper.HelperResource(self) + self.with_raw_response = ImageKitWithRawResponse(self) + self.with_streaming_response = ImageKitWithStreamedResponse(self) + + @property + @override + def qs(self) -> Querystring: + return Querystring(array_format="comma") + + @property + @override + def auth_headers(self) -> dict[str, str]: + if self.password is None: + return {} + credentials = f"{self.private_key}:{self.password}".encode("ascii") + header = f"Basic {base64.b64encode(credentials).decode('ascii')}" + return {"Authorization": header} + + @property + @override + def default_headers(self) -> dict[str, str | Omit]: + return { + **super().default_headers, + "X-Stainless-Async": "false", + **self._custom_headers, + } + + @override + def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.private_key and self.password and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + raise TypeError( + '"Could not resolve authentication method. Expected the private_key or password to be set. 
Or for the `Authorization` headers to be explicitly omitted"' + ) + + def copy( + self, + *, + private_key: str | None = None, + password: str | None = None, + webhook_secret: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = not_given, + http_client: httpx.Client | None = None, + max_retries: int | NotGiven = not_given, + default_headers: Mapping[str, str] | None = None, + set_default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + set_default_query: Mapping[str, object] | None = None, + _extra_kwargs: Mapping[str, Any] = {}, + ) -> Self: + """ + Create a new client instance re-using the same options given to the current client with optional overriding. + """ + if default_headers is not None and set_default_headers is not None: + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + + if default_query is not None and set_default_query is not None: + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + + headers = self._custom_headers + if default_headers is not None: + headers = {**headers, **default_headers} + elif set_default_headers is not None: + headers = set_default_headers + + params = self._custom_query + if default_query is not None: + params = {**params, **default_query} + elif set_default_query is not None: + params = set_default_query + + http_client = http_client or self._client + client = self.__class__( + private_key=private_key or self.private_key, + password=password or self.password, + webhook_secret=webhook_secret or self.webhook_secret, + base_url=base_url or self.base_url, + timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, + http_client=http_client, + max_retries=max_retries if is_given(max_retries) else self.max_retries, + default_headers=headers, + default_query=params, + **_extra_kwargs, + ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client + + # Alias for `copy` for nicer inline usage, e.g. + # client.with_options(timeout=10).foo.create(...) 
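+    # Note: the copy reuses the same underlying httpx.Client unless an explicit
+    # `http_client` is passed (see `http_client = http_client or self._client` above),
+    # so per-call overrides such as client.with_options(max_retries=5).foo.create(...)
+    # share the original connection pool.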
+ with_options = copy + + @override + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> APIStatusError: + if response.status_code == 400: + return _exceptions.BadRequestError(err_msg, response=response, body=body) + + if response.status_code == 401: + return _exceptions.AuthenticationError(err_msg, response=response, body=body) + + if response.status_code == 403: + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + + if response.status_code == 404: + return _exceptions.NotFoundError(err_msg, response=response, body=body) + + if response.status_code == 409: + return _exceptions.ConflictError(err_msg, response=response, body=body) + + if response.status_code == 422: + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + + if response.status_code == 429: + return _exceptions.RateLimitError(err_msg, response=response, body=body) + + if response.status_code >= 500: + return _exceptions.InternalServerError(err_msg, response=response, body=body) + return APIStatusError(err_msg, response=response, body=body) + + +class AsyncImageKit(AsyncAPIClient): + dummy: dummy.AsyncDummyResource + custom_metadata_fields: custom_metadata_fields.AsyncCustomMetadataFieldsResource + files: files.AsyncFilesResource + assets: assets.AsyncAssetsResource + cache: cache.AsyncCacheResource + folders: folders.AsyncFoldersResource + accounts: accounts.AsyncAccountsResource + beta: beta.AsyncBetaResource + webhooks: webhooks.AsyncWebhooksResource + helper: helper.AsyncHelperResource + with_raw_response: AsyncImageKitWithRawResponse + with_streaming_response: AsyncImageKitWithStreamedResponse + + # client options + private_key: str + password: str | None + webhook_secret: str | None + + def __init__( + self, + *, + private_key: str | None = None, + password: str | None = None, + webhook_secret: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = not_given, + max_retries: int = DEFAULT_MAX_RETRIES, + default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + # Configure a custom httpx client. + # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`. + # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details. + http_client: httpx.AsyncClient | None = None, + # Enable or disable schema validation for data returned by the API. + # When enabled an error APIResponseValidationError is raised + # if the API responds with invalid data for the expected schema. + # + # This parameter may be removed or changed in the future. + # If you rely on this feature, please open a GitHub issue + # outlining your use-case to help us decide if it should be + # part of our public interface in the future. + _strict_response_validation: bool = False, + ) -> None: + """Construct a new async AsyncImageKit client instance. 
+ + This automatically infers the following arguments from their corresponding environment variables if they are not provided: + - `private_key` from `IMAGEKIT_PRIVATE_KEY` + - `password` from `OPTIONAL_IMAGEKIT_IGNORES_THIS` + - `webhook_secret` from `IMAGEKIT_WEBHOOK_SECRET` + """ + if private_key is None: + private_key = os.environ.get("IMAGEKIT_PRIVATE_KEY") + if private_key is None: + raise ImageKitError( + "The private_key client option must be set either by passing private_key to the client or by setting the IMAGEKIT_PRIVATE_KEY environment variable" + ) + self.private_key = private_key + + if password is None: + password = os.environ.get("OPTIONAL_IMAGEKIT_IGNORES_THIS") or "do_not_set" + self.password = password + + if webhook_secret is None: + webhook_secret = os.environ.get("IMAGEKIT_WEBHOOK_SECRET") + self.webhook_secret = webhook_secret + + if base_url is None: + base_url = os.environ.get("IMAGE_KIT_BASE_URL") + self._base_url_overridden = base_url is not None + if base_url is None: + base_url = f"https://api.imagekit.io" + + super().__init__( + version=__version__, + base_url=base_url, + max_retries=max_retries, + timeout=timeout, + http_client=http_client, + custom_headers=default_headers, + custom_query=default_query, + _strict_response_validation=_strict_response_validation, + ) + + self.dummy = dummy.AsyncDummyResource(self) + self.custom_metadata_fields = custom_metadata_fields.AsyncCustomMetadataFieldsResource(self) + self.files = files.AsyncFilesResource(self) + self.assets = assets.AsyncAssetsResource(self) + self.cache = cache.AsyncCacheResource(self) + self.folders = folders.AsyncFoldersResource(self) + self.accounts = accounts.AsyncAccountsResource(self) + self.beta = beta.AsyncBetaResource(self) + self.webhooks = webhooks.AsyncWebhooksResource(self) + self.helper = helper.AsyncHelperResource(self) + self.with_raw_response = AsyncImageKitWithRawResponse(self) + self.with_streaming_response = AsyncImageKitWithStreamedResponse(self) + + @property + @override + def qs(self) -> Querystring: + return Querystring(array_format="comma") + + @property + @override + def auth_headers(self) -> dict[str, str]: + if self.password is None: + return {} + credentials = f"{self.private_key}:{self.password}".encode("ascii") + header = f"Basic {base64.b64encode(credentials).decode('ascii')}" + return {"Authorization": header} + + @property + @override + def default_headers(self) -> dict[str, str | Omit]: + return { + **super().default_headers, + "X-Stainless-Async": f"async:{get_async_library()}", + **self._custom_headers, + } + + @override + def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.private_key and self.password and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + raise TypeError( + '"Could not resolve authentication method. Expected the private_key or password to be set. 
Or for the `Authorization` headers to be explicitly omitted"' + ) + + def copy( + self, + *, + private_key: str | None = None, + password: str | None = None, + webhook_secret: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = not_given, + http_client: httpx.AsyncClient | None = None, + max_retries: int | NotGiven = not_given, + default_headers: Mapping[str, str] | None = None, + set_default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + set_default_query: Mapping[str, object] | None = None, + _extra_kwargs: Mapping[str, Any] = {}, + ) -> Self: + """ + Create a new client instance re-using the same options given to the current client with optional overriding. + """ + if default_headers is not None and set_default_headers is not None: + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + + if default_query is not None and set_default_query is not None: + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + + headers = self._custom_headers + if default_headers is not None: + headers = {**headers, **default_headers} + elif set_default_headers is not None: + headers = set_default_headers + + params = self._custom_query + if default_query is not None: + params = {**params, **default_query} + elif set_default_query is not None: + params = set_default_query + + http_client = http_client or self._client + client = self.__class__( + private_key=private_key or self.private_key, + password=password or self.password, + webhook_secret=webhook_secret or self.webhook_secret, + base_url=base_url or self.base_url, + timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, + http_client=http_client, + max_retries=max_retries if is_given(max_retries) else self.max_retries, + default_headers=headers, + default_query=params, + **_extra_kwargs, + ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client + + # Alias for `copy` for nicer inline usage, e.g. + # client.with_options(timeout=10).foo.create(...) 
+ with_options = copy + + @override + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> APIStatusError: + if response.status_code == 400: + return _exceptions.BadRequestError(err_msg, response=response, body=body) + + if response.status_code == 401: + return _exceptions.AuthenticationError(err_msg, response=response, body=body) + + if response.status_code == 403: + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + + if response.status_code == 404: + return _exceptions.NotFoundError(err_msg, response=response, body=body) + + if response.status_code == 409: + return _exceptions.ConflictError(err_msg, response=response, body=body) + + if response.status_code == 422: + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + + if response.status_code == 429: + return _exceptions.RateLimitError(err_msg, response=response, body=body) + + if response.status_code >= 500: + return _exceptions.InternalServerError(err_msg, response=response, body=body) + return APIStatusError(err_msg, response=response, body=body) + + +class ImageKitWithRawResponse: + def __init__(self, client: ImageKit) -> None: + self.dummy = dummy.DummyResourceWithRawResponse(client.dummy) + self.custom_metadata_fields = custom_metadata_fields.CustomMetadataFieldsResourceWithRawResponse( + client.custom_metadata_fields + ) + self.files = files.FilesResourceWithRawResponse(client.files) + self.assets = assets.AssetsResourceWithRawResponse(client.assets) + self.cache = cache.CacheResourceWithRawResponse(client.cache) + self.folders = folders.FoldersResourceWithRawResponse(client.folders) + self.accounts = accounts.AccountsResourceWithRawResponse(client.accounts) + self.beta = beta.BetaResourceWithRawResponse(client.beta) + + +class AsyncImageKitWithRawResponse: + def __init__(self, client: AsyncImageKit) -> None: + self.dummy = dummy.AsyncDummyResourceWithRawResponse(client.dummy) + self.custom_metadata_fields = custom_metadata_fields.AsyncCustomMetadataFieldsResourceWithRawResponse( + client.custom_metadata_fields + ) + self.files = files.AsyncFilesResourceWithRawResponse(client.files) + self.assets = assets.AsyncAssetsResourceWithRawResponse(client.assets) + self.cache = cache.AsyncCacheResourceWithRawResponse(client.cache) + self.folders = folders.AsyncFoldersResourceWithRawResponse(client.folders) + self.accounts = accounts.AsyncAccountsResourceWithRawResponse(client.accounts) + self.beta = beta.AsyncBetaResourceWithRawResponse(client.beta) + + +class ImageKitWithStreamedResponse: + def __init__(self, client: ImageKit) -> None: + self.dummy = dummy.DummyResourceWithStreamingResponse(client.dummy) + self.custom_metadata_fields = custom_metadata_fields.CustomMetadataFieldsResourceWithStreamingResponse( + client.custom_metadata_fields + ) + self.files = files.FilesResourceWithStreamingResponse(client.files) + self.assets = assets.AssetsResourceWithStreamingResponse(client.assets) + self.cache = cache.CacheResourceWithStreamingResponse(client.cache) + self.folders = folders.FoldersResourceWithStreamingResponse(client.folders) + self.accounts = accounts.AccountsResourceWithStreamingResponse(client.accounts) + self.beta = beta.BetaResourceWithStreamingResponse(client.beta) + + +class AsyncImageKitWithStreamedResponse: + def __init__(self, client: AsyncImageKit) -> None: + self.dummy = dummy.AsyncDummyResourceWithStreamingResponse(client.dummy) + self.custom_metadata_fields = 
custom_metadata_fields.AsyncCustomMetadataFieldsResourceWithStreamingResponse( + client.custom_metadata_fields + ) + self.files = files.AsyncFilesResourceWithStreamingResponse(client.files) + self.assets = assets.AsyncAssetsResourceWithStreamingResponse(client.assets) + self.cache = cache.AsyncCacheResourceWithStreamingResponse(client.cache) + self.folders = folders.AsyncFoldersResourceWithStreamingResponse(client.folders) + self.accounts = accounts.AsyncAccountsResourceWithStreamingResponse(client.accounts) + self.beta = beta.AsyncBetaResourceWithStreamingResponse(client.beta) + + +Client = ImageKit + +AsyncClient = AsyncImageKit diff --git a/src/imagekitio/_compat.py b/src/imagekitio/_compat.py new file mode 100644 index 00000000..bdef67f0 --- /dev/null +++ b/src/imagekitio/_compat.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload +from datetime import date, datetime +from typing_extensions import Self, Literal + +import pydantic +from pydantic.fields import FieldInfo + +from ._types import IncEx, StrBytesIntFloat + +_T = TypeVar("_T") +_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) + +# --------------- Pydantic v2, v3 compatibility --------------- + +# Pyright incorrectly reports some of our functions as overriding a method when they don't +# pyright: reportIncompatibleMethodOverride=false + +PYDANTIC_V1 = pydantic.VERSION.startswith("1.") + +if TYPE_CHECKING: + + def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001 + ... + + def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001 + ... + + def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001 + ... + + def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001 + ... + + def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001 + ... + + def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001 + ... + + def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001 + ... + +else: + # v1 re-exports + if PYDANTIC_V1: + from pydantic.typing import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, + ) + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + else: + from ._utils import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + parse_date as parse_date, + is_typeddict as is_typeddict, + parse_datetime as parse_datetime, + is_literal_type as is_literal_type, + ) + + +# refactored config +if TYPE_CHECKING: + from pydantic import ConfigDict as ConfigDict +else: + if PYDANTIC_V1: + # TODO: provide an error message here? 
+ ConfigDict = None + else: + from pydantic import ConfigDict as ConfigDict + + +# renamed methods / properties +def parse_obj(model: type[_ModelT], value: object) -> _ModelT: + if PYDANTIC_V1: + return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + else: + return model.model_validate(value) + + +def field_is_required(field: FieldInfo) -> bool: + if PYDANTIC_V1: + return field.required # type: ignore + return field.is_required() + + +def field_get_default(field: FieldInfo) -> Any: + value = field.get_default() + if PYDANTIC_V1: + return value + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None + return value + + +def field_outer_type(field: FieldInfo) -> Any: + if PYDANTIC_V1: + return field.outer_type_ # type: ignore + return field.annotation + + +def get_model_config(model: type[pydantic.BaseModel]) -> Any: + if PYDANTIC_V1: + return model.__config__ # type: ignore + return model.model_config + + +def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: + if PYDANTIC_V1: + return model.__fields__ # type: ignore + return model.model_fields + + +def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: + if PYDANTIC_V1: + return model.copy(deep=deep) # type: ignore + return model.model_copy(deep=deep) + + +def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: + if PYDANTIC_V1: + return model.json(indent=indent) # type: ignore + return model.model_dump_json(indent=indent) + + +def model_dump( + model: pydantic.BaseModel, + *, + exclude: IncEx | None = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + warnings: bool = True, + mode: Literal["json", "python"] = "python", +) -> dict[str, Any]: + if (not PYDANTIC_V1) or hasattr(model, "model_dump"): + return model.model_dump( + mode=mode, + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + # warnings are not supported in Pydantic v1 + warnings=True if PYDANTIC_V1 else warnings, + ) + return cast( + "dict[str, Any]", + model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + ), + ) + + +def model_parse(model: type[_ModelT], data: Any) -> _ModelT: + if PYDANTIC_V1: + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + return model.model_validate(data) + + +# generic models +if TYPE_CHECKING: + + class GenericModel(pydantic.BaseModel): ... + +else: + if PYDANTIC_V1: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... + else: + # there no longer needs to be a distinction in v2 but + # we still have to create our own subclass to avoid + # inconsistent MRO ordering errors + class GenericModel(pydantic.BaseModel): ... + + +# cached properties +if TYPE_CHECKING: + cached_property = property + + # we define a separate type (copied from typeshed) + # that represents that `cached_property` is `set`able + # at runtime, which differs from `@property`. + # + # this is a separate type as editors likely special case + # `@property` and we don't want to cause issues just to have + # more helpful internal types. + + class typed_cached_property(Generic[_T]): + func: Callable[[Any], _T] + attrname: str | None + + def __init__(self, func: Callable[[Any], _T]) -> None: ... + + @overload + def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... 
+ + @overload + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... + + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: + raise NotImplementedError() + + def __set_name__(self, owner: type[Any], name: str) -> None: ... + + # __set__ is not defined at runtime, but @cached_property is designed to be settable + def __set__(self, instance: object, value: _T) -> None: ... +else: + from functools import cached_property as cached_property + + typed_cached_property = cached_property diff --git a/src/imagekitio/_constants.py b/src/imagekitio/_constants.py new file mode 100644 index 00000000..6ddf2c71 --- /dev/null +++ b/src/imagekitio/_constants.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import httpx + +RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" +OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" + +# default timeout is 1 minute +DEFAULT_TIMEOUT = httpx.Timeout(timeout=60, connect=5.0) +DEFAULT_MAX_RETRIES = 2 +DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20) + +INITIAL_RETRY_DELAY = 0.5 +MAX_RETRY_DELAY = 8.0 diff --git a/src/imagekitio/_exceptions.py b/src/imagekitio/_exceptions.py new file mode 100644 index 00000000..364ab5af --- /dev/null +++ b/src/imagekitio/_exceptions.py @@ -0,0 +1,112 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal + +import httpx + +__all__ = [ + "BadRequestError", + "AuthenticationError", + "PermissionDeniedError", + "NotFoundError", + "ConflictError", + "UnprocessableEntityError", + "RateLimitError", + "InternalServerError", +] + + +class ImageKitError(Exception): + pass + + +class APIError(ImageKitError): + message: str + request: httpx.Request + + body: object | None + """The API response body. + + If the API responded with a valid JSON structure then this property will be the + decoded result. + + If it isn't a valid JSON structure then this will be the raw response. + + If there was no response associated with this error then it will be `None`. 
+ """ + + def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None: # noqa: ARG002 + super().__init__(message) + self.request = request + self.message = message + self.body = body + + +class APIResponseValidationError(APIError): + response: httpx.Response + status_code: int + + def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None: + super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body) + self.response = response + self.status_code = response.status_code + + +class APIWebhookValidationError(APIError): + pass + + +class APIStatusError(APIError): + """Raised when an API response has a status code of 4xx or 5xx.""" + + response: httpx.Response + status_code: int + + def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None: + super().__init__(message, response.request, body=body) + self.response = response + self.status_code = response.status_code + + +class APIConnectionError(APIError): + def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: + super().__init__(message, request, body=None) + + +class APITimeoutError(APIConnectionError): + def __init__(self, request: httpx.Request) -> None: + super().__init__(message="Request timed out.", request=request) + + +class BadRequestError(APIStatusError): + status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride] + + +class AuthenticationError(APIStatusError): + status_code: Literal[401] = 401 # pyright: ignore[reportIncompatibleVariableOverride] + + +class PermissionDeniedError(APIStatusError): + status_code: Literal[403] = 403 # pyright: ignore[reportIncompatibleVariableOverride] + + +class NotFoundError(APIStatusError): + status_code: Literal[404] = 404 # pyright: ignore[reportIncompatibleVariableOverride] + + +class ConflictError(APIStatusError): + status_code: Literal[409] = 409 # pyright: ignore[reportIncompatibleVariableOverride] + + +class UnprocessableEntityError(APIStatusError): + status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride] + + +class RateLimitError(APIStatusError): + status_code: Literal[429] = 429 # pyright: ignore[reportIncompatibleVariableOverride] + + +class InternalServerError(APIStatusError): + pass diff --git a/src/imagekitio/_files.py b/src/imagekitio/_files.py new file mode 100644 index 00000000..331bc44c --- /dev/null +++ b/src/imagekitio/_files.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import io +import os +import pathlib +from typing import overload +from typing_extensions import TypeGuard + +import anyio + +from ._types import ( + FileTypes, + FileContent, + RequestFiles, + HttpxFileTypes, + Base64FileInput, + HttpxFileContent, + HttpxRequestFiles, +) +from ._utils import is_tuple_t, is_mapping_t, is_sequence_t + + +def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]: + return isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike) + + +def is_file_content(obj: object) -> TypeGuard[FileContent]: + return ( + isinstance(obj, bytes) or isinstance(obj, tuple) or isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike) + ) + + +def assert_is_file_content(obj: object, *, key: str | None = None) -> None: + if not is_file_content(obj): + prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" + raise RuntimeError( + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple 
but received {type(obj)} instead. See https://github.com/imagekit-developer/imagekit-python/tree/master#file-uploads" + ) from None + + +@overload +def to_httpx_files(files: None) -> None: ... + + +@overload +def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... + + +def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: + if files is None: + return None + + if is_mapping_t(files): + files = {key: _transform_file(file) for key, file in files.items()} + elif is_sequence_t(files): + files = [(key, _transform_file(file)) for key, file in files] + else: + raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence") + + return files + + +def _transform_file(file: FileTypes) -> HttpxFileTypes: + if is_file_content(file): + if isinstance(file, os.PathLike): + path = pathlib.Path(file) + return (path.name, path.read_bytes()) + + return file + + if is_tuple_t(file): + return (file[0], read_file_content(file[1]), *file[2:]) + + raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") + + +def read_file_content(file: FileContent) -> HttpxFileContent: + if isinstance(file, os.PathLike): + return pathlib.Path(file).read_bytes() + return file + + +@overload +async def async_to_httpx_files(files: None) -> None: ... + + +@overload +async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... + + +async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: + if files is None: + return None + + if is_mapping_t(files): + files = {key: await _async_transform_file(file) for key, file in files.items()} + elif is_sequence_t(files): + files = [(key, await _async_transform_file(file)) for key, file in files] + else: + raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence") + + return files + + +async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: + if is_file_content(file): + if isinstance(file, os.PathLike): + path = anyio.Path(file) + return (path.name, await path.read_bytes()) + + return file + + if is_tuple_t(file): + return (file[0], await async_read_file_content(file[1]), *file[2:]) + + raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") + + +async def async_read_file_content(file: FileContent) -> HttpxFileContent: + if isinstance(file, os.PathLike): + return await anyio.Path(file).read_bytes() + + return file diff --git a/src/imagekitio/_models.py b/src/imagekitio/_models.py new file mode 100644 index 00000000..ca9500b2 --- /dev/null +++ b/src/imagekitio/_models.py @@ -0,0 +1,857 @@ +from __future__ import annotations + +import os +import inspect +import weakref +from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast +from datetime import date, datetime +from typing_extensions import ( + List, + Unpack, + Literal, + ClassVar, + Protocol, + Required, + ParamSpec, + TypedDict, + TypeGuard, + final, + override, + runtime_checkable, +) + +import pydantic +from pydantic.fields import FieldInfo + +from ._types import ( + Body, + IncEx, + Query, + ModelT, + Headers, + Timeout, + NotGiven, + AnyMapping, + HttpxRequestFiles, +) +from ._utils import ( + PropertyInfo, + is_list, + is_given, + json_safe, + lru_cache, + is_mapping, + parse_date, + coerce_boolean, + parse_datetime, + strip_not_given, + extract_type_arg, + is_annotated_type, + is_type_alias_type, + strip_annotated_type, +) +from ._compat import ( + PYDANTIC_V1, + ConfigDict, + GenericModel as 
BaseGenericModel, + get_args, + is_union, + parse_obj, + get_origin, + is_literal_type, + get_model_config, + get_model_fields, + field_get_default, +) +from ._constants import RAW_RESPONSE_HEADER + +if TYPE_CHECKING: + from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema + +__all__ = ["BaseModel", "GenericModel"] + +_T = TypeVar("_T") +_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel") + +P = ParamSpec("P") + + +@runtime_checkable +class _ConfigProtocol(Protocol): + allow_population_by_field_name: bool + + +class BaseModel(pydantic.BaseModel): + if PYDANTIC_V1: + + @property + @override + def model_fields_set(self) -> set[str]: + # a forwards-compat shim for pydantic v2 + return self.__fields_set__ # type: ignore + + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + extra: Any = pydantic.Extra.allow # type: ignore + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) + + def to_dict( + self, + *, + mode: Literal["json", "python"] = "python", + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> dict[str, object]: + """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + mode: + If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`. + If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)` + + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that are set to their default value from the output. + exclude_none: Whether to exclude fields that have a value of `None` from the output. + warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2. + """ + return self.model_dump( + mode=mode, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + def to_json( + self, + *, + indent: int | None = 2, + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> str: + """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. 
Defaults to `2` + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2. + """ + return self.model_dump_json( + indent=indent, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + @override + def __str__(self) -> str: + # mypy complains about an invalid self arg + return f"{self.__repr_name__()}({self.__repr_str__(', ')})" # type: ignore[misc] + + # Override the 'construct' method in a way that supports recursive parsing without validation. + # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. + @classmethod + @override + def construct( # pyright: ignore[reportIncompatibleMethodOverride] + __cls: Type[ModelT], + _fields_set: set[str] | None = None, + **values: object, + ) -> ModelT: + m = __cls.__new__(__cls) + fields_values: dict[str, object] = {} + + config = get_model_config(__cls) + populate_by_name = ( + config.allow_population_by_field_name + if isinstance(config, _ConfigProtocol) + else config.get("populate_by_name") + ) + + if _fields_set is None: + _fields_set = set() + + model_fields = get_model_fields(__cls) + for name, field in model_fields.items(): + key = field.alias + if key is None or (key not in values and populate_by_name): + key = name + + if key in values: + fields_values[name] = _construct_field(value=values[key], field=field, key=key) + _fields_set.add(name) + else: + fields_values[name] = field_get_default(field) + + extra_field_type = _get_extra_fields_type(__cls) + + _extra = {} + for key, value in values.items(): + if key not in model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + + if PYDANTIC_V1: + _fields_set.add(key) + fields_values[key] = parsed + else: + _extra[key] = parsed + + object.__setattr__(m, "__dict__", fields_values) + + if PYDANTIC_V1: + # init_private_attributes() does not exist in v2 + m._init_private_attributes() # type: ignore + + # copied from Pydantic v1's `construct()` method + object.__setattr__(m, "__fields_set__", _fields_set) + else: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) + object.__setattr__(m, "__pydantic_extra__", _extra) + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) + + return m + + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + # because the type signatures are technically different + # although not in practice + model_construct = construct + + if PYDANTIC_V1: + # we define aliases for some of the new pydantic v2 methods so + # that we can just document these methods without having to specify + # a specific pydantic version as some users may not know which + # pydantic version they are currently using + + @override + def model_dump( + self, + *, + mode: Literal["json", "python"] | str = "python", + include: IncEx | None = None, + exclude: IncEx | None = None, + context: Any | None = None, + by_alias: bool | None = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: 
bool = False, + exclude_computed_fields: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + fallback: Callable[[Any], Any] | None = None, + serialize_as_any: bool = False, + ) -> dict[str, Any]: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump + + Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + Args: + mode: The mode in which `to_python` should run. + If mode is 'json', the output will only contain JSON serializable types. + If mode is 'python', the output may contain non-JSON-serializable Python objects. + include: A set of fields to include in the output. + exclude: A set of fields to exclude from the output. + context: Additional context to pass to the serializer. + by_alias: Whether to use the field's alias in the dictionary key if defined. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that are set to their default value. + exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. + round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. + warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, + "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. + fallback: A function to call when an unknown value is encountered. If not provided, + a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised. + serialize_as_any: Whether to serialize fields with duck-typing serialization behavior. + + Returns: + A dictionary representation of the model. 
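+
+            Example (a minimal illustrative sketch using the SDK's exported `BaseModel`):
+
+            ```py
+            from imagekitio import BaseModel
+
+
+            class MyModel(BaseModel):
+                foo: str
+
+
+            MyModel(foo="bar").model_dump(mode="json")  # -> {'foo': 'bar'}
+            ```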
+ """ + if mode not in {"json", "python"}: + raise ValueError("mode must be either 'json' or 'python'") + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") + if exclude_computed_fields != False: + raise ValueError("exclude_computed_fields is only supported in Pydantic v2") + dumped = super().dict( # pyright: ignore[reportDeprecated] + include=include, + exclude=exclude, + by_alias=by_alias if by_alias is not None else False, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped + + @override + def model_dump_json( + self, + *, + indent: int | None = None, + ensure_ascii: bool = False, + include: IncEx | None = None, + exclude: IncEx | None = None, + context: Any | None = None, + by_alias: bool | None = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + exclude_computed_fields: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + fallback: Callable[[Any], Any] | None = None, + serialize_as_any: bool = False, + ) -> str: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json + + Generates a JSON representation of the model using Pydantic's `to_json` method. + + Args: + indent: Indentation to use in the JSON output. If None is passed, the output will be compact. + include: Field(s) to include in the JSON output. Can take either a string or set of strings. + exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings. + by_alias: Whether to serialize using field aliases. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + round_trip: Whether to use serialization/deserialization between JSON and class instance. + warnings: Whether to show any warnings that occurred during serialization. + + Returns: + A JSON string representation of the model. 
+ """ + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") + if ensure_ascii != False: + raise ValueError("ensure_ascii is only supported in Pydantic v2") + if exclude_computed_fields != False: + raise ValueError("exclude_computed_fields is only supported in Pydantic v2") + return super().json( # type: ignore[reportDeprecated] + indent=indent, + include=include, + exclude=exclude, + by_alias=by_alias if by_alias is not None else False, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + +def _construct_field(value: object, field: FieldInfo, key: str) -> object: + if value is None: + return field_get_default(field) + + if PYDANTIC_V1: + type_ = cast(type, field.outer_type_) # type: ignore + else: + type_ = field.annotation # type: ignore + + if type_ is None: + raise RuntimeError(f"Unexpected field type is None for {key}") + + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) + + +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if PYDANTIC_V1: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None + + +def is_basemodel(type_: type) -> bool: + """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" + if is_union(type_): + for variant in get_args(type_): + if is_basemodel(variant): + return True + + return False + + return is_basemodel_type(type_) + + +def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: + origin = get_origin(type_) or type_ + if not inspect.isclass(origin): + return False + return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) + + +def build( + base_model_cls: Callable[P, _BaseModelT], + *args: P.args, + **kwargs: P.kwargs, +) -> _BaseModelT: + """Construct a BaseModel class without validation. + + This is useful for cases where you need to instantiate a `BaseModel` + from an API response as this provides type-safe params which isn't supported + by helpers like `construct_type()`. + + ```py + build(MyModel, my_field_a="foo", my_field_b=123) + ``` + """ + if args: + raise TypeError( + "Received positional arguments which are not supported; Keyword arguments must be used instead", + ) + + return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) + + +def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: + """Loose coercion to the expected type with construction of nested values. + + Note: the returned value from this function is not guaranteed to match the + given type. + """ + return cast(_T, construct_type(value=value, type_=type_)) + + +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: + """Loose coercion to the expected type with construction of nested values. 
+ + If the given value does not match the expected type then it is returned as-is. + """ + + # store a reference to the original type we were given before we extract any inner + # types so that we can properly resolve forward references in `TypeAliasType` annotations + original_type = None + + # we allow `object` as the input type because otherwise, passing things like + # `Literal['value']` will be reported as a type error by type checkers + type_ = cast("type[object]", type_) + if is_type_alias_type(type_): + original_type = type_ # type: ignore[unreachable] + type_ = type_.__value__ # type: ignore[unreachable] + + # unwrap `Annotated[T, ...]` -> `T` + if metadata is not None and len(metadata) > 0: + meta: tuple[Any, ...] = tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] + type_ = extract_type_arg(type_, 0) + else: + meta = tuple() + + # we need to use the origin class for any types that are subscripted generics + # e.g. Dict[str, object] + origin = get_origin(type_) or type_ + args = get_args(type_) + + if is_union(origin): + try: + return validate_type(type_=cast("type[object]", original_type or type_), value=value) + except Exception: + pass + + # if the type is a discriminated union then we want to construct the right variant + # in the union, even if the data doesn't match exactly, otherwise we'd break code + # that relies on the constructed class types, e.g. + # + # class FooType: + # kind: Literal['foo'] + # value: str + # + # class BarType: + # kind: Literal['bar'] + # value: int + # + # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then + # we'd end up constructing `FooType` when it should be `BarType`. + discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta) + if discriminator and is_mapping(value): + variant_value = value.get(discriminator.field_alias_from or discriminator.field_name) + if variant_value and isinstance(variant_value, str): + variant_type = discriminator.mapping.get(variant_value) + if variant_type: + return construct_type(type_=variant_type, value=value) + + # if the data is not valid, use the first variant that doesn't fail while deserializing + for variant in args: + try: + return construct_type(value=value, type_=variant) + except Exception: + continue + + raise RuntimeError(f"Could not convert data into a valid instance of {type_}") + + if origin == dict: + if not is_mapping(value): + return value + + _, items_type = get_args(type_) # Dict[_, items_type] + return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} + + if ( + not is_literal_type(type_) + and inspect.isclass(origin) + and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)) + ): + if is_list(value): + return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] + + if is_mapping(value): + if issubclass(type_, BaseModel): + return type_.construct(**value) # type: ignore[arg-type] + + return cast(Any, type_).construct(**value) + + if origin == list: + if not is_list(value): + return value + + inner_type = args[0] # List[inner_type] + return [construct_type(value=entry, type_=inner_type) for entry in value] + + if origin == float: + if isinstance(value, int): + coerced = float(value) + if coerced != value: + return value + return coerced + + return value + + if type_ == datetime: + try: + return parse_datetime(value) # type: ignore + except Exception: + return value + + if type_ == date: + try: + return 
parse_date(value) # type: ignore + except Exception: + return value + + return value + + +@runtime_checkable +class CachedDiscriminatorType(Protocol): + __discriminator__: DiscriminatorDetails + + +DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary() + + +class DiscriminatorDetails: + field_name: str + """The name of the discriminator field in the variant class, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] + ``` + + Will result in field_name='type' + """ + + field_alias_from: str | None + """The name of the discriminator field in the API response, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] = Field(alias='type_from_api') + ``` + + Will result in field_alias_from='type_from_api' + """ + + mapping: dict[str, type] + """Mapping of discriminator value to variant type, e.g. + + {'foo': FooVariant, 'bar': BarVariant} + """ + + def __init__( + self, + *, + mapping: dict[str, type], + discriminator_field: str, + discriminator_alias: str | None, + ) -> None: + self.mapping = mapping + self.field_name = discriminator_field + self.field_alias_from = discriminator_alias + + +def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: + cached = DISCRIMINATOR_CACHE.get(union) + if cached is not None: + return cached + + discriminator_field_name: str | None = None + + for annotation in meta_annotations: + if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None: + discriminator_field_name = annotation.discriminator + break + + if not discriminator_field_name: + return None + + mapping: dict[str, type] = {} + discriminator_alias: str | None = None + + for variant in get_args(union): + variant = strip_annotated_type(variant) + if is_basemodel_type(variant): + if PYDANTIC_V1: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: + continue + + # Note: if one variant defines an alias then they all should + discriminator_alias = field_info.alias + + if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): + for entry in get_args(annotation): + if isinstance(entry, str): + mapping[entry] = variant + else: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: + continue + + # Note: if one variant defines an alias then they all should + discriminator_alias = field.get("serialization_alias") + + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: + if isinstance(entry, str): + mapping[entry] = variant + + if not mapping: + return None + + details = DiscriminatorDetails( + mapping=mapping, + discriminator_field=discriminator_field_name, + discriminator_alias=discriminator_alias, + ) + DISCRIMINATOR_CACHE.setdefault(union, details) + return details + + +def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: + schema = model.__pydantic_core_schema__ + if schema["type"] == "definitions": + schema = schema["schema"] + + if schema["type"] != "model": + return None + + schema = cast("ModelSchema", schema) + fields_schema = schema["schema"] + if fields_schema["type"] != "model-fields": + return None + + fields_schema = cast("ModelFieldsSchema", fields_schema) + field = fields_schema["fields"].get(field_name) + if not field: + return None + + return 
cast("ModelField", field) # pyright: ignore[reportUnnecessaryCast] + + +def validate_type(*, type_: type[_T], value: object) -> _T: + """Strict validation that the given value matches the expected type""" + if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): + return cast(_T, parse_obj(type_, value)) + + return cast(_T, _validate_non_model_type(type_=type_, value=value)) + + +def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None: + """Add a pydantic config for the given type. + + Note: this is a no-op on Pydantic v1. + """ + setattr(typ, "__pydantic_config__", config) # noqa: B010 + + +# our use of subclassing here causes weirdness for type checkers, +# so we just pretend that we don't subclass +if TYPE_CHECKING: + GenericModel = BaseModel +else: + + class GenericModel(BaseGenericModel, BaseModel): + pass + + +if not PYDANTIC_V1: + from pydantic import TypeAdapter as _TypeAdapter + + _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter)) + + if TYPE_CHECKING: + from pydantic import TypeAdapter + else: + TypeAdapter = _CachedTypeAdapter + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + return TypeAdapter(type_).validate_python(value) + +elif not TYPE_CHECKING: # TODO: condition is weird + + class RootModel(GenericModel, Generic[_T]): + """Used as a placeholder to easily convert runtime types to a Pydantic format + to provide validation. + + For example: + ```py + validated = RootModel[int](__root__="5").__root__ + # validated: 5 + ``` + """ + + __root__: _T + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + model = _create_pydantic_model(type_).validate(value) + return cast(_T, model.__root__) + + def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]: + return RootModel[type_] # type: ignore + + +class FinalRequestOptionsInput(TypedDict, total=False): + method: Required[str] + url: Required[str] + params: Query + headers: Headers + max_retries: int + timeout: float | Timeout | None + files: HttpxRequestFiles | None + idempotency_key: str + json_data: Body + extra_json: AnyMapping + follow_redirects: bool + + +@final +class FinalRequestOptions(pydantic.BaseModel): + method: str + url: str + params: Query = {} + headers: Union[Headers, NotGiven] = NotGiven() + max_retries: Union[int, NotGiven] = NotGiven() + timeout: Union[float, Timeout, None, NotGiven] = NotGiven() + files: Union[HttpxRequestFiles, None] = None + idempotency_key: Union[str, None] = None + post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + follow_redirects: Union[bool, None] = None + + # It should be noted that we cannot use `json` here as that would override + # a BaseModel method in an incompatible fashion. + json_data: Union[Body, None] = None + extra_json: Union[AnyMapping, None] = None + + if PYDANTIC_V1: + + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) + + def get_max_retries(self, max_retries: int) -> int: + if isinstance(self.max_retries, NotGiven): + return max_retries + return self.max_retries + + def _strip_raw_response_header(self) -> None: + if not is_given(self.headers): + return + + if self.headers.get(RAW_RESPONSE_HEADER): + self.headers = {**self.headers} + self.headers.pop(RAW_RESPONSE_HEADER) + + # override the `construct` method so that we can run custom transformations. 
+ # this is necessary as we don't want to do any actual runtime type checking + # (which means we can't use validators) but we do want to ensure that `NotGiven` + # values are not present + # + # type ignore required because we're adding explicit types to `**values` + @classmethod + def construct( # type: ignore + cls, + _fields_set: set[str] | None = None, + **values: Unpack[FinalRequestOptionsInput], + ) -> FinalRequestOptions: + kwargs: dict[str, Any] = { + # we unconditionally call `strip_not_given` on any value + # as it will just ignore any non-mapping types + key: strip_not_given(value) + for key, value in values.items() + } + if PYDANTIC_V1: + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + return super().model_construct(_fields_set, **kwargs) + + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + model_construct = construct diff --git a/src/imagekitio/_qs.py b/src/imagekitio/_qs.py new file mode 100644 index 00000000..ada6fd3f --- /dev/null +++ b/src/imagekitio/_qs.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from typing import Any, List, Tuple, Union, Mapping, TypeVar +from urllib.parse import parse_qs, urlencode +from typing_extensions import Literal, get_args + +from ._types import NotGiven, not_given +from ._utils import flatten + +_T = TypeVar("_T") + + +ArrayFormat = Literal["comma", "repeat", "indices", "brackets"] +NestedFormat = Literal["dots", "brackets"] + +PrimitiveData = Union[str, int, float, bool, None] +# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"] +# https://github.com/microsoft/pyright/issues/3555 +Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"] +Params = Mapping[str, Data] + + +class Querystring: + array_format: ArrayFormat + nested_format: NestedFormat + + def __init__( + self, + *, + array_format: ArrayFormat = "repeat", + nested_format: NestedFormat = "brackets", + ) -> None: + self.array_format = array_format + self.nested_format = nested_format + + def parse(self, query: str) -> Mapping[str, object]: + # Note: custom format syntax is not supported yet + return parse_qs(query) + + def stringify( + self, + params: Params, + *, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, + ) -> str: + return urlencode( + self.stringify_items( + params, + array_format=array_format, + nested_format=nested_format, + ) + ) + + def stringify_items( + self, + params: Params, + *, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, + ) -> list[tuple[str, str]]: + opts = Options( + qs=self, + array_format=array_format, + nested_format=nested_format, + ) + return flatten([self._stringify_item(key, value, opts) for key, value in params.items()]) + + def _stringify_item( + self, + key: str, + value: Data, + opts: Options, + ) -> list[tuple[str, str]]: + if isinstance(value, Mapping): + items: list[tuple[str, str]] = [] + nested_format = opts.nested_format + for subkey, subvalue in value.items(): + items.extend( + self._stringify_item( + # TODO: error if unknown format + f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]", + subvalue, + opts, + ) + ) + return items + + if isinstance(value, (list, tuple)): + array_format = opts.array_format + if array_format == "comma": + return [ + ( + key, + ",".join(self._primitive_value_to_str(item) for item in value if item is not 
None), + ), + ] + elif array_format == "repeat": + items = [] + for item in value: + items.extend(self._stringify_item(key, item, opts)) + return items + elif array_format == "indices": + raise NotImplementedError("The array indices format is not supported yet") + elif array_format == "brackets": + items = [] + key = key + "[]" + for item in value: + items.extend(self._stringify_item(key, item, opts)) + return items + else: + raise NotImplementedError( + f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}" + ) + + serialised = self._primitive_value_to_str(value) + if not serialised: + return [] + return [(key, serialised)] + + def _primitive_value_to_str(self, value: PrimitiveData) -> str: + # copied from httpx + if value is True: + return "true" + elif value is False: + return "false" + elif value is None: + return "" + return str(value) + + +_qs = Querystring() +parse = _qs.parse +stringify = _qs.stringify +stringify_items = _qs.stringify_items + + +class Options: + array_format: ArrayFormat + nested_format: NestedFormat + + def __init__( + self, + qs: Querystring = _qs, + *, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, + ) -> None: + self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format + self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format diff --git a/src/imagekitio/_resource.py b/src/imagekitio/_resource.py new file mode 100644 index 00000000..f830660f --- /dev/null +++ b/src/imagekitio/_resource.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import time +from typing import TYPE_CHECKING + +import anyio + +if TYPE_CHECKING: + from ._client import ImageKit, AsyncImageKit + + +class SyncAPIResource: + _client: ImageKit + + def __init__(self, client: ImageKit) -> None: + self._client = client + self._get = client.get + self._post = client.post + self._patch = client.patch + self._put = client.put + self._delete = client.delete + self._get_api_list = client.get_api_list + + def _sleep(self, seconds: float) -> None: + time.sleep(seconds) + + +class AsyncAPIResource: + _client: AsyncImageKit + + def __init__(self, client: AsyncImageKit) -> None: + self._client = client + self._get = client.get + self._post = client.post + self._patch = client.patch + self._put = client.put + self._delete = client.delete + self._get_api_list = client.get_api_list + + async def _sleep(self, seconds: float) -> None: + await anyio.sleep(seconds) diff --git a/src/imagekitio/_response.py b/src/imagekitio/_response.py new file mode 100644 index 00000000..ff8fc4f0 --- /dev/null +++ b/src/imagekitio/_response.py @@ -0,0 +1,832 @@ +from __future__ import annotations + +import os +import inspect +import logging +import datetime +import functools +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Union, + Generic, + TypeVar, + Callable, + Iterator, + AsyncIterator, + cast, + overload, +) +from typing_extensions import Awaitable, ParamSpec, override, get_origin + +import anyio +import httpx +import pydantic + +from ._types import NoneType +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base +from ._models import BaseModel, is_basemodel +from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER +from ._streaming import Stream, AsyncStream, 
is_stream_class_type, extract_stream_chunk_type +from ._exceptions import ImageKitError, APIResponseValidationError + +if TYPE_CHECKING: + from ._models import FinalRequestOptions + from ._base_client import BaseClient + + +P = ParamSpec("P") +R = TypeVar("R") +_T = TypeVar("_T") +_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]") +_AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]") + +log: logging.Logger = logging.getLogger(__name__) + + +class BaseAPIResponse(Generic[R]): + _cast_to: type[R] + _client: BaseClient[Any, Any] + _parsed_by_type: dict[type[Any], Any] + _is_sse_stream: bool + _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None + _options: FinalRequestOptions + + http_response: httpx.Response + + retries_taken: int + """The number of retries made. If no retries happened this will be `0`""" + + def __init__( + self, + *, + raw: httpx.Response, + cast_to: type[R], + client: BaseClient[Any, Any], + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + options: FinalRequestOptions, + retries_taken: int = 0, + ) -> None: + self._cast_to = cast_to + self._client = client + self._parsed_by_type = {} + self._is_sse_stream = stream + self._stream_cls = stream_cls + self._options = options + self.http_response = raw + self.retries_taken = retries_taken + + @property + def headers(self) -> httpx.Headers: + return self.http_response.headers + + @property + def http_request(self) -> httpx.Request: + """Returns the httpx Request instance associated with the current response.""" + return self.http_response.request + + @property + def status_code(self) -> int: + return self.http_response.status_code + + @property + def url(self) -> httpx.URL: + """Returns the URL for which the request was made.""" + return self.http_response.url + + @property + def method(self) -> str: + return self.http_request.method + + @property + def http_version(self) -> str: + return self.http_response.http_version + + @property + def elapsed(self) -> datetime.timedelta: + """The time taken for the complete request/response cycle to complete.""" + return self.http_response.elapsed + + @property + def is_closed(self) -> bool: + """Whether or not the response body has been closed. + + If this is False then there is response data that has not been read yet. + You must either fully consume the response body or call `.close()` + before discarding the response to prevent resource leaks. + """ + return self.http_response.is_closed + + @override + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>" + ) + + def _parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + + # unwrap `Annotated[T, ...]` -> `T` + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) + + origin = get_origin(cast_to) or cast_to + + if self._is_sse_stream: + if to: + if not is_stream_class_type(to): + raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}") + + return cast( + _T, + to( + cast_to=extract_stream_chunk_type( + to, + failure_message="Expected custom stream type to be passed with a type argument, e.g. 
Stream[ChunkType]", + ), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + if self._stream_cls: + return cast( + R, + self._stream_cls( + cast_to=extract_stream_chunk_type(self._stream_cls), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls) + if stream_cls is None: + raise MissingStreamClassError() + + return cast( + R, + stream_cls( + cast_to=cast_to, + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + if cast_to is NoneType: + return cast(R, None) + + response = self.http_response + if cast_to == str: + return cast(R, response.text) + + if cast_to == bytes: + return cast(R, response.content) + + if cast_to == int: + return cast(R, int(response.text)) + + if cast_to == float: + return cast(R, float(response.text)) + + if cast_to == bool: + return cast(R, response.text.lower() == "true") + + if origin == APIResponse: + raise RuntimeError("Unexpected state - cast_to is `APIResponse`") + + if inspect.isclass(origin) and issubclass(origin, httpx.Response): + # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response + # and pass that class to our request functions. We cannot change the variance to be either + # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct + # the response class ourselves but that is something that should be supported directly in httpx + # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. + if cast_to != httpx.Response: + raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") + return cast(R, response) + + if ( + inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) + and not issubclass(origin, BaseModel) + and issubclass(origin, pydantic.BaseModel) + ): + raise TypeError( + "Pydantic models must subclass our base model type, e.g. `from imagekitio import BaseModel`" + ) + + if ( + cast_to is not object + and not origin is list + and not origin is dict + and not origin is Union + and not issubclass(origin, BaseModel) + ): + raise RuntimeError( + f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." + ) + + # split is required to handle cases where additional information is included + # in the response, e.g. application/json; charset=utf-8 + content_type, *_ = response.headers.get("content-type", "*").split(";") + if not content_type.endswith("json"): + if is_basemodel(cast_to): + try: + data = response.json() + except Exception as exc: + log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) + else: + return self._client._process_response_data( + data=data, + cast_to=cast_to, # type: ignore + response=response, + ) + + if self._client._strict_response_validation: + raise APIResponseValidationError( + response=response, + message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", + body=response.text, + ) + + # If the API responds with content that isn't JSON then we just return + # the (decoded) text without performing any parsing so that you can still + # handle the response however you need to. 
+ return response.text # type: ignore + + data = response.json() + + return self._client._process_response_data( + data=data, + cast_to=cast_to, # type: ignore + response=response, + ) + + +class APIResponse(BaseAPIResponse[R]): + @overload + def parse(self, *, to: type[_T]) -> _T: ... + + @overload + def parse(self) -> R: ... + + def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. + + ```py + from imagekitio import BaseModel + + + class MyModel(BaseModel): + foo: str + + + obj = response.parse(to=MyModel) + print(obj.foo) + ``` + + We support parsing: + - `BaseModel` + - `dict` + - `list` + - `Union` + - `str` + - `int` + - `float` + - `httpx.Response` + """ + cache_key = to if to is not None else self._cast_to + cached = self._parsed_by_type.get(cache_key) + if cached is not None: + return cached # type: ignore[no-any-return] + + if not self._is_sse_stream: + self.read() + + parsed = self._parse(to=to) + if is_given(self._options.post_parser): + parsed = self._options.post_parser(parsed) + + self._parsed_by_type[cache_key] = parsed + return parsed + + def read(self) -> bytes: + """Read and return the binary response content.""" + try: + return self.http_response.read() + except httpx.StreamConsumed as exc: + # The default error raised by httpx isn't very + # helpful in our case so we re-raise it with + # a different error message. + raise StreamAlreadyConsumed() from exc + + def text(self) -> str: + """Read and decode the response content into a string.""" + self.read() + return self.http_response.text + + def json(self) -> object: + """Read and decode the JSON response content.""" + self.read() + return self.http_response.json() + + def close(self) -> None: + """Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + self.http_response.close() + + def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: + """ + A byte-iterator over the decoded response content. + + This automatically handles gzip, deflate and brotli encoded responses. + """ + for chunk in self.http_response.iter_bytes(chunk_size): + yield chunk + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + """A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + for chunk in self.http_response.iter_text(chunk_size): + yield chunk + + def iter_lines(self) -> Iterator[str]: + """Like `iter_text()` but will only yield chunks for each line""" + for chunk in self.http_response.iter_lines(): + yield chunk + + +class AsyncAPIResponse(BaseAPIResponse[R]): + @overload + async def parse(self, *, to: type[_T]) -> _T: ... + + @overload + async def parse(self) -> R: ... + + async def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. 
+ + ```py + from imagekitio import BaseModel + + + class MyModel(BaseModel): + foo: str + + + obj = response.parse(to=MyModel) + print(obj.foo) + ``` + + We support parsing: + - `BaseModel` + - `dict` + - `list` + - `Union` + - `str` + - `httpx.Response` + """ + cache_key = to if to is not None else self._cast_to + cached = self._parsed_by_type.get(cache_key) + if cached is not None: + return cached # type: ignore[no-any-return] + + if not self._is_sse_stream: + await self.read() + + parsed = self._parse(to=to) + if is_given(self._options.post_parser): + parsed = self._options.post_parser(parsed) + + self._parsed_by_type[cache_key] = parsed + return parsed + + async def read(self) -> bytes: + """Read and return the binary response content.""" + try: + return await self.http_response.aread() + except httpx.StreamConsumed as exc: + # the default error raised by httpx isn't very + # helpful in our case so we re-raise it with + # a different error message + raise StreamAlreadyConsumed() from exc + + async def text(self) -> str: + """Read and decode the response content into a string.""" + await self.read() + return self.http_response.text + + async def json(self) -> object: + """Read and decode the JSON response content.""" + await self.read() + return self.http_response.json() + + async def close(self) -> None: + """Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + await self.http_response.aclose() + + async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: + """ + A byte-iterator over the decoded response content. + + This automatically handles gzip, deflate and brotli encoded responses. + """ + async for chunk in self.http_response.aiter_bytes(chunk_size): + yield chunk + + async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: + """A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + async for chunk in self.http_response.aiter_text(chunk_size): + yield chunk + + async def iter_lines(self) -> AsyncIterator[str]: + """Like `iter_text()` but will only yield chunks for each line""" + async for chunk in self.http_response.aiter_lines(): + yield chunk + + +class BinaryAPIResponse(APIResponse[bytes]): + """Subclass of APIResponse providing helpers for dealing with binary data. + + Note: If you want to stream the response data instead of eagerly reading it + all at once then you should use `.with_streaming_response` when making + the API request, e.g. `.with_streaming_response.get_binary_response()` + """ + + def write_to_file( + self, + file: str | os.PathLike[str], + ) -> None: + """Write the output to the given file. + + Accepts a filename or any path-like object, e.g. pathlib.Path + + Note: if you want to stream the data to the file instead of writing + all at once then you should use `.with_streaming_response` when making + the API request, e.g. `.with_streaming_response.get_binary_response()` + """ + with open(file, mode="wb") as f: + for data in self.iter_bytes(): + f.write(data) + + +class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]): + """Subclass of APIResponse providing helpers for dealing with binary data. + + Note: If you want to stream the response data instead of eagerly reading it + all at once then you should use `.with_streaming_response` when making + the API request, e.g. 
`.with_streaming_response.get_binary_response()` + """ + + async def write_to_file( + self, + file: str | os.PathLike[str], + ) -> None: + """Write the output to the given file. + + Accepts a filename or any path-like object, e.g. pathlib.Path + + Note: if you want to stream the data to the file instead of writing + all at once then you should use `.with_streaming_response` when making + the API request, e.g. `.with_streaming_response.get_binary_response()` + """ + path = anyio.Path(file) + async with await path.open(mode="wb") as f: + async for data in self.iter_bytes(): + await f.write(data) + + +class StreamedBinaryAPIResponse(APIResponse[bytes]): + def stream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + """Streams the output to the given file. + + Accepts a filename or any path-like object, e.g. pathlib.Path + """ + with open(file, mode="wb") as f: + for data in self.iter_bytes(chunk_size): + f.write(data) + + +class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]): + async def stream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + """Streams the output to the given file. + + Accepts a filename or any path-like object, e.g. pathlib.Path + """ + path = anyio.Path(file) + async with await path.open(mode="wb") as f: + async for data in self.iter_bytes(chunk_size): + await f.write(data) + + +class MissingStreamClassError(TypeError): + def __init__(self) -> None: + super().__init__( + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `imagekitio._streaming` for reference", + ) + + +class StreamAlreadyConsumed(ImageKitError): + """ + Attempted to read or stream content, but the content has already + been streamed. + + This can happen if you use a method like `.iter_lines()` and then attempt + to read th entire response body afterwards, e.g. + + ```py + response = await client.post(...) + async for line in response.iter_lines(): + ... # do something with `line` + + content = await response.read() + # ^ error + ``` + + If you want this behaviour you'll need to either manually accumulate the response + content or call `await response.read()` before iterating over the stream. + """ + + def __init__(self) -> None: + message = ( + "Attempted to read or stream some content, but the content has " + "already been streamed. " + "This could be due to attempting to stream the response " + "content more than once." + "\n\n" + "You can fix this by manually accumulating the response content while streaming " + "or by calling `.read()` before starting to stream." 
+ ) + super().__init__(message) + + +class ResponseContextManager(Generic[_APIResponseT]): + """Context manager for ensuring that a request is not made + until it is entered and that the response will always be closed + when the context manager exits + """ + + def __init__(self, request_func: Callable[[], _APIResponseT]) -> None: + self._request_func = request_func + self.__response: _APIResponseT | None = None + + def __enter__(self) -> _APIResponseT: + self.__response = self._request_func() + return self.__response + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if self.__response is not None: + self.__response.close() + + +class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]): + """Context manager for ensuring that a request is not made + until it is entered and that the response will always be closed + when the context manager exits + """ + + def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None: + self._api_request = api_request + self.__response: _AsyncAPIResponseT | None = None + + async def __aenter__(self) -> _AsyncAPIResponseT: + self.__response = await self._api_request + return self.__response + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if self.__response is not None: + await self.__response.close() + + +def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support streaming and returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + + kwargs["extra_headers"] = extra_headers + + make_request = functools.partial(func, *args, **kwargs) + + return ResponseContextManager(cast(Callable[[], APIResponse[R]], make_request)) + + return wrapped + + +def async_to_streamed_response_wrapper( + func: Callable[P, Awaitable[R]], +) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support streaming and returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + + kwargs["extra_headers"] = extra_headers + + make_request = func(*args, **kwargs) + + return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], make_request)) + + return wrapped + + +def to_custom_streamed_response_wrapper( + func: Callable[P, object], + response_cls: type[_APIResponseT], +) -> Callable[P, ResponseContextManager[_APIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support streaming and returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + make_request = functools.partial(func, *args, **kwargs) + + return ResponseContextManager(cast(Callable[[], _APIResponseT], make_request)) + + return wrapped + + +def async_to_custom_streamed_response_wrapper( + func: Callable[P, Awaitable[object]], + response_cls: type[_AsyncAPIResponseT], +) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support streaming and returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + make_request = func(*args, **kwargs) + + return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], make_request)) + + return wrapped + + +def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]: + """Higher order function that takes one of our bound API methods and wraps it + to support returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + + kwargs["extra_headers"] = extra_headers + + return cast(APIResponse[R], func(*args, **kwargs)) + + return wrapped + + +def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + + kwargs["extra_headers"] = extra_headers + + return cast(AsyncAPIResponse[R], await func(*args, **kwargs)) + + return wrapped + + +def to_custom_raw_response_wrapper( + func: Callable[P, object], + response_cls: type[_APIResponseT], +) -> Callable[P, _APIResponseT]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + return cast(_APIResponseT, func(*args, **kwargs)) + + return wrapped + + +def async_to_custom_raw_response_wrapper( + func: Callable[P, Awaitable[object]], + response_cls: type[_AsyncAPIResponseT], +) -> Callable[P, Awaitable[_AsyncAPIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs)) + + return wrapped + + +def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type: + """Given a type like `APIResponse[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyResponse(APIResponse[bytes]): + ... + + extract_response_type(MyResponse) -> bytes + ``` + """ + return extract_type_var_from_base( + typ, + generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse)), + index=0, + ) diff --git a/src/imagekitio/_streaming.py b/src/imagekitio/_streaming.py new file mode 100644 index 00000000..c4a0e31e --- /dev/null +++ b/src/imagekitio/_streaming.py @@ -0,0 +1,333 @@ +# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py +from __future__ import annotations + +import json +import inspect +from types import TracebackType +from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast +from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable + +import httpx + +from ._utils import extract_type_var_from_base + +if TYPE_CHECKING: + from ._client import ImageKit, AsyncImageKit + + +_T = TypeVar("_T") + + +class Stream(Generic[_T]): + """Provides the core interface to iterate over a synchronous stream response.""" + + response: httpx.Response + + _decoder: SSEBytesDecoder + + def __init__( + self, + *, + cast_to: type[_T], + response: httpx.Response, + client: ImageKit, + ) -> None: + self.response = response + self._cast_to = cast_to + self._client = client + self._decoder = client._make_sse_decoder() + self._iterator = self.__stream__() + + def __next__(self) -> _T: + return self._iterator.__next__() + + def __iter__(self) -> Iterator[_T]: + for item in self._iterator: + yield item + + def _iter_events(self) -> Iterator[ServerSentEvent]: + yield from self._decoder.iter_bytes(self.response.iter_bytes()) + + def __stream__(self) -> Iterator[_T]: + cast_to = cast(Any, self._cast_to) + response = self.response + process_data = self._client._process_response_data + iterator = self._iter_events() + + try: + for sse in iterator: + yield process_data(data=sse.json(), 
cast_to=cast_to, response=response) + finally: + # Ensure the response is closed even if the consumer doesn't read all data + response.close() + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def close(self) -> None: + """ + Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + self.response.close() + + +class AsyncStream(Generic[_T]): + """Provides the core interface to iterate over an asynchronous stream response.""" + + response: httpx.Response + + _decoder: SSEDecoder | SSEBytesDecoder + + def __init__( + self, + *, + cast_to: type[_T], + response: httpx.Response, + client: AsyncImageKit, + ) -> None: + self.response = response + self._cast_to = cast_to + self._client = client + self._decoder = client._make_sse_decoder() + self._iterator = self.__stream__() + + async def __anext__(self) -> _T: + return await self._iterator.__anext__() + + async def __aiter__(self) -> AsyncIterator[_T]: + async for item in self._iterator: + yield item + + async def _iter_events(self) -> AsyncIterator[ServerSentEvent]: + async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()): + yield sse + + async def __stream__(self) -> AsyncIterator[_T]: + cast_to = cast(Any, self._cast_to) + response = self.response + process_data = self._client._process_response_data + iterator = self._iter_events() + + try: + async for sse in iterator: + yield process_data(data=sse.json(), cast_to=cast_to, response=response) + finally: + # Ensure the response is closed even if the consumer doesn't read all data + await response.aclose() + + async def __aenter__(self) -> Self: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.close() + + async def close(self) -> None: + """ + Close the response and release the connection. + + Automatically called if the response body is read to completion. 
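For orientation, a minimal, self-contained sketch of how `Stream` ties the pieces above together. The client is stubbed out (`_FakeClient`, the sample payloads, and the use of `dict` as `cast_to` are invented for illustration; a real `ImageKit` client supplies its own decoder and response post-processing), so the snippet runs without credentials or a network call:

```py
import httpx

from imagekitio._streaming import SSEDecoder, Stream


class _FakeClient:
    """Just enough surface for Stream to run; purely illustrative."""

    def _make_sse_decoder(self) -> SSEDecoder:
        return SSEDecoder()

    def _process_response_data(self, *, data: object, cast_to: object, response: httpx.Response) -> object:
        return data  # a real client would build `cast_to` instances here


# Pre-loaded response standing in for a live SSE connection.
response = httpx.Response(200, content=b'data: {"n": 1}\n\ndata: {"n": 2}\n\n')

with Stream(cast_to=dict, response=response, client=_FakeClient()) as stream:  # type: ignore[arg-type]
    for item in stream:
        print(item)  # {'n': 1} then {'n': 2}
```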
+ """ + await self.response.aclose() + + +class ServerSentEvent: + def __init__( + self, + *, + event: str | None = None, + data: str | None = None, + id: str | None = None, + retry: int | None = None, + ) -> None: + if data is None: + data = "" + + self._id = id + self._data = data + self._event = event or None + self._retry = retry + + @property + def event(self) -> str | None: + return self._event + + @property + def id(self) -> str | None: + return self._id + + @property + def retry(self) -> int | None: + return self._retry + + @property + def data(self) -> str: + return self._data + + def json(self) -> Any: + return json.loads(self.data) + + @override + def __repr__(self) -> str: + return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})" + + +class SSEDecoder: + _data: list[str] + _event: str | None + _retry: int | None + _last_event_id: str | None + + def __init__(self) -> None: + self._event = None + self._data = [] + self._last_event_id = None + self._retry = None + + def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + for chunk in self._iter_chunks(iterator): + # Split before decoding so splitlines() only uses \r and \n + for raw_line in chunk.splitlines(): + line = raw_line.decode("utf-8") + sse = self.decode(line) + if sse: + yield sse + + def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]: + """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks""" + data = b"" + for chunk in iterator: + for line in chunk.splitlines(keepends=True): + data += line + if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")): + yield data + data = b"" + if data: + yield data + + async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + async for chunk in self._aiter_chunks(iterator): + # Split before decoding so splitlines() only uses \r and \n + for raw_line in chunk.splitlines(): + line = raw_line.decode("utf-8") + sse = self.decode(line) + if sse: + yield sse + + async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]: + """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks""" + data = b"" + async for chunk in iterator: + for line in chunk.splitlines(keepends=True): + data += line + if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")): + yield data + data = b"" + if data: + yield data + + def decode(self, line: str) -> ServerSentEvent | None: + # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501 + + if not line: + if not self._event and not self._data and not self._last_event_id and self._retry is None: + return None + + sse = ServerSentEvent( + event=self._event, + data="\n".join(self._data), + id=self._last_event_id, + retry=self._retry, + ) + + # NOTE: as per the SSE spec, do not reset last_event_id. 
+ self._event = None + self._data = [] + self._retry = None + + return sse + + if line.startswith(":"): + return None + + fieldname, _, value = line.partition(":") + + if value.startswith(" "): + value = value[1:] + + if fieldname == "event": + self._event = value + elif fieldname == "data": + self._data.append(value) + elif fieldname == "id": + if "\0" in value: + pass + else: + self._last_event_id = value + elif fieldname == "retry": + try: + self._retry = int(value) + except (TypeError, ValueError): + pass + else: + pass # Field is ignored. + + return None + + +@runtime_checkable +class SSEBytesDecoder(Protocol): + def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + ... + + def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]: + """Given an async iterator that yields raw binary data, iterate over it & yield every event encountered""" + ... + + +def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]: + """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`""" + origin = get_origin(typ) or typ + return inspect.isclass(origin) and issubclass(origin, (Stream, AsyncStream)) + + +def extract_stream_chunk_type( + stream_cls: type, + *, + failure_message: str | None = None, +) -> type: + """Given a type like `Stream[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyStream(Stream[bytes]): + ... + + extract_stream_chunk_type(MyStream) -> bytes + ``` + """ + from ._base_client import Stream, AsyncStream + + return extract_type_var_from_base( + stream_cls, + index=0, + generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)), + failure_message=failure_message, + ) diff --git a/src/imagekitio/_types.py b/src/imagekitio/_types.py new file mode 100644 index 00000000..714fee27 --- /dev/null +++ b/src/imagekitio/_types.py @@ -0,0 +1,261 @@ +from __future__ import annotations + +from os import PathLike +from typing import ( + IO, + TYPE_CHECKING, + Any, + Dict, + List, + Type, + Tuple, + Union, + Mapping, + TypeVar, + Callable, + Iterator, + Optional, + Sequence, +) +from typing_extensions import ( + Set, + Literal, + Protocol, + TypeAlias, + TypedDict, + SupportsIndex, + overload, + override, + runtime_checkable, +) + +import httpx +import pydantic +from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport + +if TYPE_CHECKING: + from ._models import BaseModel + from ._response import APIResponse, AsyncAPIResponse + +Transport = BaseTransport +AsyncTransport = AsyncBaseTransport +Query = Mapping[str, object] +Body = object +AnyMapping = Mapping[str, object] +ModelT = TypeVar("ModelT", bound=pydantic.BaseModel) +_T = TypeVar("_T") + + +# Approximates httpx internal ProxiesTypes and RequestFiles types +# while adding support for `PathLike` instances +ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]] +ProxiesTypes = Union[str, Proxy, ProxiesDict] +if TYPE_CHECKING: + Base64FileInput = Union[IO[bytes], PathLike[str]] + FileContent = Union[IO[bytes], bytes, PathLike[str]] +else: + Base64FileInput = Union[IO[bytes], PathLike] + FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8. 
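To make the decoding rules above concrete, a small sketch with made-up byte chunks: `SSEDecoder` buffers events across arbitrary chunk boundaries and only emits them once the terminating blank line arrives.

```py
from imagekitio._streaming import SSEDecoder

# Two events deliberately split across chunk boundaries.
chunks = [b'event: update\ndata: {"ok": true}\n', b'\ndata: {"n": 1}\n\n']

for sse in SSEDecoder().iter_bytes(iter(chunks)):
    print(sse.event, sse.json())
# -> update {'ok': True}
# -> None {'n': 1}
```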
+FileTypes = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], +] +RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]] + +# duplicate of the above but without our custom file support +HttpxFileContent = Union[IO[bytes], bytes] +HttpxFileTypes = Union[ + # file (or bytes) + HttpxFileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], HttpxFileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], HttpxFileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]], +] +HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]] + +# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT +# where ResponseT includes `None`. In order to support directly +# passing `None`, overloads would have to be defined for every +# method that uses `ResponseT` which would lead to an unacceptable +# amount of code duplication and make it unreadable. See _base_client.py +# for example usage. +# +# This unfortunately means that you will either have +# to import this type and pass it explicitly: +# +# from imagekitio import NoneType +# client.get('/foo', cast_to=NoneType) +# +# or build it yourself: +# +# client.get('/foo', cast_to=type(None)) +if TYPE_CHECKING: + NoneType: Type[None] +else: + NoneType = type(None) + + +class RequestOptions(TypedDict, total=False): + headers: Headers + max_retries: int + timeout: float | Timeout | None + params: Query + extra_json: AnyMapping + idempotency_key: str + follow_redirects: bool + + +# Sentinel class used until PEP 0661 is accepted +class NotGiven: + """ + For parameters with a meaningful None value, we need to distinguish between + the user explicitly passing None, and the user not passing the parameter at + all. + + User code shouldn't need to use not_given directly. + + For example: + + ```py + def create(timeout: Timeout | None | NotGiven = not_given): ... + + + create(timeout=1) # 1s timeout + create(timeout=None) # No timeout + create() # Default timeout behavior + ``` + """ + + def __bool__(self) -> Literal[False]: + return False + + @override + def __repr__(self) -> str: + return "NOT_GIVEN" + + +not_given = NotGiven() +# for backwards compatibility: +NOT_GIVEN = NotGiven() + + +class Omit: + """ + To explicitly omit something from being sent in a request, use `omit`. + + ```py + # as the default `Content-Type` header is `application/json` that will be sent + client.post("/upload/files", files={"file": b"my raw file content"}) + + # you can't explicitly override the header as it has to be dynamically generated + # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983' + client.post(..., headers={"Content-Type": "multipart/form-data"}) + + # instead you can remove the default `application/json` header by passing omit + client.post(..., headers={"Content-Type": omit}) + ``` + """ + + def __bool__(self) -> Literal[False]: + return False + + +omit = Omit() + + +@runtime_checkable +class ModelBuilderProtocol(Protocol): + @classmethod + def build( + cls: type[_T], + *, + response: Response, + data: object, + ) -> _T: ... 
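Since `FileTypes` enumerates every accepted shape for an upload entry, a short illustration may help; the field names and contents below are made up and exist only to show each form of the union.

```py
from pathlib import Path

from imagekitio._types import RequestFiles

files: RequestFiles = {
    "file_a": b"raw bytes",                                                # bare content
    "file_b": Path("photo.jpg"),                                           # path-like is accepted too
    "file_c": ("photo.jpg", b"raw bytes"),                                 # (filename, content)
    "file_d": ("photo.jpg", b"raw bytes", "image/jpeg"),                   # + content type
    "file_e": ("photo.jpg", b"raw bytes", "image/jpeg", {"X-Meta": "1"}),  # + extra headers
}
```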
+ + +Headers = Mapping[str, Union[str, Omit]] + + +class HeadersLikeProtocol(Protocol): + def get(self, __key: str) -> str | None: ... + + +HeadersLike = Union[Headers, HeadersLikeProtocol] + +ResponseT = TypeVar( + "ResponseT", + bound=Union[ + object, + str, + None, + "BaseModel", + List[Any], + Dict[str, Any], + Response, + ModelBuilderProtocol, + "APIResponse[Any]", + "AsyncAPIResponse[Any]", + ], +) + +StrBytesIntFloat = Union[str, bytes, int, float] + +# Note: copied from Pydantic +# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79 +IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]] + +PostParser = Callable[[Any], Any] + + +@runtime_checkable +class InheritsGeneric(Protocol): + """Represents a type that has inherited from `Generic` + + The `__orig_bases__` property can be used to determine the resolved + type variable for a given base class. + """ + + __orig_bases__: tuple[_GenericAlias] + + +class _GenericAlias(Protocol): + __origin__: type[object] + + +class HttpxSendArgs(TypedDict, total=False): + auth: httpx.Auth + follow_redirects: bool + + +_T_co = TypeVar("_T_co", covariant=True) + + +if TYPE_CHECKING: + # This works because str.__contains__ does not accept object (either in typeshed or at runtime) + # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + # + # Note: index() and count() methods are intentionally omitted to allow pyright to properly + # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr. + class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... + def __contains__(self, value: object, /) -> bool: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T_co]: ... + def __reversed__(self) -> Iterator[_T_co]: ... 
+else: + # just point this to a normal `Sequence` at runtime to avoid having to special case + # deserializing our custom sequence type + SequenceNotStr = Sequence diff --git a/src/imagekitio/_utils/__init__.py b/src/imagekitio/_utils/__init__.py new file mode 100644 index 00000000..dc64e29a --- /dev/null +++ b/src/imagekitio/_utils/__init__.py @@ -0,0 +1,64 @@ +from ._sync import asyncify as asyncify +from ._proxy import LazyProxy as LazyProxy +from ._utils import ( + flatten as flatten, + is_dict as is_dict, + is_list as is_list, + is_given as is_given, + is_tuple as is_tuple, + json_safe as json_safe, + lru_cache as lru_cache, + is_mapping as is_mapping, + is_tuple_t as is_tuple_t, + is_iterable as is_iterable, + is_sequence as is_sequence, + coerce_float as coerce_float, + is_mapping_t as is_mapping_t, + removeprefix as removeprefix, + removesuffix as removesuffix, + extract_files as extract_files, + is_sequence_t as is_sequence_t, + required_args as required_args, + coerce_boolean as coerce_boolean, + coerce_integer as coerce_integer, + file_from_path as file_from_path, + strip_not_given as strip_not_given, + deepcopy_minimal as deepcopy_minimal, + get_async_library as get_async_library, + maybe_coerce_float as maybe_coerce_float, + get_required_header as get_required_header, + maybe_coerce_boolean as maybe_coerce_boolean, + maybe_coerce_integer as maybe_coerce_integer, +) +from ._compat import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, +) +from ._typing import ( + is_list_type as is_list_type, + is_union_type as is_union_type, + extract_type_arg as extract_type_arg, + is_iterable_type as is_iterable_type, + is_required_type as is_required_type, + is_sequence_type as is_sequence_type, + is_annotated_type as is_annotated_type, + is_type_alias_type as is_type_alias_type, + strip_annotated_type as strip_annotated_type, + extract_type_var_from_base as extract_type_var_from_base, +) +from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator +from ._transform import ( + PropertyInfo as PropertyInfo, + transform as transform, + async_transform as async_transform, + maybe_transform as maybe_transform, + async_maybe_transform as async_maybe_transform, +) +from ._reflection import ( + function_has_argument as function_has_argument, + assert_signatures_in_sync as assert_signatures_in_sync, +) +from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime diff --git a/src/imagekitio/_utils/_compat.py b/src/imagekitio/_utils/_compat.py new file mode 100644 index 00000000..dd703233 --- /dev/null +++ b/src/imagekitio/_utils/_compat.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import sys +import typing_extensions +from typing import Any, Type, Union, Literal, Optional +from datetime import date, datetime +from typing_extensions import get_args as _get_args, get_origin as _get_origin + +from .._types import StrBytesIntFloat +from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime + +_LITERAL_TYPES = {Literal, typing_extensions.Literal} + + +def get_args(tp: type[Any]) -> tuple[Any, ...]: + return _get_args(tp) + + +def get_origin(tp: type[Any]) -> type[Any] | None: + return _get_origin(tp) + + +def is_union(tp: Optional[Type[Any]]) -> bool: + if sys.version_info < (3, 10): + return tp is Union # type: ignore[comparison-overlap] + else: + import types + + return tp 
is Union or tp is types.UnionType + + +def is_typeddict(tp: Type[Any]) -> bool: + return typing_extensions.is_typeddict(tp) + + +def is_literal_type(tp: Type[Any]) -> bool: + return get_origin(tp) in _LITERAL_TYPES + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + return _parse_date(value) + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + return _parse_datetime(value) diff --git a/src/imagekitio/_utils/_datetime_parse.py b/src/imagekitio/_utils/_datetime_parse.py new file mode 100644 index 00000000..7cb9d9e6 --- /dev/null +++ b/src/imagekitio/_utils/_datetime_parse.py @@ -0,0 +1,136 @@ +""" +This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py +without the Pydantic v1 specific errors. +""" + +from __future__ import annotations + +import re +from typing import Dict, Union, Optional +from datetime import date, datetime, timezone, timedelta + +from .._types import StrBytesIntFloat + +date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})" +time_expr = ( + r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})" + r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?" + r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$" +) + +date_re = re.compile(f"{date_expr}$") +datetime_re = re.compile(f"{date_expr}[T ]{time_expr}") + + +EPOCH = datetime(1970, 1, 1) +# if greater than this, the number is in ms, if less than or equal it's in seconds +# (in seconds this is 11th October 2603, in ms it's 20th August 1970) +MS_WATERSHED = int(2e10) +# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9 +MAX_NUMBER = int(3e20) + + +def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]: + if isinstance(value, (int, float)): + return value + try: + return float(value) + except ValueError: + return None + except TypeError: + raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None + + +def _from_unix_seconds(seconds: Union[int, float]) -> datetime: + if seconds > MAX_NUMBER: + return datetime.max + elif seconds < -MAX_NUMBER: + return datetime.min + + while abs(seconds) > MS_WATERSHED: + seconds /= 1000 + dt = EPOCH + timedelta(seconds=seconds) + return dt.replace(tzinfo=timezone.utc) + + +def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]: + if value == "Z": + return timezone.utc + elif value is not None: + offset_mins = int(value[-2:]) if len(value) > 3 else 0 + offset = 60 * int(value[1:3]) + offset_mins + if value[0] == "-": + offset = -offset + return timezone(timedelta(minutes=offset)) + else: + return None + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + """ + Parse a datetime/int/float/string and return a datetime.datetime. + + This function supports time zone offsets. When the input contains one, + the output uses a timezone with a fixed offset from UTC. + + Raise ValueError if the input is well formatted but not a valid datetime. + Raise ValueError if the input isn't well formatted.
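As a quick illustration of the parsers in this module (the input values are arbitrary; both helpers are also re-exported from `imagekitio._utils`):

```py
from imagekitio._utils import parse_date, parse_datetime

print(parse_datetime("2023-10-05T14:12:26+05:30"))  # aware datetime with a fixed +05:30 offset
print(parse_datetime(1696515146))                   # unix seconds -> UTC datetime
print(parse_date("2023-10-05"))                     # datetime.date(2023, 10, 5)
```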
+ """ + if isinstance(value, datetime): + return value + + number = _get_numeric(value, "datetime") + if number is not None: + return _from_unix_seconds(number) + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + + match = datetime_re.match(value) + if match is None: + raise ValueError("invalid datetime format") + + kw = match.groupdict() + if kw["microsecond"]: + kw["microsecond"] = kw["microsecond"].ljust(6, "0") + + tzinfo = _parse_timezone(kw.pop("tzinfo")) + kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None} + kw_["tzinfo"] = tzinfo + + return datetime(**kw_) # type: ignore + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + """ + Parse a date/int/float/string and return a datetime.date. + + Raise ValueError if the input is well formatted but not a valid date. + Raise ValueError if the input isn't well formatted. + """ + if isinstance(value, date): + if isinstance(value, datetime): + return value.date() + else: + return value + + number = _get_numeric(value, "date") + if number is not None: + return _from_unix_seconds(number).date() + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + match = date_re.match(value) + if match is None: + raise ValueError("invalid date format") + + kw = {k: int(v) for k, v in match.groupdict().items()} + + try: + return date(**kw) + except ValueError: + raise ValueError("invalid date format") from None diff --git a/src/imagekitio/_utils/_logs.py b/src/imagekitio/_utils/_logs.py new file mode 100644 index 00000000..c383e3ea --- /dev/null +++ b/src/imagekitio/_utils/_logs.py @@ -0,0 +1,25 @@ +import os +import logging + +logger: logging.Logger = logging.getLogger("imagekitio") +httpx_logger: logging.Logger = logging.getLogger("httpx") + + +def _basic_config() -> None: + # e.g. [2023-10-05 14:12:26 - imagekitio._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + logging.basicConfig( + format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + +def setup_logging() -> None: + env = os.environ.get("IMAGE_KIT_LOG") + if env == "debug": + _basic_config() + logger.setLevel(logging.DEBUG) + httpx_logger.setLevel(logging.DEBUG) + elif env == "info": + _basic_config() + logger.setLevel(logging.INFO) + httpx_logger.setLevel(logging.INFO) diff --git a/src/imagekitio/_utils/_proxy.py b/src/imagekitio/_utils/_proxy.py new file mode 100644 index 00000000..0f239a33 --- /dev/null +++ b/src/imagekitio/_utils/_proxy.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Generic, TypeVar, Iterable, cast +from typing_extensions import override + +T = TypeVar("T") + + +class LazyProxy(Generic[T], ABC): + """Implements data methods to pretend that an instance is another instance. + + This includes forwarding attribute access and other methods. + """ + + # Note: we have to special case proxies that themselves return proxies + # to support using a proxy as a catch-all for any random access, e.g. 
`proxy.foo.bar.baz` + + def __getattr__(self, attr: str) -> object: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied # pyright: ignore + return getattr(proxied, attr) + + @override + def __repr__(self) -> str: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied.__class__.__name__ + return repr(self.__get_proxied__()) + + @override + def __str__(self) -> str: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied.__class__.__name__ + return str(proxied) + + @override + def __dir__(self) -> Iterable[str]: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return [] + return proxied.__dir__() + + @property # type: ignore + @override + def __class__(self) -> type: # pyright: ignore + try: + proxied = self.__get_proxied__() + except Exception: + return type(self) + if issubclass(type(proxied), LazyProxy): + return type(proxied) + return proxied.__class__ + + def __get_proxied__(self) -> T: + return self.__load__() + + def __as_proxied__(self) -> T: + """Helper method that returns the current proxy, typed as the loaded object""" + return cast(T, self) + + @abstractmethod + def __load__(self) -> T: ... diff --git a/src/imagekitio/_utils/_reflection.py b/src/imagekitio/_utils/_reflection.py new file mode 100644 index 00000000..89aa712a --- /dev/null +++ b/src/imagekitio/_utils/_reflection.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import inspect +from typing import Any, Callable + + +def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool: + """Returns whether or not the given function has a specific parameter""" + sig = inspect.signature(func) + return arg_name in sig.parameters + + +def assert_signatures_in_sync( + source_func: Callable[..., Any], + check_func: Callable[..., Any], + *, + exclude_params: set[str] = set(), +) -> None: + """Ensure that the signature of the second function matches the first.""" + + check_sig = inspect.signature(check_func) + source_sig = inspect.signature(source_func) + + errors: list[str] = [] + + for name, source_param in source_sig.parameters.items(): + if name in exclude_params: + continue + + custom_param = check_sig.parameters.get(name) + if not custom_param: + errors.append(f"the `{name}` param is missing") + continue + + if custom_param.annotation != source_param.annotation: + errors.append( + f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}" + ) + continue + + if errors: + raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors)) diff --git a/src/imagekitio/_utils/_resources_proxy.py b/src/imagekitio/_utils/_resources_proxy.py new file mode 100644 index 00000000..5ba91421 --- /dev/null +++ b/src/imagekitio/_utils/_resources_proxy.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from typing import Any +from typing_extensions import override + +from ._proxy import LazyProxy + + +class ResourcesProxy(LazyProxy[Any]): + """A proxy for the `imagekitio.resources` module. 
+ + This is used so that we can lazily import `imagekitio.resources` only when + needed *and* so that users can just import `imagekitio` and reference `imagekitio.resources` + """ + + @override + def __load__(self) -> Any: + import importlib + + mod = importlib.import_module("imagekitio.resources") + return mod + + +resources = ResourcesProxy().__as_proxied__() diff --git a/src/imagekitio/_utils/_streams.py b/src/imagekitio/_utils/_streams.py new file mode 100644 index 00000000..f4a0208f --- /dev/null +++ b/src/imagekitio/_utils/_streams.py @@ -0,0 +1,12 @@ +from typing import Any +from typing_extensions import Iterator, AsyncIterator + + +def consume_sync_iterator(iterator: Iterator[Any]) -> None: + for _ in iterator: + ... + + +async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None: + async for _ in iterator: + ... diff --git a/src/imagekitio/_utils/_sync.py b/src/imagekitio/_utils/_sync.py new file mode 100644 index 00000000..f6027c18 --- /dev/null +++ b/src/imagekitio/_utils/_sync.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import asyncio +import functools +from typing import TypeVar, Callable, Awaitable +from typing_extensions import ParamSpec + +import anyio +import sniffio +import anyio.to_thread + +T_Retval = TypeVar("T_Retval") +T_ParamSpec = ParamSpec("T_ParamSpec") + + +async def to_thread( + func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs +) -> T_Retval: + if sniffio.current_async_library() == "asyncio": + return await asyncio.to_thread(func, *args, **kwargs) + + return await anyio.to_thread.run_sync( + functools.partial(func, *args, **kwargs), + ) + + +# inspired by `asyncer`, https://github.com/tiangolo/asyncer +def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: + """ + Take a blocking function and create an async one that receives the same + positional and keyword arguments. + + Usage: + + ```python + def blocking_func(arg1, arg2, kwarg1=None): + # blocking code + return result + + + result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1) + ``` + + ## Arguments + + `function`: a blocking regular callable (e.g. a function) + + ## Return + + An async function that takes the same positional and keyword arguments as the + original one, that when called runs the same original function in a thread worker + and returns the result. 
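A runnable sketch of `asyncify`; `blocking_add` is invented for the example and stands in for any blocking call.

```py
import time

import anyio

from imagekitio._utils import asyncify


def blocking_add(a: int, b: int) -> int:
    time.sleep(0.1)  # stand-in for blocking I/O
    return a + b


async def main() -> None:
    # Runs blocking_add in a worker thread so the event loop stays responsive.
    print(await asyncify(blocking_add)(1, 2))  # 3


anyio.run(main)
```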
+ """ + + async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: + return await to_thread(function, *args, **kwargs) + + return wrapper diff --git a/src/imagekitio/_utils/_transform.py b/src/imagekitio/_utils/_transform.py new file mode 100644 index 00000000..52075492 --- /dev/null +++ b/src/imagekitio/_utils/_transform.py @@ -0,0 +1,457 @@ +from __future__ import annotations + +import io +import base64 +import pathlib +from typing import Any, Mapping, TypeVar, cast +from datetime import date, datetime +from typing_extensions import Literal, get_args, override, get_type_hints as _get_type_hints + +import anyio +import pydantic + +from ._utils import ( + is_list, + is_given, + lru_cache, + is_mapping, + is_iterable, + is_sequence, +) +from .._files import is_base64_file_input +from ._compat import get_origin, is_typeddict +from ._typing import ( + is_list_type, + is_union_type, + extract_type_arg, + is_iterable_type, + is_required_type, + is_sequence_type, + is_annotated_type, + strip_annotated_type, +) + +_T = TypeVar("_T") + + +# TODO: support for drilling globals() and locals() +# TODO: ensure works correctly with forward references in all cases + + +PropertyFormat = Literal["iso8601", "base64", "custom"] + + +class PropertyInfo: + """Metadata class to be used in Annotated types to provide information about a given type. + + For example: + + class MyParams(TypedDict): + account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')] + + This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API. + """ + + alias: str | None + format: PropertyFormat | None + format_template: str | None + discriminator: str | None + + def __init__( + self, + *, + alias: str | None = None, + format: PropertyFormat | None = None, + format_template: str | None = None, + discriminator: str | None = None, + ) -> None: + self.alias = alias + self.format = format + self.format_template = format_template + self.discriminator = discriminator + + @override + def __repr__(self) -> str: + return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')" + + +def maybe_transform( + data: object, + expected_type: object, +) -> Any | None: + """Wrapper over `transform()` that allows `None` to be passed. + + See `transform()` for more details. + """ + if data is None: + return None + return transform(data, expected_type) + + +# Wrapper over _transform_recursive providing fake types +def transform( + data: _T, + expected_type: object, +) -> _T: + """Transform dictionaries based off of type information from the given type, for example: + + ```py + class Params(TypedDict, total=False): + card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] + + + transformed = transform({"card_id": ""}, Params) + # {'cardID': ''} + ``` + + Any keys / data that does not have type information given will be included as is. + + It should be noted that the transformations that this function does are not represented in the type system. + """ + transformed = _transform_recursive(data, annotation=cast(type, expected_type)) + return cast(_T, transformed) + + +@lru_cache(maxsize=8096) +def _get_annotated_type(type_: type) -> type | None: + """If the given type is an `Annotated` type then it is returned, if not `None` is returned. + + This also unwraps the type when applicable, e.g. 
`Required[Annotated[T, ...]]` + """ + if is_required_type(type_): + # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]` + type_ = get_args(type_)[0] + + if is_annotated_type(type_): + return type_ + + return None + + +def _maybe_transform_key(key: str, type_: type) -> str: + """Transform the given `data` based on the annotations provided in `type_`. + + Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata. + """ + annotated_type = _get_annotated_type(type_) + if annotated_type is None: + # no `Annotated` definition for this type, no transformation needed + return key + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.alias is not None: + return annotation.alias + + return key + + +def _no_transform_needed(annotation: type) -> bool: + return annotation == float or annotation == int + + +def _transform_recursive( + data: object, + *, + annotation: type, + inner_type: type | None = None, +) -> object: + """Transform the given data against the expected type. + + Args: + annotation: The direct type annotation given to the particular piece of data. + This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc + + inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type + is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in + the list can be transformed using the metadata from the container type. + + Defaults to the same value as the `annotation` argument. + """ + from .._compat import model_dump + + if inner_type is None: + inner_type = annotation + + stripped_type = strip_annotated_type(inner_type) + origin = get_origin(stripped_type) or stripped_type + if is_typeddict(stripped_type) and is_mapping(data): + return _transform_typeddict(data, stripped_type) + + if origin == dict and is_mapping(data): + items_type = get_args(stripped_type)[1] + return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()} + + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) + ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + + inner_type = extract_type_arg(stripped_type, 0) + if _no_transform_needed(inner_type): + # for some types there is no need to transform anything, so we can get a small + # perf boost from skipping that work. + # + # but we still need to convert to a list to ensure the data is json-serializable + if is_list(data): + return data + return list(data) + + return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] + + if is_union_type(stripped_type): + # For union types we run the transformation against all subtypes to ensure that everything is transformed. + # + # TODO: there may be edge cases where the same normalized field name will transform to two different names + # in different subtypes. 
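To see the aliasing machinery end to end, a small sketch; `ExampleUploadParams` is a made-up TypedDict that exists only to exercise `PropertyInfo(alias=...)` via the re-exported `maybe_transform`:

```py
from typing import List

from typing_extensions import Annotated, Required, TypedDict

from imagekitio._utils import PropertyInfo, maybe_transform


class ExampleUploadParams(TypedDict, total=False):
    file_name: Required[Annotated[str, PropertyInfo(alias="fileName")]]
    tags: List[str]


print(maybe_transform({"file_name": "photo.jpg", "tags": ["hero"]}, ExampleUploadParams))
# -> {'fileName': 'photo.jpg', 'tags': ['hero']}
```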
+ for subtype in get_args(stripped_type): + data = _transform_recursive(data, annotation=annotation, inner_type=subtype) + return data + + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True, mode="json") + + annotated_type = _get_annotated_type(annotation) + if annotated_type is None: + return data + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.format is not None: + return _format_data(data, annotation.format, annotation.format_template) + + return data + + +def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: + if isinstance(data, (date, datetime)): + if format_ == "iso8601": + return data.isoformat() + + if format_ == "custom" and format_template is not None: + return data.strftime(format_template) + + if format_ == "base64" and is_base64_file_input(data): + binary: str | bytes | None = None + + if isinstance(data, pathlib.Path): + binary = data.read_bytes() + elif isinstance(data, io.IOBase): + binary = data.read() + + if isinstance(binary, str): # type: ignore[unreachable] + binary = binary.encode() + + if not isinstance(binary, bytes): + raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") + + return base64.b64encode(binary).decode("ascii") + + return data + + +def _transform_typeddict( + data: Mapping[str, object], + expected_type: type, +) -> Mapping[str, object]: + result: dict[str, object] = {} + annotations = get_type_hints(expected_type, include_extras=True) + for key, value in data.items(): + if not is_given(value): + # we don't need to include omitted values here as they'll + # be stripped out before the request is sent anyway + continue + + type_ = annotations.get(key) + if type_ is None: + # we do not have a type annotation for this field, leave it as is + result[key] = value + else: + result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_) + return result + + +async def async_maybe_transform( + data: object, + expected_type: object, +) -> Any | None: + """Wrapper over `async_transform()` that allows `None` to be passed. + + See `async_transform()` for more details. + """ + if data is None: + return None + return await async_transform(data, expected_type) + + +async def async_transform( + data: _T, + expected_type: object, +) -> _T: + """Transform dictionaries based off of type information from the given type, for example: + + ```py + class Params(TypedDict, total=False): + card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] + + + transformed = transform({"card_id": ""}, Params) + # {'cardID': ''} + ``` + + Any keys / data that does not have type information given will be included as is. + + It should be noted that the transformations that this function does are not represented in the type system. + """ + transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type)) + return cast(_T, transformed) + + +async def _async_transform_recursive( + data: object, + *, + annotation: type, + inner_type: type | None = None, +) -> object: + """Transform the given data against the expected type. + + Args: + annotation: The direct type annotation given to the particular piece of data. + This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc + + inner_type: If applicable, this is the "inside" type. 
This is useful in certain cases where the outside type + is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in + the list can be transformed using the metadata from the container type. + + Defaults to the same value as the `annotation` argument. + """ + from .._compat import model_dump + + if inner_type is None: + inner_type = annotation + + stripped_type = strip_annotated_type(inner_type) + origin = get_origin(stripped_type) or stripped_type + if is_typeddict(stripped_type) and is_mapping(data): + return await _async_transform_typeddict(data, stripped_type) + + if origin == dict and is_mapping(data): + items_type = get_args(stripped_type)[1] + return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()} + + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) + ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + + inner_type = extract_type_arg(stripped_type, 0) + if _no_transform_needed(inner_type): + # for some types there is no need to transform anything, so we can get a small + # perf boost from skipping that work. + # + # but we still need to convert to a list to ensure the data is json-serializable + if is_list(data): + return data + return list(data) + + return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] + + if is_union_type(stripped_type): + # For union types we run the transformation against all subtypes to ensure that everything is transformed. + # + # TODO: there may be edge cases where the same normalized field name will transform to two different names + # in different subtypes. 
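The `format` side of `PropertyInfo`, handled by `_format_data` above, can be sketched the same way. `SignedParams` is made up, and the example assumes in-memory byte streams qualify as base64-able file input, as the `Base64FileInput` union suggests:

```py
import io
from datetime import datetime, timezone

from typing_extensions import Annotated, TypedDict

from imagekitio._utils import PropertyInfo, maybe_transform


class SignedParams(TypedDict, total=False):
    expires_at: Annotated[datetime, PropertyInfo(format="iso8601")]
    attachment: Annotated[io.IOBase, PropertyInfo(format="base64")]


print(
    maybe_transform(
        {"expires_at": datetime(2024, 1, 1, tzinfo=timezone.utc), "attachment": io.BytesIO(b"hello")},
        SignedParams,
    )
)
# -> {'expires_at': '2024-01-01T00:00:00+00:00', 'attachment': 'aGVsbG8='}
```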
+ for subtype in get_args(stripped_type): + data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype) + return data + + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True, mode="json") + + annotated_type = _get_annotated_type(annotation) + if annotated_type is None: + return data + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.format is not None: + return await _async_format_data(data, annotation.format, annotation.format_template) + + return data + + +async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: + if isinstance(data, (date, datetime)): + if format_ == "iso8601": + return data.isoformat() + + if format_ == "custom" and format_template is not None: + return data.strftime(format_template) + + if format_ == "base64" and is_base64_file_input(data): + binary: str | bytes | None = None + + if isinstance(data, pathlib.Path): + binary = await anyio.Path(data).read_bytes() + elif isinstance(data, io.IOBase): + binary = data.read() + + if isinstance(binary, str): # type: ignore[unreachable] + binary = binary.encode() + + if not isinstance(binary, bytes): + raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") + + return base64.b64encode(binary).decode("ascii") + + return data + + +async def _async_transform_typeddict( + data: Mapping[str, object], + expected_type: type, +) -> Mapping[str, object]: + result: dict[str, object] = {} + annotations = get_type_hints(expected_type, include_extras=True) + for key, value in data.items(): + if not is_given(value): + # we don't need to include omitted values here as they'll + # be stripped out before the request is sent anyway + continue + + type_ = annotations.get(key) + if type_ is None: + # we do not have a type annotation for this field, leave it as is + result[key] = value + else: + result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_) + return result + + +@lru_cache(maxsize=8096) +def get_type_hints( + obj: Any, + globalns: dict[str, Any] | None = None, + localns: Mapping[str, Any] | None = None, + include_extras: bool = False, +) -> dict[str, Any]: + return _get_type_hints(obj, globalns=globalns, localns=localns, include_extras=include_extras) diff --git a/src/imagekitio/_utils/_typing.py b/src/imagekitio/_utils/_typing.py new file mode 100644 index 00000000..193109f3 --- /dev/null +++ b/src/imagekitio/_utils/_typing.py @@ -0,0 +1,156 @@ +from __future__ import annotations + +import sys +import typing +import typing_extensions +from typing import Any, TypeVar, Iterable, cast +from collections import abc as _c_abc +from typing_extensions import ( + TypeIs, + Required, + Annotated, + get_args, + get_origin, +) + +from ._utils import lru_cache +from .._types import InheritsGeneric +from ._compat import is_union as _is_union + + +def is_annotated_type(typ: type) -> bool: + return get_origin(typ) == Annotated + + +def is_list_type(typ: type) -> bool: + return (get_origin(typ) or typ) == list + + +def is_sequence_type(typ: type) -> bool: + origin = get_origin(typ) or typ + return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence + + +def is_iterable_type(typ: type) -> bool: + """If the given type is `typing.Iterable[T]`""" + origin = get_origin(typ) or typ + return origin == 
Iterable or origin == _c_abc.Iterable + + +def is_union_type(typ: type) -> bool: + return _is_union(get_origin(typ)) + + +def is_required_type(typ: type) -> bool: + return get_origin(typ) == Required + + +def is_typevar(typ: type) -> bool: + # type ignore is required because type checkers + # think this expression will always return False + return type(typ) == TypeVar # type: ignore + + +_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,) +if sys.version_info >= (3, 12): + _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) + + +def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]: + """Return whether the provided argument is an instance of `TypeAliasType`. + + ```python + type Int = int + is_type_alias_type(Int) + # > True + Str = TypeAliasType("Str", str) + is_type_alias_type(Str) + # > True + ``` + """ + return isinstance(tp, _TYPE_ALIAS_TYPES) + + +# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]] +@lru_cache(maxsize=8096) +def strip_annotated_type(typ: type) -> type: + if is_required_type(typ) or is_annotated_type(typ): + return strip_annotated_type(cast(type, get_args(typ)[0])) + + return typ + + +def extract_type_arg(typ: type, index: int) -> type: + args = get_args(typ) + try: + return cast(type, args[index]) + except IndexError as err: + raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err + + +def extract_type_var_from_base( + typ: type, + *, + generic_bases: tuple[type, ...], + index: int, + failure_message: str | None = None, +) -> type: + """Given a type like `Foo[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyResponse(Foo[bytes]): + ... + + extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes + ``` + + And where a generic subclass is given: + ```py + _T = TypeVar('_T') + class MyResponse(Foo[_T]): + ... + + extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes + ``` + """ + cls = cast(object, get_origin(typ) or typ) + if cls in generic_bases: # pyright: ignore[reportUnnecessaryContains] + # we're given the class directly + return extract_type_arg(typ, index) + + # if a subclass is given + # --- + # this is needed as __orig_bases__ is not present in the typeshed stubs + # because it is intended to be for internal use only, however there does + # not seem to be a way to resolve generic TypeVars for inherited subclasses + # without using it. + if isinstance(cls, InheritsGeneric): + target_base_class: Any | None = None + for base in cls.__orig_bases__: + if base.__origin__ in generic_bases: + target_base_class = base + break + + if target_base_class is None: + raise RuntimeError( + "Could not find the generic base class;\n" + "This should never happen;\n" + f"Does {cls} inherit from one of {generic_bases} ?" + ) + + extracted = extract_type_arg(target_base_class, index) + if is_typevar(extracted): + # If the extracted type argument is itself a type variable + # then that means the subclass itself is generic, so we have + # to resolve the type argument from the class itself, not + # the base class. + # + # Note: if there is more than 1 type argument, the subclass could + # change the ordering of the type arguments, this is not currently + # supported. 
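A brief sketch of `extract_type_var_from_base`, which the earlier `extract_response_type` and `extract_stream_chunk_type` helpers build on; `Box` and `BytesBox` are invented stand-ins for generics like `APIResponse[bytes]` and their concrete subclasses:

```py
from typing import Generic, TypeVar

from imagekitio._utils import extract_type_var_from_base

T = TypeVar("T")


class Box(Generic[T]):
    pass


class BytesBox(Box[bytes]):
    pass


print(extract_type_var_from_base(BytesBox, generic_bases=(Box,), index=0))   # <class 'bytes'>
print(extract_type_var_from_base(Box[str], generic_bases=(Box,), index=0))   # <class 'str'>
```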
+ return extract_type_arg(typ, index) + + return extracted + + raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}") diff --git a/src/imagekitio/_utils/_utils.py b/src/imagekitio/_utils/_utils.py new file mode 100644 index 00000000..eec7f4a1 --- /dev/null +++ b/src/imagekitio/_utils/_utils.py @@ -0,0 +1,421 @@ +from __future__ import annotations + +import os +import re +import inspect +import functools +from typing import ( + Any, + Tuple, + Mapping, + TypeVar, + Callable, + Iterable, + Sequence, + cast, + overload, +) +from pathlib import Path +from datetime import date, datetime +from typing_extensions import TypeGuard + +import sniffio + +from .._types import Omit, NotGiven, FileTypes, HeadersLike + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) +_MappingT = TypeVar("_MappingT", bound=Mapping[str, object]) +_SequenceT = TypeVar("_SequenceT", bound=Sequence[object]) +CallableT = TypeVar("CallableT", bound=Callable[..., Any]) + + +def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: + return [item for sublist in t for item in sublist] + + +def extract_files( + # TODO: this needs to take Dict but variance issues..... + # create protocol type ? + query: Mapping[str, object], + *, + paths: Sequence[Sequence[str]], +) -> list[tuple[str, FileTypes]]: + """Recursively extract files from the given dictionary based on specified paths. + + A path may look like this ['foo', 'files', '', 'data']. + + Note: this mutates the given dictionary. + """ + files: list[tuple[str, FileTypes]] = [] + for path in paths: + files.extend(_extract_items(query, path, index=0, flattened_key=None)) + return files + + +def _extract_items( + obj: object, + path: Sequence[str], + *, + index: int, + flattened_key: str | None, +) -> list[tuple[str, FileTypes]]: + try: + key = path[index] + except IndexError: + if not is_given(obj): + # no value was provided - we can safely ignore + return [] + + # cyclical import + from .._files import assert_is_file_content + + # We have exhausted the path, return the entry we found. + assert flattened_key is not None + + if is_list(obj): + files: list[tuple[str, FileTypes]] = [] + for entry in obj: + assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "") + files.append((flattened_key + "[]", cast(FileTypes, entry))) + return files + + assert_is_file_content(obj, key=flattened_key) + return [(flattened_key, cast(FileTypes, obj))] + + index += 1 + if is_dict(obj): + try: + # We are at the last entry in the path so we must remove the field + if (len(path)) == index: + item = obj.pop(key) + else: + item = obj[key] + except KeyError: + # Key was not present in the dictionary, this is not indicative of an error + # as the given path may not point to a required field. We also do not want + # to enforce required fields as the API may differ from the spec in some cases. + return [] + if flattened_key is None: + flattened_key = key + else: + flattened_key += f"[{key}]" + return _extract_items( + item, + path, + index=index, + flattened_key=flattened_key, + ) + elif is_list(obj): + if key != "": + return [] + + return flatten( + [ + _extract_items( + item, + path, + index=index, + flattened_key=flattened_key + "[]" if flattened_key is not None else "[]", + ) + for item in obj + ] + ) + + # Something unexpected was passed, just ignore it. 
+ return [] + + +def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]: + return not isinstance(obj, NotGiven) and not isinstance(obj, Omit) + + +# Type safe methods for narrowing types with TypeVars. +# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown], +# however this cause Pyright to rightfully report errors. As we know we don't +# care about the contained types we can safely use `object` in its place. +# +# There are two separate functions defined, `is_*` and `is_*_t` for different use cases. +# `is_*` is for when you're dealing with an unknown input +# `is_*_t` is for when you're narrowing a known union type to a specific subset + + +def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]: + return isinstance(obj, tuple) + + +def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]: + return isinstance(obj, tuple) + + +def is_sequence(obj: object) -> TypeGuard[Sequence[object]]: + return isinstance(obj, Sequence) + + +def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]: + return isinstance(obj, Sequence) + + +def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]: + return isinstance(obj, Mapping) + + +def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]: + return isinstance(obj, Mapping) + + +def is_dict(obj: object) -> TypeGuard[dict[object, object]]: + return isinstance(obj, dict) + + +def is_list(obj: object) -> TypeGuard[list[object]]: + return isinstance(obj, list) + + +def is_iterable(obj: object) -> TypeGuard[Iterable[object]]: + return isinstance(obj, Iterable) + + +def deepcopy_minimal(item: _T) -> _T: + """Minimal reimplementation of copy.deepcopy() that will only copy certain object types: + + - mappings, e.g. `dict` + - list + + This is done for performance reasons. + """ + if is_mapping(item): + return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()}) + if is_list(item): + return cast(_T, [deepcopy_minimal(entry) for entry in item]) + return item + + +# copied from https://github.com/Rapptz/RoboDanny +def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str: + size = len(seq) + if size == 0: + return "" + + if size == 1: + return seq[0] + + if size == 2: + return f"{seq[0]} {final} {seq[1]}" + + return delim.join(seq[:-1]) + f" {final} {seq[-1]}" + + +def quote(string: str) -> str: + """Add single quotation marks around the given string. Does *not* do any escaping.""" + return f"'{string}'" + + +def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]: + """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function. + + Useful for enforcing runtime validation of overloaded functions. + + Example usage: + ```py + @overload + def foo(*, a: str) -> str: ... + + + @overload + def foo(*, b: bool) -> str: ... + + + # This enforces the same constraints that a static type checker would + # i.e. that either a or b must be passed to the function + @required_args(["a"], ["b"]) + def foo(*, a: str | None = None, b: bool | None = None) -> str: ... 
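+
+    # Illustrative calls, given the variants registered above:
+    foo(a="hello")  # ok
+    foo()  # raises TypeError (roughly): Missing required arguments; Expected either ('a') or ('b') arguments to be given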
+ ``` + """ + + def inner(func: CallableT) -> CallableT: + params = inspect.signature(func).parameters + positional = [ + name + for name, param in params.items() + if param.kind + in { + param.POSITIONAL_ONLY, + param.POSITIONAL_OR_KEYWORD, + } + ] + + @functools.wraps(func) + def wrapper(*args: object, **kwargs: object) -> object: + given_params: set[str] = set() + for i, _ in enumerate(args): + try: + given_params.add(positional[i]) + except IndexError: + raise TypeError( + f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given" + ) from None + + for key in kwargs.keys(): + given_params.add(key) + + for variant in variants: + matches = all((param in given_params for param in variant)) + if matches: + break + else: # no break + if len(variants) > 1: + variations = human_join( + ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants] + ) + msg = f"Missing required arguments; Expected either {variations} arguments to be given" + else: + assert len(variants) > 0 + + # TODO: this error message is not deterministic + missing = list(set(variants[0]) - given_params) + if len(missing) > 1: + msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}" + else: + msg = f"Missing required argument: {quote(missing[0])}" + raise TypeError(msg) + return func(*args, **kwargs) + + return wrapper # type: ignore + + return inner + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +@overload +def strip_not_given(obj: None) -> None: ... + + +@overload +def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... + + +@overload +def strip_not_given(obj: object) -> object: ... + + +def strip_not_given(obj: object | None) -> object: + """Remove all top-level keys where their values are instances of `NotGiven`""" + if obj is None: + return None + + if not is_mapping(obj): + return obj + + return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} + + +def coerce_integer(val: str) -> int: + return int(val, base=10) + + +def coerce_float(val: str) -> float: + return float(val) + + +def coerce_boolean(val: str) -> bool: + return val == "true" or val == "1" or val == "on" + + +def maybe_coerce_integer(val: str | None) -> int | None: + if val is None: + return None + return coerce_integer(val) + + +def maybe_coerce_float(val: str | None) -> float | None: + if val is None: + return None + return coerce_float(val) + + +def maybe_coerce_boolean(val: str | None) -> bool | None: + if val is None: + return None + return coerce_boolean(val) + + +def removeprefix(string: str, prefix: str) -> str: + """Remove a prefix from a string. + + Backport of `str.removeprefix` for Python < 3.9 + """ + if string.startswith(prefix): + return string[len(prefix) :] + return string + + +def removesuffix(string: str, suffix: str) -> str: + """Remove a suffix from a string. 
+ + Backport of `str.removesuffix` for Python < 3.9 + """ + if string.endswith(suffix): + return string[: -len(suffix)] + return string + + +def file_from_path(path: str) -> FileTypes: + contents = Path(path).read_bytes() + file_name = os.path.basename(path) + return (file_name, contents) + + +def get_required_header(headers: HeadersLike, header: str) -> str: + lower_header = header.lower() + if is_mapping_t(headers): + # mypy doesn't understand the type narrowing here + for k, v in headers.items(): # type: ignore + if k.lower() == lower_header and isinstance(v, str): + return v + + # to deal with the case where the header looks like Stainless-Event-Id + intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) + + for normalized_header in [header, lower_header, header.upper(), intercaps_header]: + value = headers.get(normalized_header) + if value: + return value + + raise ValueError(f"Could not find {header} header") + + +def get_async_library() -> str: + try: + return sniffio.current_async_library() + except Exception: + return "false" + + +def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]: + """A version of functools.lru_cache that retains the type signature + for the wrapped function arguments. + """ + wrapper = functools.lru_cache( # noqa: TID251 + maxsize=maxsize, + ) + return cast(Any, wrapper) # type: ignore[no-any-return] + + +def json_safe(data: object) -> object: + """Translates a mapping / sequence recursively in the same fashion + as `pydantic` v2's `model_dump(mode="json")`. + """ + if is_mapping(data): + return {json_safe(key): json_safe(value) for key, value in data.items()} + + if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)): + return [json_safe(item) for item in data] + + if isinstance(data, (datetime, date)): + return data.isoformat() + + return data diff --git a/src/imagekitio/_version.py b/src/imagekitio/_version.py new file mode 100644 index 00000000..32a263a1 --- /dev/null +++ b/src/imagekitio/_version.py @@ -0,0 +1,4 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +__title__ = "imagekitio" +__version__ = "5.0.0" # x-release-please-version diff --git a/src/imagekitio/lib/.keep b/src/imagekitio/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/imagekitio/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/src/imagekitio/lib/__init__.py b/src/imagekitio/lib/__init__.py new file mode 100644 index 00000000..5ba9d0db --- /dev/null +++ b/src/imagekitio/lib/__init__.py @@ -0,0 +1,11 @@ +# Custom helper functions - not generated from OpenAPI spec + +from .helper import ( + HelperResource, + AsyncHelperResource, +) + +__all__ = [ + "HelperResource", + "AsyncHelperResource", +] diff --git a/src/imagekitio/lib/helper.py b/src/imagekitio/lib/helper.py new file mode 100644 index 00000000..ed57436a --- /dev/null +++ b/src/imagekitio/lib/helper.py @@ -0,0 +1,808 @@ +# File manually created for helper functions - not generated from OpenAPI spec + +from __future__ import annotations + +import re +import hmac +import time +import uuid +import base64 +import hashlib +from typing import Any, Dict, List, Union, Iterable, Optional, Sequence, cast +from urllib.parse import quote, parse_qs, urlparse, urlunparse +from typing_extensions import Unpack + +from .._resource import SyncAPIResource, AsyncAPIResource +from ..types.shared_params.overlay import Overlay +from ..types.shared_params.src_options import SrcOptions +from ..types.shared_params.transformation import Transformation +from ..types.shared_params.text_overlay_transformation import TextOverlayTransformation +from ..types.shared_params.subtitle_overlay_transformation import SubtitleOverlayTransformation +from ..types.shared_params.solid_color_overlay_transformation import SolidColorOverlayTransformation + +# Type alias for any transformation type (main or overlay-specific) +AnyTransformation = Union[ + Transformation, TextOverlayTransformation, SubtitleOverlayTransformation, SolidColorOverlayTransformation +] + +__all__ = ["HelperResource", "AsyncHelperResource"] + +# Constants +TRANSFORMATION_PARAMETER = "tr" +SIGNATURE_PARAMETER = "ik-s" +TIMESTAMP_PARAMETER = "ik-t" +DEFAULT_TIMESTAMP = 9999999999 +SIMPLE_OVERLAY_PATH_REGEX = re.compile(r"^[a-zA-Z0-9-._/ ]*$") +SIMPLE_OVERLAY_TEXT_REGEX = re.compile(r"^[a-zA-Z0-9-._ ]*$") + +# Transformation key mapping +SUPPORTED_TRANSFORMS = { + # Basic sizing & layout + "width": "w", + "height": "h", + "aspect_ratio": "ar", + "background": "bg", + "border": "b", + "crop": "c", + "crop_mode": "cm", + "dpr": "dpr", + "focus": "fo", + "quality": "q", + "x": "x", + "x_center": "xc", + "y": "y", + "y_center": "yc", + "format": "f", + "video_codec": "vc", + "audio_codec": "ac", + "radius": "r", + "rotation": "rt", + "blur": "bl", + "named": "n", + "default_image": "di", + "flip": "fl", + "original": "orig", + "start_offset": "so", + "end_offset": "eo", + "duration": "du", + "streaming_resolutions": "sr", + # AI & advanced effects + "grayscale": "e-grayscale", + "ai_upscale": "e-upscale", + "ai_retouch": "e-retouch", + "ai_variation": "e-genvar", + "ai_drop_shadow": "e-dropshadow", + "ai_change_background": "e-changebg", + "ai_remove_background": "e-bgremove", + "ai_remove_background_external": "e-removedotbg", + "ai_edit": "e-edit", + "contrast_stretch": "e-contrast", + "shadow": "e-shadow", + "sharpen": "e-sharpen", + "unsharp_mask": "e-usm", + "gradient": "e-gradient", + # Other flags & finishing + "progressive": "pr", + "lossless": "lo", + "color_profile": "cp", + "metadata": "md", + "opacity": "o", + "trim": "t", + "zoom": "z", + "page": "pg", + # Text overlay transformations + "font_size": "fs", + "font_family": "ff", + "font_color": "co", + "inner_alignment": "ia", + "padding": "pa", + "alpha": "al", + "typography": "tg", + "line_height": "lh", + # Subtitles transformations + 
"font_outline": "fol", + "font_shadow": "fsh", + "color": "co", + # Raw pass-through + "raw": "raw", +} + +CHAIN_TRANSFORM_DELIMITER = ":" +TRANSFORM_DELIMITER = "," +TRANSFORM_KEY_VALUE_DELIMITER = "-" + +# RFC 3986 section 3.3 defines 'pchar' (path characters) that are safe to use unencoded: +# pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +# sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" +# This matches what Node.js URL.pathname uses and ensures compatibility across SDKs +RFC3986_PATH_SAFE_CHARS = "/:@!$&'()*+,;=-._~" + + +def _get_transform_key(transform: str) -> str: + """Get the short transformation key from the long form.""" + if not transform: + return "" + return SUPPORTED_TRANSFORMS.get(transform, transform) + + +def _add_trailing_slash(s: str) -> str: + """Add trailing slash if not present.""" + if s and not s.endswith("/"): + return s + "/" + return s + + +def _remove_trailing_slash(s: str) -> str: + """Remove trailing slash if present.""" + if s and s.endswith("/"): + return s[:-1] + return s + + +def _remove_leading_slash(s: str) -> str: + """Remove leading slash if present.""" + if s and s.startswith("/"): + return s[1:] + return s + + +def _format_number(value: Any) -> str: + """ + Format a numeric value as a string, removing unnecessary decimal points. + + Examples: + 5.0 -> "5" + 5.5 -> "5.5" + 5 -> "5" + "5" -> "5" + """ + if isinstance(value, (int, float)): + # Check if it's a whole number + if isinstance(value, float) and value.is_integer(): + return str(int(value)) + return str(value) + return str(value) + + +def _path_join(parts: List[str], sep: str = "/") -> str: + """Join path parts, handling slashes correctly.""" + cleaned_parts: List[str] = [] + for part in parts: + if part: + # Remove leading and trailing slashes from parts + cleaned_part = part.strip("/") + if cleaned_part: + cleaned_parts.append(cleaned_part) + return sep + sep.join(cleaned_parts) if cleaned_parts else "" + + +def _safe_btoa(s: str) -> str: + """ + Base64 encode a string and then URL-encode it. + This matches Node.js behavior: safeBtoa() + encodeURIComponent(). + + In Node.js: + - encodeURIComponent() encodes: / as %2F, + as %2B, = as %3D + - Python's quote() with default safe='/' doesn't encode / + - So we need to explicitly set safe='' to encode everything + """ + encoded = base64.b64encode(s.encode("utf-8")).decode("utf-8") + # URL encode the entire base64 string (/, +, =, etc.) + # quote() with safe='' will encode all special characters to match encodeURIComponent + return quote(encoded, safe="") + + +def _process_input_path(s: str, encoding: str) -> str: + """ + Process input path for overlays. + Returns the full parameter string including the i- or ie- prefix. + """ + if not s: + return "" + + # Remove leading and trailing slashes + s = _remove_trailing_slash(_remove_leading_slash(s)) + + if encoding == "plain": + return f"i-{s.replace('/', '@@')}" + + if encoding == "base64": + # safeBtoa already encodes = as %3D, no need for further encoding + return f"ie-{_safe_btoa(s)}" + + # Auto encoding: use plain for simple paths, base64 for special characters + if SIMPLE_OVERLAY_PATH_REGEX.match(s): + return f"i-{s.replace('/', '@@')}" + else: + # safeBtoa already encodes = as %3D, no need for further encoding + return f"ie-{_safe_btoa(s)}" + + +def _process_text(s: str, encoding: str) -> str: + """ + Process text for overlays. + Returns the full parameter string including the i- or ie- prefix. 
+ """ + if not s: + return "" + + if encoding == "plain": + return f"i-{quote(s, safe='')}" + + if encoding == "base64": + # safeBtoa already encodes = as %3D, no need for further encoding + return f"ie-{_safe_btoa(s)}" + + # Auto encoding: use plain for simple text, base64 for special characters + if SIMPLE_OVERLAY_TEXT_REGEX.match(s): + return f"i-{quote(s, safe='')}" + + # safeBtoa already encodes = as %3D, no need for further encoding + return f"ie-{_safe_btoa(s)}" + + +def _process_overlay(overlay: Overlay) -> str: + """Process overlay transformations.""" + if not overlay: + return "" + + # Extract type, position, timing, and transformation from overlay + overlay_type: str = cast(str, overlay.get("type", "")) + position: Dict[str, Any] = cast(Dict[str, Any], overlay.get("position", {})) + timing: Dict[str, Any] = cast(Dict[str, Any], overlay.get("timing", {})) + transformation: List[Any] = cast(List[Any], overlay.get("transformation", [])) + + if not overlay_type: + return "" + + parsed_overlay: List[str] = [] + + if overlay_type == "text": + text: str = cast(str, overlay.get("text", "")) + if not text: + return "" + + encoding: str = cast(str, overlay.get("encoding", "auto")) + parsed_overlay.append("l-text") + + # Process the text - returns full string with i- or ie- prefix + parsed_overlay.append(_process_text(text, encoding)) + + elif overlay_type == "image": + parsed_overlay.append("l-image") + + input_val: str = cast(str, overlay.get("input", "")) + if not input_val: + return "" + + img_encoding = cast(str, overlay.get("encoding", "auto")) + + # Process the input path - returns full string with i- or ie- prefix + parsed_overlay.append(_process_input_path(input_val, img_encoding)) + + elif overlay_type == "video": + parsed_overlay.append("l-video") + + video_input = cast(str, overlay.get("input", "")) + if not video_input: + return "" + + video_encoding = cast(str, overlay.get("encoding", "auto")) + + # Process the input path - returns full string with i- or ie- prefix + parsed_overlay.append(_process_input_path(video_input, video_encoding)) + + elif overlay_type == "subtitle": + parsed_overlay.append("l-subtitle") + + subtitle_input = cast(str, overlay.get("input", "")) + if not subtitle_input: + return "" + + subtitle_encoding = cast(str, overlay.get("encoding", "auto")) + + # Process the input path - returns full string with i- or ie- prefix + parsed_overlay.append(_process_input_path(subtitle_input, subtitle_encoding)) + + elif overlay_type == "solidColor": + parsed_overlay.append("l-image") + parsed_overlay.append("i-ik_canvas") + + color: str = cast(str, overlay.get("color", "")) + if not color: + return "" + + parsed_overlay.append(f"bg-{color}") + + # Handle position properties (x, y, focus) + # Node.js uses if (x) which skips falsy values like 0, '', false, null, undefined + x = position.get("x") + if x: + parsed_overlay.append(f"lx-{x}") + + y = position.get("y") + if y: + parsed_overlay.append(f"ly-{y}") + + focus = position.get("focus") + if focus: + parsed_overlay.append(f"lfo-{focus}") + + # Handle timing properties (start, end, duration) + # Node.js uses if (start) which skips falsy values + start = timing.get("start") + if start: + parsed_overlay.append(f"lso-{_format_number(start)}") + + end = timing.get("end") + if end: + parsed_overlay.append(f"leo-{_format_number(end)}") + + duration = timing.get("duration") + if duration: + parsed_overlay.append(f"ldu-{duration}") + + # Handle nested transformations for image/video overlays + if transformation: + 
transformation_string: str = _build_transformation_string(transformation) + if transformation_string and transformation_string.strip(): + parsed_overlay.append(transformation_string) + + # Close overlay + parsed_overlay.append("l-end") + + return TRANSFORM_DELIMITER.join(parsed_overlay) + + +def _build_transformation_string(transformation: Optional[Sequence[AnyTransformation]]) -> str: + """Build transformation string from transformation objects.""" + if not transformation: + return "" + + parsed_transforms: List[str] = [] + + for current_transform in transformation: + if not current_transform: + continue + + parsed_transform_step: List[str] = [] + + for key, value in current_transform.items(): + if value is None: + continue + + # Handle overlay separately + if key == "overlay" and isinstance(value, dict): + raw_string: str = _process_overlay(cast(Overlay, value)) + if raw_string and raw_string.strip(): + parsed_transform_step.append(raw_string) + continue + + # Get the transformation key + transform_key: str = _get_transform_key(key) + if not transform_key: + transform_key = key + + if not transform_key: + continue + + # Handle boolean transformations that should only output key + if transform_key in [ + "e-grayscale", + "e-contrast", + "e-removedotbg", + "e-bgremove", + "e-upscale", + "e-retouch", + "e-genvar", + ]: + if value is True or value == "-" or value == "true": + parsed_transform_step.append(transform_key) + # Any other value means that the effect should not be applied + continue + + # Handle transformations that can be true or have values + if transform_key in ["e-sharpen", "e-shadow", "e-gradient", "e-usm", "e-dropshadow"] and ( + str(value).strip() == "" or value is True or value == "true" + ): + parsed_transform_step.append(transform_key) + continue + + # Handle raw transformation + if key == "raw": + if isinstance(value, str) and value.strip(): + parsed_transform_step.append(value) + continue + + # Handle default_image and font_family - replace slashes + if transform_key in ["di", "ff"]: + value = _remove_trailing_slash(_remove_leading_slash(str(value) if value else "")) + value = value.replace("/", "@@") + + # Handle streaming_resolutions array + if transform_key == "sr" and isinstance(value, list): + value = "_".join(str(v) for v in cast(List[Any], value)) + + # Special case for trim with empty string + if transform_key == "t" and str(value).strip() == "": + value = "true" + + # Skip false values + if value is False: + continue + + # Skip empty strings (except for special keys that allow empty values) + if isinstance(value, str) and value.strip() == "": + continue + + # Convert boolean True to lowercase "true" + if value is True: + value = "true" + + # Format numeric values to avoid unnecessary .0 for integers + if isinstance(value, (int, float)): + value = _format_number(value) + + # Add the transformation + parsed_transform_step.append(f"{transform_key}{TRANSFORM_KEY_VALUE_DELIMITER}{value}") + + if parsed_transform_step: + parsed_transforms.append(TRANSFORM_DELIMITER.join(parsed_transform_step)) + + return CHAIN_TRANSFORM_DELIMITER.join(parsed_transforms) + + +def _get_signature_timestamp(seconds: Optional[float]) -> int: + """Calculate expiry timestamp for URL signing.""" + if not seconds or seconds <= 0: + return DEFAULT_TIMESTAMP + + # Try to parse as int, return DEFAULT_TIMESTAMP if invalid + try: + sec = int(seconds) + if sec <= 0: + return DEFAULT_TIMESTAMP + except (ValueError, TypeError): + return DEFAULT_TIMESTAMP + + return int(time.time()) + sec + + +def 
_get_signature(private_key: str, url: str, url_endpoint: str, expiry_timestamp: int) -> str: + """Generate HMAC-SHA1 signature for URL signing.""" + if not private_key or not url or not url_endpoint: + return "" + + # Create the string to sign: relative path + expiry timestamp + # This matches Node.js: url.replace(addTrailingSlash(urlEndpoint), '') + String(expiryTimestamp) + url_endpoint_with_slash = _add_trailing_slash(url_endpoint) + string_to_sign = url.replace(url_endpoint_with_slash, "") + str(expiry_timestamp) + + # Generate HMAC-SHA1 signature + signature = hmac.new(private_key.encode("utf-8"), string_to_sign.encode("utf-8"), hashlib.sha1).hexdigest() + + return signature + + +def _get_authentication_parameters(token: str, expire: int, private_key: str) -> Dict[str, Any]: + """Generate authentication parameters for uploads.""" + auth_parameters = { + "token": token, + "expire": expire, + "signature": "", + } + + signature = hmac.new(private_key.encode("utf-8"), f"{token}{expire}".encode("utf-8"), hashlib.sha1).hexdigest() + + auth_parameters["signature"] = signature + return auth_parameters + + +def _build_url( + src: str, + url_endpoint: str, + transformation_position: str, + transformation: Any, + query_parameters: Dict[str, Any], + signed: bool, + expires_in: Optional[float], + private_key: str, +) -> str: + """ + Internal implementation of build_url. + + Args: + src: Accepts a relative or absolute path of the resource. + url_endpoint: Get your urlEndpoint from the ImageKit dashboard. + transformation_position: By default, the transformation string is added as a query parameter. + transformation: An array of objects specifying the transformations to be applied in the URL. + query_parameters: Additional query parameters to add to the final URL. + signed: Whether to sign the URL or not. + expires_in: When you want the signed URL to expire, specified in seconds. + private_key: Private key for signing URLs. + + Returns: + The constructed source URL. 
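+
+    Illustrative example (relative src, transformation added as a query parameter):
+        _build_url(
+            src="/photo.jpg",
+            url_endpoint="https://ik.imagekit.io/demo",
+            transformation_position="query",
+            transformation=[{"width": 100}],
+            query_parameters={},
+            signed=False,
+            expires_in=None,
+            private_key="",
+        )
+        # -> "https://ik.imagekit.io/demo/photo.jpg?tr=w-100"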
+ """ + if not src: + return "" + + # Check if src is absolute URL + is_absolute_url = src.startswith("http://") or src.startswith("https://") + + # Track if src parameter is used for URL (matches Node.js isSrcParameterUsedForURL) + is_src_parameter_used_for_url = False + + # Parse URL + try: + if not is_absolute_url: + parsed_url = urlparse(url_endpoint) + else: + parsed_url = urlparse(src) + is_src_parameter_used_for_url = True + except Exception: + return "" + + # Build query parameters + query_dict_raw = dict(parse_qs(parsed_url.query)) + # Flatten lists from parse_qs + query_dict: Dict[str, str] = {k: v[0] if len(v) == 1 else ",".join(v) for k, v in query_dict_raw.items()} + + # Add additional query parameters - convert values to strings like Node.js does + if query_parameters: + for k, v in query_parameters.items(): + query_dict[k] = str(v) + + # Build transformation string + transformation_string = _build_transformation_string(transformation) + + # Determine if transformation should be in query or path + # Matches Node.js: addAsQuery = transformationUtils.addAsQueryParameter(opts) || isSrcParameterUsedForURL + add_as_query = transformation_position == "query" or is_src_parameter_used_for_url + + # Placeholder for transformation to avoid URL encoding issues + TRANSFORMATION_PLACEHOLDER = "PLEASEREPLACEJUSTBEFORESIGN" + + # Build the path + if not is_absolute_url: + # For relative URLs + endpoint_path = urlparse(url_endpoint).path + path_parts = [endpoint_path] if endpoint_path else [] + + # Add transformation in path if needed + if transformation_string and not add_as_query: + path_parts.append(f"{TRANSFORMATION_PARAMETER}{CHAIN_TRANSFORM_DELIMITER}{TRANSFORMATION_PLACEHOLDER}") + + # Add src path with RFC 3986 compliant encoding + # Python's urlunparse() doesn't auto-encode Unicode like Node.js URL does, + # so we must manually encode the path while preserving RFC 3986 safe chars + encoded_src = quote(src, safe=RFC3986_PATH_SAFE_CHARS) + path_parts.append(encoded_src) + + path = _path_join(path_parts) + else: + path = parsed_url.path + + # Add transformation to query if needed + if transformation_string and add_as_query: + query_dict[TRANSFORMATION_PARAMETER] = TRANSFORMATION_PLACEHOLDER + + # Build the URL + scheme = parsed_url.scheme or "https" + netloc = parsed_url.netloc if is_absolute_url else urlparse(url_endpoint).netloc + + # Build query string manually to avoid encoding transformation string + query_string = "" + if query_dict: + query_parts: List[str] = [] + for k, v in query_dict.items(): + query_parts.append(f"{k}={v}") + query_string = "&".join(query_parts) + + final_url = urlunparse((scheme, netloc, path, "", query_string, "")) + + # Replace placeholder with actual transformation string + if transformation_string: + final_url = final_url.replace(TRANSFORMATION_PLACEHOLDER, transformation_string) + + # Sign URL if needed + if signed or (expires_in and expires_in > 0): + expiry_timestamp = _get_signature_timestamp(expires_in) + + url_signature = _get_signature( + private_key=private_key, url=final_url, url_endpoint=url_endpoint, expiry_timestamp=expiry_timestamp + ) + + # Add signature parameters + parsed_final = urlparse(final_url) + has_existing_params = bool(parsed_final.query) + separator = "&" if has_existing_params else "?" 
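+        # Resulting shape: <url>?ik-t=<expiry>&ik-s=<signature> ("&" replaces "?" when the URL
+        # already has query parameters; ik-t is omitted for URLs that never expire).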
+ + if expiry_timestamp and expiry_timestamp != DEFAULT_TIMESTAMP: + final_url += f"{separator}{TIMESTAMP_PARAMETER}={expiry_timestamp}" + final_url += f"&{SIGNATURE_PARAMETER}={url_signature}" + else: + final_url += f"{separator}{SIGNATURE_PARAMETER}={url_signature}" + + return final_url + + +def _get_authentication_parameters_with_defaults( + token: Optional[str], expire: Optional[int], private_key: str +) -> Dict[str, Any]: + """ + Internal implementation of get_authentication_parameters with default value handling. + + Args: + token: Custom token for the upload session. If not provided, a UUID v4 will be generated automatically. + expire: Expiration time in seconds from now. If not provided, defaults to 1800 seconds (30 minutes). + private_key: Private key for generating authentication parameters. + + Returns: + Authentication parameters object containing token, expire, and signature. + """ + if not private_key: + raise ValueError("Private key is required for generating authentication parameters") + + # Generate token if not provided + if not token: + token = str(uuid.uuid4()) + + # Set default expiry if not provided + if expire is None: + expire = int(time.time()) + 1800 # 30 minutes default + + return _get_authentication_parameters(token, expire, private_key) + + +class HelperResource(SyncAPIResource): + """ + Helper resource for additional utility functions like URL building and authentication. + """ + + def build_url(self, **options: Unpack[SrcOptions]) -> str: + """ + Builds a source URL with the given options. + + Args: + src: Accepts a relative or absolute path of the resource. If a relative path is provided, + it is appended to the `url_endpoint`. If an absolute path is provided, `url_endpoint` is ignored. + url_endpoint: Get your urlEndpoint from the ImageKit dashboard. + transformation: An array of objects specifying the transformations to be applied in the URL. + transformation_position: By default, the transformation string is added as a query parameter. + Set to `path` to add it in the URL path instead. + signed: Whether to sign the URL or not. Set to `true` to generate a signed URL. + expires_in: When you want the signed URL to expire, specified in seconds. + query_parameters: Additional query parameters to add to the final URL. + + Returns: + The constructed source URL. + """ + return _build_url( + src=options.get("src", ""), + url_endpoint=options.get("url_endpoint", ""), + transformation_position=options.get("transformation_position", "query"), + transformation=options.get("transformation"), + query_parameters=options.get("query_parameters", {}), + signed=options.get("signed", False), + expires_in=options.get("expires_in"), + private_key=self._client.private_key, + ) + + def get_authentication_parameters( + self, + token: Optional[str] = None, + expire: Optional[int] = None, + ) -> Dict[str, Any]: + """ + Generates authentication parameters for client-side file uploads using ImageKit's Upload API. + + Args: + token: Custom token for the upload session. If not provided, a UUID v4 will be generated automatically. + expire: Expiration time in seconds from now. If not provided, defaults to 1800 seconds (30 minutes). 
+ + Returns: + Authentication parameters object containing: + - token: Unique identifier for this upload session + - expire: Unix timestamp when these parameters expire + - signature: HMAC-SHA1 signature for authenticating the upload + """ + return _get_authentication_parameters_with_defaults( + token=token, expire=expire, private_key=self._client.private_key + ) + + def build_transformation_string(self, transformation: Optional[Iterable[Transformation]] = None) -> str: + """ + Builds a transformation string from an array of transformation objects. + + Args: + transformation: List of transformation dictionaries. + + Returns: + The transformation string in ImageKit format. + """ + if transformation is None: + return "" + + # Convert to list if it's an iterable + if not isinstance(transformation, list): + transformation = list(transformation) + + return _build_transformation_string(transformation) + + +class AsyncHelperResource(AsyncAPIResource): + """ + Async version of helper resource for additional utility functions. + """ + + async def build_url(self, **options: Unpack[SrcOptions]) -> str: + """ + Async version of build_url. + + Args: + src: Accepts a relative or absolute path of the resource. If a relative path is provided, + it is appended to the `url_endpoint`. If an absolute path is provided, `url_endpoint` is ignored. + url_endpoint: Get your urlEndpoint from the ImageKit dashboard. + transformation: An array of objects specifying the transformations to be applied in the URL. + transformation_position: By default, the transformation string is added as a query parameter. + Set to `path` to add it in the URL path instead. + signed: Whether to sign the URL or not. Set to `true` to generate a signed URL. + expires_in: When you want the signed URL to expire, specified in seconds. + query_parameters: Additional query parameters to add to the final URL. + + Returns: + The constructed source URL. + """ + return _build_url( + src=options.get("src", ""), + url_endpoint=options.get("url_endpoint", ""), + transformation_position=options.get("transformation_position", "query"), + transformation=options.get("transformation"), + query_parameters=options.get("query_parameters", {}), + signed=options.get("signed", False), + expires_in=options.get("expires_in"), + private_key=self._client.private_key, + ) + + async def get_authentication_parameters( + self, + token: Optional[str] = None, + expire: Optional[int] = None, + ) -> Dict[str, Any]: + """ + Async version of get_authentication_parameters. + + Args: + token: Custom token for the upload session. If not provided, a UUID v4 will be generated automatically. + expire: Expiration time in seconds from now. If not provided, defaults to 1800 seconds (30 minutes). + + Returns: + Authentication parameters object containing: + - token: Unique identifier for this upload session + - expire: Unix timestamp when these parameters expire + - signature: HMAC-SHA1 signature for authenticating the upload + """ + return _get_authentication_parameters_with_defaults( + token=token, expire=expire, private_key=self._client.private_key + ) + + async def build_transformation_string(self, transformation: Optional[Iterable[Transformation]] = None) -> str: + """ + Async version of build_transformation_string. + + Args: + transformation: List of transformation dictionaries. + + Returns: + The transformation string in ImageKit format. 
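+
+        Illustrative example (assuming the client exposes this resource as `helper`):
+            await async_client.helper.build_transformation_string([{"width": 300, "height": 200}])
+            # -> "w-300,h-200"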
+ """ + if transformation is None: + return "" + + # Convert to list if it's an iterable + if not isinstance(transformation, list): + transformation = list(transformation) + + return _build_transformation_string(transformation) diff --git a/src/imagekitio/lib/serialization_utils.py b/src/imagekitio/lib/serialization_utils.py new file mode 100644 index 00000000..4fe5a473 --- /dev/null +++ b/src/imagekitio/lib/serialization_utils.py @@ -0,0 +1,47 @@ +# Serialization utilities for upload options +# This file handles serialization of upload parameters before sending to ImageKit API + +import json +from typing import Any, Dict, Sequence, cast + + +def serialize_upload_options(upload_options: Dict[str, Any]) -> Dict[str, Any]: + """ + Serialize upload options to handle proper formatting for ImageKit backend API. + + Special cases handled: + - tags: converted to comma-separated string + - response_fields: converted to comma-separated string + - extensions: JSON stringified + - custom_metadata: JSON stringified + - transformation: JSON stringified + + Args: + upload_options: Dictionary containing upload parameters + + Returns: + Dictionary with serialized values + """ + serialized: Dict[str, Any] = {**upload_options} + + for key in list(serialized.keys()): + if key and serialized[key] is not None: + value = serialized[key] + + if key == "tags" and isinstance(value, (list, tuple)): + # Tags should be comma-separated string + serialized[key] = ",".join(cast(Sequence[str], value)) + elif key == "response_fields" and isinstance(value, (list, tuple)): + # Response fields should be comma-separated string + serialized[key] = ",".join(cast(Sequence[str], value)) + elif key == "extensions" and isinstance(value, list): + # Extensions should be JSON stringified + serialized[key] = json.dumps(value) + elif key == "custom_metadata" and isinstance(value, dict): + # Custom metadata should be JSON stringified + serialized[key] = json.dumps(value) + elif key == "transformation" and isinstance(value, dict): + # Transformation should be JSON stringified + serialized[key] = json.dumps(value) + + return serialized diff --git a/imagekitio/utils/__init__.py b/src/imagekitio/py.typed similarity index 100% rename from imagekitio/utils/__init__.py rename to src/imagekitio/py.typed diff --git a/src/imagekitio/resources/__init__.py b/src/imagekitio/resources/__init__.py new file mode 100644 index 00000000..81ba578e --- /dev/null +++ b/src/imagekitio/resources/__init__.py @@ -0,0 +1,126 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .beta import ( + BetaResource, + AsyncBetaResource, + BetaResourceWithRawResponse, + AsyncBetaResourceWithRawResponse, + BetaResourceWithStreamingResponse, + AsyncBetaResourceWithStreamingResponse, +) +from .cache import ( + CacheResource, + AsyncCacheResource, + CacheResourceWithRawResponse, + AsyncCacheResourceWithRawResponse, + CacheResourceWithStreamingResponse, + AsyncCacheResourceWithStreamingResponse, +) +from .dummy import ( + DummyResource, + AsyncDummyResource, + DummyResourceWithRawResponse, + AsyncDummyResourceWithRawResponse, + DummyResourceWithStreamingResponse, + AsyncDummyResourceWithStreamingResponse, +) +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from .assets import ( + AssetsResource, + AsyncAssetsResource, + AssetsResourceWithRawResponse, + AsyncAssetsResourceWithRawResponse, + AssetsResourceWithStreamingResponse, + AsyncAssetsResourceWithStreamingResponse, +) +from .folders import ( + FoldersResource, + AsyncFoldersResource, + FoldersResourceWithRawResponse, + AsyncFoldersResourceWithRawResponse, + FoldersResourceWithStreamingResponse, + AsyncFoldersResourceWithStreamingResponse, +) +from .accounts import ( + AccountsResource, + AsyncAccountsResource, + AccountsResourceWithRawResponse, + AsyncAccountsResourceWithRawResponse, + AccountsResourceWithStreamingResponse, + AsyncAccountsResourceWithStreamingResponse, +) +from .webhooks import WebhooksResource, AsyncWebhooksResource +from ..lib.helper import ( + HelperResource, + AsyncHelperResource, +) +from .custom_metadata_fields import ( + CustomMetadataFieldsResource, + AsyncCustomMetadataFieldsResource, + CustomMetadataFieldsResourceWithRawResponse, + AsyncCustomMetadataFieldsResourceWithRawResponse, + CustomMetadataFieldsResourceWithStreamingResponse, + AsyncCustomMetadataFieldsResourceWithStreamingResponse, +) + +__all__ = [ + "DummyResource", + "AsyncDummyResource", + "DummyResourceWithRawResponse", + "AsyncDummyResourceWithRawResponse", + "DummyResourceWithStreamingResponse", + "AsyncDummyResourceWithStreamingResponse", + "CustomMetadataFieldsResource", + "AsyncCustomMetadataFieldsResource", + "CustomMetadataFieldsResourceWithRawResponse", + "AsyncCustomMetadataFieldsResourceWithRawResponse", + "CustomMetadataFieldsResourceWithStreamingResponse", + "AsyncCustomMetadataFieldsResourceWithStreamingResponse", + "FilesResource", + "AsyncFilesResource", + "FilesResourceWithRawResponse", + "AsyncFilesResourceWithRawResponse", + "FilesResourceWithStreamingResponse", + "AsyncFilesResourceWithStreamingResponse", + "AssetsResource", + "AsyncAssetsResource", + "AssetsResourceWithRawResponse", + "AsyncAssetsResourceWithRawResponse", + "AssetsResourceWithStreamingResponse", + "AsyncAssetsResourceWithStreamingResponse", + "CacheResource", + "AsyncCacheResource", + "CacheResourceWithRawResponse", + "AsyncCacheResourceWithRawResponse", + "CacheResourceWithStreamingResponse", + "AsyncCacheResourceWithStreamingResponse", + "FoldersResource", + "AsyncFoldersResource", + "FoldersResourceWithRawResponse", + "AsyncFoldersResourceWithRawResponse", + "FoldersResourceWithStreamingResponse", + "AsyncFoldersResourceWithStreamingResponse", + "AccountsResource", + "AsyncAccountsResource", + "AccountsResourceWithRawResponse", + "AsyncAccountsResourceWithRawResponse", + "AccountsResourceWithStreamingResponse", + "AsyncAccountsResourceWithStreamingResponse", + 
"BetaResource", + "AsyncBetaResource", + "BetaResourceWithRawResponse", + "AsyncBetaResourceWithRawResponse", + "BetaResourceWithStreamingResponse", + "AsyncBetaResourceWithStreamingResponse", + "WebhooksResource", + "AsyncWebhooksResource", + "HelperResource", + "AsyncHelperResource", +] diff --git a/src/imagekitio/resources/accounts/__init__.py b/src/imagekitio/resources/accounts/__init__.py new file mode 100644 index 00000000..fc56413d --- /dev/null +++ b/src/imagekitio/resources/accounts/__init__.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .usage import ( + UsageResource, + AsyncUsageResource, + UsageResourceWithRawResponse, + AsyncUsageResourceWithRawResponse, + UsageResourceWithStreamingResponse, + AsyncUsageResourceWithStreamingResponse, +) +from .origins import ( + OriginsResource, + AsyncOriginsResource, + OriginsResourceWithRawResponse, + AsyncOriginsResourceWithRawResponse, + OriginsResourceWithStreamingResponse, + AsyncOriginsResourceWithStreamingResponse, +) +from .accounts import ( + AccountsResource, + AsyncAccountsResource, + AccountsResourceWithRawResponse, + AsyncAccountsResourceWithRawResponse, + AccountsResourceWithStreamingResponse, + AsyncAccountsResourceWithStreamingResponse, +) +from .url_endpoints import ( + URLEndpointsResource, + AsyncURLEndpointsResource, + URLEndpointsResourceWithRawResponse, + AsyncURLEndpointsResourceWithRawResponse, + URLEndpointsResourceWithStreamingResponse, + AsyncURLEndpointsResourceWithStreamingResponse, +) + +__all__ = [ + "UsageResource", + "AsyncUsageResource", + "UsageResourceWithRawResponse", + "AsyncUsageResourceWithRawResponse", + "UsageResourceWithStreamingResponse", + "AsyncUsageResourceWithStreamingResponse", + "OriginsResource", + "AsyncOriginsResource", + "OriginsResourceWithRawResponse", + "AsyncOriginsResourceWithRawResponse", + "OriginsResourceWithStreamingResponse", + "AsyncOriginsResourceWithStreamingResponse", + "URLEndpointsResource", + "AsyncURLEndpointsResource", + "URLEndpointsResourceWithRawResponse", + "AsyncURLEndpointsResourceWithRawResponse", + "URLEndpointsResourceWithStreamingResponse", + "AsyncURLEndpointsResourceWithStreamingResponse", + "AccountsResource", + "AsyncAccountsResource", + "AccountsResourceWithRawResponse", + "AsyncAccountsResourceWithRawResponse", + "AccountsResourceWithStreamingResponse", + "AsyncAccountsResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/accounts/accounts.py b/src/imagekitio/resources/accounts/accounts.py new file mode 100644 index 00000000..461e8cff --- /dev/null +++ b/src/imagekitio/resources/accounts/accounts.py @@ -0,0 +1,166 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .usage import ( + UsageResource, + AsyncUsageResource, + UsageResourceWithRawResponse, + AsyncUsageResourceWithRawResponse, + UsageResourceWithStreamingResponse, + AsyncUsageResourceWithStreamingResponse, +) +from .origins import ( + OriginsResource, + AsyncOriginsResource, + OriginsResourceWithRawResponse, + AsyncOriginsResourceWithRawResponse, + OriginsResourceWithStreamingResponse, + AsyncOriginsResourceWithStreamingResponse, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .url_endpoints import ( + URLEndpointsResource, + AsyncURLEndpointsResource, + URLEndpointsResourceWithRawResponse, + AsyncURLEndpointsResourceWithRawResponse, + URLEndpointsResourceWithStreamingResponse, + AsyncURLEndpointsResourceWithStreamingResponse, +) + +__all__ = ["AccountsResource", "AsyncAccountsResource"] + + +class AccountsResource(SyncAPIResource): + @cached_property + def usage(self) -> UsageResource: + return UsageResource(self._client) + + @cached_property + def origins(self) -> OriginsResource: + return OriginsResource(self._client) + + @cached_property + def url_endpoints(self) -> URLEndpointsResource: + return URLEndpointsResource(self._client) + + @cached_property + def with_raw_response(self) -> AccountsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AccountsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AccountsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AccountsResourceWithStreamingResponse(self) + + +class AsyncAccountsResource(AsyncAPIResource): + @cached_property + def usage(self) -> AsyncUsageResource: + return AsyncUsageResource(self._client) + + @cached_property + def origins(self) -> AsyncOriginsResource: + return AsyncOriginsResource(self._client) + + @cached_property + def url_endpoints(self) -> AsyncURLEndpointsResource: + return AsyncURLEndpointsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAccountsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncAccountsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAccountsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncAccountsResourceWithStreamingResponse(self) + + +class AccountsResourceWithRawResponse: + def __init__(self, accounts: AccountsResource) -> None: + self._accounts = accounts + + @cached_property + def usage(self) -> UsageResourceWithRawResponse: + return UsageResourceWithRawResponse(self._accounts.usage) + + @cached_property + def origins(self) -> OriginsResourceWithRawResponse: + return OriginsResourceWithRawResponse(self._accounts.origins) + + @cached_property + def url_endpoints(self) -> URLEndpointsResourceWithRawResponse: + return URLEndpointsResourceWithRawResponse(self._accounts.url_endpoints) + + +class AsyncAccountsResourceWithRawResponse: + def __init__(self, accounts: AsyncAccountsResource) -> None: + self._accounts = accounts + + @cached_property + def usage(self) -> AsyncUsageResourceWithRawResponse: + return AsyncUsageResourceWithRawResponse(self._accounts.usage) + + @cached_property + def origins(self) -> AsyncOriginsResourceWithRawResponse: + return AsyncOriginsResourceWithRawResponse(self._accounts.origins) + + @cached_property + def url_endpoints(self) -> AsyncURLEndpointsResourceWithRawResponse: + return AsyncURLEndpointsResourceWithRawResponse(self._accounts.url_endpoints) + + +class AccountsResourceWithStreamingResponse: + def __init__(self, accounts: AccountsResource) -> None: + self._accounts = accounts + + @cached_property + def usage(self) -> UsageResourceWithStreamingResponse: + return UsageResourceWithStreamingResponse(self._accounts.usage) + + @cached_property + def origins(self) -> OriginsResourceWithStreamingResponse: + return OriginsResourceWithStreamingResponse(self._accounts.origins) + + @cached_property + def url_endpoints(self) -> URLEndpointsResourceWithStreamingResponse: + return URLEndpointsResourceWithStreamingResponse(self._accounts.url_endpoints) + + +class AsyncAccountsResourceWithStreamingResponse: + def __init__(self, accounts: AsyncAccountsResource) -> None: + self._accounts = accounts + + @cached_property + def usage(self) -> AsyncUsageResourceWithStreamingResponse: + return AsyncUsageResourceWithStreamingResponse(self._accounts.usage) + + @cached_property + def origins(self) -> AsyncOriginsResourceWithStreamingResponse: + return AsyncOriginsResourceWithStreamingResponse(self._accounts.origins) + + @cached_property + def url_endpoints(self) -> AsyncURLEndpointsResourceWithStreamingResponse: + return AsyncURLEndpointsResourceWithStreamingResponse(self._accounts.url_endpoints) diff --git a/src/imagekitio/resources/accounts/origins.py b/src/imagekitio/resources/accounts/origins.py new file mode 100644 index 00000000..27dc7af5 --- /dev/null +++ b/src/imagekitio/resources/accounts/origins.py @@ -0,0 +1,2233 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Any, cast +from typing_extensions import Literal, overload + +import httpx + +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.accounts import origin_create_params, origin_update_params +from ...types.accounts.origin_response import OriginResponse +from ...types.accounts.origin_list_response import OriginListResponse + +__all__ = ["OriginsResource", "AsyncOriginsResource"] + + +class OriginsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> OriginsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return OriginsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OriginsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return OriginsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["S3"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + access_key: str, + bucket: str, + endpoint: str, + name: str, + secret_key: str, + type: Literal["S3_COMPATIBLE"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + endpoint: Custom S3-compatible endpoint. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + s3_force_path_style: Use path-style S3 URLs? + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["CLOUDINARY_BACKUP"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + base_url: str, + name: str, + type: Literal["WEB_FOLDER"], + base_url_for_canonical_header: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + base_url: Root URL for the web folder origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). 
+ + forward_host_header_to_origin: Forward the Host header to origin? + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + name: str, + type: Literal["WEB_PROXY"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + bucket: str, + client_email: str, + name: str, + private_key: str, + type: Literal["GCS"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + account_name: str, + container: str, + name: str, + sas_token: str, + type: Literal["AZURE_BLOB"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. 
+ + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + base_url: str, + client_id: str, + client_secret: str, + name: str, + password: str, + type: Literal["AKENEO_PIM"], + username: str, + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + base_url: Akeneo instance base URL. + + client_id: Akeneo API client ID. + + client_secret: Akeneo API client secret. + + name: Display name of the origin. + + password: Akeneo API password. + + username: Akeneo API username. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args( + ["access_key", "bucket", "name", "secret_key", "type"], + ["access_key", "bucket", "endpoint", "name", "secret_key", "type"], + ["base_url", "name", "type"], + ["name", "type"], + ["bucket", "client_email", "name", "private_key", "type"], + ["account_name", "container", "name", "sas_token", "type"], + ["base_url", "client_id", "client_secret", "name", "password", "type", "username"], + ) + def create( + self, + *, + access_key: str | Omit = omit, + bucket: str | Omit = omit, + name: str, + secret_key: str | Omit = omit, + type: Literal["S3"] + | Literal["S3_COMPATIBLE"] + | Literal["CLOUDINARY_BACKUP"] + | Literal["WEB_FOLDER"] + | Literal["WEB_PROXY"] + | Literal["GCS"] + | Literal["AZURE_BLOB"] + | Literal["AKENEO_PIM"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + endpoint: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + base_url: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + client_email: str | Omit = omit, + private_key: str | Omit = omit, + account_name: str | Omit = omit, + container: str | Omit = omit, + sas_token: str | Omit = omit, + client_id: str | Omit = omit, + client_secret: str | Omit = omit, + password: str | Omit = omit, + username: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + return cast( + OriginResponse, + self._post( + "/v1/accounts/origins", + body=maybe_transform( + { + "access_key": access_key, + "bucket": bucket, + "name": name, + "secret_key": secret_key, + "type": type, + "base_url_for_canonical_header": base_url_for_canonical_header, + "include_canonical_header": include_canonical_header, + "prefix": prefix, + "endpoint": endpoint, + "s3_force_path_style": s3_force_path_style, + "base_url": base_url, + "forward_host_header_to_origin": forward_host_header_to_origin, + "client_email": client_email, + "private_key": private_key, + "account_name": account_name, + "container": container, + "sas_token": sas_token, + "client_id": client_id, + "client_secret": client_secret, + "password": password, + "username": username, + }, + origin_create_params.OriginCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + @overload + def update( + self, + id: str, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["S3"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + access_key: str, + bucket: str, + endpoint: str, + name: str, + secret_key: str, + type: Literal["S3_COMPATIBLE"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + endpoint: Custom S3-compatible endpoint. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + s3_force_path_style: Use path-style S3 URLs? + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["CLOUDINARY_BACKUP"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + base_url: str, + name: str, + type: Literal["WEB_FOLDER"], + base_url_for_canonical_header: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. 
+ + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + base_url: Root URL for the web folder origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + forward_host_header_to_origin: Forward the Host header to origin? + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + name: str, + type: Literal["WEB_PROXY"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + bucket: str, + client_email: str, + name: str, + private_key: str, + type: Literal["GCS"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def update( + self, + id: str, + *, + account_name: str, + container: str, + name: str, + sas_token: str, + type: Literal["AZURE_BLOB"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + id: str, + *, + base_url: str, + client_id: str, + client_secret: str, + name: str, + password: str, + type: Literal["AKENEO_PIM"], + username: str, + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + base_url: Akeneo instance base URL. + + client_id: Akeneo API client ID. + + client_secret: Akeneo API client secret. + + name: Display name of the origin. + + password: Akeneo API password. + + username: Akeneo API username. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
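# Illustrative usage sketch for the synchronous `create` and `update` methods defined
# above. The client class name (`ImageKit`), its constructor arguments, the
# `client.accounts.origins` accessor, and the `.id` attribute on the returned origin
# object are assumptions made for illustration only; the keyword arguments follow the
# `WEB_FOLDER` overload documented in this resource.
from imagekitio import ImageKit  # assumed client entry point

client = ImageKit(private_key="private_xxx")  # hypothetical credentials

# Create a web folder origin (required per the WEB_FOLDER overload: base_url, name, type).
origin = client.accounts.origins.create(
    type="WEB_FOLDER",
    name="marketing-assets",
    base_url="https://assets.example.com",
)

# Rename the origin; the WEB_FOLDER overload requires base_url, name, and type on update as well.
updated = client.accounts.origins.update(
    origin.id,  # assumed to be exposed on OriginResponse
    type="WEB_FOLDER",
    name="marketing-assets-v2",
    base_url="https://assets.example.com",
)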
+ + @required_args( + ["access_key", "bucket", "name", "secret_key", "type"], + ["access_key", "bucket", "endpoint", "name", "secret_key", "type"], + ["base_url", "name", "type"], + ["name", "type"], + ["bucket", "client_email", "name", "private_key", "type"], + ["account_name", "container", "name", "sas_token", "type"], + ["base_url", "client_id", "client_secret", "name", "password", "type", "username"], + ) + def update( + self, + id: str, + *, + access_key: str | Omit = omit, + bucket: str | Omit = omit, + name: str, + secret_key: str | Omit = omit, + type: Literal["S3"] + | Literal["S3_COMPATIBLE"] + | Literal["CLOUDINARY_BACKUP"] + | Literal["WEB_FOLDER"] + | Literal["WEB_PROXY"] + | Literal["GCS"] + | Literal["AZURE_BLOB"] + | Literal["AKENEO_PIM"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + endpoint: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + base_url: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + client_email: str | Omit = omit, + private_key: str | Omit = omit, + account_name: str | Omit = omit, + container: str | Omit = omit, + sas_token: str | Omit = omit, + client_id: str | Omit = omit, + client_secret: str | Omit = omit, + password: str | Omit = omit, + username: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return cast( + OriginResponse, + self._put( + f"/v1/accounts/origins/{id}", + body=maybe_transform( + { + "access_key": access_key, + "bucket": bucket, + "name": name, + "secret_key": secret_key, + "type": type, + "base_url_for_canonical_header": base_url_for_canonical_header, + "include_canonical_header": include_canonical_header, + "prefix": prefix, + "endpoint": endpoint, + "s3_force_path_style": s3_force_path_style, + "base_url": base_url, + "forward_host_header_to_origin": forward_host_header_to_origin, + "client_email": client_email, + "private_key": private_key, + "account_name": account_name, + "container": container, + "sas_token": sas_token, + "client_id": client_id, + "client_secret": client_secret, + "password": password, + "username": username, + }, + origin_update_params.OriginUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginListResponse: + """**Note:** This API is currently in beta. + + + Returns an array of all configured origins for the current account. 
+ """ + return self._get( + "/v1/accounts/origins", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OriginListResponse, + ) + + def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """**Note:** This API is currently in beta. + + + Permanently removes the origin identified by `id`. If the origin is in use by + any URL‑endpoints, the API will return an error. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v1/accounts/origins/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def get( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Retrieves the origin identified by `id`. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return cast( + OriginResponse, + self._get( + f"/v1/accounts/origins/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + +class AsyncOriginsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncOriginsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncOriginsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOriginsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncOriginsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["S3"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + access_key: str, + bucket: str, + endpoint: str, + name: str, + secret_key: str, + type: Literal["S3_COMPATIBLE"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + endpoint: Custom S3-compatible endpoint. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + s3_force_path_style: Use path-style S3 URLs? + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
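# A short sketch of the `.with_raw_response` prefix documented on this resource: it
# wraps the same method calls but returns the raw HTTP response object instead of the
# parsed model. The client construction, the `client.accounts.origins` path, and the
# `.parse()` helper on the wrapper are assumptions for illustration.
from imagekitio import ImageKit  # assumed client entry point

client = ImageKit(private_key="private_xxx")  # hypothetical credentials

raw = client.accounts.origins.with_raw_response.list()
print(raw.headers)     # inspect raw response headers, per the linked README section
origins = raw.parse()  # assumed helper returning the parsed OriginListResponse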
+ + @overload + async def create( + self, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["CLOUDINARY_BACKUP"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + base_url: str, + name: str, + type: Literal["WEB_FOLDER"], + base_url_for_canonical_header: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + base_url: Root URL for the web folder origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + forward_host_header_to_origin: Forward the Host header to origin? + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + name: str, + type: Literal["WEB_PROXY"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. 
+ + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + bucket: str, + client_email: str, + name: str, + private_key: str, + type: Literal["GCS"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + account_name: str, + container: str, + name: str, + sas_token: str, + type: Literal["AZURE_BLOB"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + base_url: str, + client_id: str, + client_secret: str, + name: str, + password: str, + type: Literal["AKENEO_PIM"], + username: str, + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Creates a new origin and returns the origin object. + + Args: + base_url: Akeneo instance base URL. + + client_id: Akeneo API client ID. + + client_secret: Akeneo API client secret. + + name: Display name of the origin. + + password: Akeneo API password. + + username: Akeneo API username. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args( + ["access_key", "bucket", "name", "secret_key", "type"], + ["access_key", "bucket", "endpoint", "name", "secret_key", "type"], + ["base_url", "name", "type"], + ["name", "type"], + ["bucket", "client_email", "name", "private_key", "type"], + ["account_name", "container", "name", "sas_token", "type"], + ["base_url", "client_id", "client_secret", "name", "password", "type", "username"], + ) + async def create( + self, + *, + access_key: str | Omit = omit, + bucket: str | Omit = omit, + name: str, + secret_key: str | Omit = omit, + type: Literal["S3"] + | Literal["S3_COMPATIBLE"] + | Literal["CLOUDINARY_BACKUP"] + | Literal["WEB_FOLDER"] + | Literal["WEB_PROXY"] + | Literal["GCS"] + | Literal["AZURE_BLOB"] + | Literal["AKENEO_PIM"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + endpoint: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + base_url: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + client_email: str | Omit = omit, + private_key: str | Omit = omit, + account_name: str | Omit = omit, + container: str | Omit = omit, + sas_token: str | Omit = omit, + client_id: str | Omit = omit, + client_secret: str | Omit = omit, + password: str | Omit = omit, + username: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + return cast( + OriginResponse, + await self._post( + "/v1/accounts/origins", + body=await async_maybe_transform( + { + "access_key": access_key, + "bucket": bucket, + "name": name, + "secret_key": secret_key, + "type": type, + "base_url_for_canonical_header": base_url_for_canonical_header, + "include_canonical_header": include_canonical_header, + "prefix": prefix, + "endpoint": endpoint, + "s3_force_path_style": s3_force_path_style, + "base_url": base_url, + "forward_host_header_to_origin": forward_host_header_to_origin, + "client_email": client_email, + "private_key": private_key, + "account_name": account_name, + "container": container, + "sas_token": sas_token, + "client_id": client_id, + "client_secret": client_secret, + "password": password, + "username": username, + }, + origin_create_params.OriginCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + @overload + async def update( + self, + id: str, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["S3"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + access_key: str, + bucket: str, + endpoint: str, + name: str, + secret_key: str, + type: Literal["S3_COMPATIBLE"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + endpoint: Custom S3-compatible endpoint. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + s3_force_path_style: Use path-style S3 URLs? + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + access_key: str, + bucket: str, + name: str, + secret_key: str, + type: Literal["CLOUDINARY_BACKUP"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + access_key: Access key for the bucket. + + bucket: S3 bucket name. + + name: Display name of the origin. + + secret_key: Secret key for the bucket. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + prefix: Path prefix inside the bucket. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + base_url: str, + name: str, + type: Literal["WEB_FOLDER"], + base_url_for_canonical_header: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. 
+ + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + base_url: Root URL for the web folder origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + forward_host_header_to_origin: Forward the Host header to origin? + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + name: str, + type: Literal["WEB_PROXY"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + bucket: str, + client_email: str, + name: str, + private_key: str, + type: Literal["GCS"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def update( + self, + id: str, + *, + account_name: str, + container: str, + name: str, + sas_token: str, + type: Literal["AZURE_BLOB"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + name: Display name of the origin. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + id: str, + *, + base_url: str, + client_id: str, + client_secret: str, + name: str, + password: str, + type: Literal["AKENEO_PIM"], + username: str, + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Updates the origin identified by `id` and returns the updated origin object. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + base_url: Akeneo instance base URL. + + client_id: Akeneo API client ID. + + client_secret: Akeneo API client secret. + + name: Display name of the origin. + + password: Akeneo API password. + + username: Akeneo API username. + + base_url_for_canonical_header: URL used in the Canonical header (if enabled). + + include_canonical_header: Whether to send a Canonical header. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
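# Async counterpart to the sketch shown for the synchronous resource: the same call
# shape, awaited on an assumed `AsyncImageKit` client. The class name, constructor
# arguments, and `client.accounts.origins` accessor are assumptions for illustration;
# the keyword arguments follow the WEB_PROXY overload (required: name, type).
import asyncio

from imagekitio import AsyncImageKit  # assumed async client entry point


async def main() -> None:
    client = AsyncImageKit(private_key="private_xxx")  # hypothetical credentials
    origin = await client.accounts.origins.create(
        type="WEB_PROXY",
        name="pass-through-proxy",
    )
    print(origin)


asyncio.run(main())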
+ + @required_args( + ["access_key", "bucket", "name", "secret_key", "type"], + ["access_key", "bucket", "endpoint", "name", "secret_key", "type"], + ["base_url", "name", "type"], + ["name", "type"], + ["bucket", "client_email", "name", "private_key", "type"], + ["account_name", "container", "name", "sas_token", "type"], + ["base_url", "client_id", "client_secret", "name", "password", "type", "username"], + ) + async def update( + self, + id: str, + *, + access_key: str | Omit = omit, + bucket: str | Omit = omit, + name: str, + secret_key: str | Omit = omit, + type: Literal["S3"] + | Literal["S3_COMPATIBLE"] + | Literal["CLOUDINARY_BACKUP"] + | Literal["WEB_FOLDER"] + | Literal["WEB_PROXY"] + | Literal["GCS"] + | Literal["AZURE_BLOB"] + | Literal["AKENEO_PIM"], + base_url_for_canonical_header: str | Omit = omit, + include_canonical_header: bool | Omit = omit, + prefix: str | Omit = omit, + endpoint: str | Omit = omit, + s3_force_path_style: bool | Omit = omit, + base_url: str | Omit = omit, + forward_host_header_to_origin: bool | Omit = omit, + client_email: str | Omit = omit, + private_key: str | Omit = omit, + account_name: str | Omit = omit, + container: str | Omit = omit, + sas_token: str | Omit = omit, + client_id: str | Omit = omit, + client_secret: str | Omit = omit, + password: str | Omit = omit, + username: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return cast( + OriginResponse, + await self._put( + f"/v1/accounts/origins/{id}", + body=await async_maybe_transform( + { + "access_key": access_key, + "bucket": bucket, + "name": name, + "secret_key": secret_key, + "type": type, + "base_url_for_canonical_header": base_url_for_canonical_header, + "include_canonical_header": include_canonical_header, + "prefix": prefix, + "endpoint": endpoint, + "s3_force_path_style": s3_force_path_style, + "base_url": base_url, + "forward_host_header_to_origin": forward_host_header_to_origin, + "client_email": client_email, + "private_key": private_key, + "account_name": account_name, + "container": container, + "sas_token": sas_token, + "client_id": client_id, + "client_secret": client_secret, + "password": password, + "username": username, + }, + origin_update_params.OriginUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginListResponse: + """**Note:** This API is currently in beta. + + + Returns an array of all configured origins for the current account. 
+ """ + return await self._get( + "/v1/accounts/origins", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OriginListResponse, + ) + + async def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """**Note:** This API is currently in beta. + + + Permanently removes the origin identified by `id`. If the origin is in use by + any URL‑endpoints, the API will return an error. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v1/accounts/origins/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def get( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> OriginResponse: + """**Note:** This API is currently in beta. + + + Retrieves the origin identified by `id`. + + Args: + id: Unique identifier for the origin. This is generated by ImageKit when you create + a new origin. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return cast( + OriginResponse, + await self._get( + f"/v1/accounts/origins/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast(Any, OriginResponse), # Union types cannot be passed in as arguments in the type system + ), + ) + + +class OriginsResourceWithRawResponse: + def __init__(self, origins: OriginsResource) -> None: + self._origins = origins + + self.create = to_raw_response_wrapper( + origins.create, + ) + self.update = to_raw_response_wrapper( + origins.update, + ) + self.list = to_raw_response_wrapper( + origins.list, + ) + self.delete = to_raw_response_wrapper( + origins.delete, + ) + self.get = to_raw_response_wrapper( + origins.get, + ) + + +class AsyncOriginsResourceWithRawResponse: + def __init__(self, origins: AsyncOriginsResource) -> None: + self._origins = origins + + self.create = async_to_raw_response_wrapper( + origins.create, + ) + self.update = async_to_raw_response_wrapper( + origins.update, + ) + self.list = async_to_raw_response_wrapper( + origins.list, + ) + self.delete = async_to_raw_response_wrapper( + origins.delete, + ) + self.get = async_to_raw_response_wrapper( + origins.get, + ) + + +class OriginsResourceWithStreamingResponse: + def __init__(self, origins: OriginsResource) -> None: + self._origins = origins + + self.create = to_streamed_response_wrapper( + origins.create, + ) + self.update = to_streamed_response_wrapper( + origins.update, + ) + self.list = to_streamed_response_wrapper( + origins.list, + ) + self.delete = to_streamed_response_wrapper( + origins.delete, + ) + self.get = to_streamed_response_wrapper( + origins.get, + ) + + +class AsyncOriginsResourceWithStreamingResponse: + def __init__(self, origins: AsyncOriginsResource) -> None: + self._origins = origins + + self.create = async_to_streamed_response_wrapper( + origins.create, + ) + self.update = async_to_streamed_response_wrapper( + origins.update, + ) + self.list = async_to_streamed_response_wrapper( + origins.list, + ) + self.delete = async_to_streamed_response_wrapper( + origins.delete, + ) + self.get = async_to_streamed_response_wrapper( + origins.get, + ) diff --git a/src/imagekitio/resources/accounts/url_endpoints.py b/src/imagekitio/resources/accounts/url_endpoints.py new file mode 100644 index 00000000..ab8d4222 --- /dev/null +++ b/src/imagekitio/resources/accounts/url_endpoints.py @@ -0,0 +1,594 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.accounts import url_endpoint_create_params, url_endpoint_update_params +from ...types.accounts.url_endpoint_response import URLEndpointResponse +from ...types.accounts.url_endpoint_list_response import URLEndpointListResponse + +__all__ = ["URLEndpointsResource", "AsyncURLEndpointsResource"] + + +class URLEndpointsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> URLEndpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return URLEndpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> URLEndpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return URLEndpointsResourceWithStreamingResponse(self) + + def create( + self, + *, + description: str, + origins: SequenceNotStr[str] | Omit = omit, + url_prefix: str | Omit = omit, + url_rewriter: url_endpoint_create_params.URLRewriter | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Creates a new URL‑endpoint and returns the resulting object. + + Args: + description: Description of the URL endpoint. + + origins: Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + + url_prefix: Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + + url_rewriter: Configuration for third-party URL rewriting. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/accounts/url-endpoints", + body=maybe_transform( + { + "description": description, + "origins": origins, + "url_prefix": url_prefix, + "url_rewriter": url_rewriter, + }, + url_endpoint_create_params.URLEndpointCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + def update( + self, + id: str, + *, + description: str, + origins: SequenceNotStr[str] | Omit = omit, + url_prefix: str | Omit = omit, + url_rewriter: url_endpoint_update_params.URLRewriter | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Updates the URL‑endpoint identified by `id` and returns the updated object. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. + + description: Description of the URL endpoint. + + origins: Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + + url_prefix: Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + + url_rewriter: Configuration for third-party URL rewriting. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return self._put( + f"/v1/accounts/url-endpoints/{id}", + body=maybe_transform( + { + "description": description, + "origins": origins, + "url_prefix": url_prefix, + "url_rewriter": url_rewriter, + }, + url_endpoint_update_params.URLEndpointUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointListResponse: + """**Note:** This API is currently in beta. + + + Returns an array of all URL‑endpoints configured including the default + URL-endpoint generated by ImageKit during account creation. 
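+
+        Example (an illustrative sketch; the attribute path
+        `client.accounts.url_endpoints` on a configured client instance is an
+        assumption, not confirmed by this file):
+
+            endpoints = client.accounts.url_endpoints.list()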
+ """ + return self._get( + "/v1/accounts/url-endpoints", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointListResponse, + ) + + def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """**Note:** This API is currently in beta. + + + Deletes the URL‑endpoint identified by `id`. You cannot delete the default + URL‑endpoint created by ImageKit during account creation. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v1/accounts/url-endpoints/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def get( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Retrieves the URL‑endpoint identified by `id`. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return self._get( + f"/v1/accounts/url-endpoints/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + +class AsyncURLEndpointsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncURLEndpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncURLEndpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncURLEndpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncURLEndpointsResourceWithStreamingResponse(self) + + async def create( + self, + *, + description: str, + origins: SequenceNotStr[str] | Omit = omit, + url_prefix: str | Omit = omit, + url_rewriter: url_endpoint_create_params.URLRewriter | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Creates a new URL‑endpoint and returns the resulting object. + + Args: + description: Description of the URL endpoint. + + origins: Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + + url_prefix: Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + + url_rewriter: Configuration for third-party URL rewriting. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/accounts/url-endpoints", + body=await async_maybe_transform( + { + "description": description, + "origins": origins, + "url_prefix": url_prefix, + "url_rewriter": url_rewriter, + }, + url_endpoint_create_params.URLEndpointCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + async def update( + self, + id: str, + *, + description: str, + origins: SequenceNotStr[str] | Omit = omit, + url_prefix: str | Omit = omit, + url_rewriter: url_endpoint_update_params.URLRewriter | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Updates the URL‑endpoint identified by `id` and returns the updated object. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. + + description: Description of the URL endpoint. 
+ + origins: Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + + url_prefix: Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + + url_rewriter: Configuration for third-party URL rewriting. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return await self._put( + f"/v1/accounts/url-endpoints/{id}", + body=await async_maybe_transform( + { + "description": description, + "origins": origins, + "url_prefix": url_prefix, + "url_rewriter": url_rewriter, + }, + url_endpoint_update_params.URLEndpointUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointListResponse: + """**Note:** This API is currently in beta. + + + Returns an array of all URL‑endpoints configured including the default + URL-endpoint generated by ImageKit during account creation. + """ + return await self._get( + "/v1/accounts/url-endpoints", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointListResponse, + ) + + async def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """**Note:** This API is currently in beta. + + + Deletes the URL‑endpoint identified by `id`. You cannot delete the default + URL‑endpoint created by ImageKit during account creation. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v1/accounts/url-endpoints/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def get( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> URLEndpointResponse: + """**Note:** This API is currently in beta. + + + Retrieves the URL‑endpoint identified by `id`. + + Args: + id: Unique identifier for the URL-endpoint. This is generated by ImageKit when you + create a new URL-endpoint. For the default URL-endpoint, this is always + `default`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return await self._get( + f"/v1/accounts/url-endpoints/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=URLEndpointResponse, + ) + + +class URLEndpointsResourceWithRawResponse: + def __init__(self, url_endpoints: URLEndpointsResource) -> None: + self._url_endpoints = url_endpoints + + self.create = to_raw_response_wrapper( + url_endpoints.create, + ) + self.update = to_raw_response_wrapper( + url_endpoints.update, + ) + self.list = to_raw_response_wrapper( + url_endpoints.list, + ) + self.delete = to_raw_response_wrapper( + url_endpoints.delete, + ) + self.get = to_raw_response_wrapper( + url_endpoints.get, + ) + + +class AsyncURLEndpointsResourceWithRawResponse: + def __init__(self, url_endpoints: AsyncURLEndpointsResource) -> None: + self._url_endpoints = url_endpoints + + self.create = async_to_raw_response_wrapper( + url_endpoints.create, + ) + self.update = async_to_raw_response_wrapper( + url_endpoints.update, + ) + self.list = async_to_raw_response_wrapper( + url_endpoints.list, + ) + self.delete = async_to_raw_response_wrapper( + url_endpoints.delete, + ) + self.get = async_to_raw_response_wrapper( + url_endpoints.get, + ) + + +class URLEndpointsResourceWithStreamingResponse: + def __init__(self, url_endpoints: URLEndpointsResource) -> None: + self._url_endpoints = url_endpoints + + self.create = to_streamed_response_wrapper( + url_endpoints.create, + ) + self.update = to_streamed_response_wrapper( + url_endpoints.update, + ) + self.list = to_streamed_response_wrapper( + url_endpoints.list, + ) + self.delete = to_streamed_response_wrapper( + url_endpoints.delete, + ) + self.get = to_streamed_response_wrapper( + url_endpoints.get, + ) + + +class 
AsyncURLEndpointsResourceWithStreamingResponse: + def __init__(self, url_endpoints: AsyncURLEndpointsResource) -> None: + self._url_endpoints = url_endpoints + + self.create = async_to_streamed_response_wrapper( + url_endpoints.create, + ) + self.update = async_to_streamed_response_wrapper( + url_endpoints.update, + ) + self.list = async_to_streamed_response_wrapper( + url_endpoints.list, + ) + self.delete = async_to_streamed_response_wrapper( + url_endpoints.delete, + ) + self.get = async_to_streamed_response_wrapper( + url_endpoints.get, + ) diff --git a/src/imagekitio/resources/accounts/usage.py b/src/imagekitio/resources/accounts/usage.py new file mode 100644 index 00000000..b35d3c9b --- /dev/null +++ b/src/imagekitio/resources/accounts/usage.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from datetime import date + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.accounts import usage_get_params +from ...types.accounts.usage_get_response import UsageGetResponse + +__all__ = ["UsageResource", "AsyncUsageResource"] + + +class UsageResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> UsageResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return UsageResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UsageResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return UsageResourceWithStreamingResponse(self) + + def get( + self, + *, + end_date: Union[str, date], + start_date: Union[str, date], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> UsageGetResponse: + """Get the account usage information between two dates. + + Note that the API response + includes data from the start date while excluding data from the end date. In + other words, the data covers the period starting from the specified start date + up to, but not including, the end date. + + Args: + end_date: Specify a `endDate` in `YYYY-MM-DD` format. It should be after the `startDate`. + The difference between `startDate` and `endDate` should be less than 90 days. + + start_date: Specify a `startDate` in `YYYY-MM-DD` format. It should be before the `endDate`. + The difference between `startDate` and `endDate` should be less than 90 days. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v1/accounts/usage", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "end_date": end_date, + "start_date": start_date, + }, + usage_get_params.UsageGetParams, + ), + ), + cast_to=UsageGetResponse, + ) + + +class AsyncUsageResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncUsageResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncUsageResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncUsageResourceWithStreamingResponse(self) + + async def get( + self, + *, + end_date: Union[str, date], + start_date: Union[str, date], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> UsageGetResponse: + """Get the account usage information between two dates. + + Note that the API response + includes data from the start date while excluding data from the end date. In + other words, the data covers the period starting from the specified start date + up to, but not including, the end date. + + Args: + end_date: Specify a `endDate` in `YYYY-MM-DD` format. It should be after the `startDate`. + The difference between `startDate` and `endDate` should be less than 90 days. + + start_date: Specify a `startDate` in `YYYY-MM-DD` format. It should be before the `endDate`. + The difference between `startDate` and `endDate` should be less than 90 days. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v1/accounts/usage", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "end_date": end_date, + "start_date": start_date, + }, + usage_get_params.UsageGetParams, + ), + ), + cast_to=UsageGetResponse, + ) + + +class UsageResourceWithRawResponse: + def __init__(self, usage: UsageResource) -> None: + self._usage = usage + + self.get = to_raw_response_wrapper( + usage.get, + ) + + +class AsyncUsageResourceWithRawResponse: + def __init__(self, usage: AsyncUsageResource) -> None: + self._usage = usage + + self.get = async_to_raw_response_wrapper( + usage.get, + ) + + +class UsageResourceWithStreamingResponse: + def __init__(self, usage: UsageResource) -> None: + self._usage = usage + + self.get = to_streamed_response_wrapper( + usage.get, + ) + + +class AsyncUsageResourceWithStreamingResponse: + def __init__(self, usage: AsyncUsageResource) -> None: + self._usage = usage + + self.get = async_to_streamed_response_wrapper( + usage.get, + ) diff --git a/src/imagekitio/resources/assets.py b/src/imagekitio/resources/assets.py new file mode 100644 index 00000000..1d239da3 --- /dev/null +++ b/src/imagekitio/resources/assets.py @@ -0,0 +1,325 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal + +import httpx + +from ..types import asset_list_params +from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.asset_list_response import AssetListResponse + +__all__ = ["AssetsResource", "AsyncAssetsResource"] + + +class AssetsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AssetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AssetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AssetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AssetsResourceWithStreamingResponse(self) + + def list( + self, + *, + file_type: Literal["all", "image", "non-image"] | Omit = omit, + limit: int | Omit = omit, + path: str | Omit = omit, + search_query: str | Omit = omit, + skip: int | Omit = omit, + sort: Literal[ + "ASC_NAME", + "DESC_NAME", + "ASC_CREATED", + "DESC_CREATED", + "ASC_UPDATED", + "DESC_UPDATED", + "ASC_HEIGHT", + "DESC_HEIGHT", + "ASC_WIDTH", + "DESC_WIDTH", + "ASC_SIZE", + "DESC_SIZE", + "ASC_RELEVANCE", + "DESC_RELEVANCE", + ] + | Omit = omit, + type: Literal["file", "file-version", "folder", "all"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AssetListResponse: + """ + This API can list all the uploaded files and folders in your ImageKit.io media + library. In addition, you can fine-tune your query by specifying various filters + by generating a query string in a Lucene-like syntax and provide this generated + string as the value of the `searchQuery`. + + Args: + file_type: Filter results by file type. + + - `all` — include all file types + - `image` — include only image files + - `non-image` — include only non-image files (e.g., JS, CSS, video) + + limit: The maximum number of results to return in response. + + path: Folder path if you want to limit the search within a specific folder. For + example, `/sales-banner/` will only search in folder sales-banner. + + Note : If your use case involves searching within a folder as well as its + subfolders, you can use `path` parameter in `searchQuery` with appropriate + operator. Checkout + [Supported parameters](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#supported-parameters) + for more information. + + search_query: Query string in a Lucene-like query language e.g. `createdAt > "7d"`. + + Note : When the searchQuery parameter is present, the following query parameters + will have no effect on the result: + + 1. `tags` + 2. `type` + 3. `name` + + [Learn more](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#advanced-search-queries) + from examples. + + skip: The number of results to skip before returning results. + + sort: Sort the results by one of the supported fields in ascending or descending + order. + + type: Filter results by asset type. 
+ + - `file` — returns only files + - `file-version` — returns specific file versions + - `folder` — returns only folders + - `all` — returns both files and folders (excludes `file-version`) + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v1/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "file_type": file_type, + "limit": limit, + "path": path, + "search_query": search_query, + "skip": skip, + "sort": sort, + "type": type, + }, + asset_list_params.AssetListParams, + ), + ), + cast_to=AssetListResponse, + ) + + +class AsyncAssetsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAssetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncAssetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAssetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncAssetsResourceWithStreamingResponse(self) + + async def list( + self, + *, + file_type: Literal["all", "image", "non-image"] | Omit = omit, + limit: int | Omit = omit, + path: str | Omit = omit, + search_query: str | Omit = omit, + skip: int | Omit = omit, + sort: Literal[ + "ASC_NAME", + "DESC_NAME", + "ASC_CREATED", + "DESC_CREATED", + "ASC_UPDATED", + "DESC_UPDATED", + "ASC_HEIGHT", + "DESC_HEIGHT", + "ASC_WIDTH", + "DESC_WIDTH", + "ASC_SIZE", + "DESC_SIZE", + "ASC_RELEVANCE", + "DESC_RELEVANCE", + ] + | Omit = omit, + type: Literal["file", "file-version", "folder", "all"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AssetListResponse: + """ + This API can list all the uploaded files and folders in your ImageKit.io media + library. In addition, you can fine-tune your query by specifying various filters + by generating a query string in a Lucene-like syntax and provide this generated + string as the value of the `searchQuery`. + + Args: + file_type: Filter results by file type. + + - `all` — include all file types + - `image` — include only image files + - `non-image` — include only non-image files (e.g., JS, CSS, video) + + limit: The maximum number of results to return in response. + + path: Folder path if you want to limit the search within a specific folder. For + example, `/sales-banner/` will only search in folder sales-banner. 
+ + Note : If your use case involves searching within a folder as well as its + subfolders, you can use `path` parameter in `searchQuery` with appropriate + operator. Checkout + [Supported parameters](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#supported-parameters) + for more information. + + search_query: Query string in a Lucene-like query language e.g. `createdAt > "7d"`. + + Note : When the searchQuery parameter is present, the following query parameters + will have no effect on the result: + + 1. `tags` + 2. `type` + 3. `name` + + [Learn more](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#advanced-search-queries) + from examples. + + skip: The number of results to skip before returning results. + + sort: Sort the results by one of the supported fields in ascending or descending + order. + + type: Filter results by asset type. + + - `file` — returns only files + - `file-version` — returns specific file versions + - `folder` — returns only folders + - `all` — returns both files and folders (excludes `file-version`) + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v1/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "file_type": file_type, + "limit": limit, + "path": path, + "search_query": search_query, + "skip": skip, + "sort": sort, + "type": type, + }, + asset_list_params.AssetListParams, + ), + ), + cast_to=AssetListResponse, + ) + + +class AssetsResourceWithRawResponse: + def __init__(self, assets: AssetsResource) -> None: + self._assets = assets + + self.list = to_raw_response_wrapper( + assets.list, + ) + + +class AsyncAssetsResourceWithRawResponse: + def __init__(self, assets: AsyncAssetsResource) -> None: + self._assets = assets + + self.list = async_to_raw_response_wrapper( + assets.list, + ) + + +class AssetsResourceWithStreamingResponse: + def __init__(self, assets: AssetsResource) -> None: + self._assets = assets + + self.list = to_streamed_response_wrapper( + assets.list, + ) + + +class AsyncAssetsResourceWithStreamingResponse: + def __init__(self, assets: AsyncAssetsResource) -> None: + self._assets = assets + + self.list = async_to_streamed_response_wrapper( + assets.list, + ) diff --git a/src/imagekitio/resources/beta/__init__.py b/src/imagekitio/resources/beta/__init__.py new file mode 100644 index 00000000..08cb3fbc --- /dev/null +++ b/src/imagekitio/resources/beta/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .v2 import ( + V2Resource, + AsyncV2Resource, + V2ResourceWithRawResponse, + AsyncV2ResourceWithRawResponse, + V2ResourceWithStreamingResponse, + AsyncV2ResourceWithStreamingResponse, +) +from .beta import ( + BetaResource, + AsyncBetaResource, + BetaResourceWithRawResponse, + AsyncBetaResourceWithRawResponse, + BetaResourceWithStreamingResponse, + AsyncBetaResourceWithStreamingResponse, +) + +__all__ = [ + "V2Resource", + "AsyncV2Resource", + "V2ResourceWithRawResponse", + "AsyncV2ResourceWithRawResponse", + "V2ResourceWithStreamingResponse", + "AsyncV2ResourceWithStreamingResponse", + "BetaResource", + "AsyncBetaResource", + "BetaResourceWithRawResponse", + "AsyncBetaResourceWithRawResponse", + "BetaResourceWithStreamingResponse", + "AsyncBetaResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/beta/beta.py b/src/imagekitio/resources/beta/beta.py new file mode 100644 index 00000000..01e43aa0 --- /dev/null +++ b/src/imagekitio/resources/beta/beta.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .v2.v2 import ( + V2Resource, + AsyncV2Resource, + V2ResourceWithRawResponse, + AsyncV2ResourceWithRawResponse, + V2ResourceWithStreamingResponse, + AsyncV2ResourceWithStreamingResponse, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["BetaResource", "AsyncBetaResource"] + + +class BetaResource(SyncAPIResource): + @cached_property + def v2(self) -> V2Resource: + return V2Resource(self._client) + + @cached_property + def with_raw_response(self) -> BetaResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return BetaResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> BetaResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return BetaResourceWithStreamingResponse(self) + + +class AsyncBetaResource(AsyncAPIResource): + @cached_property + def v2(self) -> AsyncV2Resource: + return AsyncV2Resource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncBetaResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncBetaResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncBetaResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncBetaResourceWithStreamingResponse(self) + + +class BetaResourceWithRawResponse: + def __init__(self, beta: BetaResource) -> None: + self._beta = beta + + @cached_property + def v2(self) -> V2ResourceWithRawResponse: + return V2ResourceWithRawResponse(self._beta.v2) + + +class AsyncBetaResourceWithRawResponse: + def __init__(self, beta: AsyncBetaResource) -> None: + self._beta = beta + + @cached_property + def v2(self) -> AsyncV2ResourceWithRawResponse: + return AsyncV2ResourceWithRawResponse(self._beta.v2) + + +class BetaResourceWithStreamingResponse: + def __init__(self, beta: BetaResource) -> None: + self._beta = beta + + @cached_property + def v2(self) -> V2ResourceWithStreamingResponse: + return V2ResourceWithStreamingResponse(self._beta.v2) + + +class AsyncBetaResourceWithStreamingResponse: + def __init__(self, beta: AsyncBetaResource) -> None: + self._beta = beta + + @cached_property + def v2(self) -> AsyncV2ResourceWithStreamingResponse: + return AsyncV2ResourceWithStreamingResponse(self._beta.v2) diff --git a/src/imagekitio/resources/beta/v2/__init__.py b/src/imagekitio/resources/beta/v2/__init__.py new file mode 100644 index 00000000..6f4e8f3e --- /dev/null +++ b/src/imagekitio/resources/beta/v2/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .v2 import ( + V2Resource, + AsyncV2Resource, + V2ResourceWithRawResponse, + AsyncV2ResourceWithRawResponse, + V2ResourceWithStreamingResponse, + AsyncV2ResourceWithStreamingResponse, +) +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) + +__all__ = [ + "FilesResource", + "AsyncFilesResource", + "FilesResourceWithRawResponse", + "AsyncFilesResourceWithRawResponse", + "FilesResourceWithStreamingResponse", + "AsyncFilesResourceWithStreamingResponse", + "V2Resource", + "AsyncV2Resource", + "V2ResourceWithRawResponse", + "AsyncV2ResourceWithRawResponse", + "V2ResourceWithStreamingResponse", + "AsyncV2ResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/beta/v2/files.py b/src/imagekitio/resources/beta/v2/files.py new file mode 100644 index 00000000..03b198fd --- /dev/null +++ b/src/imagekitio/resources/beta/v2/files.py @@ -0,0 +1,580 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
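+
+# Illustrative usage sketch (comments only; not part of the generated module).
+# The attribute path `client.beta.v2.files` is an assumption based on this
+# file's location under `resources/beta/v2/`; the keyword arguments mirror the
+# `upload` method defined below:
+#
+#     with open("photo.jpg", "rb") as f:
+#         response = client.beta.v2.files.upload(file=f, file_name="photo.jpg")
+#     print(response)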
+ +from __future__ import annotations + +from typing import Dict, List, Mapping, cast +from typing_extensions import Literal + +import httpx + +from ...._types import ( + Body, + Omit, + Query, + Headers, + NotGiven, + FileTypes, + SequenceNotStr, + omit, + not_given, +) +from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.beta.v2 import file_upload_params +from ....lib.serialization_utils import serialize_upload_options +from ....types.shared_params.extensions import Extensions +from ....types.beta.v2.file_upload_response import FileUploadResponse + +__all__ = ["FilesResource", "AsyncFilesResource"] + + +class FilesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return FilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return FilesResourceWithStreamingResponse(self) + + def upload( + self, + *, + file: FileTypes, + file_name: str, + token: str | Omit = omit, + checks: str | Omit = omit, + custom_coordinates: str | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + folder: str | Omit = omit, + is_private_file: bool | Omit = omit, + is_published: bool | Omit = omit, + overwrite_ai_tags: bool | Omit = omit, + overwrite_custom_metadata: bool | Omit = omit, + overwrite_file: bool | Omit = omit, + overwrite_tags: bool | Omit = omit, + response_fields: List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ] + | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + transformation: file_upload_params.Transformation | Omit = omit, + use_unique_file_name: bool | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUploadResponse: + """The V2 API enhances security by verifying the entire payload using JWT. + + This API + is in beta. + + ImageKit.io allows you to upload files directly from both the server and client + sides. For server-side uploads, private API key authentication is used. For + client-side uploads, generate a one-time `token` from your secure backend using + private API. 
+ [Learn more](/docs/api-reference/upload-file/upload-file-v2#how-to-implement-secure-client-side-file-upload) + about how to implement secure client-side file upload. + + **File size limit** \\ + On the free plan, the maximum upload file sizes are 20MB for images, audio, and raw + files, and 100MB for videos. On the paid plan, these limits increase to 40MB for + images, audio, and raw files, and 2GB for videos. These limits can be further increased + with higher-tier plans. + + **Version limit** \\ + A file can have a maximum of 100 versions. + + **Demo applications** + + - A full-fledged + [upload widget using Uppy](https://github.com/imagekit-samples/uppy-uploader), + supporting file selections from local storage, URL, Dropbox, Google Drive, + Instagram, and more. + - [Quick start guides](/docs/quick-start-guides) for various frameworks and + technologies. + + Args: + file: + The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. + + file_name: The name with which the file has to be uploaded. + + token: This is the client-generated JSON Web Token (JWT). The ImageKit.io server uses + it to authenticate and check that the upload request parameters have not been + tampered with after the token has been generated. Learn how to create the token + on the page below. This field is only required for authentication when uploading + a file from the client side. + + **Note**: Sending a JWT that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new token. + + **⚠️Warning**: JWT must be generated on the server-side because it is generated + using your account's private API key. This field is required for authentication + when uploading a file from the client-side. + + checks: Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file-v2#upload-api-checks). + + custom_coordinates: Define an important area in the image. This is only relevant for image type + files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + + custom_metadata: JSON key-value pairs to associate with the asset. Create the custom metadata + fields before setting these values. + + description: Optional text to describe the contents of the file. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + folder: The folder path in which the image has to be uploaded. If the folder(s) didn't + exist before, a new folder(s) is created. Using multiple `/` creates a nested + folder. + + is_private_file: Whether to mark the file as private or not. + + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + + is_published: Whether to upload file as published or not. 
+ + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + + overwrite_ai_tags: If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. + + overwrite_custom_metadata: If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + + overwrite_file: If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + + overwrite_tags: If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + + response_fields: Array of response field keys to include in the API response body. + + tags: Set the tags while uploading the file. Provide an array of tag strings (e.g. + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + + transformation: Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + + use_unique_file_name: Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "file_name": file_name, + "token": token, + "checks": checks, + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "extensions": extensions, + "folder": folder, + "is_private_file": is_private_file, + "is_published": is_published, + "overwrite_ai_tags": overwrite_ai_tags, + "overwrite_custom_metadata": overwrite_custom_metadata, + "overwrite_file": overwrite_file, + "overwrite_tags": overwrite_tags, + "response_fields": response_fields, + "tags": tags, + "transformation": transformation, + "use_unique_file_name": use_unique_file_name, + "webhook_url": webhook_url, + } + ) + body = serialize_upload_options(body) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/api/v2/files/upload" + if self._client._base_url_overridden + else "https://upload.imagekit.io/api/v2/files/upload", + body=maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUploadResponse, + ) + + +class AsyncFilesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncFilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncFilesResourceWithStreamingResponse(self) + + async def upload( + self, + *, + file: FileTypes, + file_name: str, + token: str | Omit = omit, + checks: str | Omit = omit, + custom_coordinates: str | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + folder: str | Omit = omit, + is_private_file: bool | Omit = omit, + is_published: bool | Omit = omit, + overwrite_ai_tags: bool | Omit = omit, + overwrite_custom_metadata: bool | Omit = omit, + overwrite_file: bool | Omit = omit, + overwrite_tags: bool | Omit = omit, + response_fields: List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ] + | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + transformation: file_upload_params.Transformation | Omit = omit, + use_unique_file_name: bool | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUploadResponse: + """The V2 API enhances security by verifying the entire payload using JWT. + + This API + is in beta. + + ImageKit.io allows you to upload files directly from both the server and client + sides. For server-side uploads, private API key authentication is used. For + client-side uploads, generate a one-time `token` from your secure backend using + private API. + [Learn more](/docs/api-reference/upload-file/upload-file-v2#how-to-implement-secure-client-side-file-upload) + about how to implement secure client-side file upload. + + **File size limit** \\ + On the free plan, the maximum upload file sizes are 20MB for images, audio, and raw + files, and 100MB for videos. On the paid plan, these limits increase to 40MB for + images, audio, and raw files, and 2GB for videos. These limits can be further increased + with higher-tier plans. + + **Version limit** \\ + A file can have a maximum of 100 versions. + + **Demo applications** + + - A full-fledged + [upload widget using Uppy](https://github.com/imagekit-samples/uppy-uploader), + supporting file selections from local storage, URL, Dropbox, Google Drive, + Instagram, and more. + - [Quick start guides](/docs/quick-start-guides) for various frameworks and + technologies. + + Args: + file: + The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. + + file_name: The name with which the file has to be uploaded. + + token: This is the client-generated JSON Web Token (JWT). The ImageKit.io server uses + it to authenticate and check that the upload request parameters have not been + tampered with after the token has been generated. 
Learn how to create the token + on the page below. This field is only required for authentication when uploading + a file from the client side. + + **Note**: Sending a JWT that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new token. + + **⚠️Warning**: JWT must be generated on the server-side because it is generated + using your account's private API key. This field is required for authentication + when uploading a file from the client-side. + + checks: Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file-v2#upload-api-checks). + + custom_coordinates: Define an important area in the image. This is only relevant for image type + files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + + custom_metadata: JSON key-value pairs to associate with the asset. Create the custom metadata + fields before setting these values. + + description: Optional text to describe the contents of the file. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + folder: The folder path in which the image has to be uploaded. If the folder(s) didn't + exist before, a new folder(s) is created. Using multiple `/` creates a nested + folder. + + is_private_file: Whether to mark the file as private or not. + + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + + is_published: Whether to upload file as published or not. + + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + + overwrite_ai_tags: If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. + + overwrite_custom_metadata: If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + + overwrite_file: If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + + overwrite_tags: If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + + response_fields: Array of response field keys to include in the API response body. + + tags: Set the tags while uploading the file. Provide an array of tag strings (e.g. + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + + transformation: Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. 
+ Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + + use_unique_file_name: Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "file_name": file_name, + "token": token, + "checks": checks, + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "extensions": extensions, + "folder": folder, + "is_private_file": is_private_file, + "is_published": is_published, + "overwrite_ai_tags": overwrite_ai_tags, + "overwrite_custom_metadata": overwrite_custom_metadata, + "overwrite_file": overwrite_file, + "overwrite_tags": overwrite_tags, + "response_fields": response_fields, + "tags": tags, + "transformation": transformation, + "use_unique_file_name": use_unique_file_name, + "webhook_url": webhook_url, + } + ) + body = serialize_upload_options(body) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/api/v2/files/upload" + if self._client._base_url_overridden + else "https://upload.imagekit.io/api/v2/files/upload", + body=await async_maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUploadResponse, + ) + + +class FilesResourceWithRawResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.upload = to_raw_response_wrapper( + files.upload, + ) + + +class AsyncFilesResourceWithRawResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.upload = async_to_raw_response_wrapper( + files.upload, + ) + + +class FilesResourceWithStreamingResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.upload = to_streamed_response_wrapper( + files.upload, + ) + + +class AsyncFilesResourceWithStreamingResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.upload = async_to_streamed_response_wrapper( + files.upload, + ) diff --git a/src/imagekitio/resources/beta/v2/v2.py b/src/imagekitio/resources/beta/v2/v2.py new file mode 100644 index 00000000..2fb98309 --- /dev/null +++ b/src/imagekitio/resources/beta/v2/v2.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["V2Resource", "AsyncV2Resource"] + + +class V2Resource(SyncAPIResource): + @cached_property + def files(self) -> FilesResource: + return FilesResource(self._client) + + @cached_property + def with_raw_response(self) -> V2ResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return V2ResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> V2ResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return V2ResourceWithStreamingResponse(self) + + +class AsyncV2Resource(AsyncAPIResource): + @cached_property + def files(self) -> AsyncFilesResource: + return AsyncFilesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncV2ResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
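The asynchronous resource mirrors the synchronous one. A sketch assuming the package also exposes an `AsyncImageKit` client, which is typical for Stainless-generated SDKs but not confirmed by this diff:

# Hypothetical async usage; `AsyncImageKit` and its constructor are assumptions.
import asyncio

from imagekitio import AsyncImageKit

async def main() -> None:
    client = AsyncImageKit(private_key="your_private_key")  # assumed constructor
    result = await client.beta.v2.files.upload(
        file="https://example.com/sample.jpg",  # the server must be able to fetch this URL within 8 seconds
        file_name="sample.jpg",
    )
    print(result)

asyncio.run(main())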
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncV2ResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncV2ResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncV2ResourceWithStreamingResponse(self) + + +class V2ResourceWithRawResponse: + def __init__(self, v2: V2Resource) -> None: + self._v2 = v2 + + @cached_property + def files(self) -> FilesResourceWithRawResponse: + return FilesResourceWithRawResponse(self._v2.files) + + +class AsyncV2ResourceWithRawResponse: + def __init__(self, v2: AsyncV2Resource) -> None: + self._v2 = v2 + + @cached_property + def files(self) -> AsyncFilesResourceWithRawResponse: + return AsyncFilesResourceWithRawResponse(self._v2.files) + + +class V2ResourceWithStreamingResponse: + def __init__(self, v2: V2Resource) -> None: + self._v2 = v2 + + @cached_property + def files(self) -> FilesResourceWithStreamingResponse: + return FilesResourceWithStreamingResponse(self._v2.files) + + +class AsyncV2ResourceWithStreamingResponse: + def __init__(self, v2: AsyncV2Resource) -> None: + self._v2 = v2 + + @cached_property + def files(self) -> AsyncFilesResourceWithStreamingResponse: + return AsyncFilesResourceWithStreamingResponse(self._v2.files) diff --git a/src/imagekitio/resources/cache/__init__.py b/src/imagekitio/resources/cache/__init__.py new file mode 100644 index 00000000..f7e5a700 --- /dev/null +++ b/src/imagekitio/resources/cache/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .cache import ( + CacheResource, + AsyncCacheResource, + CacheResourceWithRawResponse, + AsyncCacheResourceWithRawResponse, + CacheResourceWithStreamingResponse, + AsyncCacheResourceWithStreamingResponse, +) +from .invalidation import ( + InvalidationResource, + AsyncInvalidationResource, + InvalidationResourceWithRawResponse, + AsyncInvalidationResourceWithRawResponse, + InvalidationResourceWithStreamingResponse, + AsyncInvalidationResourceWithStreamingResponse, +) + +__all__ = [ + "InvalidationResource", + "AsyncInvalidationResource", + "InvalidationResourceWithRawResponse", + "AsyncInvalidationResourceWithRawResponse", + "InvalidationResourceWithStreamingResponse", + "AsyncInvalidationResourceWithStreamingResponse", + "CacheResource", + "AsyncCacheResource", + "CacheResourceWithRawResponse", + "AsyncCacheResourceWithRawResponse", + "CacheResourceWithStreamingResponse", + "AsyncCacheResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/cache/cache.py b/src/imagekitio/resources/cache/cache.py new file mode 100644 index 00000000..936b39f9 --- /dev/null +++ b/src/imagekitio/resources/cache/cache.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .invalidation import ( + InvalidationResource, + AsyncInvalidationResource, + InvalidationResourceWithRawResponse, + AsyncInvalidationResourceWithRawResponse, + InvalidationResourceWithStreamingResponse, + AsyncInvalidationResourceWithStreamingResponse, +) + +__all__ = ["CacheResource", "AsyncCacheResource"] + + +class CacheResource(SyncAPIResource): + @cached_property + def invalidation(self) -> InvalidationResource: + return InvalidationResource(self._client) + + @cached_property + def with_raw_response(self) -> CacheResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return CacheResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CacheResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return CacheResourceWithStreamingResponse(self) + + +class AsyncCacheResource(AsyncAPIResource): + @cached_property + def invalidation(self) -> AsyncInvalidationResource: + return AsyncInvalidationResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncCacheResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncCacheResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCacheResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
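The `with_raw_response` and `with_streaming_response` properties documented in these classes wrap every method on a resource. A sketch using the `upload` method defined earlier; the accessor path, client constructor, and the `parse()`/`read()` helpers follow the usual Stainless pattern and are assumptions, not confirmed by this diff.

from imagekitio import ImageKit

client = ImageKit(private_key="your_private_key")  # assumed constructor

# Raw response: inspect headers, then parse the body into the usual model.
raw = client.beta.v2.files.with_raw_response.upload(
    file="https://example.com/sample.jpg",
    file_name="sample.jpg",
)
print(raw.headers)
upload = raw.parse()  # assumed to return the parsed FileUploadResponse, per the Stainless convention

# Streaming response: the body is not read eagerly; consume it inside the context manager.
with client.beta.v2.files.with_streaming_response.upload(
    file="https://example.com/sample.jpg",
    file_name="sample.jpg",
) as response:
    print(response.headers)
    body = response.read()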
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncCacheResourceWithStreamingResponse(self) + + +class CacheResourceWithRawResponse: + def __init__(self, cache: CacheResource) -> None: + self._cache = cache + + @cached_property + def invalidation(self) -> InvalidationResourceWithRawResponse: + return InvalidationResourceWithRawResponse(self._cache.invalidation) + + +class AsyncCacheResourceWithRawResponse: + def __init__(self, cache: AsyncCacheResource) -> None: + self._cache = cache + + @cached_property + def invalidation(self) -> AsyncInvalidationResourceWithRawResponse: + return AsyncInvalidationResourceWithRawResponse(self._cache.invalidation) + + +class CacheResourceWithStreamingResponse: + def __init__(self, cache: CacheResource) -> None: + self._cache = cache + + @cached_property + def invalidation(self) -> InvalidationResourceWithStreamingResponse: + return InvalidationResourceWithStreamingResponse(self._cache.invalidation) + + +class AsyncCacheResourceWithStreamingResponse: + def __init__(self, cache: AsyncCacheResource) -> None: + self._cache = cache + + @cached_property + def invalidation(self) -> AsyncInvalidationResourceWithStreamingResponse: + return AsyncInvalidationResourceWithStreamingResponse(self._cache.invalidation) diff --git a/src/imagekitio/resources/cache/invalidation.py b/src/imagekitio/resources/cache/invalidation.py new file mode 100644 index 00000000..9c95dc82 --- /dev/null +++ b/src/imagekitio/resources/cache/invalidation.py @@ -0,0 +1,252 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.cache import invalidation_create_params +from ..._base_client import make_request_options +from ...types.cache.invalidation_get_response import InvalidationGetResponse +from ...types.cache.invalidation_create_response import InvalidationCreateResponse + +__all__ = ["InvalidationResource", "AsyncInvalidationResource"] + + +class InvalidationResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> InvalidationResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return InvalidationResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InvalidationResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return InvalidationResourceWithStreamingResponse(self) + + def create( + self, + *, + url: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InvalidationCreateResponse: + """This API will purge CDN cache and ImageKit.io's internal cache for a file. + + Note: + Purge cache is an asynchronous process and it may take some time to reflect the + changes. + + Args: + url: The full URL of the file to be purged. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/purge", + body=maybe_transform({"url": url}, invalidation_create_params.InvalidationCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InvalidationCreateResponse, + ) + + def get( + self, + request_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InvalidationGetResponse: + """ + This API returns the status of a purge cache request. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not request_id: + raise ValueError(f"Expected a non-empty value for `request_id` but received {request_id!r}") + return self._get( + f"/v1/files/purge/{request_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InvalidationGetResponse, + ) + + +class AsyncInvalidationResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncInvalidationResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncInvalidationResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInvalidationResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncInvalidationResourceWithStreamingResponse(self) + + async def create( + self, + *, + url: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
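A sketch of purging a URL and then polling the purge status with the `create` and `get` methods defined above. The `client.cache.invalidation` path and client construction are assumptions based on the module layout, and the `request_id` attribute on the create response is likewise assumed.

from imagekitio import ImageKit

client = ImageKit(private_key="your_private_key")  # assumed constructor

purge = client.cache.invalidation.create(
    url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg",
)
# Purging is asynchronous on ImageKit's side; poll the status using the returned request ID.
status = client.cache.invalidation.get(purge.request_id)  # `request_id` attribute name is assumed
print(status)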
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InvalidationCreateResponse: + """This API will purge CDN cache and ImageKit.io's internal cache for a file. + + Note: + Purge cache is an asynchronous process and it may take some time to reflect the + changes. + + Args: + url: The full URL of the file to be purged. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/purge", + body=await async_maybe_transform({"url": url}, invalidation_create_params.InvalidationCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InvalidationCreateResponse, + ) + + async def get( + self, + request_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InvalidationGetResponse: + """ + This API returns the status of a purge cache request. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not request_id: + raise ValueError(f"Expected a non-empty value for `request_id` but received {request_id!r}") + return await self._get( + f"/v1/files/purge/{request_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InvalidationGetResponse, + ) + + +class InvalidationResourceWithRawResponse: + def __init__(self, invalidation: InvalidationResource) -> None: + self._invalidation = invalidation + + self.create = to_raw_response_wrapper( + invalidation.create, + ) + self.get = to_raw_response_wrapper( + invalidation.get, + ) + + +class AsyncInvalidationResourceWithRawResponse: + def __init__(self, invalidation: AsyncInvalidationResource) -> None: + self._invalidation = invalidation + + self.create = async_to_raw_response_wrapper( + invalidation.create, + ) + self.get = async_to_raw_response_wrapper( + invalidation.get, + ) + + +class InvalidationResourceWithStreamingResponse: + def __init__(self, invalidation: InvalidationResource) -> None: + self._invalidation = invalidation + + self.create = to_streamed_response_wrapper( + invalidation.create, + ) + self.get = to_streamed_response_wrapper( + invalidation.get, + ) + + +class AsyncInvalidationResourceWithStreamingResponse: + def __init__(self, invalidation: AsyncInvalidationResource) -> None: + self._invalidation = invalidation + + self.create = async_to_streamed_response_wrapper( + invalidation.create, + ) + self.get = async_to_streamed_response_wrapper( + invalidation.get, + ) diff --git a/src/imagekitio/resources/custom_metadata_fields.py b/src/imagekitio/resources/custom_metadata_fields.py new file mode 100644 index 00000000..467e52ab --- 
/dev/null +++ b/src/imagekitio/resources/custom_metadata_fields.py @@ -0,0 +1,535 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..types import ( + custom_metadata_field_list_params, + custom_metadata_field_create_params, + custom_metadata_field_update_params, +) +from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.custom_metadata_field import CustomMetadataField +from ..types.custom_metadata_field_list_response import CustomMetadataFieldListResponse +from ..types.custom_metadata_field_delete_response import CustomMetadataFieldDeleteResponse + +__all__ = ["CustomMetadataFieldsResource", "AsyncCustomMetadataFieldsResource"] + + +class CustomMetadataFieldsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CustomMetadataFieldsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return CustomMetadataFieldsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CustomMetadataFieldsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return CustomMetadataFieldsResourceWithStreamingResponse(self) + + def create( + self, + *, + label: str, + name: str, + schema: custom_metadata_field_create_params.Schema, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataField: + """This API creates a new custom metadata field. + + Once a custom metadata field is + created either through this API or using the dashboard UI, its value can be set + on the assets. The value of a field for an asset can be set using the media + library UI or programmatically through upload or update assets API. + + Args: + label: Human readable name of the custom metadata field. This should be unique across + all non deleted custom metadata fields. This name is displayed as form field + label to the users while setting field value on an asset in the media library + UI. + + name: API name of the custom metadata field. This should be unique across all + (including deleted) custom metadata fields. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/customMetadataFields", + body=maybe_transform( + { + "label": label, + "name": name, + "schema": schema, + }, + custom_metadata_field_create_params.CustomMetadataFieldCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataField, + ) + + def update( + self, + id: str, + *, + label: str | Omit = omit, + schema: custom_metadata_field_update_params.Schema | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataField: + """ + This API updates the label or schema of an existing custom metadata field. + + Args: + label: Human readable name of the custom metadata field. This should be unique across + all non deleted custom metadata fields. This name is displayed as form field + label to the users while setting field value on an asset in the media library + UI. This parameter is required if `schema` is not provided. + + schema: An object that describes the rules for the custom metadata key. This parameter + is required if `label` is not provided. Note: `type` cannot be updated and will + be ignored if sent with the `schema`. The schema will be validated as per the + existing `type`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return self._patch( + f"/v1/customMetadataFields/{id}", + body=maybe_transform( + { + "label": label, + "schema": schema, + }, + custom_metadata_field_update_params.CustomMetadataFieldUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataField, + ) + + def list( + self, + *, + folder_path: str | Omit = omit, + include_deleted: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataFieldListResponse: + """This API returns the array of created custom metadata field objects. + + By default + the API returns only non deleted field objects, but you can include deleted + fields in the API response. + + You can also filter results by a specific folder path to retrieve custom + metadata fields applicable at that location. 
This path-specific filtering is + useful when using the **Path policy** feature to determine which custom metadata + fields are selected for a given path. + + Args: + folder_path: The folder path (e.g., `/path/to/folder`) for which to retrieve applicable + custom metadata fields. Useful for determining path-specific field selections + when the [Path policy](https://imagekit.io/docs/dam/path-policy) feature is in + use. + + include_deleted: Set it to `true` to include deleted field objects in the API response. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v1/customMetadataFields", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "folder_path": folder_path, + "include_deleted": include_deleted, + }, + custom_metadata_field_list_params.CustomMetadataFieldListParams, + ), + ), + cast_to=CustomMetadataFieldListResponse, + ) + + def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataFieldDeleteResponse: + """This API deletes a custom metadata field. + + Even after deleting a custom metadata + field, you cannot create any new custom metadata field with the same name. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return self._delete( + f"/v1/customMetadataFields/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataFieldDeleteResponse, + ) + + +class AsyncCustomMetadataFieldsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCustomMetadataFieldsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncCustomMetadataFieldsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCustomMetadataFieldsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncCustomMetadataFieldsResourceWithStreamingResponse(self) + + async def create( + self, + *, + label: str, + name: str, + schema: custom_metadata_field_create_params.Schema, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
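A sketch of the full custom metadata field lifecycle using the `create`, `update`, `list`, and `delete` methods above. The accessor path, client construction, the schema dictionary keys, the `id` attribute, and the iterability of the list response are assumptions based on ImageKit's public API rather than this diff.

from imagekitio import ImageKit

client = ImageKit(private_key="your_private_key")  # assumed constructor

field = client.custom_metadata_fields.create(
    label="Price",
    name="price",
    schema={"type": "Number"},  # additional schema rules (min/max, default, required) per the API docs
)

client.custom_metadata_fields.update(field.id, label="Unit price")

# Assuming the list response iterates over CustomMetadataField objects.
for item in client.custom_metadata_fields.list(include_deleted=False):
    print(item.name)

client.custom_metadata_fields.delete(field.id)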
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataField: + """This API creates a new custom metadata field. + + Once a custom metadata field is + created either through this API or using the dashboard UI, its value can be set + on the assets. The value of a field for an asset can be set using the media + library UI or programmatically through upload or update assets API. + + Args: + label: Human readable name of the custom metadata field. This should be unique across + all non deleted custom metadata fields. This name is displayed as form field + label to the users while setting field value on an asset in the media library + UI. + + name: API name of the custom metadata field. This should be unique across all + (including deleted) custom metadata fields. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/customMetadataFields", + body=await async_maybe_transform( + { + "label": label, + "name": name, + "schema": schema, + }, + custom_metadata_field_create_params.CustomMetadataFieldCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataField, + ) + + async def update( + self, + id: str, + *, + label: str | Omit = omit, + schema: custom_metadata_field_update_params.Schema | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataField: + """ + This API updates the label or schema of an existing custom metadata field. + + Args: + label: Human readable name of the custom metadata field. This should be unique across + all non deleted custom metadata fields. This name is displayed as form field + label to the users while setting field value on an asset in the media library + UI. This parameter is required if `schema` is not provided. + + schema: An object that describes the rules for the custom metadata key. This parameter + is required if `label` is not provided. Note: `type` cannot be updated and will + be ignored if sent with the `schema`. The schema will be validated as per the + existing `type`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return await self._patch( + f"/v1/customMetadataFields/{id}", + body=await async_maybe_transform( + { + "label": label, + "schema": schema, + }, + custom_metadata_field_update_params.CustomMetadataFieldUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataField, + ) + + async def list( + self, + *, + folder_path: str | Omit = omit, + include_deleted: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataFieldListResponse: + """This API returns the array of created custom metadata field objects. + + By default + the API returns only non deleted field objects, but you can include deleted + fields in the API response. + + You can also filter results by a specific folder path to retrieve custom + metadata fields applicable at that location. This path-specific filtering is + useful when using the **Path policy** feature to determine which custom metadata + fields are selected for a given path. + + Args: + folder_path: The folder path (e.g., `/path/to/folder`) for which to retrieve applicable + custom metadata fields. Useful for determining path-specific field selections + when the [Path policy](https://imagekit.io/docs/dam/path-policy) feature is in + use. + + include_deleted: Set it to `true` to include deleted field objects in the API response. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v1/customMetadataFields", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "folder_path": folder_path, + "include_deleted": include_deleted, + }, + custom_metadata_field_list_params.CustomMetadataFieldListParams, + ), + ), + cast_to=CustomMetadataFieldListResponse, + ) + + async def delete( + self, + id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CustomMetadataFieldDeleteResponse: + """This API deletes a custom metadata field. + + Even after deleting a custom metadata + field, you cannot create any new custom metadata field with the same name. 
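Every generated method also accepts the `extra_headers`, `extra_query`, `extra_body`, and `timeout` overrides described in these docstrings. A minimal sketch of passing them on a single call, with the client construction assumed and the header and query values as placeholders:

import httpx

from imagekitio import ImageKit

client = ImageKit(private_key="your_private_key")  # assumed constructor

fields = client.custom_metadata_fields.list(
    folder_path="/products",
    extra_headers={"X-Debug": "1"},             # placeholder header, merged into the request
    extra_query={"trace": "true"},              # placeholder query parameter
    timeout=httpx.Timeout(10.0, connect=2.0),   # overrides the client-level default for this call
)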
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not id: + raise ValueError(f"Expected a non-empty value for `id` but received {id!r}") + return await self._delete( + f"/v1/customMetadataFields/{id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CustomMetadataFieldDeleteResponse, + ) + + +class CustomMetadataFieldsResourceWithRawResponse: + def __init__(self, custom_metadata_fields: CustomMetadataFieldsResource) -> None: + self._custom_metadata_fields = custom_metadata_fields + + self.create = to_raw_response_wrapper( + custom_metadata_fields.create, + ) + self.update = to_raw_response_wrapper( + custom_metadata_fields.update, + ) + self.list = to_raw_response_wrapper( + custom_metadata_fields.list, + ) + self.delete = to_raw_response_wrapper( + custom_metadata_fields.delete, + ) + + +class AsyncCustomMetadataFieldsResourceWithRawResponse: + def __init__(self, custom_metadata_fields: AsyncCustomMetadataFieldsResource) -> None: + self._custom_metadata_fields = custom_metadata_fields + + self.create = async_to_raw_response_wrapper( + custom_metadata_fields.create, + ) + self.update = async_to_raw_response_wrapper( + custom_metadata_fields.update, + ) + self.list = async_to_raw_response_wrapper( + custom_metadata_fields.list, + ) + self.delete = async_to_raw_response_wrapper( + custom_metadata_fields.delete, + ) + + +class CustomMetadataFieldsResourceWithStreamingResponse: + def __init__(self, custom_metadata_fields: CustomMetadataFieldsResource) -> None: + self._custom_metadata_fields = custom_metadata_fields + + self.create = to_streamed_response_wrapper( + custom_metadata_fields.create, + ) + self.update = to_streamed_response_wrapper( + custom_metadata_fields.update, + ) + self.list = to_streamed_response_wrapper( + custom_metadata_fields.list, + ) + self.delete = to_streamed_response_wrapper( + custom_metadata_fields.delete, + ) + + +class AsyncCustomMetadataFieldsResourceWithStreamingResponse: + def __init__(self, custom_metadata_fields: AsyncCustomMetadataFieldsResource) -> None: + self._custom_metadata_fields = custom_metadata_fields + + self.create = async_to_streamed_response_wrapper( + custom_metadata_fields.create, + ) + self.update = async_to_streamed_response_wrapper( + custom_metadata_fields.update, + ) + self.list = async_to_streamed_response_wrapper( + custom_metadata_fields.list, + ) + self.delete = async_to_streamed_response_wrapper( + custom_metadata_fields.delete, + ) diff --git a/src/imagekitio/resources/dummy.py b/src/imagekitio/resources/dummy.py new file mode 100644 index 00000000..072340e3 --- /dev/null +++ b/src/imagekitio/resources/dummy.py @@ -0,0 +1,345 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..types import dummy_create_params +from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.shared_params.overlay import Overlay +from ..types.shared_params.extensions import Extensions +from ..types.shared_params.src_options import SrcOptions +from ..types.shared_params.base_overlay import BaseOverlay +from ..types.shared_params.text_overlay import TextOverlay +from ..types.shared.streaming_resolution import StreamingResolution +from ..types.shared_params.image_overlay import ImageOverlay +from ..types.shared_params.video_overlay import VideoOverlay +from ..types.shared_params.overlay_timing import OverlayTiming +from ..types.shared_params.transformation import Transformation +from ..types.shared.transformation_position import TransformationPosition +from ..types.shared_params.overlay_position import OverlayPosition +from ..types.shared_params.subtitle_overlay import SubtitleOverlay +from ..types.shared_params.solid_color_overlay import SolidColorOverlay +from ..types.shared_params.responsive_image_attributes import ResponsiveImageAttributes +from ..types.shared_params.text_overlay_transformation import TextOverlayTransformation +from ..types.shared_params.get_image_attributes_options import GetImageAttributesOptions +from ..types.shared_params.subtitle_overlay_transformation import SubtitleOverlayTransformation +from ..types.shared_params.solid_color_overlay_transformation import SolidColorOverlayTransformation + +__all__ = ["DummyResource", "AsyncDummyResource"] + + +class DummyResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DummyResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return DummyResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DummyResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return DummyResourceWithStreamingResponse(self) + + def create( + self, + *, + base_overlay: BaseOverlay | Omit = omit, + extensions: Extensions | Omit = omit, + get_image_attributes_options: GetImageAttributesOptions | Omit = omit, + image_overlay: ImageOverlay | Omit = omit, + overlay: Overlay | Omit = omit, + overlay_position: OverlayPosition | Omit = omit, + overlay_timing: OverlayTiming | Omit = omit, + responsive_image_attributes: ResponsiveImageAttributes | Omit = omit, + solid_color_overlay: SolidColorOverlay | Omit = omit, + solid_color_overlay_transformation: SolidColorOverlayTransformation | Omit = omit, + src_options: SrcOptions | Omit = omit, + streaming_resolution: StreamingResolution | Omit = omit, + subtitle_overlay: SubtitleOverlay | Omit = omit, + subtitle_overlay_transformation: SubtitleOverlayTransformation | Omit = omit, + text_overlay: TextOverlay | Omit = omit, + text_overlay_transformation: TextOverlayTransformation | Omit = omit, + transformation: Transformation | Omit = omit, + transformation_position: TransformationPosition | Omit = omit, + video_overlay: VideoOverlay | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """Internal test endpoint for SDK generation purposes only. + + This endpoint + demonstrates usage of all shared models defined in the Stainless configuration + and is not intended for public consumption. + + Args: + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + get_image_attributes_options: Options for generating responsive image attributes including `src`, `srcSet`, + and `sizes` for HTML `<img>` elements. This schema extends `SrcOptions` to add + support for responsive image generation with breakpoints. + + overlay: Specifies an overlay to be applied on the parent image or video. ImageKit + supports overlays including images, text, videos, subtitles, and solid colors. + See + [Overlay using layers](https://imagekit.io/docs/transformations#overlay-using-layers). + + responsive_image_attributes: Resulting set of attributes suitable for an HTML `<img>` element. Useful for + enabling responsive image loading with `srcSet` and `sizes`. + + src_options: Options for generating ImageKit URLs with transformations. See the + [Transformations guide](https://imagekit.io/docs/transformations). + + streaming_resolution: Available streaming resolutions for + [adaptive bitrate streaming](https://imagekit.io/docs/adaptive-bitrate-streaming) + + subtitle_overlay_transformation: Subtitle styling options. + [Learn more](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + from the docs. + + transformation: The SDK provides easy-to-use names for transformations. These names are + converted to the corresponding transformation string before being added to the + URL. SDKs are updated regularly to support new transformations.
If you want to + use a transformation that is not supported by the SDK, You can use the `raw` + parameter to pass the transformation string directly. See the + [Transformations documentation](https://imagekit.io/docs/transformations). + + transformation_position: By default, the transformation string is added as a query parameter in the URL, + e.g., `?tr=w-100,h-100`. If you want to add the transformation string in the + path of the URL, set this to `path`. Learn more in the + [Transformations guide](https://imagekit.io/docs/transformations). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + "/v1/dummy/test", + body=maybe_transform( + { + "base_overlay": base_overlay, + "extensions": extensions, + "get_image_attributes_options": get_image_attributes_options, + "image_overlay": image_overlay, + "overlay": overlay, + "overlay_position": overlay_position, + "overlay_timing": overlay_timing, + "responsive_image_attributes": responsive_image_attributes, + "solid_color_overlay": solid_color_overlay, + "solid_color_overlay_transformation": solid_color_overlay_transformation, + "src_options": src_options, + "streaming_resolution": streaming_resolution, + "subtitle_overlay": subtitle_overlay, + "subtitle_overlay_transformation": subtitle_overlay_transformation, + "text_overlay": text_overlay, + "text_overlay_transformation": text_overlay_transformation, + "transformation": transformation, + "transformation_position": transformation_position, + "video_overlay": video_overlay, + }, + dummy_create_params.DummyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDummyResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDummyResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncDummyResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDummyResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncDummyResourceWithStreamingResponse(self) + + async def create( + self, + *, + base_overlay: BaseOverlay | Omit = omit, + extensions: Extensions | Omit = omit, + get_image_attributes_options: GetImageAttributesOptions | Omit = omit, + image_overlay: ImageOverlay | Omit = omit, + overlay: Overlay | Omit = omit, + overlay_position: OverlayPosition | Omit = omit, + overlay_timing: OverlayTiming | Omit = omit, + responsive_image_attributes: ResponsiveImageAttributes | Omit = omit, + solid_color_overlay: SolidColorOverlay | Omit = omit, + solid_color_overlay_transformation: SolidColorOverlayTransformation | Omit = omit, + src_options: SrcOptions | Omit = omit, + streaming_resolution: StreamingResolution | Omit = omit, + subtitle_overlay: SubtitleOverlay | Omit = omit, + subtitle_overlay_transformation: SubtitleOverlayTransformation | Omit = omit, + text_overlay: TextOverlay | Omit = omit, + text_overlay_transformation: TextOverlayTransformation | Omit = omit, + transformation: Transformation | Omit = omit, + transformation_position: TransformationPosition | Omit = omit, + video_overlay: VideoOverlay | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """Internal test endpoint for SDK generation purposes only. + + This endpoint + demonstrates usage of all shared models defined in the Stainless configuration + and is not intended for public consumption. + + Args: + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + get_image_attributes_options: Options for generating responsive image attributes including `src`, `srcSet`, + and `sizes` for HTML `<img>` elements. This schema extends `SrcOptions` to add + support for responsive image generation with breakpoints. + + overlay: Specifies an overlay to be applied on the parent image or video. ImageKit + supports overlays including images, text, videos, subtitles, and solid colors. + See + [Overlay using layers](https://imagekit.io/docs/transformations#overlay-using-layers). + + responsive_image_attributes: Resulting set of attributes suitable for an HTML `<img>` element. Useful for + enabling responsive image loading with `srcSet` and `sizes`. + + src_options: Options for generating ImageKit URLs with transformations. See the + [Transformations guide](https://imagekit.io/docs/transformations). + + streaming_resolution: Available streaming resolutions for + [adaptive bitrate streaming](https://imagekit.io/docs/adaptive-bitrate-streaming) + + subtitle_overlay_transformation: Subtitle styling options. + [Learn more](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + from the docs. + + transformation: The SDK provides easy-to-use names for transformations. These names are + converted to the corresponding transformation string before being added to the + URL. SDKs are updated regularly to support new transformations.
If you want to + use a transformation that is not supported by the SDK, You can use the `raw` + parameter to pass the transformation string directly. See the + [Transformations documentation](https://imagekit.io/docs/transformations). + + transformation_position: By default, the transformation string is added as a query parameter in the URL, + e.g., `?tr=w-100,h-100`. If you want to add the transformation string in the + path of the URL, set this to `path`. Learn more in the + [Transformations guide](https://imagekit.io/docs/transformations). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + "/v1/dummy/test", + body=await async_maybe_transform( + { + "base_overlay": base_overlay, + "extensions": extensions, + "get_image_attributes_options": get_image_attributes_options, + "image_overlay": image_overlay, + "overlay": overlay, + "overlay_position": overlay_position, + "overlay_timing": overlay_timing, + "responsive_image_attributes": responsive_image_attributes, + "solid_color_overlay": solid_color_overlay, + "solid_color_overlay_transformation": solid_color_overlay_transformation, + "src_options": src_options, + "streaming_resolution": streaming_resolution, + "subtitle_overlay": subtitle_overlay, + "subtitle_overlay_transformation": subtitle_overlay_transformation, + "text_overlay": text_overlay, + "text_overlay_transformation": text_overlay_transformation, + "transformation": transformation, + "transformation_position": transformation_position, + "video_overlay": video_overlay, + }, + dummy_create_params.DummyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DummyResourceWithRawResponse: + def __init__(self, dummy: DummyResource) -> None: + self._dummy = dummy + + self.create = to_raw_response_wrapper( + dummy.create, + ) + + +class AsyncDummyResourceWithRawResponse: + def __init__(self, dummy: AsyncDummyResource) -> None: + self._dummy = dummy + + self.create = async_to_raw_response_wrapper( + dummy.create, + ) + + +class DummyResourceWithStreamingResponse: + def __init__(self, dummy: DummyResource) -> None: + self._dummy = dummy + + self.create = to_streamed_response_wrapper( + dummy.create, + ) + + +class AsyncDummyResourceWithStreamingResponse: + def __init__(self, dummy: AsyncDummyResource) -> None: + self._dummy = dummy + + self.create = async_to_streamed_response_wrapper( + dummy.create, + ) diff --git a/src/imagekitio/resources/files/__init__.py b/src/imagekitio/resources/files/__init__.py new file mode 100644 index 00000000..7fdf5194 --- /dev/null +++ b/src/imagekitio/resources/files/__init__.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .bulk import ( + BulkResource, + AsyncBulkResource, + BulkResourceWithRawResponse, + AsyncBulkResourceWithRawResponse, + BulkResourceWithStreamingResponse, + AsyncBulkResourceWithStreamingResponse, +) +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from .metadata import ( + MetadataResource, + AsyncMetadataResource, + MetadataResourceWithRawResponse, + AsyncMetadataResourceWithRawResponse, + MetadataResourceWithStreamingResponse, + AsyncMetadataResourceWithStreamingResponse, +) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) + +__all__ = [ + "BulkResource", + "AsyncBulkResource", + "BulkResourceWithRawResponse", + "AsyncBulkResourceWithRawResponse", + "BulkResourceWithStreamingResponse", + "AsyncBulkResourceWithStreamingResponse", + "VersionsResource", + "AsyncVersionsResource", + "VersionsResourceWithRawResponse", + "AsyncVersionsResourceWithRawResponse", + "VersionsResourceWithStreamingResponse", + "AsyncVersionsResourceWithStreamingResponse", + "MetadataResource", + "AsyncMetadataResource", + "MetadataResourceWithRawResponse", + "AsyncMetadataResourceWithRawResponse", + "MetadataResourceWithStreamingResponse", + "AsyncMetadataResourceWithStreamingResponse", + "FilesResource", + "AsyncFilesResource", + "FilesResourceWithRawResponse", + "AsyncFilesResourceWithRawResponse", + "FilesResourceWithStreamingResponse", + "AsyncFilesResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/files/bulk.py b/src/imagekitio/resources/files/bulk.py new file mode 100644 index 00000000..43c02cbf --- /dev/null +++ b/src/imagekitio/resources/files/bulk.py @@ -0,0 +1,488 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.files import bulk_delete_params, bulk_add_tags_params, bulk_remove_tags_params, bulk_remove_ai_tags_params +from ..._base_client import make_request_options +from ...types.files.bulk_delete_response import BulkDeleteResponse +from ...types.files.bulk_add_tags_response import BulkAddTagsResponse +from ...types.files.bulk_remove_tags_response import BulkRemoveTagsResponse +from ...types.files.bulk_remove_ai_tags_response import BulkRemoveAITagsResponse + +__all__ = ["BulkResource", "AsyncBulkResource"] + + +class BulkResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> BulkResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return BulkResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> BulkResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return BulkResourceWithStreamingResponse(self) + + def delete( + self, + *, + file_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkDeleteResponse: + """ + This API deletes multiple files and all their file versions permanently. + + Note: If a file or specific transformation has been requested in the past, then + the response is cached. Deleting a file does not purge the cache. You can purge + the cache using purge cache API. + + A maximum of 100 files can be deleted at a time. + + Args: + file_ids: An array of fileIds which you want to delete. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/batch/deleteByFileIds", + body=maybe_transform({"file_ids": file_ids}, bulk_delete_params.BulkDeleteParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkDeleteResponse, + ) + + def add_tags( + self, + *, + file_ids: SequenceNotStr[str], + tags: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkAddTagsResponse: + """This API adds tags to multiple files in bulk. + + A maximum of 50 files can be + specified at a time. + + Args: + file_ids: An array of fileIds to which you want to add tags. + + tags: An array of tags that you want to add to the files. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/addTags", + body=maybe_transform( + { + "file_ids": file_ids, + "tags": tags, + }, + bulk_add_tags_params.BulkAddTagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkAddTagsResponse, + ) + + def remove_ai_tags( + self, + *, + ai_tags: SequenceNotStr[str], + file_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkRemoveAITagsResponse: + """This API removes AITags from multiple files in bulk. + + A maximum of 50 files can + be specified at a time. + + Args: + ai_tags: An array of AITags that you want to remove from the files. + + file_ids: An array of fileIds from which you want to remove AITags. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/removeAITags", + body=maybe_transform( + { + "ai_tags": ai_tags, + "file_ids": file_ids, + }, + bulk_remove_ai_tags_params.BulkRemoveAITagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkRemoveAITagsResponse, + ) + + def remove_tags( + self, + *, + file_ids: SequenceNotStr[str], + tags: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkRemoveTagsResponse: + """This API removes tags from multiple files in bulk. + + A maximum of 50 files can be + specified at a time. + + Args: + file_ids: An array of fileIds from which you want to remove tags. + + tags: An array of tags that you want to remove from the files. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/removeTags", + body=maybe_transform( + { + "file_ids": file_ids, + "tags": tags, + }, + bulk_remove_tags_params.BulkRemoveTagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkRemoveTagsResponse, + ) + + +class AsyncBulkResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncBulkResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncBulkResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncBulkResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncBulkResourceWithStreamingResponse(self) + + async def delete( + self, + *, + file_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkDeleteResponse: + """ + This API deletes multiple files and all their file versions permanently. + + Note: If a file or specific transformation has been requested in the past, then + the response is cached. Deleting a file does not purge the cache. You can purge + the cache using purge cache API. + + A maximum of 100 files can be deleted at a time. + + Args: + file_ids: An array of fileIds which you want to delete. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/batch/deleteByFileIds", + body=await async_maybe_transform({"file_ids": file_ids}, bulk_delete_params.BulkDeleteParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkDeleteResponse, + ) + + async def add_tags( + self, + *, + file_ids: SequenceNotStr[str], + tags: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkAddTagsResponse: + """This API adds tags to multiple files in bulk. + + A maximum of 50 files can be + specified at a time. 
+ + Args: + file_ids: An array of fileIds to which you want to add tags. + + tags: An array of tags that you want to add to the files. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/addTags", + body=await async_maybe_transform( + { + "file_ids": file_ids, + "tags": tags, + }, + bulk_add_tags_params.BulkAddTagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkAddTagsResponse, + ) + + async def remove_ai_tags( + self, + *, + ai_tags: SequenceNotStr[str], + file_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkRemoveAITagsResponse: + """This API removes AITags from multiple files in bulk. + + A maximum of 50 files can + be specified at a time. + + Args: + ai_tags: An array of AITags that you want to remove from the files. + + file_ids: An array of fileIds from which you want to remove AITags. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/removeAITags", + body=await async_maybe_transform( + { + "ai_tags": ai_tags, + "file_ids": file_ids, + }, + bulk_remove_ai_tags_params.BulkRemoveAITagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkRemoveAITagsResponse, + ) + + async def remove_tags( + self, + *, + file_ids: SequenceNotStr[str], + tags: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> BulkRemoveTagsResponse: + """This API removes tags from multiple files in bulk. + + A maximum of 50 files can be + specified at a time. + + Args: + file_ids: An array of fileIds from which you want to remove tags. + + tags: An array of tags that you want to remove from the files. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/removeTags", + body=await async_maybe_transform( + { + "file_ids": file_ids, + "tags": tags, + }, + bulk_remove_tags_params.BulkRemoveTagsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BulkRemoveTagsResponse, + ) + + +class BulkResourceWithRawResponse: + def __init__(self, bulk: BulkResource) -> None: + self._bulk = bulk + + self.delete = to_raw_response_wrapper( + bulk.delete, + ) + self.add_tags = to_raw_response_wrapper( + bulk.add_tags, + ) + self.remove_ai_tags = to_raw_response_wrapper( + bulk.remove_ai_tags, + ) + self.remove_tags = to_raw_response_wrapper( + bulk.remove_tags, + ) + + +class AsyncBulkResourceWithRawResponse: + def __init__(self, bulk: AsyncBulkResource) -> None: + self._bulk = bulk + + self.delete = async_to_raw_response_wrapper( + bulk.delete, + ) + self.add_tags = async_to_raw_response_wrapper( + bulk.add_tags, + ) + self.remove_ai_tags = async_to_raw_response_wrapper( + bulk.remove_ai_tags, + ) + self.remove_tags = async_to_raw_response_wrapper( + bulk.remove_tags, + ) + + +class BulkResourceWithStreamingResponse: + def __init__(self, bulk: BulkResource) -> None: + self._bulk = bulk + + self.delete = to_streamed_response_wrapper( + bulk.delete, + ) + self.add_tags = to_streamed_response_wrapper( + bulk.add_tags, + ) + self.remove_ai_tags = to_streamed_response_wrapper( + bulk.remove_ai_tags, + ) + self.remove_tags = to_streamed_response_wrapper( + bulk.remove_tags, + ) + + +class AsyncBulkResourceWithStreamingResponse: + def __init__(self, bulk: AsyncBulkResource) -> None: + self._bulk = bulk + + self.delete = async_to_streamed_response_wrapper( + bulk.delete, + ) + self.add_tags = async_to_streamed_response_wrapper( + bulk.add_tags, + ) + self.remove_ai_tags = async_to_streamed_response_wrapper( + bulk.remove_ai_tags, + ) + self.remove_tags = async_to_streamed_response_wrapper( + bulk.remove_tags, + ) diff --git a/src/imagekitio/resources/files/files.py b/src/imagekitio/resources/files/files.py new file mode 100644 index 00000000..cd9ff3e7 --- /dev/null +++ b/src/imagekitio/resources/files/files.py @@ -0,0 +1,1574 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
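As a rough illustration of the bulk endpoints defined in `bulk.py` above, a usage sketch follows; the keyword arguments mirror the method signatures in this diff, while the `ImageKit` client class and the `client.files.bulk` accessor are assumptions based on the resource layout.

from imagekitio import ImageKit  # assumed client class name

client = ImageKit()

# Add tags to up to 50 files in a single call.
client.files.bulk.add_tags(
    file_ids=["file_id_1", "file_id_2"],
    tags=["summer-sale", "banner"],
)

# Remove regular tags or AITags in bulk.
client.files.bulk.remove_tags(file_ids=["file_id_1"], tags=["banner"])
client.files.bulk.remove_ai_tags(file_ids=["file_id_1"], ai_tags=["car"])

# Permanently delete up to 100 files; this does not purge the CDN cache.
client.files.bulk.delete(file_ids=["file_id_1", "file_id_2"])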
+ +from __future__ import annotations + +from typing import Dict, List, Union, Mapping, Optional, cast +from typing_extensions import Literal, overload + +import httpx + +from .bulk import ( + BulkResource, + AsyncBulkResource, + BulkResourceWithRawResponse, + AsyncBulkResourceWithRawResponse, + BulkResourceWithStreamingResponse, + AsyncBulkResourceWithStreamingResponse, +) +from ...types import ( + file_copy_params, + file_move_params, + file_rename_params, + file_update_params, + file_upload_params, +) +from ..._types import ( + Body, + Omit, + Query, + Headers, + NoneType, + NotGiven, + FileTypes, + SequenceNotStr, + omit, + not_given, +) +from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .metadata import ( + MetadataResource, + AsyncMetadataResource, + MetadataResourceWithRawResponse, + AsyncMetadataResourceWithRawResponse, + MetadataResourceWithStreamingResponse, + AsyncMetadataResourceWithStreamingResponse, +) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.file import File +from ..._base_client import make_request_options +from ...lib.serialization_utils import serialize_upload_options +from ...types.file_copy_response import FileCopyResponse +from ...types.file_move_response import FileMoveResponse +from ...types.file_rename_response import FileRenameResponse +from ...types.file_update_response import FileUpdateResponse +from ...types.file_upload_response import FileUploadResponse +from ...types.shared_params.extensions import Extensions + +__all__ = ["FilesResource", "AsyncFilesResource"] + + +class FilesResource(SyncAPIResource): + @cached_property + def bulk(self) -> BulkResource: + return BulkResource(self._client) + + @cached_property + def versions(self) -> VersionsResource: + return VersionsResource(self._client) + + @cached_property + def metadata(self) -> MetadataResource: + return MetadataResource(self._client) + + @cached_property + def with_raw_response(self) -> FilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return FilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return FilesResourceWithStreamingResponse(self) + + @overload + def update( + self, + file_id: str, + *, + custom_coordinates: Optional[str] | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + remove_ai_tags: Union[SequenceNotStr[str], Literal["all"]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + """ + This API updates the details or attributes of the current version of the file. + You can update `tags`, `customCoordinates`, `customMetadata`, publication + status, remove existing `AITags` and apply extensions using this API. + + Args: + custom_coordinates: Define an important area in the image in the format `x,y,width,height` e.g. + `10,10,100,100`. Send `null` to unset this value. + + custom_metadata: A key-value data to be associated with the asset. To unset a key, send `null` + value for that key. Before setting any custom metadata on an asset you have to + create the field using custom metadata fields API. + + description: Optional text to describe the contents of the file. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + remove_ai_tags: An array of AITags associated with the file that you want to remove, e.g. + `["car", "vehicle", "motorsports"]`. + + If you want to remove all AITags associated with the file, send a string - + "all". + + Note: The remove operation for `AITags` executes before any of the `extensions` + are processed. + + tags: An array of tags associated with the file, such as `["tag1", "tag2"]`. Send + `null` to unset all tags associated with the file. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + file_id: str, + *, + publish: file_update_params.ChangePublicationStatusPublish | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + """ + This API updates the details or attributes of the current version of the file. 
+ You can update `tags`, `customCoordinates`, `customMetadata`, publication + status, remove existing `AITags` and apply extensions using this API. + + Args: + publish: Configure the publication status of a file and its versions. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + def update( + self, + file_id: str, + *, + custom_coordinates: Optional[str] | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + remove_ai_tags: Union[SequenceNotStr[str], Literal["all"]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + webhook_url: str | Omit = omit, + publish: file_update_params.ChangePublicationStatusPublish | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._patch( + f"/v1/files/{file_id}/details", + body=maybe_transform( + { + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "extensions": extensions, + "remove_ai_tags": remove_ai_tags, + "tags": tags, + "webhook_url": webhook_url, + "publish": publish, + }, + file_update_params.FileUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUpdateResponse, + ) + + def delete( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + This API deletes the file and all its file versions permanently. + + Note: If a file or specific transformation has been requested in the past, then + the response is cached. Deleting a file does not purge the cache. You can purge + the cache using purge cache API. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v1/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def copy( + self, + *, + destination_path: str, + source_file_path: str, + include_file_versions: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileCopyResponse: + """ + This will copy a file from one folder to another. + + Note: If any file at the destination has the same name as the source file, then + the source file and its versions (if `includeFileVersions` is set to true) will + be appended to the destination file version history. + + Args: + destination_path: Full path to the folder you want to copy the above file into. + + source_file_path: The full path of the file you want to copy. + + include_file_versions: Option to copy all versions of a file. By default, only the current version of + the file is copied. When set to true, all versions of the file will be copied. + Default value - `false`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/copy", + body=maybe_transform( + { + "destination_path": destination_path, + "source_file_path": source_file_path, + "include_file_versions": include_file_versions, + }, + file_copy_params.FileCopyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileCopyResponse, + ) + + def get( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API returns an object with details or attributes about the current version + of the file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/v1/files/{file_id}/details", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + def move( + self, + *, + destination_path: str, + source_file_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileMoveResponse: + """ + This will move a file and all its versions from one folder to another. + + Note: If any file at the destination has the same name as the source file, then + the source file and its versions will be appended to the destination file. + + Args: + destination_path: Full path to the folder you want to move the above file into. + + source_file_path: The full path of the file you want to move. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/files/move", + body=maybe_transform( + { + "destination_path": destination_path, + "source_file_path": source_file_path, + }, + file_move_params.FileMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileMoveResponse, + ) + + def rename( + self, + *, + file_path: str, + new_file_name: str, + purge_cache: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileRenameResponse: + """ + You can rename an already existing file in the media library using rename file + API. This operation would rename all file versions of the file. + + Note: The old URLs will stop working. The file/file version URLs cached on CDN + will continue to work unless a purge is requested. + + Args: + file_path: The full path of the file you want to rename. + + new_file_name: + The new name of the file. A filename can contain: + + Alphanumeric Characters: `a-z`, `A-Z`, `0-9` (including Unicode letters, marks, + and numerals in other languages). Special Characters: `.`, `_`, and `-`. + + Any other character, including space, will be replaced by `_`. + + purge_cache: Option to purge cache for the old file and its versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove cached content of old file and its versions. This purge request is + counted against your monthly purge quota. 
+ + Note: If the old file were accessible at + `https://ik.imagekit.io/demo/old-filename.jpg`, a purge cache request would be + issued against `https://ik.imagekit.io/demo/old-filename.jpg*` (with a wildcard + at the end). It will remove the file and its versions' URLs and any + transformations made using query parameters on this file or its versions. + However, the cache for file transformations made using path parameters will + persist. You can purge them using the purge API. For more details, refer to the + purge API documentation. + + Default value - `false` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._put( + "/v1/files/rename", + body=maybe_transform( + { + "file_path": file_path, + "new_file_name": new_file_name, + "purge_cache": purge_cache, + }, + file_rename_params.FileRenameParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileRenameResponse, + ) + + def upload( + self, + *, + file: FileTypes, + file_name: str, + token: str | Omit = omit, + checks: str | Omit = omit, + custom_coordinates: str | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + expire: int | Omit = omit, + extensions: Extensions | Omit = omit, + folder: str | Omit = omit, + is_private_file: bool | Omit = omit, + is_published: bool | Omit = omit, + overwrite_ai_tags: bool | Omit = omit, + overwrite_custom_metadata: bool | Omit = omit, + overwrite_file: bool | Omit = omit, + overwrite_tags: bool | Omit = omit, + public_key: str | Omit = omit, + response_fields: List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ] + | Omit = omit, + signature: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + transformation: file_upload_params.Transformation | Omit = omit, + use_unique_file_name: bool | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUploadResponse: + """ + ImageKit.io allows you to upload files directly from both the server and client + sides. For server-side uploads, private API key authentication is used. For + client-side uploads, generate a one-time `token`, `signature`, and `expire` from + your secure backend using private API. + [Learn more](/docs/api-reference/upload-file/upload-file#how-to-implement-client-side-file-upload) + about how to implement client-side file upload. + + The [V2 API](/docs/api-reference/upload-file/upload-file-v2) enhances security + by verifying the entire payload using JWT. + + **File size limit** \\ + On the free plan, the maximum upload file sizes are 20MB for images, audio, and raw + files and 100MB for videos. On the paid plan, these limits increase to 40MB for images, + audio, and raw files and 2GB for videos. 
These limits can be further increased with + higher-tier plans. + + **Version limit** \\ + A file can have a maximum of 100 versions. + + **Demo applications** + + - A full-fledged + [upload widget using Uppy](https://github.com/imagekit-samples/uppy-uploader), + supporting file selections from local storage, URL, Dropbox, Google Drive, + Instagram, and more. + - [Quick start guides](/docs/quick-start-guides) for various frameworks and + technologies. + + Args: + file: + The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. + + file_name: + The name with which the file has to be uploaded. The file name can contain: + + - Alphanumeric Characters: `a-z`, `A-Z`, `0-9`. + - Special Characters: `.`, `-` + + Any other character including space will be replaced by `_` + + token: A unique value that the ImageKit.io server will use to recognize and prevent + subsequent retries for the same request. We suggest using V4 UUIDs, or another + random string with enough entropy to avoid collisions. This field is only + required for authentication when uploading a file from the client side. + + **Note**: Sending a value that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new value for this field. + + checks: Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file#upload-api-checks). + + custom_coordinates: Define an important area in the image. This is only relevant for image type + files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + + custom_metadata: JSON key-value pairs to associate with the asset. Create the custom metadata + fields before setting these values. + + description: Optional text to describe the contents of the file. + + expire: The time until your signature is valid. It must be a + [Unix time](https://en.wikipedia.org/wiki/Unix_time) in less than 1 hour into + the future. It should be in seconds. This field is only required for + authentication when uploading a file from the client side. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + folder: The folder path in which the image has to be uploaded. If the folder(s) didn't + exist before, a new folder(s) is created. + + The folder name can contain: + + - Alphanumeric Characters: `a-z` , `A-Z` , `0-9` + - Special Characters: `/` , `_` , `-` + + Using multiple `/` creates a nested folder. + + is_private_file: Whether to mark the file as private or not. + + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + + is_published: Whether to upload file as published or not. 
+ + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + + overwrite_ai_tags: If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. + + overwrite_custom_metadata: If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + + overwrite_file: If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + + overwrite_tags: If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + + public_key: Your ImageKit.io public key. This field is only required for authentication when + uploading a file from the client side. + + response_fields: Array of response field keys to include in the API response body. + + signature: HMAC-SHA1 digest of the token+expire using your ImageKit.io private API key as a + key. Learn how to create a signature on the page below. This should be in + lowercase. + + Signature must be calculated on the server-side. This field is only required for + authentication when uploading a file from the client side. + + tags: Set the tags while uploading the file. Provide an array of tag strings (e.g. + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + + transformation: Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + + use_unique_file_name: Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "file_name": file_name, + "token": token, + "checks": checks, + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "expire": expire, + "extensions": extensions, + "folder": folder, + "is_private_file": is_private_file, + "is_published": is_published, + "overwrite_ai_tags": overwrite_ai_tags, + "overwrite_custom_metadata": overwrite_custom_metadata, + "overwrite_file": overwrite_file, + "overwrite_tags": overwrite_tags, + "public_key": public_key, + "response_fields": response_fields, + "signature": signature, + "tags": tags, + "transformation": transformation, + "use_unique_file_name": use_unique_file_name, + "webhook_url": webhook_url, + } + ) + body = serialize_upload_options(body) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/api/v1/files/upload" + if self._client._base_url_overridden + else "https://upload.imagekit.io/api/v1/files/upload", + body=maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUploadResponse, + ) + + +class AsyncFilesResource(AsyncAPIResource): + @cached_property + def bulk(self) -> AsyncBulkResource: + return AsyncBulkResource(self._client) + + @cached_property + def versions(self) -> AsyncVersionsResource: + return AsyncVersionsResource(self._client) + + @cached_property + def metadata(self) -> AsyncMetadataResource: + return AsyncMetadataResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncFilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncFilesResourceWithStreamingResponse(self) + + @overload + async def update( + self, + file_id: str, + *, + custom_coordinates: Optional[str] | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + remove_ai_tags: Union[SequenceNotStr[str], Literal["all"]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + """ + This API updates the details or attributes of the current version of the file. + You can update `tags`, `customCoordinates`, `customMetadata`, publication + status, remove existing `AITags` and apply extensions using this API. + + Args: + custom_coordinates: Define an important area in the image in the format `x,y,width,height` e.g. + `10,10,100,100`. Send `null` to unset this value. + + custom_metadata: A key-value data to be associated with the asset. To unset a key, send `null` + value for that key. Before setting any custom metadata on an asset you have to + create the field using custom metadata fields API. + + description: Optional text to describe the contents of the file. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + remove_ai_tags: An array of AITags associated with the file that you want to remove, e.g. + `["car", "vehicle", "motorsports"]`. + + If you want to remove all AITags associated with the file, send a string - + "all". + + Note: The remove operation for `AITags` executes before any of the `extensions` + are processed. + + tags: An array of tags associated with the file, such as `["tag1", "tag2"]`. Send + `null` to unset all tags associated with the file. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + file_id: str, + *, + publish: file_update_params.ChangePublicationStatusPublish | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + """ + This API updates the details or attributes of the current version of the file. + You can update `tags`, `customCoordinates`, `customMetadata`, publication + status, remove existing `AITags` and apply extensions using this API. + + Args: + publish: Configure the publication status of a file and its versions. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
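The two `update()` overloads above accept either detail fields (tags, custom metadata, extensions, and so on) or a `publish` payload, not both. A hedged sketch of both shapes follows; client construction is assumed as before, and the keys inside the `publish` dict are an assumption based on the publication-status API rather than something shown in this diff.

from imagekitio import ImageKit  # assumed client class name

client = ImageKit()

# Shape 1: update details of the current file version.
client.files.update(
    "file_id",
    tags=["tag1", "tag2"],
    custom_coordinates="10,10,100,100",
    remove_ai_tags="all",
)

# Shape 2: change only the publication status.
client.files.update(
    "file_id",
    publish={"is_published": True},  # key name assumed
)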
+ + async def update( + self, + file_id: str, + *, + custom_coordinates: Optional[str] | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + extensions: Extensions | Omit = omit, + remove_ai_tags: Union[SequenceNotStr[str], Literal["all"]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + webhook_url: str | Omit = omit, + publish: file_update_params.ChangePublicationStatusPublish | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUpdateResponse: + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._patch( + f"/v1/files/{file_id}/details", + body=await async_maybe_transform( + { + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "extensions": extensions, + "remove_ai_tags": remove_ai_tags, + "tags": tags, + "webhook_url": webhook_url, + "publish": publish, + }, + file_update_params.FileUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUpdateResponse, + ) + + async def delete( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + This API deletes the file and all its file versions permanently. + + Note: If a file or specific transformation has been requested in the past, then + the response is cached. Deleting a file does not purge the cache. You can purge + the cache using purge cache API. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v1/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def copy( + self, + *, + destination_path: str, + source_file_path: str, + include_file_versions: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileCopyResponse: + """ + This will copy a file from one folder to another. 
+ + Note: If any file at the destination has the same name as the source file, then + the source file and its versions (if `includeFileVersions` is set to true) will + be appended to the destination file version history. + + Args: + destination_path: Full path to the folder you want to copy the above file into. + + source_file_path: The full path of the file you want to copy. + + include_file_versions: Option to copy all versions of a file. By default, only the current version of + the file is copied. When set to true, all versions of the file will be copied. + Default value - `false`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/copy", + body=await async_maybe_transform( + { + "destination_path": destination_path, + "source_file_path": source_file_path, + "include_file_versions": include_file_versions, + }, + file_copy_params.FileCopyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileCopyResponse, + ) + + async def get( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API returns an object with details or attributes about the current version + of the file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/v1/files/{file_id}/details", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + async def move( + self, + *, + destination_path: str, + source_file_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileMoveResponse: + """ + This will move a file and all its versions from one folder to another. + + Note: If any file at the destination has the same name as the source file, then + the source file and its versions will be appended to the destination file. + + Args: + destination_path: Full path to the folder you want to move the above file into. + + source_file_path: The full path of the file you want to move. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/files/move", + body=await async_maybe_transform( + { + "destination_path": destination_path, + "source_file_path": source_file_path, + }, + file_move_params.FileMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileMoveResponse, + ) + + async def rename( + self, + *, + file_path: str, + new_file_name: str, + purge_cache: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileRenameResponse: + """ + You can rename an already existing file in the media library using rename file + API. This operation would rename all file versions of the file. + + Note: The old URLs will stop working. The file/file version URLs cached on CDN + will continue to work unless a purge is requested. + + Args: + file_path: The full path of the file you want to rename. + + new_file_name: + The new name of the file. A filename can contain: + + Alphanumeric Characters: `a-z`, `A-Z`, `0-9` (including Unicode letters, marks, + and numerals in other languages). Special Characters: `.`, `_`, and `-`. + + Any other character, including space, will be replaced by `_`. + + purge_cache: Option to purge cache for the old file and its versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove cached content of old file and its versions. This purge request is + counted against your monthly purge quota. + + Note: If the old file were accessible at + `https://ik.imagekit.io/demo/old-filename.jpg`, a purge cache request would be + issued against `https://ik.imagekit.io/demo/old-filename.jpg*` (with a wildcard + at the end). It will remove the file and its versions' URLs and any + transformations made using query parameters on this file or its versions. + However, the cache for file transformations made using path parameters will + persist. You can purge them using the purge API. For more details, refer to the + purge API documentation. 
+ + Default value - `false` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._put( + "/v1/files/rename", + body=await async_maybe_transform( + { + "file_path": file_path, + "new_file_name": new_file_name, + "purge_cache": purge_cache, + }, + file_rename_params.FileRenameParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileRenameResponse, + ) + + async def upload( + self, + *, + file: FileTypes, + file_name: str, + token: str | Omit = omit, + checks: str | Omit = omit, + custom_coordinates: str | Omit = omit, + custom_metadata: Dict[str, object] | Omit = omit, + description: str | Omit = omit, + expire: int | Omit = omit, + extensions: Extensions | Omit = omit, + folder: str | Omit = omit, + is_private_file: bool | Omit = omit, + is_published: bool | Omit = omit, + overwrite_ai_tags: bool | Omit = omit, + overwrite_custom_metadata: bool | Omit = omit, + overwrite_file: bool | Omit = omit, + overwrite_tags: bool | Omit = omit, + public_key: str | Omit = omit, + response_fields: List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ] + | Omit = omit, + signature: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + transformation: file_upload_params.Transformation | Omit = omit, + use_unique_file_name: bool | Omit = omit, + webhook_url: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FileUploadResponse: + """ + ImageKit.io allows you to upload files directly from both the server and client + sides. For server-side uploads, private API key authentication is used. For + client-side uploads, generate a one-time `token`, `signature`, and `expire` from + your secure backend using private API. + [Learn more](/docs/api-reference/upload-file/upload-file#how-to-implement-client-side-file-upload) + about how to implement client-side file upload. + + The [V2 API](/docs/api-reference/upload-file/upload-file-v2) enhances security + by verifying the entire payload using JWT. + + **File size limit** \\ + On the free plan, the maximum upload file sizes are 20MB for images, audio, and raw + files and 100MB for videos. On the paid plan, these limits increase to 40MB for images, + audio, and raw files and 2GB for videos. These limits can be further increased with + higher-tier plans. + + **Version limit** \\ + A file can have a maximum of 100 versions. + + **Demo applications** + + - A full-fledged + [upload widget using Uppy](https://github.com/imagekit-samples/uppy-uploader), + supporting file selections from local storage, URL, Dropbox, Google Drive, + Instagram, and more. + - [Quick start guides](/docs/quick-start-guides) for various frameworks and + technologies. 
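Before the parameter reference below, a server-side upload sketch using the async client; the `AsyncImageKit` class name and environment-based credentials are assumptions, and the local path, folder, and tags are placeholders:

import asyncio
from pathlib import Path

from imagekitio import AsyncImageKit  # client class name assumed

async def main() -> None:
    client = AsyncImageKit()  # credentials assumed to be configured via environment variables

    result = await client.files.upload(
        # Raw bytes are used here; a public URL or base64 string also works per the description above.
        file=Path("hero.jpg").read_bytes(),
        file_name="hero.jpg",
        folder="/marketing/",
        tags=["banner"],
        use_unique_file_name=True,
    )
    print(result)

asyncio.run(main())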
+ + Args: + file: + The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. + + file_name: + The name with which the file has to be uploaded. The file name can contain: + + - Alphanumeric Characters: `a-z`, `A-Z`, `0-9`. + - Special Characters: `.`, `-` + + Any other character including space will be replaced by `_` + + token: A unique value that the ImageKit.io server will use to recognize and prevent + subsequent retries for the same request. We suggest using V4 UUIDs, or another + random string with enough entropy to avoid collisions. This field is only + required for authentication when uploading a file from the client side. + + **Note**: Sending a value that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new value for this field. + + checks: Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file#upload-api-checks). + + custom_coordinates: Define an important area in the image. This is only relevant for image type + files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + + custom_metadata: JSON key-value pairs to associate with the asset. Create the custom metadata + fields before setting these values. + + description: Optional text to describe the contents of the file. + + expire: The time until your signature is valid. It must be a + [Unix time](https://en.wikipedia.org/wiki/Unix_time) in less than 1 hour into + the future. It should be in seconds. This field is only required for + authentication when uploading a file from the client side. + + extensions: Array of extensions to be applied to the asset. Each extension can be configured + with specific parameters based on the extension type. + + folder: The folder path in which the image has to be uploaded. If the folder(s) didn't + exist before, a new folder(s) is created. + + The folder name can contain: + + - Alphanumeric Characters: `a-z` , `A-Z` , `0-9` + - Special Characters: `/` , `_` , `-` + + Using multiple `/` creates a nested folder. + + is_private_file: Whether to mark the file as private or not. + + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + + is_published: Whether to upload file as published or not. + + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + + overwrite_ai_tags: If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. 
+ + overwrite_custom_metadata: If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + + overwrite_file: If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + + overwrite_tags: If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + + public_key: Your ImageKit.io public key. This field is only required for authentication when + uploading a file from the client side. + + response_fields: Array of response field keys to include in the API response body. + + signature: HMAC-SHA1 digest of the token+expire using your ImageKit.io private API key as a + key. Learn how to create a signature on the page below. This should be in + lowercase. + + Signature must be calculated on the server-side. This field is only required for + authentication when uploading a file from the client side. + + tags: Set the tags while uploading the file. Provide an array of tag strings (e.g. + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + + transformation: Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + + use_unique_file_name: Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + + webhook_url: The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "file_name": file_name, + "token": token, + "checks": checks, + "custom_coordinates": custom_coordinates, + "custom_metadata": custom_metadata, + "description": description, + "expire": expire, + "extensions": extensions, + "folder": folder, + "is_private_file": is_private_file, + "is_published": is_published, + "overwrite_ai_tags": overwrite_ai_tags, + "overwrite_custom_metadata": overwrite_custom_metadata, + "overwrite_file": overwrite_file, + "overwrite_tags": overwrite_tags, + "public_key": public_key, + "response_fields": response_fields, + "signature": signature, + "tags": tags, + "transformation": transformation, + "use_unique_file_name": use_unique_file_name, + "webhook_url": webhook_url, + } + ) + body = serialize_upload_options(body) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/api/v1/files/upload" + if self._client._base_url_overridden + else "https://upload.imagekit.io/api/v1/files/upload", + body=await async_maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileUploadResponse, + ) + + +class FilesResourceWithRawResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.update = to_raw_response_wrapper( + files.update, + ) + self.delete = to_raw_response_wrapper( + files.delete, + ) + self.copy = to_raw_response_wrapper( + files.copy, + ) + self.get = to_raw_response_wrapper( + files.get, + ) + self.move = to_raw_response_wrapper( + files.move, + ) + self.rename = to_raw_response_wrapper( + files.rename, + ) + self.upload = to_raw_response_wrapper( + files.upload, + ) + + @cached_property + def bulk(self) -> BulkResourceWithRawResponse: + return BulkResourceWithRawResponse(self._files.bulk) + + @cached_property + def versions(self) -> VersionsResourceWithRawResponse: + return VersionsResourceWithRawResponse(self._files.versions) + + @cached_property + def metadata(self) -> MetadataResourceWithRawResponse: + return MetadataResourceWithRawResponse(self._files.metadata) + + +class AsyncFilesResourceWithRawResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.update = async_to_raw_response_wrapper( + files.update, + ) + self.delete = async_to_raw_response_wrapper( + files.delete, + ) + self.copy = async_to_raw_response_wrapper( + files.copy, + ) + self.get = async_to_raw_response_wrapper( + files.get, + ) + self.move = async_to_raw_response_wrapper( + files.move, + ) + self.rename = async_to_raw_response_wrapper( + files.rename, + ) + self.upload = async_to_raw_response_wrapper( + files.upload, + ) + + @cached_property + def bulk(self) -> AsyncBulkResourceWithRawResponse: + return AsyncBulkResourceWithRawResponse(self._files.bulk) + + @cached_property + def versions(self) -> AsyncVersionsResourceWithRawResponse: + return 
AsyncVersionsResourceWithRawResponse(self._files.versions) + + @cached_property + def metadata(self) -> AsyncMetadataResourceWithRawResponse: + return AsyncMetadataResourceWithRawResponse(self._files.metadata) + + +class FilesResourceWithStreamingResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.update = to_streamed_response_wrapper( + files.update, + ) + self.delete = to_streamed_response_wrapper( + files.delete, + ) + self.copy = to_streamed_response_wrapper( + files.copy, + ) + self.get = to_streamed_response_wrapper( + files.get, + ) + self.move = to_streamed_response_wrapper( + files.move, + ) + self.rename = to_streamed_response_wrapper( + files.rename, + ) + self.upload = to_streamed_response_wrapper( + files.upload, + ) + + @cached_property + def bulk(self) -> BulkResourceWithStreamingResponse: + return BulkResourceWithStreamingResponse(self._files.bulk) + + @cached_property + def versions(self) -> VersionsResourceWithStreamingResponse: + return VersionsResourceWithStreamingResponse(self._files.versions) + + @cached_property + def metadata(self) -> MetadataResourceWithStreamingResponse: + return MetadataResourceWithStreamingResponse(self._files.metadata) + + +class AsyncFilesResourceWithStreamingResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.update = async_to_streamed_response_wrapper( + files.update, + ) + self.delete = async_to_streamed_response_wrapper( + files.delete, + ) + self.copy = async_to_streamed_response_wrapper( + files.copy, + ) + self.get = async_to_streamed_response_wrapper( + files.get, + ) + self.move = async_to_streamed_response_wrapper( + files.move, + ) + self.rename = async_to_streamed_response_wrapper( + files.rename, + ) + self.upload = async_to_streamed_response_wrapper( + files.upload, + ) + + @cached_property + def bulk(self) -> AsyncBulkResourceWithStreamingResponse: + return AsyncBulkResourceWithStreamingResponse(self._files.bulk) + + @cached_property + def versions(self) -> AsyncVersionsResourceWithStreamingResponse: + return AsyncVersionsResourceWithStreamingResponse(self._files.versions) + + @cached_property + def metadata(self) -> AsyncMetadataResourceWithStreamingResponse: + return AsyncMetadataResourceWithStreamingResponse(self._files.metadata) diff --git a/src/imagekitio/resources/files/metadata.py b/src/imagekitio/resources/files/metadata.py new file mode 100644 index 00000000..d9e05412 --- /dev/null +++ b/src/imagekitio/resources/files/metadata.py @@ -0,0 +1,263 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.files import metadata_get_from_url_params +from ..._base_client import make_request_options +from ...types.metadata import Metadata + +__all__ = ["MetadataResource", "AsyncMetadataResource"] + + +class MetadataResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> MetadataResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
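A short sketch of the raw-response wrapper alongside the plain metadata calls; `ImageKit` as the synchronous client class and environment-based credentials are assumptions, and the file ID and URL are placeholders:

from imagekitio import ImageKit  # client class name assumed

client = ImageKit()  # credentials assumed to be configured via environment variables

# Parsed metadata for a file already in the media library.
metadata = client.files.metadata.get("file_id_placeholder")

# Metadata for a remote URL served through your ImageKit account.
remote = client.files.metadata.get_from_url(url="https://ik.imagekit.io/your_id/sample.jpg")

# The same call through .with_raw_response exposes HTTP details before parsing.
response = client.files.metadata.with_raw_response.get("file_id_placeholder")
print(response.headers)
print(response.parse())  # yields the same Metadata object as the direct call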
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return MetadataResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> MetadataResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return MetadataResourceWithStreamingResponse(self) + + def get( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Metadata: + """ + You can programmatically get image EXIF, pHash, and other metadata for uploaded + files in the ImageKit.io media library using this API. + + You can also get the metadata in upload API response by passing `metadata` in + `responseFields` parameter. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/v1/files/{file_id}/metadata", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Metadata, + ) + + def get_from_url( + self, + *, + url: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Metadata: + """ + Get image EXIF, pHash, and other metadata from ImageKit.io powered remote URL + using this API. + + Args: + url: Should be a valid file URL. It should be accessible using your ImageKit.io + account. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v1/files/metadata", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"url": url}, metadata_get_from_url_params.MetadataGetFromURLParams), + ), + cast_to=Metadata, + ) + + +class AsyncMetadataResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncMetadataResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncMetadataResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncMetadataResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncMetadataResourceWithStreamingResponse(self) + + async def get( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Metadata: + """ + You can programmatically get image EXIF, pHash, and other metadata for uploaded + files in the ImageKit.io media library using this API. + + You can also get the metadata in upload API response by passing `metadata` in + `responseFields` parameter. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/v1/files/{file_id}/metadata", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Metadata, + ) + + async def get_from_url( + self, + *, + url: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Metadata: + """ + Get image EXIF, pHash, and other metadata from ImageKit.io powered remote URL + using this API. + + Args: + url: Should be a valid file URL. It should be accessible using your ImageKit.io + account. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v1/files/metadata", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"url": url}, metadata_get_from_url_params.MetadataGetFromURLParams), + ), + cast_to=Metadata, + ) + + +class MetadataResourceWithRawResponse: + def __init__(self, metadata: MetadataResource) -> None: + self._metadata = metadata + + self.get = to_raw_response_wrapper( + metadata.get, + ) + self.get_from_url = to_raw_response_wrapper( + metadata.get_from_url, + ) + + +class AsyncMetadataResourceWithRawResponse: + def __init__(self, metadata: AsyncMetadataResource) -> None: + self._metadata = metadata + + self.get = async_to_raw_response_wrapper( + metadata.get, + ) + self.get_from_url = async_to_raw_response_wrapper( + metadata.get_from_url, + ) + + +class MetadataResourceWithStreamingResponse: + def __init__(self, metadata: MetadataResource) -> None: + self._metadata = metadata + + self.get = to_streamed_response_wrapper( + metadata.get, + ) + self.get_from_url = to_streamed_response_wrapper( + metadata.get_from_url, + ) + + +class AsyncMetadataResourceWithStreamingResponse: + def __init__(self, metadata: AsyncMetadataResource) -> None: + self._metadata = metadata + + self.get = async_to_streamed_response_wrapper( + metadata.get, + ) + self.get_from_url = async_to_streamed_response_wrapper( + metadata.get_from_url, + ) diff --git a/src/imagekitio/resources/files/versions.py b/src/imagekitio/resources/files/versions.py new file mode 100644 index 00000000..e6479e19 --- /dev/null +++ b/src/imagekitio/resources/files/versions.py @@ -0,0 +1,425 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, not_given +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.file import File +from ..._base_client import make_request_options +from ...types.files.version_list_response import VersionListResponse +from ...types.files.version_delete_response import VersionDeleteResponse + +__all__ = ["VersionsResource", "AsyncVersionsResource"] + + +class VersionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> VersionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return VersionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
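A sketch of the streaming wrapper described here, which defers reading the body until it is parsed; the `ImageKit` client class and environment-based credentials are assumptions, and the file ID is a placeholder:

from imagekitio import ImageKit  # client class name assumed

client = ImageKit()  # credentials assumed to be configured via environment variables

with client.files.versions.with_streaming_response.list("file_id_placeholder") as response:
    print(response.headers.get("content-type"))
    versions = response.parse()  # the body is read and parsed only at this point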
+ + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return VersionsResourceWithStreamingResponse(self) + + def list( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VersionListResponse: + """ + This API returns details of all versions of a file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/v1/files/{file_id}/versions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionListResponse, + ) + + def delete( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VersionDeleteResponse: + """This API deletes a non-current file version permanently. + + The API returns an + empty response. + + Note: If you want to delete all versions of a file, use the delete file API. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return self._delete( + f"/v1/files/{file_id}/versions/{version_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionDeleteResponse, + ) + + def get( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API returns an object with details or attributes of a file version. 
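A sketch covering the version endpoints in this file: list all versions, fetch one, and restore it as the current version; the `ImageKit` client class and environment-based credentials are assumptions, and the IDs are placeholders:

from imagekitio import ImageKit  # client class name assumed

client = ImageKit()  # credentials assumed to be configured via environment variables

versions = client.files.versions.list("file_id_placeholder")

# Fetch a specific non-current version, then promote it back to the current version.
version = client.files.versions.get("version_id_placeholder", file_id="file_id_placeholder")
restored = client.files.versions.restore("version_id_placeholder", file_id="file_id_placeholder")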
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return self._get( + f"/v1/files/{file_id}/versions/{version_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + def restore( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API restores a file version as the current file version. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return self._put( + f"/v1/files/{file_id}/versions/{version_id}/restore", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + +class AsyncVersionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncVersionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncVersionsResourceWithStreamingResponse(self) + + async def list( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VersionListResponse: + """ + This API returns details of all versions of a file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/v1/files/{file_id}/versions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionListResponse, + ) + + async def delete( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VersionDeleteResponse: + """This API deletes a non-current file version permanently. + + The API returns an + empty response. + + Note: If you want to delete all versions of a file, use the delete file API. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return await self._delete( + f"/v1/files/{file_id}/versions/{version_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionDeleteResponse, + ) + + async def get( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API returns an object with details or attributes of a file version. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return await self._get( + f"/v1/files/{file_id}/versions/{version_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + async def restore( + self, + version_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> File: + """ + This API restores a file version as the current file version. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + if not version_id: + raise ValueError(f"Expected a non-empty value for `version_id` but received {version_id!r}") + return await self._put( + f"/v1/files/{file_id}/versions/{version_id}/restore", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=File, + ) + + +class VersionsResourceWithRawResponse: + def __init__(self, versions: VersionsResource) -> None: + self._versions = versions + + self.list = to_raw_response_wrapper( + versions.list, + ) + self.delete = to_raw_response_wrapper( + versions.delete, + ) + self.get = to_raw_response_wrapper( + versions.get, + ) + self.restore = to_raw_response_wrapper( + versions.restore, + ) + + +class AsyncVersionsResourceWithRawResponse: + def __init__(self, versions: AsyncVersionsResource) -> None: + self._versions = versions + + self.list = async_to_raw_response_wrapper( + versions.list, + ) + self.delete = async_to_raw_response_wrapper( + versions.delete, + ) + self.get = async_to_raw_response_wrapper( + versions.get, + ) + self.restore = async_to_raw_response_wrapper( + versions.restore, + ) + + +class VersionsResourceWithStreamingResponse: + def __init__(self, versions: VersionsResource) -> None: + self._versions = versions + + self.list = to_streamed_response_wrapper( + versions.list, + ) + self.delete = to_streamed_response_wrapper( + versions.delete, + ) + self.get = to_streamed_response_wrapper( + versions.get, + ) + self.restore = to_streamed_response_wrapper( + versions.restore, + ) + + +class AsyncVersionsResourceWithStreamingResponse: + def __init__(self, versions: AsyncVersionsResource) -> None: + self._versions = versions + + self.list = async_to_streamed_response_wrapper( + versions.list, + ) + self.delete = async_to_streamed_response_wrapper( + versions.delete, + ) + self.get = async_to_streamed_response_wrapper( + versions.get, + ) + self.restore = async_to_streamed_response_wrapper( + versions.restore, + ) diff --git a/src/imagekitio/resources/folders/__init__.py b/src/imagekitio/resources/folders/__init__.py new file mode 100644 index 00000000..a88720f2 --- /dev/null +++ b/src/imagekitio/resources/folders/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .job import ( + JobResource, + AsyncJobResource, + JobResourceWithRawResponse, + AsyncJobResourceWithRawResponse, + JobResourceWithStreamingResponse, + AsyncJobResourceWithStreamingResponse, +) +from .folders import ( + FoldersResource, + AsyncFoldersResource, + FoldersResourceWithRawResponse, + AsyncFoldersResourceWithRawResponse, + FoldersResourceWithStreamingResponse, + AsyncFoldersResourceWithStreamingResponse, +) + +__all__ = [ + "JobResource", + "AsyncJobResource", + "JobResourceWithRawResponse", + "AsyncJobResourceWithRawResponse", + "JobResourceWithStreamingResponse", + "AsyncJobResourceWithStreamingResponse", + "FoldersResource", + "AsyncFoldersResource", + "FoldersResourceWithRawResponse", + "AsyncFoldersResourceWithRawResponse", + "FoldersResourceWithStreamingResponse", + "AsyncFoldersResourceWithStreamingResponse", +] diff --git a/src/imagekitio/resources/folders/folders.py b/src/imagekitio/resources/folders/folders.py new file mode 100644 index 00000000..d986fd7f --- /dev/null +++ b/src/imagekitio/resources/folders/folders.py @@ -0,0 +1,713 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .job import ( + JobResource, + AsyncJobResource, + JobResourceWithRawResponse, + AsyncJobResourceWithRawResponse, + JobResourceWithStreamingResponse, + AsyncJobResourceWithStreamingResponse, +) +from ...types import ( + folder_copy_params, + folder_move_params, + folder_create_params, + folder_delete_params, + folder_rename_params, +) +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.folder_copy_response import FolderCopyResponse +from ...types.folder_move_response import FolderMoveResponse +from ...types.folder_create_response import FolderCreateResponse +from ...types.folder_delete_response import FolderDeleteResponse +from ...types.folder_rename_response import FolderRenameResponse + +__all__ = ["FoldersResource", "AsyncFoldersResource"] + + +class FoldersResource(SyncAPIResource): + @cached_property + def job(self) -> JobResource: + return JobResource(self._client) + + @cached_property + def with_raw_response(self) -> FoldersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return FoldersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FoldersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return FoldersResourceWithStreamingResponse(self) + + def create( + self, + *, + folder_name: str, + parent_folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderCreateResponse: + """This will create a new folder. + + You can specify the folder name and location of + the parent folder where this new folder should be created. + + Args: + folder_name: The folder will be created with this name. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) will be replaced by an underscore i.e. + `_`. + + parent_folder_path: The folder where the new folder should be created, for root use `/` else the + path e.g. `containing/folder/`. + + Note: If any folder(s) is not present in the parentFolderPath parameter, it will + be automatically created. For example, if you pass `/product/images/summer`, + then `product`, `images`, and `summer` folders will be created if they don't + already exist. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/folder", + body=maybe_transform( + { + "folder_name": folder_name, + "parent_folder_path": parent_folder_path, + }, + folder_create_params.FolderCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderCreateResponse, + ) + + def delete( + self, + *, + folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderDeleteResponse: + """This will delete a folder and all its contents permanently. + + The API returns an + empty response. + + Args: + folder_path: Full path to the folder you want to delete. For example `/folder/to/delete/`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._delete( + "/v1/folder", + body=maybe_transform({"folder_path": folder_path}, folder_delete_params.FolderDeleteParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderDeleteResponse, + ) + + def copy( + self, + *, + destination_path: str, + source_folder_path: str, + include_versions: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderCopyResponse: + """This will copy one folder into another. 
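A sketch of the folder create and copy calls defined in this resource; the `ImageKit` client class and environment-based credentials are assumptions, and the paths are placeholders:

from imagekitio import ImageKit  # client class name assumed

client = ImageKit()  # credentials assumed to be configured via environment variables

# Missing parent folders in parent_folder_path are created automatically.
client.folders.create(folder_name="summer", parent_folder_path="/campaigns/")

# Per the docstrings here, folder copy is processed as a bulk job; the response identifies that job.
job = client.folders.copy(
    source_folder_path="/campaigns/summer/",
    destination_path="/archive/",
    include_versions=False,
)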
+ + The selected folder, its nested folders, + files, and their versions (in `includeVersions` is set to true) are copied in + this operation. Note: If any file at the destination has the same name as the + source file, then the source file and its versions will be appended to the + destination file version history. + + Args: + destination_path: Full path to the destination folder where you want to copy the source folder + into. + + source_folder_path: The full path to the source folder you want to copy. + + include_versions: Option to copy all versions of files that are nested inside the selected folder. + By default, only the current version of each file will be copied. When set to + true, all versions of each file will be copied. Default value - `false`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/bulkJobs/copyFolder", + body=maybe_transform( + { + "destination_path": destination_path, + "source_folder_path": source_folder_path, + "include_versions": include_versions, + }, + folder_copy_params.FolderCopyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderCopyResponse, + ) + + def move( + self, + *, + destination_path: str, + source_folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderMoveResponse: + """This will move one folder into another. + + The selected folder, its nested folders, + files, and their versions are moved in this operation. Note: If any file at the + destination has the same name as the source file, then the source file and its + versions will be appended to the destination file version history. + + Args: + destination_path: Full path to the destination folder where you want to move the source folder + into. + + source_folder_path: The full path to the source folder you want to move. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/bulkJobs/moveFolder", + body=maybe_transform( + { + "destination_path": destination_path, + "source_folder_path": source_folder_path, + }, + folder_move_params.FolderMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderMoveResponse, + ) + + def rename( + self, + *, + folder_path: str, + new_folder_name: str, + purge_cache: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderRenameResponse: + """This API allows you to rename an existing folder. + + The folder and all its nested + assets and sub-folders will remain unchanged, but their paths will be updated to + reflect the new folder name. + + Args: + folder_path: The full path to the folder you want to rename. + + new_folder_name: The new name for the folder. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) and `-` will be replaced by an + underscore i.e. `_`. + + purge_cache: Option to purge cache for the old nested files and their versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove the cached content of the old nested files and their versions. There will + only be one purge request for all the nested files, which will be counted + against your monthly purge quota. + + Note: A purge cache request will be issued against + `https://ik.imagekit.io/old/folder/path*` (with a wildcard at the end). This + will remove all nested files, their versions' URLs, and any transformations made + using query parameters on these files or their versions. However, the cache for + file transformations made using path parameters will persist. You can purge them + using the purge API. For more details, refer to the purge API documentation. + + Default value - `false` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/bulkJobs/renameFolder", + body=maybe_transform( + { + "folder_path": folder_path, + "new_folder_name": new_folder_name, + "purge_cache": purge_cache, + }, + folder_rename_params.FolderRenameParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderRenameResponse, + ) + + +class AsyncFoldersResource(AsyncAPIResource): + @cached_property + def job(self) -> AsyncJobResource: + return AsyncJobResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFoldersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncFoldersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFoldersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncFoldersResourceWithStreamingResponse(self) + + async def create( + self, + *, + folder_name: str, + parent_folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderCreateResponse: + """This will create a new folder. + + You can specify the folder name and location of + the parent folder where this new folder should be created. + + Args: + folder_name: The folder will be created with this name. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) will be replaced by an underscore i.e. + `_`. + + parent_folder_path: The folder where the new folder should be created, for root use `/` else the + path e.g. `containing/folder/`. + + Note: If any folder(s) is not present in the parentFolderPath parameter, it will + be automatically created. For example, if you pass `/product/images/summer`, + then `product`, `images`, and `summer` folders will be created if they don't + already exist. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/folder", + body=await async_maybe_transform( + { + "folder_name": folder_name, + "parent_folder_path": parent_folder_path, + }, + folder_create_params.FolderCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderCreateResponse, + ) + + async def delete( + self, + *, + folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderDeleteResponse: + """This will delete a folder and all its contents permanently. + + The API returns an + empty response. + + Args: + folder_path: Full path to the folder you want to delete. For example `/folder/to/delete/`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._delete( + "/v1/folder", + body=await async_maybe_transform({"folder_path": folder_path}, folder_delete_params.FolderDeleteParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderDeleteResponse, + ) + + async def copy( + self, + *, + destination_path: str, + source_folder_path: str, + include_versions: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderCopyResponse: + """This will copy one folder into another. 
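The async folder operations mirror the synchronous ones above; a sketch for move and rename, with the `AsyncImageKit` client class and environment-based credentials as assumptions and placeholder paths:

import asyncio

from imagekitio import AsyncImageKit  # client class name assumed

async def main() -> None:
    client = AsyncImageKit()  # credentials assumed to be configured via environment variables

    await client.folders.move(
        source_folder_path="/campaigns/summer/",
        destination_path="/archive/",
    )
    await client.folders.rename(
        folder_path="/archive/summer/",
        new_folder_name="summer-2024",
        purge_cache=False,
    )

asyncio.run(main())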
+ + The selected folder, its nested folders, + files, and their versions (if `includeVersions` is set to true) are copied in + this operation. Note: If any file at the destination has the same name as the + source file, then the source file and its versions will be appended to the + destination file version history. + + Args: + destination_path: Full path to the destination folder where you want to copy the source folder + into. + + source_folder_path: The full path to the source folder you want to copy. + + include_versions: Option to copy all versions of files that are nested inside the selected folder. + By default, only the current version of each file will be copied. When set to + true, all versions of each file will be copied. Default value - `false`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/bulkJobs/copyFolder", + body=await async_maybe_transform( + { + "destination_path": destination_path, + "source_folder_path": source_folder_path, + "include_versions": include_versions, + }, + folder_copy_params.FolderCopyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderCopyResponse, + ) + + async def move( + self, + *, + destination_path: str, + source_folder_path: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderMoveResponse: + """This will move one folder into another. + + The selected folder, its nested folders, + files, and their versions are moved in this operation. Note: If any file at the + destination has the same name as the source file, then the source file and its + versions will be appended to the destination file version history. + + Args: + destination_path: Full path to the destination folder where you want to move the source folder + into. + + source_folder_path: The full path to the source folder you want to move. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/bulkJobs/moveFolder", + body=await async_maybe_transform( + { + "destination_path": destination_path, + "source_folder_path": source_folder_path, + }, + folder_move_params.FolderMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderMoveResponse, + ) + + async def rename( + self, + *, + folder_path: str, + new_folder_name: str, + purge_cache: bool | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> FolderRenameResponse: + """This API allows you to rename an existing folder. + + The folder and all its nested + assets and sub-folders will remain unchanged, but their paths will be updated to + reflect the new folder name. + + Args: + folder_path: The full path to the folder you want to rename. + + new_folder_name: The new name for the folder. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) and `-` will be replaced by an + underscore i.e. `_`. + + purge_cache: Option to purge cache for the old nested files and their versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove the cached content of the old nested files and their versions. There will + only be one purge request for all the nested files, which will be counted + against your monthly purge quota. + + Note: A purge cache request will be issued against + `https://ik.imagekit.io/old/folder/path*` (with a wildcard at the end). This + will remove all nested files, their versions' URLs, and any transformations made + using query parameters on these files or their versions. However, the cache for + file transformations made using path parameters will persist. You can purge them + using the purge API. For more details, refer to the purge API documentation. + + Default value - `false` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/bulkJobs/renameFolder", + body=await async_maybe_transform( + { + "folder_path": folder_path, + "new_folder_name": new_folder_name, + "purge_cache": purge_cache, + }, + folder_rename_params.FolderRenameParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FolderRenameResponse, + ) + + +class FoldersResourceWithRawResponse: + def __init__(self, folders: FoldersResource) -> None: + self._folders = folders + + self.create = to_raw_response_wrapper( + folders.create, + ) + self.delete = to_raw_response_wrapper( + folders.delete, + ) + self.copy = to_raw_response_wrapper( + folders.copy, + ) + self.move = to_raw_response_wrapper( + folders.move, + ) + self.rename = to_raw_response_wrapper( + folders.rename, + ) + + @cached_property + def job(self) -> JobResourceWithRawResponse: + return JobResourceWithRawResponse(self._folders.job) + + +class AsyncFoldersResourceWithRawResponse: + def __init__(self, folders: AsyncFoldersResource) -> None: + self._folders = folders + + self.create = async_to_raw_response_wrapper( + folders.create, + ) + self.delete = async_to_raw_response_wrapper( + folders.delete, + ) + self.copy = async_to_raw_response_wrapper( + folders.copy, + ) + self.move = async_to_raw_response_wrapper( + folders.move, + ) + self.rename = async_to_raw_response_wrapper( + folders.rename, + ) + + @cached_property + def job(self) -> AsyncJobResourceWithRawResponse: + return AsyncJobResourceWithRawResponse(self._folders.job) + + +class FoldersResourceWithStreamingResponse: + def __init__(self, folders: FoldersResource) -> None: + self._folders = folders + + self.create = 
to_streamed_response_wrapper( + folders.create, + ) + self.delete = to_streamed_response_wrapper( + folders.delete, + ) + self.copy = to_streamed_response_wrapper( + folders.copy, + ) + self.move = to_streamed_response_wrapper( + folders.move, + ) + self.rename = to_streamed_response_wrapper( + folders.rename, + ) + + @cached_property + def job(self) -> JobResourceWithStreamingResponse: + return JobResourceWithStreamingResponse(self._folders.job) + + +class AsyncFoldersResourceWithStreamingResponse: + def __init__(self, folders: AsyncFoldersResource) -> None: + self._folders = folders + + self.create = async_to_streamed_response_wrapper( + folders.create, + ) + self.delete = async_to_streamed_response_wrapper( + folders.delete, + ) + self.copy = async_to_streamed_response_wrapper( + folders.copy, + ) + self.move = async_to_streamed_response_wrapper( + folders.move, + ) + self.rename = async_to_streamed_response_wrapper( + folders.rename, + ) + + @cached_property + def job(self) -> AsyncJobResourceWithStreamingResponse: + return AsyncJobResourceWithStreamingResponse(self._folders.job) diff --git a/src/imagekitio/resources/folders/job.py b/src/imagekitio/resources/folders/job.py new file mode 100644 index 00000000..5ccbd3bb --- /dev/null +++ b/src/imagekitio/resources/folders/job.py @@ -0,0 +1,163 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Query, Headers, NotGiven, not_given +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.folders.job_get_response import JobGetResponse + +__all__ = ["JobResource", "AsyncJobResource"] + + +class JobResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> JobResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return JobResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> JobResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return JobResourceWithStreamingResponse(self) + + def get( + self, + job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> JobGetResponse: + """ + This API returns the status of a bulk job like copy and move folder operations. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") + return self._get( + f"/v1/bulkJobs/{job_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=JobGetResponse, + ) + + +class AsyncJobResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncJobResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#accessing-raw-response-data-eg-headers + """ + return AsyncJobResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncJobResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/imagekit-developer/imagekit-python#with_streaming_response + """ + return AsyncJobResourceWithStreamingResponse(self) + + async def get( + self, + job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> JobGetResponse: + """ + This API returns the status of a bulk job like copy and move folder operations. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") + return await self._get( + f"/v1/bulkJobs/{job_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=JobGetResponse, + ) + + +class JobResourceWithRawResponse: + def __init__(self, job: JobResource) -> None: + self._job = job + + self.get = to_raw_response_wrapper( + job.get, + ) + + +class AsyncJobResourceWithRawResponse: + def __init__(self, job: AsyncJobResource) -> None: + self._job = job + + self.get = async_to_raw_response_wrapper( + job.get, + ) + + +class JobResourceWithStreamingResponse: + def __init__(self, job: JobResource) -> None: + self._job = job + + self.get = to_streamed_response_wrapper( + job.get, + ) + + +class AsyncJobResourceWithStreamingResponse: + def __init__(self, job: AsyncJobResource) -> None: + self._job = job + + self.get = async_to_streamed_response_wrapper( + job.get, + ) diff --git a/src/imagekitio/resources/webhooks.py b/src/imagekitio/resources/webhooks.py new file mode 100644 index 00000000..0ca75b5c --- /dev/null +++ b/src/imagekitio/resources/webhooks.py @@ -0,0 +1,101 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import json +import base64 +from typing import Mapping, cast + +from .._models import construct_type +from .._resource import SyncAPIResource, AsyncAPIResource +from .._exceptions import ImageKitError +from ..types.unwrap_webhook_event import UnwrapWebhookEvent +from ..types.unsafe_unwrap_webhook_event import UnsafeUnwrapWebhookEvent + +__all__ = ["WebhooksResource", "AsyncWebhooksResource"] + + +class WebhooksResource(SyncAPIResource): + def unsafe_unwrap(self, payload: str) -> UnsafeUnwrapWebhookEvent: + return cast( + UnsafeUnwrapWebhookEvent, + construct_type( + type_=UnsafeUnwrapWebhookEvent, + value=json.loads(payload), + ), + ) + + def unwrap(self, payload: str, *, headers: Mapping[str, str], key: str | bytes | None = None) -> UnwrapWebhookEvent: + try: + from standardwebhooks import Webhook + except ImportError as exc: + raise ImageKitError("You need to install `imagekitio[webhooks]` to use this method") from exc + + if key is None: + key = self._client.webhook_secret + if key is None: + raise ValueError( + "Cannot verify a webhook without a key on either the client's webhook_secret or passed in as an argument" + ) + + if not isinstance(headers, dict): + headers = dict(headers) + + if isinstance(key, str): + key_bytes = key.encode("utf-8") + else: + key_bytes = key + encoded_key = base64.b64encode(key_bytes).decode("ascii") + + Webhook(encoded_key).verify(payload, headers) + + return cast( + UnwrapWebhookEvent, + construct_type( + type_=UnwrapWebhookEvent, + value=json.loads(payload), + ), + ) + + +class AsyncWebhooksResource(AsyncAPIResource): + def unsafe_unwrap(self, payload: str) -> UnsafeUnwrapWebhookEvent: + return cast( + UnsafeUnwrapWebhookEvent, + construct_type( + type_=UnsafeUnwrapWebhookEvent, + value=json.loads(payload), + ), + ) + + def unwrap(self, payload: str, *, headers: Mapping[str, str], key: str | bytes | None = None) -> UnwrapWebhookEvent: + try: + from standardwebhooks import Webhook + except ImportError as exc: + raise ImageKitError("You need to install `imagekitio[webhooks]` to use this method") from exc + + if key is None: + key = self._client.webhook_secret + if key is None: + raise ValueError( + "Cannot verify a webhook without a key on either the client's webhook_secret or passed in as an argument" + ) + + if not isinstance(headers, dict): + headers = dict(headers) + + if isinstance(key, str): + key_bytes = key.encode("utf-8") + else: + key_bytes = key + encoded_key = base64.b64encode(key_bytes).decode("ascii") + + Webhook(encoded_key).verify(payload, headers) + + return cast( + UnwrapWebhookEvent, + construct_type( + type_=UnwrapWebhookEvent, + value=json.loads(payload), + ), + ) diff --git a/src/imagekitio/types/__init__.py b/src/imagekitio/types/__init__.py new file mode 100644 index 00000000..dfbbb78f --- /dev/null +++ b/src/imagekitio/types/__init__.py @@ -0,0 +1,83 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from . import shared +from .. 
import _compat +from .file import File as File +from .folder import Folder as Folder +from .shared import ( + Overlay as Overlay, + Extensions as Extensions, + SrcOptions as SrcOptions, + BaseOverlay as BaseOverlay, + TextOverlay as TextOverlay, + ImageOverlay as ImageOverlay, + VideoOverlay as VideoOverlay, + OverlayTiming as OverlayTiming, + Transformation as Transformation, + OverlayPosition as OverlayPosition, + SubtitleOverlay as SubtitleOverlay, + SolidColorOverlay as SolidColorOverlay, + StreamingResolution as StreamingResolution, + TransformationPosition as TransformationPosition, + GetImageAttributesOptions as GetImageAttributesOptions, + ResponsiveImageAttributes as ResponsiveImageAttributes, + TextOverlayTransformation as TextOverlayTransformation, + SubtitleOverlayTransformation as SubtitleOverlayTransformation, + SolidColorOverlayTransformation as SolidColorOverlayTransformation, +) +from .metadata import Metadata as Metadata +from .file_copy_params import FileCopyParams as FileCopyParams +from .file_move_params import FileMoveParams as FileMoveParams +from .asset_list_params import AssetListParams as AssetListParams +from .base_webhook_event import BaseWebhookEvent as BaseWebhookEvent +from .file_copy_response import FileCopyResponse as FileCopyResponse +from .file_move_response import FileMoveResponse as FileMoveResponse +from .file_rename_params import FileRenameParams as FileRenameParams +from .file_update_params import FileUpdateParams as FileUpdateParams +from .file_upload_params import FileUploadParams as FileUploadParams +from .folder_copy_params import FolderCopyParams as FolderCopyParams +from .folder_move_params import FolderMoveParams as FolderMoveParams +from .asset_list_response import AssetListResponse as AssetListResponse +from .dummy_create_params import DummyCreateParams as DummyCreateParams +from .file_rename_response import FileRenameResponse as FileRenameResponse +from .file_update_response import FileUpdateResponse as FileUpdateResponse +from .file_upload_response import FileUploadResponse as FileUploadResponse +from .folder_copy_response import FolderCopyResponse as FolderCopyResponse +from .folder_create_params import FolderCreateParams as FolderCreateParams +from .folder_delete_params import FolderDeleteParams as FolderDeleteParams +from .folder_move_response import FolderMoveResponse as FolderMoveResponse +from .folder_rename_params import FolderRenameParams as FolderRenameParams +from .unwrap_webhook_event import UnwrapWebhookEvent as UnwrapWebhookEvent +from .custom_metadata_field import CustomMetadataField as CustomMetadataField +from .folder_create_response import FolderCreateResponse as FolderCreateResponse +from .folder_delete_response import FolderDeleteResponse as FolderDeleteResponse +from .folder_rename_response import FolderRenameResponse as FolderRenameResponse +from .update_file_request_param import UpdateFileRequestParam as UpdateFileRequestParam +from .unsafe_unwrap_webhook_event import UnsafeUnwrapWebhookEvent as UnsafeUnwrapWebhookEvent +from .upload_pre_transform_error_event import UploadPreTransformErrorEvent as UploadPreTransformErrorEvent +from .video_transformation_error_event import VideoTransformationErrorEvent as VideoTransformationErrorEvent +from .video_transformation_ready_event import VideoTransformationReadyEvent as VideoTransformationReadyEvent +from .custom_metadata_field_list_params import CustomMetadataFieldListParams as CustomMetadataFieldListParams +from .upload_post_transform_error_event import 
UploadPostTransformErrorEvent as UploadPostTransformErrorEvent +from .upload_pre_transform_success_event import UploadPreTransformSuccessEvent as UploadPreTransformSuccessEvent +from .custom_metadata_field_create_params import CustomMetadataFieldCreateParams as CustomMetadataFieldCreateParams +from .custom_metadata_field_list_response import CustomMetadataFieldListResponse as CustomMetadataFieldListResponse +from .custom_metadata_field_update_params import CustomMetadataFieldUpdateParams as CustomMetadataFieldUpdateParams +from .upload_post_transform_success_event import UploadPostTransformSuccessEvent as UploadPostTransformSuccessEvent +from .video_transformation_accepted_event import VideoTransformationAcceptedEvent as VideoTransformationAcceptedEvent +from .custom_metadata_field_delete_response import ( + CustomMetadataFieldDeleteResponse as CustomMetadataFieldDeleteResponse, +) + +# Rebuild cyclical models only after all modules are imported. +# This ensures that, when building the deferred (due to cyclical references) model schema, +# Pydantic can resolve the necessary references. +# See: https://github.com/pydantic/pydantic/issues/11250 for more context. +if _compat.PYDANTIC_V1: + shared.src_options.SrcOptions.update_forward_refs() # type: ignore + shared.transformation.Transformation.update_forward_refs() # type: ignore +else: + shared.src_options.SrcOptions.model_rebuild(_parent_namespace_depth=0) + shared.transformation.Transformation.model_rebuild(_parent_namespace_depth=0) diff --git a/src/imagekitio/types/accounts/__init__.py b/src/imagekitio/types/accounts/__init__.py new file mode 100644 index 00000000..3d713dbe --- /dev/null +++ b/src/imagekitio/types/accounts/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .origin_response import OriginResponse as OriginResponse +from .usage_get_params import UsageGetParams as UsageGetParams +from .usage_get_response import UsageGetResponse as UsageGetResponse +from .origin_create_params import OriginCreateParams as OriginCreateParams +from .origin_list_response import OriginListResponse as OriginListResponse +from .origin_request_param import OriginRequestParam as OriginRequestParam +from .origin_update_params import OriginUpdateParams as OriginUpdateParams +from .url_endpoint_response import URLEndpointResponse as URLEndpointResponse +from .url_endpoint_create_params import URLEndpointCreateParams as URLEndpointCreateParams +from .url_endpoint_list_response import URLEndpointListResponse as URLEndpointListResponse +from .url_endpoint_update_params import URLEndpointUpdateParams as URLEndpointUpdateParams diff --git a/src/imagekitio/types/accounts/origin_create_params.py b/src/imagekitio/types/accounts/origin_create_params.py new file mode 100644 index 00000000..7489a1d9 --- /dev/null +++ b/src/imagekitio/types/accounts/origin_create_params.py @@ -0,0 +1,208 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
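To ground the webhook helpers above, a small sketch of verifying an incoming event. It assumes the client is exposed as `ImageKit`, that the webhooks resource is reachable as `client.webhooks`, and that the optional `imagekitio[webhooks]` extra (which provides `standardwebhooks`) is installed; the event type string is illustrative:

from typing import Mapping

from imagekitio import ImageKit  # assumed client entry point

client = ImageKit(webhook_secret="whsec_...")  # or pass key=... directly to unwrap()

def handle_webhook(raw_body: str, headers: Mapping[str, str]) -> None:
    # unwrap() verifies the standard-webhooks signature before parsing the payload;
    # unsafe_unwrap() parses without verification and is best kept out of production paths.
    event = client.webhooks.unwrap(raw_body, headers=headers)
    if event.type == "video.transformation.ready":  # type string is illustrative
        print("video ready:", event.id)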
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._utils import PropertyInfo + +__all__ = [ + "OriginCreateParams", + "S3", + "S3Compatible", + "CloudinaryBackup", + "WebFolder", + "WebProxy", + "GoogleCloudStorageGcs", + "AzureBlobStorage", + "AkeneoPim", +] + + +class S3(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class S3Compatible(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + endpoint: Required[str] + """Custom S3-compatible endpoint.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3_COMPATIBLE"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + s3_force_path_style: Annotated[bool, PropertyInfo(alias="s3ForcePathStyle")] + """Use path-style S3 URLs?""" + + +class CloudinaryBackup(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["CLOUDINARY_BACKUP"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class WebFolder(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Root URL for the web folder origin.""" + + name: Required[str] + """Display name of the origin.""" + + type: Required[Literal["WEB_FOLDER"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + forward_host_header_to_origin: Annotated[bool, PropertyInfo(alias="forwardHostHeaderToOrigin")] + """Forward the Host header to origin?""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class WebProxy(TypedDict, 
total=False): + name: Required[str] + """Display name of the origin.""" + + type: Required[Literal["WEB_PROXY"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class GoogleCloudStorageGcs(TypedDict, total=False): + bucket: Required[str] + + client_email: Required[Annotated[str, PropertyInfo(alias="clientEmail")]] + + name: Required[str] + """Display name of the origin.""" + + private_key: Required[Annotated[str, PropertyInfo(alias="privateKey")]] + + type: Required[Literal["GCS"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AzureBlobStorage(TypedDict, total=False): + account_name: Required[Annotated[str, PropertyInfo(alias="accountName")]] + + container: Required[str] + + name: Required[str] + """Display name of the origin.""" + + sas_token: Required[Annotated[str, PropertyInfo(alias="sasToken")]] + + type: Required[Literal["AZURE_BLOB"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AkeneoPim(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Akeneo instance base URL.""" + + client_id: Required[Annotated[str, PropertyInfo(alias="clientId")]] + """Akeneo API client ID.""" + + client_secret: Required[Annotated[str, PropertyInfo(alias="clientSecret")]] + """Akeneo API client secret.""" + + name: Required[str] + """Display name of the origin.""" + + password: Required[str] + """Akeneo API password.""" + + type: Required[Literal["AKENEO_PIM"]] + + username: Required[str] + """Akeneo API username.""" + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +OriginCreateParams: TypeAlias = Union[ + S3, S3Compatible, CloudinaryBackup, WebFolder, WebProxy, GoogleCloudStorageGcs, AzureBlobStorage, AkeneoPim +] diff --git a/src/imagekitio/types/accounts/origin_list_response.py b/src/imagekitio/types/accounts/origin_list_response.py new file mode 100644 index 00000000..b0e09315 --- /dev/null +++ b/src/imagekitio/types/accounts/origin_list_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
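Because `OriginCreateParams` is a union of `TypedDict`s discriminated by `type`, request payloads are plain dictionaries written with the snake_case keys defined above; the SDK's transform step applies the camelCase aliases. The resource call in the trailing comment is a guess at the method path, since only the params module appears in this excerpt:

from imagekitio.types.accounts import origin_create_params

s3_origin: origin_create_params.S3 = {
    "type": "S3",
    "name": "product-images",
    "bucket": "my-bucket",
    "access_key": "AKIA...",           # placeholder credentials
    "secret_key": "...",
    "prefix": "images/",               # optional: path prefix inside the bucket
    "include_canonical_header": False,
}

# Hypothetical call -- the origins resource itself is not part of this excerpt:
# origin = client.accounts.origins.create(**s3_origin)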
+ +from typing import List +from typing_extensions import TypeAlias + +from .origin_response import OriginResponse + +__all__ = ["OriginListResponse"] + +OriginListResponse: TypeAlias = List[OriginResponse] diff --git a/src/imagekitio/types/accounts/origin_request_param.py b/src/imagekitio/types/accounts/origin_request_param.py new file mode 100644 index 00000000..a2864ad4 --- /dev/null +++ b/src/imagekitio/types/accounts/origin_request_param.py @@ -0,0 +1,208 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._utils import PropertyInfo + +__all__ = [ + "OriginRequestParam", + "S3", + "S3Compatible", + "CloudinaryBackup", + "WebFolder", + "WebProxy", + "Gcs", + "AzureBlob", + "AkeneoPim", +] + + +class S3(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class S3Compatible(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + endpoint: Required[str] + """Custom S3-compatible endpoint.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3_COMPATIBLE"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + s3_force_path_style: Annotated[bool, PropertyInfo(alias="s3ForcePathStyle")] + """Use path-style S3 URLs?""" + + +class CloudinaryBackup(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["CLOUDINARY_BACKUP"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class WebFolder(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Root URL for the web folder origin.""" + + name: Required[str] + 
"""Display name of the origin.""" + + type: Required[Literal["WEB_FOLDER"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + forward_host_header_to_origin: Annotated[bool, PropertyInfo(alias="forwardHostHeaderToOrigin")] + """Forward the Host header to origin?""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class WebProxy(TypedDict, total=False): + name: Required[str] + """Display name of the origin.""" + + type: Required[Literal["WEB_PROXY"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class Gcs(TypedDict, total=False): + bucket: Required[str] + + client_email: Required[Annotated[str, PropertyInfo(alias="clientEmail")]] + + name: Required[str] + """Display name of the origin.""" + + private_key: Required[Annotated[str, PropertyInfo(alias="privateKey")]] + + type: Required[Literal["GCS"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AzureBlob(TypedDict, total=False): + account_name: Required[Annotated[str, PropertyInfo(alias="accountName")]] + + container: Required[str] + + name: Required[str] + """Display name of the origin.""" + + sas_token: Required[Annotated[str, PropertyInfo(alias="sasToken")]] + + type: Required[Literal["AZURE_BLOB"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AkeneoPim(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Akeneo instance base URL.""" + + client_id: Required[Annotated[str, PropertyInfo(alias="clientId")]] + """Akeneo API client ID.""" + + client_secret: Required[Annotated[str, PropertyInfo(alias="clientSecret")]] + """Akeneo API client secret.""" + + name: Required[str] + """Display name of the origin.""" + + password: Required[str] + """Akeneo API password.""" + + type: Required[Literal["AKENEO_PIM"]] + + username: Required[str] + """Akeneo API username.""" + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +OriginRequestParam: TypeAlias = Union[ + S3, S3Compatible, CloudinaryBackup, WebFolder, WebProxy, Gcs, AzureBlob, AkeneoPim +] diff --git a/src/imagekitio/types/accounts/origin_response.py b/src/imagekitio/types/accounts/origin_response.py new file mode 100644 index 00000000..d4374470 --- /dev/null +++ b/src/imagekitio/types/accounts/origin_response.py @@ -0,0 +1,224 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from pydantic import Field as FieldInfo + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "OriginResponse", + "S3", + "S3Compatible", + "CloudinaryBackup", + "WebFolder", + "WebProxy", + "Gcs", + "AzureBlob", + "AkeneoPim", +] + + +class S3(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + bucket: str + """S3 bucket name.""" + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + prefix: str + """Path prefix inside the bucket.""" + + type: Literal["S3"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class S3Compatible(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + bucket: str + """S3 bucket name.""" + + endpoint: str + """Custom S3-compatible endpoint.""" + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + prefix: str + """Path prefix inside the bucket.""" + + s3_force_path_style: bool = FieldInfo(alias="s3ForcePathStyle") + """Use path-style S3 URLs?""" + + type: Literal["S3_COMPATIBLE"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class CloudinaryBackup(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + bucket: str + """S3 bucket name.""" + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + prefix: str + """Path prefix inside the bucket.""" + + type: Literal["CLOUDINARY_BACKUP"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class WebFolder(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + base_url: str = FieldInfo(alias="baseUrl") + """Root URL for the web folder origin.""" + + forward_host_header_to_origin: bool = FieldInfo(alias="forwardHostHeaderToOrigin") + """Forward the Host header to origin?""" + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + type: Literal["WEB_FOLDER"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class WebProxy(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. 
+ """ + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + type: Literal["WEB_PROXY"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class Gcs(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + bucket: str + + client_email: str = FieldInfo(alias="clientEmail") + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + prefix: str + + type: Literal["GCS"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class AzureBlob(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + account_name: str = FieldInfo(alias="accountName") + + container: str + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + prefix: str + + type: Literal["AZURE_BLOB"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +class AkeneoPim(BaseModel): + id: str + """Unique identifier for the origin. + + This is generated by ImageKit when you create a new origin. + """ + + base_url: str = FieldInfo(alias="baseUrl") + """Akeneo instance base URL.""" + + include_canonical_header: bool = FieldInfo(alias="includeCanonicalHeader") + """Whether to send a Canonical header.""" + + name: str + """Display name of the origin.""" + + type: Literal["AKENEO_PIM"] + + base_url_for_canonical_header: Optional[str] = FieldInfo(alias="baseUrlForCanonicalHeader", default=None) + """URL used in the Canonical header (if enabled).""" + + +OriginResponse: TypeAlias = Annotated[ + Union[S3, S3Compatible, CloudinaryBackup, WebFolder, WebProxy, Gcs, AzureBlob, AkeneoPim], + PropertyInfo(discriminator="type"), +] diff --git a/src/imagekitio/types/accounts/origin_update_params.py b/src/imagekitio/types/accounts/origin_update_params.py new file mode 100644 index 00000000..a7b39fba --- /dev/null +++ b/src/imagekitio/types/accounts/origin_update_params.py @@ -0,0 +1,208 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._utils import PropertyInfo + +__all__ = [ + "OriginUpdateParams", + "S3", + "S3Compatible", + "CloudinaryBackup", + "WebFolder", + "WebProxy", + "GoogleCloudStorageGcs", + "AzureBlobStorage", + "AkeneoPim", +] + + +class S3(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class S3Compatible(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + endpoint: Required[str] + """Custom S3-compatible endpoint.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["S3_COMPATIBLE"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + s3_force_path_style: Annotated[bool, PropertyInfo(alias="s3ForcePathStyle")] + """Use path-style S3 URLs?""" + + +class CloudinaryBackup(TypedDict, total=False): + access_key: Required[Annotated[str, PropertyInfo(alias="accessKey")]] + """Access key for the bucket.""" + + bucket: Required[str] + """S3 bucket name.""" + + name: Required[str] + """Display name of the origin.""" + + secret_key: Required[Annotated[str, PropertyInfo(alias="secretKey")]] + """Secret key for the bucket.""" + + type: Required[Literal["CLOUDINARY_BACKUP"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + """Path prefix inside the bucket.""" + + +class WebFolder(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Root URL for the web folder origin.""" + + name: Required[str] + """Display name of the origin.""" + + type: Required[Literal["WEB_FOLDER"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + forward_host_header_to_origin: Annotated[bool, PropertyInfo(alias="forwardHostHeaderToOrigin")] + """Forward the Host header to origin?""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class WebProxy(TypedDict, 
total=False): + name: Required[str] + """Display name of the origin.""" + + type: Required[Literal["WEB_PROXY"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +class GoogleCloudStorageGcs(TypedDict, total=False): + bucket: Required[str] + + client_email: Required[Annotated[str, PropertyInfo(alias="clientEmail")]] + + name: Required[str] + """Display name of the origin.""" + + private_key: Required[Annotated[str, PropertyInfo(alias="privateKey")]] + + type: Required[Literal["GCS"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AzureBlobStorage(TypedDict, total=False): + account_name: Required[Annotated[str, PropertyInfo(alias="accountName")]] + + container: Required[str] + + name: Required[str] + """Display name of the origin.""" + + sas_token: Required[Annotated[str, PropertyInfo(alias="sasToken")]] + + type: Required[Literal["AZURE_BLOB"]] + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + prefix: str + + +class AkeneoPim(TypedDict, total=False): + base_url: Required[Annotated[str, PropertyInfo(alias="baseUrl")]] + """Akeneo instance base URL.""" + + client_id: Required[Annotated[str, PropertyInfo(alias="clientId")]] + """Akeneo API client ID.""" + + client_secret: Required[Annotated[str, PropertyInfo(alias="clientSecret")]] + """Akeneo API client secret.""" + + name: Required[str] + """Display name of the origin.""" + + password: Required[str] + """Akeneo API password.""" + + type: Required[Literal["AKENEO_PIM"]] + + username: Required[str] + """Akeneo API username.""" + + base_url_for_canonical_header: Annotated[str, PropertyInfo(alias="baseUrlForCanonicalHeader")] + """URL used in the Canonical header (if enabled).""" + + include_canonical_header: Annotated[bool, PropertyInfo(alias="includeCanonicalHeader")] + """Whether to send a Canonical header.""" + + +OriginUpdateParams: TypeAlias = Union[ + S3, S3Compatible, CloudinaryBackup, WebFolder, WebProxy, GoogleCloudStorageGcs, AzureBlobStorage, AkeneoPim +] diff --git a/src/imagekitio/types/accounts/url_endpoint_create_params.py b/src/imagekitio/types/accounts/url_endpoint_create_params.py new file mode 100644 index 00000000..1c4653f5 --- /dev/null +++ b/src/imagekitio/types/accounts/url_endpoint_create_params.py @@ -0,0 +1,50 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
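`OriginUpdateParams` repeats every `Required` field from the create variants, which suggests updates replace the full origin definition rather than patching individual fields (an inference from the schema, not something this diff states). A sketch; the update call in the comment is hypothetical:

from imagekitio.types.accounts import origin_update_params

# Re-send the complete origin definition when updating.
updated: origin_update_params.WebFolder = {
    "type": "WEB_FOLDER",
    "name": "legacy-site",
    "base_url": "https://assets.example.com",
    "forward_host_header_to_origin": False,  # optional flag
}

# Hypothetical call path, not shown in this excerpt:
# client.accounts.origins.update(origin_id, **updated)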
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["URLEndpointCreateParams", "URLRewriter", "URLRewriterCloudinary", "URLRewriterImgix", "URLRewriterAkamai"] + + +class URLEndpointCreateParams(TypedDict, total=False): + description: Required[str] + """Description of the URL endpoint.""" + + origins: SequenceNotStr[str] + """ + Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + """ + + url_prefix: Annotated[str, PropertyInfo(alias="urlPrefix")] + """ + Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + """ + + url_rewriter: Annotated[URLRewriter, PropertyInfo(alias="urlRewriter")] + """Configuration for third-party URL rewriting.""" + + +class URLRewriterCloudinary(TypedDict, total=False): + type: Required[Literal["CLOUDINARY"]] + + preserve_asset_delivery_types: Annotated[bool, PropertyInfo(alias="preserveAssetDeliveryTypes")] + """Whether to preserve `/` in the rewritten URL.""" + + +class URLRewriterImgix(TypedDict, total=False): + type: Required[Literal["IMGIX"]] + + +class URLRewriterAkamai(TypedDict, total=False): + type: Required[Literal["AKAMAI"]] + + +URLRewriter: TypeAlias = Union[URLRewriterCloudinary, URLRewriterImgix, URLRewriterAkamai] diff --git a/src/imagekitio/types/accounts/url_endpoint_list_response.py b/src/imagekitio/types/accounts/url_endpoint_list_response.py new file mode 100644 index 00000000..a51a8df0 --- /dev/null +++ b/src/imagekitio/types/accounts/url_endpoint_list_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from .url_endpoint_response import URLEndpointResponse + +__all__ = ["URLEndpointListResponse"] + +URLEndpointListResponse: TypeAlias = List[URLEndpointResponse] diff --git a/src/imagekitio/types/accounts/url_endpoint_response.py b/src/imagekitio/types/accounts/url_endpoint_response.py new file mode 100644 index 00000000..c2bcadc7 --- /dev/null +++ b/src/imagekitio/types/accounts/url_endpoint_response.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from pydantic import Field as FieldInfo + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["URLEndpointResponse", "URLRewriter", "URLRewriterCloudinary", "URLRewriterImgix", "URLRewriterAkamai"] + + +class URLRewriterCloudinary(BaseModel): + preserve_asset_delivery_types: bool = FieldInfo(alias="preserveAssetDeliveryTypes") + """Whether to preserve `/` in the rewritten URL.""" + + type: Literal["CLOUDINARY"] + + +class URLRewriterImgix(BaseModel): + type: Literal["IMGIX"] + + +class URLRewriterAkamai(BaseModel): + type: Literal["AKAMAI"] + + +URLRewriter: TypeAlias = Annotated[ + Union[URLRewriterCloudinary, URLRewriterImgix, URLRewriterAkamai], PropertyInfo(discriminator="type") +] + + +class URLEndpointResponse(BaseModel): + """URL‑endpoint object as returned by the API.""" + + id: str + """Unique identifier for the URL-endpoint. 
+ + This is generated by ImageKit when you create a new URL-endpoint. For the + default URL-endpoint, this is always `default`. + """ + + description: str + """Description of the URL endpoint.""" + + origins: List[str] + """ + Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + """ + + url_prefix: str = FieldInfo(alias="urlPrefix") + """ + Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + """ + + url_rewriter: Optional[URLRewriter] = FieldInfo(alias="urlRewriter", default=None) + """Configuration for third-party URL rewriting.""" diff --git a/src/imagekitio/types/accounts/url_endpoint_update_params.py b/src/imagekitio/types/accounts/url_endpoint_update_params.py new file mode 100644 index 00000000..e34f27b2 --- /dev/null +++ b/src/imagekitio/types/accounts/url_endpoint_update_params.py @@ -0,0 +1,50 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["URLEndpointUpdateParams", "URLRewriter", "URLRewriterCloudinary", "URLRewriterImgix", "URLRewriterAkamai"] + + +class URLEndpointUpdateParams(TypedDict, total=False): + description: Required[str] + """Description of the URL endpoint.""" + + origins: SequenceNotStr[str] + """ + Ordered list of origin IDs to try when the file isn’t in the Media Library; + ImageKit checks them in the sequence provided. Origin must be created before it + can be used in a URL endpoint. + """ + + url_prefix: Annotated[str, PropertyInfo(alias="urlPrefix")] + """ + Path segment appended to your base URL to form the endpoint (letters, digits, + and hyphens only — or empty for the default endpoint). + """ + + url_rewriter: Annotated[URLRewriter, PropertyInfo(alias="urlRewriter")] + """Configuration for third-party URL rewriting.""" + + +class URLRewriterCloudinary(TypedDict, total=False): + type: Required[Literal["CLOUDINARY"]] + + preserve_asset_delivery_types: Annotated[bool, PropertyInfo(alias="preserveAssetDeliveryTypes")] + """Whether to preserve `/` in the rewritten URL.""" + + +class URLRewriterImgix(TypedDict, total=False): + type: Required[Literal["IMGIX"]] + + +class URLRewriterAkamai(TypedDict, total=False): + type: Required[Literal["AKAMAI"]] + + +URLRewriter: TypeAlias = Union[URLRewriterCloudinary, URLRewriterImgix, URLRewriterAkamai] diff --git a/src/imagekitio/types/accounts/usage_get_params.py b/src/imagekitio/types/accounts/usage_get_params.py new file mode 100644 index 00000000..298a9690 --- /dev/null +++ b/src/imagekitio/types/accounts/usage_get_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from datetime import date +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["UsageGetParams"] + + +class UsageGetParams(TypedDict, total=False): + end_date: Required[Annotated[Union[str, date], PropertyInfo(alias="endDate", format="iso8601")]] + """Specify a `endDate` in `YYYY-MM-DD` format. + + It should be after the `startDate`. 
The difference between `startDate` and + `endDate` should be less than 90 days. + """ + + start_date: Required[Annotated[Union[str, date], PropertyInfo(alias="startDate", format="iso8601")]] + """Specify a `startDate` in `YYYY-MM-DD` format. + + It should be before the `endDate`. The difference between `startDate` and + `endDate` should be less than 90 days. + """ diff --git a/src/imagekitio/types/accounts/usage_get_response.py b/src/imagekitio/types/accounts/usage_get_response.py new file mode 100644 index 00000000..651552c3 --- /dev/null +++ b/src/imagekitio/types/accounts/usage_get_response.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["UsageGetResponse"] + + +class UsageGetResponse(BaseModel): + bandwidth_bytes: Optional[int] = FieldInfo(alias="bandwidthBytes", default=None) + """Amount of bandwidth used in bytes.""" + + extension_units_count: Optional[int] = FieldInfo(alias="extensionUnitsCount", default=None) + """Number of extension units used.""" + + media_library_storage_bytes: Optional[int] = FieldInfo(alias="mediaLibraryStorageBytes", default=None) + """Storage used by media library in bytes.""" + + original_cache_storage_bytes: Optional[int] = FieldInfo(alias="originalCacheStorageBytes", default=None) + """Storage used by the original cache in bytes.""" + + video_processing_units_count: Optional[int] = FieldInfo(alias="videoProcessingUnitsCount", default=None) + """Number of video processing units used.""" diff --git a/src/imagekitio/types/asset_list_params.py b/src/imagekitio/types/asset_list_params.py new file mode 100644 index 00000000..ea4024e0 --- /dev/null +++ b/src/imagekitio/types/asset_list_params.py @@ -0,0 +1,81 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["AssetListParams"] + + +class AssetListParams(TypedDict, total=False): + file_type: Annotated[Literal["all", "image", "non-image"], PropertyInfo(alias="fileType")] + """Filter results by file type. + + - `all` — include all file types + - `image` — include only image files + - `non-image` — include only non-image files (e.g., JS, CSS, video) + """ + + limit: int + """The maximum number of results to return in response.""" + + path: str + """Folder path if you want to limit the search within a specific folder. + + For example, `/sales-banner/` will only search in folder sales-banner. + + Note : If your use case involves searching within a folder as well as its + subfolders, you can use `path` parameter in `searchQuery` with appropriate + operator. Checkout + [Supported parameters](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#supported-parameters) + for more information. + """ + + search_query: Annotated[str, PropertyInfo(alias="searchQuery")] + """Query string in a Lucene-like query language e.g. `createdAt > "7d"`. + + Note : When the searchQuery parameter is present, the following query parameters + will have no effect on the result: + + 1. `tags` + 2. `type` + 3. `name` + + [Learn more](/docs/api-reference/digital-asset-management-dam/list-and-search-assets#advanced-search-queries) + from examples. 
+ """ + + skip: int + """The number of results to skip before returning results.""" + + sort: Literal[ + "ASC_NAME", + "DESC_NAME", + "ASC_CREATED", + "DESC_CREATED", + "ASC_UPDATED", + "DESC_UPDATED", + "ASC_HEIGHT", + "DESC_HEIGHT", + "ASC_WIDTH", + "DESC_WIDTH", + "ASC_SIZE", + "DESC_SIZE", + "ASC_RELEVANCE", + "DESC_RELEVANCE", + ] + """ + Sort the results by one of the supported fields in ascending or descending + order. + """ + + type: Literal["file", "file-version", "folder", "all"] + """Filter results by asset type. + + - `file` — returns only files + - `file-version` — returns specific file versions + - `folder` — returns only folders + - `all` — returns both files and folders (excludes `file-version`) + """ diff --git a/src/imagekitio/types/asset_list_response.py b/src/imagekitio/types/asset_list_response.py new file mode 100644 index 00000000..00596e25 --- /dev/null +++ b/src/imagekitio/types/asset_list_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import TypeAlias + +from .file import File +from .folder import Folder + +__all__ = ["AssetListResponse", "AssetListResponseItem"] + +AssetListResponseItem: TypeAlias = Union[File, Folder] + +AssetListResponse: TypeAlias = List[AssetListResponseItem] diff --git a/src/imagekitio/types/base_webhook_event.py b/src/imagekitio/types/base_webhook_event.py new file mode 100644 index 00000000..b37e44a2 --- /dev/null +++ b/src/imagekitio/types/base_webhook_event.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["BaseWebhookEvent"] + + +class BaseWebhookEvent(BaseModel): + id: str + """Unique identifier for the event.""" + + type: str + """The type of webhook event.""" diff --git a/src/imagekitio/types/beta/__init__.py b/src/imagekitio/types/beta/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/imagekitio/types/beta/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/imagekitio/types/beta/v2/__init__.py b/src/imagekitio/types/beta/v2/__init__.py new file mode 100644 index 00000000..c5c3d837 --- /dev/null +++ b/src/imagekitio/types/beta/v2/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .file_upload_params import FileUploadParams as FileUploadParams +from .file_upload_response import FileUploadResponse as FileUploadResponse diff --git a/src/imagekitio/types/beta/v2/file_upload_params.py b/src/imagekitio/types/beta/v2/file_upload_params.py new file mode 100644 index 00000000..7f75be26 --- /dev/null +++ b/src/imagekitio/types/beta/v2/file_upload_params.py @@ -0,0 +1,273 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ...._types import FileTypes, SequenceNotStr +from ...._utils import PropertyInfo +from ...shared_params.extensions import Extensions + +__all__ = [ + "FileUploadParams", + "Transformation", + "TransformationPost", + "TransformationPostTransformation", + "TransformationPostGifToVideo", + "TransformationPostThumbnail", + "TransformationPostAbs", +] + + +class FileUploadParams(TypedDict, total=False): + file: Required[FileTypes] + """The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. + """ + + file_name: Required[Annotated[str, PropertyInfo(alias="fileName")]] + """The name with which the file has to be uploaded.""" + + token: str + """This is the client-generated JSON Web Token (JWT). + + The ImageKit.io server uses it to authenticate and check that the upload request + parameters have not been tampered with after the token has been generated. Learn + how to create the token on the page below. This field is only required for + authentication when uploading a file from the client side. + + **Note**: Sending a JWT that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new token. + + **⚠️Warning**: JWT must be generated on the server-side because it is generated + using your account's private API key. This field is required for authentication + when uploading a file from the client-side. + """ + + checks: str + """ + Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file-v2#upload-api-checks). + """ + + custom_coordinates: Annotated[str, PropertyInfo(alias="customCoordinates")] + """Define an important area in the image. + + This is only relevant for image type files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + """ + + custom_metadata: Annotated[Dict[str, object], PropertyInfo(alias="customMetadata")] + """JSON key-value pairs to associate with the asset. + + Create the custom metadata fields before setting these values. + """ + + description: str + """Optional text to describe the contents of the file.""" + + extensions: Extensions + """Array of extensions to be applied to the asset. + + Each extension can be configured with specific parameters based on the extension + type. + """ + + folder: str + """The folder path in which the image has to be uploaded. + + If the folder(s) didn't exist before, a new folder(s) is created. Using multiple + `/` creates a nested folder. + """ + + is_private_file: Annotated[bool, PropertyInfo(alias="isPrivateFile")] + """Whether to mark the file as private or not. 
+ + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + """ + + is_published: Annotated[bool, PropertyInfo(alias="isPublished")] + """Whether to upload file as published or not. + + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + """ + + overwrite_ai_tags: Annotated[bool, PropertyInfo(alias="overwriteAITags")] + """ + If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. + """ + + overwrite_custom_metadata: Annotated[bool, PropertyInfo(alias="overwriteCustomMetadata")] + """ + If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + """ + + overwrite_file: Annotated[bool, PropertyInfo(alias="overwriteFile")] + """ + If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + """ + + overwrite_tags: Annotated[bool, PropertyInfo(alias="overwriteTags")] + """ + If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + """ + + response_fields: Annotated[ + List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ], + PropertyInfo(alias="responseFields"), + ] + """Array of response field keys to include in the API response body.""" + + tags: SequenceNotStr[str] + """Set the tags while uploading the file. Provide an array of tag strings (e.g. + + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + """ + + transformation: Transformation + """Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + """ + + use_unique_file_name: Annotated[bool, PropertyInfo(alias="useUniqueFileName")] + """Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + """ + + webhook_url: Annotated[str, PropertyInfo(alias="webhookUrl")] + """ + The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. 
+ """ + + +class TransformationPostTransformation(TypedDict, total=False): + type: Required[Literal["transformation"]] + """Transformation type.""" + + value: Required[str] + """Transformation string (e.g. + + `w-200,h-200`). + Same syntax as ImageKit URL-based transformations. + """ + + +class TransformationPostGifToVideo(TypedDict, total=False): + type: Required[Literal["gif-to-video"]] + """Converts an animated GIF into an MP4.""" + + value: str + """Optional transformation string to apply to the output video. + + **Example**: `q-80` + """ + + +class TransformationPostThumbnail(TypedDict, total=False): + type: Required[Literal["thumbnail"]] + """Generates a thumbnail image.""" + + value: str + """Optional transformation string. + + **Example**: `w-150,h-150` + """ + + +class TransformationPostAbs(TypedDict, total=False): + protocol: Required[Literal["hls", "dash"]] + """Streaming protocol to use (`hls` or `dash`).""" + + type: Required[Literal["abs"]] + """Adaptive Bitrate Streaming (ABS) setup.""" + + value: Required[str] + """ + List of different representations you want to create separated by an underscore. + """ + + +TransformationPost: TypeAlias = Union[ + TransformationPostTransformation, TransformationPostGifToVideo, TransformationPostThumbnail, TransformationPostAbs +] + + +class Transformation(TypedDict, total=False): + """Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + """ + + post: Iterable[TransformationPost] + """List of transformations to apply _after_ the file is uploaded. + + Each item must match one of the following types: `transformation`, + `gif-to-video`, `thumbnail`, `abs`. + """ + + pre: str + """Transformation string to apply before uploading the file to the Media Library. + + Useful for optimizing files at ingestion. + """ diff --git a/src/imagekitio/types/beta/v2/file_upload_response.py b/src/imagekitio/types/beta/v2/file_upload_response.py new file mode 100644 index 00000000..46cb7559 --- /dev/null +++ b/src/imagekitio/types/beta/v2/file_upload_response.py @@ -0,0 +1,257 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ...._models import BaseModel +from ...metadata import Metadata + +__all__ = ["FileUploadResponse", "AITag", "ExtensionStatus", "SelectedFieldsSchema", "VersionInfo"] + + +class AITag(BaseModel): + confidence: Optional[float] = None + """Confidence score of the tag.""" + + name: Optional[str] = None + """Name of the tag.""" + + source: Optional[str] = None + """Array of `AITags` associated with the image. + + If no `AITags` are set, it will be null. These tags can be added using the + `google-auto-tagging` or `aws-auto-tagging` extensions. + """ + + +class ExtensionStatus(BaseModel): + """ + Extension names with their processing status at the time of completion of the request. It could have one of the following status values: + + `success`: The extension has been successfully applied. 
+ `failed`: The extension has failed and will not be retried. + `pending`: The extension will finish processing in some time. On completion, the final status (success / failed) will be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. + """ + + ai_auto_description: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="ai-auto-description", default=None + ) + + aws_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="aws-auto-tagging", default=None + ) + + google_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="google-auto-tagging", default=None + ) + + remove_bg: Optional[Literal["success", "pending", "failed"]] = FieldInfo(alias="remove-bg", default=None) + + +class SelectedFieldsSchema(BaseModel): + type: Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"] + """Type of the custom metadata field.""" + + default_value: Union[str, float, bool, List[Union[str, float, bool]], None] = FieldInfo( + alias="defaultValue", default=None + ) + """The default value for this custom metadata field. + + The value should match the `type` of custom metadata field. + """ + + is_value_required: Optional[bool] = FieldInfo(alias="isValueRequired", default=None) + """Specifies if the custom metadata field is required or not.""" + + max_length: Optional[float] = FieldInfo(alias="maxLength", default=None) + """Maximum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + max_value: Union[str, float, None] = FieldInfo(alias="maxValue", default=None) + """Maximum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + min_length: Optional[float] = FieldInfo(alias="minLength", default=None) + """Minimum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + min_value: Union[str, float, None] = FieldInfo(alias="minValue", default=None) + """Minimum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + read_only: Optional[bool] = FieldInfo(alias="readOnly", default=None) + """Indicates whether the custom metadata field is read only. + + A read only field cannot be modified after being set. This field is configurable + only via the **Path policy** feature. + """ + + select_options: Optional[List[Union[str, float, bool]]] = FieldInfo(alias="selectOptions", default=None) + """An array of allowed values when field type is `SingleSelect` or `MultiSelect`.""" + + select_options_truncated: Optional[bool] = FieldInfo(alias="selectOptionsTruncated", default=None) + """Specifies if the selectOptions array is truncated. + + It is truncated when number of options are > 100. 
+ """ + + +class VersionInfo(BaseModel): + """An object containing the file or file version's `id` (versionId) and `name`.""" + + id: Optional[str] = None + """Unique identifier of the file version.""" + + name: Optional[str] = None + """Name of the file version.""" + + +class FileUploadResponse(BaseModel): + """Object containing details of a successful upload.""" + + ai_tags: Optional[List[AITag]] = FieldInfo(alias="AITags", default=None) + """An array of tags assigned to the uploaded file by auto tagging.""" + + audio_codec: Optional[str] = FieldInfo(alias="audioCodec", default=None) + """The audio codec used in the video (only for video).""" + + bit_rate: Optional[int] = FieldInfo(alias="bitRate", default=None) + """The bit rate of the video in kbps (only for video).""" + + custom_coordinates: Optional[str] = FieldInfo(alias="customCoordinates", default=None) + """ + Value of custom coordinates associated with the image in the format + `x,y,width,height`. If `customCoordinates` are not defined, then it is `null`. + Send `customCoordinates` in `responseFields` in API request to get the value of + this field. + """ + + custom_metadata: Optional[Dict[str, object]] = FieldInfo(alias="customMetadata", default=None) + """A key-value data associated with the asset. + + Use `responseField` in API request to get `customMetadata` in the upload API + response. Before setting any custom metadata on an asset, you have to create the + field using custom metadata fields API. Send `customMetadata` in + `responseFields` in API request to get the value of this field. + """ + + description: Optional[str] = None + """Optional text to describe the contents of the file. + + Can be set by the user or the ai-auto-description extension. + """ + + duration: Optional[int] = None + """The duration of the video in seconds (only for video).""" + + embedded_metadata: Optional[Dict[str, object]] = FieldInfo(alias="embeddedMetadata", default=None) + """Consolidated embedded metadata associated with the file. + + It includes exif, iptc, and xmp data. Send `embeddedMetadata` in + `responseFields` in API request to get embeddedMetadata in the upload API + response. + """ + + extension_status: Optional[ExtensionStatus] = FieldInfo(alias="extensionStatus", default=None) + """ + Extension names with their processing status at the time of completion of the + request. It could have one of the following status values: + + `success`: The extension has been successfully applied. `failed`: The extension + has failed and will not be retried. `pending`: The extension will finish + processing in some time. On completion, the final status (success / failed) will + be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. + """ + + file_id: Optional[str] = FieldInfo(alias="fileId", default=None) + """Unique fileId. + + Store this fileld in your database, as this will be used to perform update + action on this file. + """ + + file_path: Optional[str] = FieldInfo(alias="filePath", default=None) + """The relative path of the file in the media library e.g. + + `/marketing-assets/new-banner.jpg`. + """ + + file_type: Optional[str] = FieldInfo(alias="fileType", default=None) + """Type of the uploaded file. Possible values are `image`, `non-image`.""" + + height: Optional[float] = None + """Height of the image in pixels (Only for images)""" + + is_private_file: Optional[bool] = FieldInfo(alias="isPrivateFile", default=None) + """Is the file marked as private. 
+ + It can be either `true` or `false`. Send `isPrivateFile` in `responseFields` in + API request to get the value of this field. + """ + + is_published: Optional[bool] = FieldInfo(alias="isPublished", default=None) + """Is the file published or in draft state. + + It can be either `true` or `false`. Send `isPublished` in `responseFields` in + API request to get the value of this field. + """ + + metadata: Optional[Metadata] = None + """Legacy metadata. + + Send `metadata` in `responseFields` in API request to get metadata in the upload + API response. + """ + + name: Optional[str] = None + """Name of the asset.""" + + selected_fields_schema: Optional[Dict[str, SelectedFieldsSchema]] = FieldInfo( + alias="selectedFieldsSchema", default=None + ) + """ + This field is included in the response only if the Path policy feature is + available in the plan. It contains schema definitions for the custom metadata + fields selected for the specified file path. Field selection can only be done + when the Path policy feature is enabled. + + Keys are the names of the custom metadata fields; the value object has details + about the custom metadata schema. + """ + + size: Optional[float] = None + """Size of the image file in Bytes.""" + + tags: Optional[List[str]] = None + """The array of tags associated with the asset. + + If no tags are set, it will be `null`. Send `tags` in `responseFields` in API + request to get the value of this field. + """ + + thumbnail_url: Optional[str] = FieldInfo(alias="thumbnailUrl", default=None) + """In the case of an image, a small thumbnail URL.""" + + url: Optional[str] = None + """A publicly accessible URL of the file.""" + + version_info: Optional[VersionInfo] = FieldInfo(alias="versionInfo", default=None) + """An object containing the file or file version's `id` (versionId) and `name`.""" + + video_codec: Optional[str] = FieldInfo(alias="videoCodec", default=None) + """The video codec used in the video (only for video).""" + + width: Optional[float] = None + """Width of the image in pixels (Only for Images)""" diff --git a/src/imagekitio/types/cache/__init__.py b/src/imagekitio/types/cache/__init__.py new file mode 100644 index 00000000..76e5283c --- /dev/null +++ b/src/imagekitio/types/cache/__init__.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .invalidation_get_response import InvalidationGetResponse as InvalidationGetResponse +from .invalidation_create_params import InvalidationCreateParams as InvalidationCreateParams +from .invalidation_create_response import InvalidationCreateResponse as InvalidationCreateResponse diff --git a/src/imagekitio/types/cache/invalidation_create_params.py b/src/imagekitio/types/cache/invalidation_create_params.py new file mode 100644 index 00000000..55df58fe --- /dev/null +++ b/src/imagekitio/types/cache/invalidation_create_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
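# --- Editor's illustrative sketch (not part of the generated diff) ---
# How the V2 FileUploadParams and FileUploadResponse defined above might be
# used together. The resource path `client.beta.v2.files.upload(...)` is an
# assumption based on the `types/beta/v2/` module path; the parameter names,
# transformation keys, and response fields come from this diff.
from imagekitio import ImageKit  # assumed client class name

client = ImageKit()
with open("banner.png", "rb") as f:  # binary data, a URL, or Base64 are also accepted
    result = client.beta.v2.files.upload(
        file=f,
        file_name="banner.png",
        folder="/marketing-assets",
        transformation={
            "pre": "w-1200,q-80",                                     # applied before storage
            "post": [{"type": "thumbnail", "value": "w-150,h-150"}],  # applied after upload
        },
    )
print(result.file_id, result.url)  # fields on FileUploadResponse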
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["InvalidationCreateParams"] + + +class InvalidationCreateParams(TypedDict, total=False): + url: Required[str] + """The full URL of the file to be purged.""" diff --git a/src/imagekitio/types/cache/invalidation_create_response.py b/src/imagekitio/types/cache/invalidation_create_response.py new file mode 100644 index 00000000..2dfbce88 --- /dev/null +++ b/src/imagekitio/types/cache/invalidation_create_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["InvalidationCreateResponse"] + + +class InvalidationCreateResponse(BaseModel): + request_id: Optional[str] = FieldInfo(alias="requestId", default=None) + """Unique identifier of the purge request. + + This can be used to check the status of the purge request. + """ diff --git a/src/imagekitio/types/cache/invalidation_get_response.py b/src/imagekitio/types/cache/invalidation_get_response.py new file mode 100644 index 00000000..96c13046 --- /dev/null +++ b/src/imagekitio/types/cache/invalidation_get_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InvalidationGetResponse"] + + +class InvalidationGetResponse(BaseModel): + status: Optional[Literal["Pending", "Completed"]] = None + """Status of the purge request.""" diff --git a/src/imagekitio/types/custom_metadata_field.py b/src/imagekitio/types/custom_metadata_field.py new file mode 100644 index 00000000..1ebde723 --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field.py @@ -0,0 +1,77 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["CustomMetadataField", "Schema"] + + +class Schema(BaseModel): + """An object that describes the rules for the custom metadata field value.""" + + type: Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"] + """Type of the custom metadata field.""" + + default_value: Union[str, float, bool, List[Union[str, float, bool]], None] = FieldInfo( + alias="defaultValue", default=None + ) + """The default value for this custom metadata field. + + Data type of default value depends on the field type. + """ + + is_value_required: Optional[bool] = FieldInfo(alias="isValueRequired", default=None) + """Specifies if the this custom metadata field is required or not.""" + + max_length: Optional[float] = FieldInfo(alias="maxLength", default=None) + """Maximum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + max_value: Union[str, float, None] = FieldInfo(alias="maxValue", default=None) + """Maximum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + min_length: Optional[float] = FieldInfo(alias="minLength", default=None) + """Minimum length of string. 
Only set if `type` is set to `Text` or `Textarea`.""" + + min_value: Union[str, float, None] = FieldInfo(alias="minValue", default=None) + """Minimum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + select_options: Optional[List[Union[str, float, bool]]] = FieldInfo(alias="selectOptions", default=None) + """An array of allowed values when field type is `SingleSelect` or `MultiSelect`.""" + + +class CustomMetadataField(BaseModel): + """Object containing details of a custom metadata field.""" + + id: str + """Unique identifier for the custom metadata field. Use this to update the field.""" + + label: str + """Human readable name of the custom metadata field. + + This name is displayed as form field label to the users while setting field + value on the asset in the media library UI. + """ + + name: str + """API name of the custom metadata field. + + This becomes the key while setting `customMetadata` (key-value object) for an + asset using upload or update API. + """ + + schema_: Schema = FieldInfo(alias="schema") + """An object that describes the rules for the custom metadata field value.""" diff --git a/src/imagekitio/types/custom_metadata_field_create_params.py b/src/imagekitio/types/custom_metadata_field_create_params.py new file mode 100644 index 00000000..0e265b09 --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field_create_params.py @@ -0,0 +1,85 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, Annotated, TypedDict + +from .._types import SequenceNotStr +from .._utils import PropertyInfo + +__all__ = ["CustomMetadataFieldCreateParams", "Schema"] + + +class CustomMetadataFieldCreateParams(TypedDict, total=False): + label: Required[str] + """Human readable name of the custom metadata field. + + This should be unique across all non deleted custom metadata fields. This name + is displayed as form field label to the users while setting field value on an + asset in the media library UI. + """ + + name: Required[str] + """API name of the custom metadata field. + + This should be unique across all (including deleted) custom metadata fields. + """ + + schema: Required[Schema] + + +class Schema(TypedDict, total=False): + type: Required[Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"]] + """Type of the custom metadata field.""" + + default_value: Annotated[ + Union[str, float, bool, SequenceNotStr[Union[str, float, bool]]], PropertyInfo(alias="defaultValue") + ] + """The default value for this custom metadata field. + + This property is only required if `isValueRequired` property is set to `true`. + The value should match the `type` of custom metadata field. + """ + + is_value_required: Annotated[bool, PropertyInfo(alias="isValueRequired")] + """Sets this custom metadata field as required. + + Setting custom metadata fields on an asset will throw error if the value for all + required fields are not present in upload or update asset API request body. + """ + + max_length: Annotated[float, PropertyInfo(alias="maxLength")] + """Maximum length of string. + + Only set this property if `type` is set to `Text` or `Textarea`. + """ + + max_value: Annotated[Union[str, float], PropertyInfo(alias="maxValue")] + """Maximum value of the field. 
+ + Only set this property if field type is `Date` or `Number`. For `Date` type + field, set the maximum date in ISO8601 string format. For `Number` type field, + set the maximum numeric value. + """ + + min_length: Annotated[float, PropertyInfo(alias="minLength")] + """Minimum length of string. + + Only set this property if `type` is set to `Text` or `Textarea`. + """ + + min_value: Annotated[Union[str, float], PropertyInfo(alias="minValue")] + """Minimum value of the field. + + Only set this property if field type is `Date` or `Number`. For `Date` type + field, set the minimum date in ISO8601 string format. For `Number` type field, + set the minimum numeric value. + """ + + select_options: Annotated[SequenceNotStr[Union[str, float, bool]], PropertyInfo(alias="selectOptions")] + """An array of allowed values. + + This property is only required if `type` property is set to `SingleSelect` or + `MultiSelect`. + """ diff --git a/src/imagekitio/types/custom_metadata_field_delete_response.py b/src/imagekitio/types/custom_metadata_field_delete_response.py new file mode 100644 index 00000000..247d6a95 --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field_delete_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["CustomMetadataFieldDeleteResponse"] + + +class CustomMetadataFieldDeleteResponse(BaseModel): + pass diff --git a/src/imagekitio/types/custom_metadata_field_list_params.py b/src/imagekitio/types/custom_metadata_field_list_params.py new file mode 100644 index 00000000..a84f93ea --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field_list_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["CustomMetadataFieldListParams"] + + +class CustomMetadataFieldListParams(TypedDict, total=False): + folder_path: Annotated[str, PropertyInfo(alias="folderPath")] + """ + The folder path (e.g., `/path/to/folder`) for which to retrieve applicable + custom metadata fields. Useful for determining path-specific field selections + when the [Path policy](https://imagekit.io/docs/dam/path-policy) feature is in + use. + """ + + include_deleted: Annotated[bool, PropertyInfo(alias="includeDeleted")] + """Set it to `true` to include deleted field objects in the API response.""" diff --git a/src/imagekitio/types/custom_metadata_field_list_response.py b/src/imagekitio/types/custom_metadata_field_list_response.py new file mode 100644 index 00000000..f3928746 --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field_list_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from .custom_metadata_field import CustomMetadataField + +__all__ = ["CustomMetadataFieldListResponse"] + +CustomMetadataFieldListResponse: TypeAlias = List[CustomMetadataField] diff --git a/src/imagekitio/types/custom_metadata_field_update_params.py b/src/imagekitio/types/custom_metadata_field_update_params.py new file mode 100644 index 00000000..fbb9effc --- /dev/null +++ b/src/imagekitio/types/custom_metadata_field_update_params.py @@ -0,0 +1,88 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
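# --- Editor's illustrative sketch (not part of the generated diff) ---
# Creating a custom metadata field using CustomMetadataFieldCreateParams. The
# method path `client.custom_metadata_fields.create(...)` and the return type
# (the CustomMetadataField model above) are assumptions; the schema keys mirror
# the TypedDict fields in this diff.
from imagekitio import ImageKit  # assumed client class name

client = ImageKit()
field = client.custom_metadata_fields.create(
    label="Shoot date",
    name="shootDate",
    schema={
        "type": "Date",
        "is_value_required": True,
        "default_value": "2024-01-01T00:00:00Z",  # ISO8601, matching the `Date` type
    },
)
print(field.id, field.schema_.type)  # `schema_` is the Python-side name for the aliased `schema` field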
+ + from __future__ import annotations + + from typing import Union + from typing_extensions import Annotated, TypedDict + + from .._types import SequenceNotStr + from .._utils import PropertyInfo + + __all__ = ["CustomMetadataFieldUpdateParams", "Schema"] + + + class CustomMetadataFieldUpdateParams(TypedDict, total=False): + label: str + """Human readable name of the custom metadata field. + + This should be unique across all non deleted custom metadata fields. This name + is displayed as form field label to the users while setting field value on an + asset in the media library UI. This parameter is required if `schema` is not + provided. + """ + + schema: Schema + """An object that describes the rules for the custom metadata key. + + This parameter is required if `label` is not provided. Note: `type` cannot be + updated and will be ignored if sent with the `schema`. The schema will be + validated as per the existing `type`. + """ + + + class Schema(TypedDict, total=False): + """An object that describes the rules for the custom metadata key. + + This parameter is required if `label` is not provided. Note: `type` cannot be updated and will be ignored if sent with the `schema`. The schema will be validated as per the existing `type`. + """ + + default_value: Annotated[ + Union[str, float, bool, SequenceNotStr[Union[str, float, bool]]], PropertyInfo(alias="defaultValue") + ] + """The default value for this custom metadata field. + + This property is only required if `isValueRequired` property is set to `true`. + The value should match the `type` of custom metadata field. + """ + + is_value_required: Annotated[bool, PropertyInfo(alias="isValueRequired")] + """Sets this custom metadata field as required. + + Setting custom metadata fields on an asset will throw error if the value for all + required fields are not present in upload or update asset API request body. + """ + + max_length: Annotated[float, PropertyInfo(alias="maxLength")] + """Maximum length of string. + + Only set this property if `type` is set to `Text` or `Textarea`. + """ + + max_value: Annotated[Union[str, float], PropertyInfo(alias="maxValue")] + """Maximum value of the field. + + Only set this property if field type is `Date` or `Number`. For `Date` type + field, set the maximum date in ISO8601 string format. For `Number` type field, + set the maximum numeric value. + """ + + min_length: Annotated[float, PropertyInfo(alias="minLength")] + """Minimum length of string. + + Only set this property if `type` is set to `Text` or `Textarea`. + """ + + min_value: Annotated[Union[str, float], PropertyInfo(alias="minValue")] + """Minimum value of the field. + + Only set this property if field type is `Date` or `Number`. For `Date` type + field, set the minimum date in ISO8601 string format. For `Number` type field, + set the minimum numeric value. + """ + + select_options: Annotated[SequenceNotStr[Union[str, float, bool]], PropertyInfo(alias="selectOptions")] + """An array of allowed values. + + This property is only required if `type` property is set to `SingleSelect` or + `MultiSelect`. + """ diff --git a/src/imagekitio/types/dummy_create_params.py b/src/imagekitio/types/dummy_create_params.py new file mode 100644 index 00000000..e21a3964 --- /dev/null +++ b/src/imagekitio/types/dummy_create_params.py @@ -0,0 +1,124 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
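# --- Editor's illustrative sketch (not part of the generated diff) ---
# Updating a field per CustomMetadataFieldUpdateParams: send `label`, `schema`,
# or both; `schema.type` cannot be changed. The method path and the positional
# field id are assumptions for illustration only.
from imagekitio import ImageKit  # assumed client class name

client = ImageKit()
client.custom_metadata_fields.update(
    "field_id_placeholder",                     # hypothetical custom metadata field id
    schema={"min_value": 0, "max_value": 100},  # validated against the field's existing type
)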
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo +from .shared_params.extensions import Extensions +from .shared_params.base_overlay import BaseOverlay +from .shared_params.text_overlay import TextOverlay +from .shared.streaming_resolution import StreamingResolution +from .shared_params.overlay_timing import OverlayTiming +from .shared.transformation_position import TransformationPosition +from .shared_params.overlay_position import OverlayPosition +from .shared_params.subtitle_overlay import SubtitleOverlay +from .shared_params.solid_color_overlay import SolidColorOverlay +from .shared_params.responsive_image_attributes import ResponsiveImageAttributes +from .shared_params.text_overlay_transformation import TextOverlayTransformation +from .shared_params.subtitle_overlay_transformation import SubtitleOverlayTransformation +from .shared_params.solid_color_overlay_transformation import SolidColorOverlayTransformation + +__all__ = ["DummyCreateParams"] + + +class DummyCreateParams(TypedDict, total=False): + base_overlay: Annotated[BaseOverlay, PropertyInfo(alias="baseOverlay")] + + extensions: Extensions + """Array of extensions to be applied to the asset. + + Each extension can be configured with specific parameters based on the extension + type. + """ + + get_image_attributes_options: Annotated[ + "GetImageAttributesOptions", PropertyInfo(alias="getImageAttributesOptions") + ] + """ + Options for generating responsive image attributes including `src`, `srcSet`, + and `sizes` for HTML `` elements. This schema extends `SrcOptions` to add + support for responsive image generation with breakpoints. + """ + + image_overlay: Annotated["ImageOverlay", PropertyInfo(alias="imageOverlay")] + + overlay: "Overlay" + """Specifies an overlay to be applied on the parent image or video. + + ImageKit supports overlays including images, text, videos, subtitles, and solid + colors. See + [Overlay using layers](https://imagekit.io/docs/transformations#overlay-using-layers). + """ + + overlay_position: Annotated[OverlayPosition, PropertyInfo(alias="overlayPosition")] + + overlay_timing: Annotated[OverlayTiming, PropertyInfo(alias="overlayTiming")] + + responsive_image_attributes: Annotated[ResponsiveImageAttributes, PropertyInfo(alias="responsiveImageAttributes")] + """ + Resulting set of attributes suitable for an HTML `` element. Useful for + enabling responsive image loading with `srcSet` and `sizes`. + """ + + solid_color_overlay: Annotated[SolidColorOverlay, PropertyInfo(alias="solidColorOverlay")] + + solid_color_overlay_transformation: Annotated[ + SolidColorOverlayTransformation, PropertyInfo(alias="solidColorOverlayTransformation") + ] + + src_options: Annotated["SrcOptions", PropertyInfo(alias="srcOptions")] + """Options for generating ImageKit URLs with transformations. + + See the [Transformations guide](https://imagekit.io/docs/transformations). + """ + + streaming_resolution: Annotated[StreamingResolution, PropertyInfo(alias="streamingResolution")] + """ + Available streaming resolutions for + [adaptive bitrate streaming](https://imagekit.io/docs/adaptive-bitrate-streaming) + """ + + subtitle_overlay: Annotated[SubtitleOverlay, PropertyInfo(alias="subtitleOverlay")] + + subtitle_overlay_transformation: Annotated[ + SubtitleOverlayTransformation, PropertyInfo(alias="subtitleOverlayTransformation") + ] + """Subtitle styling options. 
+ + [Learn more](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + from the docs. + """ + + text_overlay: Annotated[TextOverlay, PropertyInfo(alias="textOverlay")] + + text_overlay_transformation: Annotated[TextOverlayTransformation, PropertyInfo(alias="textOverlayTransformation")] + + transformation: "Transformation" + """The SDK provides easy-to-use names for transformations. + + These names are converted to the corresponding transformation string before + being added to the URL. SDKs are updated regularly to support new + transformations. If you want to use a transformation that is not supported by + the SDK, You can use the `raw` parameter to pass the transformation string + directly. See the + [Transformations documentation](https://imagekit.io/docs/transformations). + """ + + transformation_position: Annotated[TransformationPosition, PropertyInfo(alias="transformationPosition")] + """ + By default, the transformation string is added as a query parameter in the URL, + e.g., `?tr=w-100,h-100`. If you want to add the transformation string in the + path of the URL, set this to `path`. Learn more in the + [Transformations guide](https://imagekit.io/docs/transformations). + """ + + video_overlay: Annotated["VideoOverlay", PropertyInfo(alias="videoOverlay")] + + +from .shared_params.overlay import Overlay +from .shared_params.src_options import SrcOptions +from .shared_params.image_overlay import ImageOverlay +from .shared_params.video_overlay import VideoOverlay +from .shared_params.transformation import Transformation +from .shared_params.get_image_attributes_options import GetImageAttributesOptions diff --git a/src/imagekitio/types/file.py b/src/imagekitio/types/file.py new file mode 100644 index 00000000..d4a1573f --- /dev/null +++ b/src/imagekitio/types/file.py @@ -0,0 +1,192 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["File", "AITag", "SelectedFieldsSchema", "VersionInfo"] + + +class AITag(BaseModel): + confidence: Optional[float] = None + """Confidence score of the tag.""" + + name: Optional[str] = None + """Name of the tag.""" + + source: Optional[str] = None + """Source of the tag. + + Possible values are `google-auto-tagging` and `aws-auto-tagging`. + """ + + +class SelectedFieldsSchema(BaseModel): + type: Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"] + """Type of the custom metadata field.""" + + default_value: Union[str, float, bool, List[Union[str, float, bool]], None] = FieldInfo( + alias="defaultValue", default=None + ) + """The default value for this custom metadata field. + + The value should match the `type` of custom metadata field. + """ + + is_value_required: Optional[bool] = FieldInfo(alias="isValueRequired", default=None) + """Specifies if the custom metadata field is required or not.""" + + max_length: Optional[float] = FieldInfo(alias="maxLength", default=None) + """Maximum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + max_value: Union[str, float, None] = FieldInfo(alias="maxValue", default=None) + """Maximum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. 
+ """ + + min_length: Optional[float] = FieldInfo(alias="minLength", default=None) + """Minimum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + min_value: Union[str, float, None] = FieldInfo(alias="minValue", default=None) + """Minimum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + read_only: Optional[bool] = FieldInfo(alias="readOnly", default=None) + """Indicates whether the custom metadata field is read only. + + A read only field cannot be modified after being set. This field is configurable + only via the **Path policy** feature. + """ + + select_options: Optional[List[Union[str, float, bool]]] = FieldInfo(alias="selectOptions", default=None) + """An array of allowed values when field type is `SingleSelect` or `MultiSelect`.""" + + select_options_truncated: Optional[bool] = FieldInfo(alias="selectOptionsTruncated", default=None) + """Specifies if the selectOptions array is truncated. + + It is truncated when number of options are > 100. + """ + + +class VersionInfo(BaseModel): + """An object with details of the file version.""" + + id: Optional[str] = None + """Unique identifier of the file version.""" + + name: Optional[str] = None + """Name of the file version.""" + + +class File(BaseModel): + """Object containing details of a file or file version.""" + + ai_tags: Optional[List[AITag]] = FieldInfo(alias="AITags", default=None) + """An array of tags assigned to the file by auto tagging.""" + + created_at: Optional[datetime] = FieldInfo(alias="createdAt", default=None) + """Date and time when the file was uploaded. + + The date and time is in ISO8601 format. + """ + + custom_coordinates: Optional[str] = FieldInfo(alias="customCoordinates", default=None) + """An string with custom coordinates of the file.""" + + custom_metadata: Optional[Dict[str, object]] = FieldInfo(alias="customMetadata", default=None) + """An object with custom metadata for the file.""" + + description: Optional[str] = None + """Optional text to describe the contents of the file. + + Can be set by the user or the ai-auto-description extension. + """ + + file_id: Optional[str] = FieldInfo(alias="fileId", default=None) + """Unique identifier of the asset.""" + + file_path: Optional[str] = FieldInfo(alias="filePath", default=None) + """Path of the file. + + This is the path you would use in the URL to access the file. For example, if + the file is at the root of the media library, the path will be `/file.jpg`. If + the file is inside a folder named `images`, the path will be `/images/file.jpg`. + """ + + file_type: Optional[str] = FieldInfo(alias="fileType", default=None) + """Type of the file. 
Possible values are `image`, `non-image`.""" + + has_alpha: Optional[bool] = FieldInfo(alias="hasAlpha", default=None) + """Specifies if the image has an alpha channel.""" + + height: Optional[float] = None + """Height of the file.""" + + is_private_file: Optional[bool] = FieldInfo(alias="isPrivateFile", default=None) + """Specifies if the file is private or not.""" + + is_published: Optional[bool] = FieldInfo(alias="isPublished", default=None) + """Specifies if the file is published or not.""" + + mime: Optional[str] = None + """MIME type of the file.""" + + name: Optional[str] = None + """Name of the asset.""" + + selected_fields_schema: Optional[Dict[str, SelectedFieldsSchema]] = FieldInfo( + alias="selectedFieldsSchema", default=None + ) + """ + This field is included in the response only if the Path policy feature is + available in the plan. It contains schema definitions for the custom metadata + fields selected for the specified file path. Field selection can only be done + when the Path policy feature is enabled. + + Keys are the names of the custom metadata fields; the value object has details + about the custom metadata schema. + """ + + size: Optional[float] = None + """Size of the file in bytes.""" + + tags: Optional[List[str]] = None + """An array of tags assigned to the file. + + Tags are used to search files in the media library. + """ + + thumbnail: Optional[str] = None + """URL of the thumbnail image. + + This URL is used to access the thumbnail image of the file in the media library. + """ + + type: Optional[Literal["file", "file-version"]] = None + """Type of the asset.""" + + updated_at: Optional[datetime] = FieldInfo(alias="updatedAt", default=None) + """Date and time when the file was last updated. + + The date and time is in ISO8601 format. + """ + + url: Optional[str] = None + """URL of the file.""" + + version_info: Optional[VersionInfo] = FieldInfo(alias="versionInfo", default=None) + """An object with details of the file version.""" + + width: Optional[float] = None + """Width of the file.""" diff --git a/src/imagekitio/types/file_copy_params.py b/src/imagekitio/types/file_copy_params.py new file mode 100644 index 00000000..e8a8f946 --- /dev/null +++ b/src/imagekitio/types/file_copy_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FileCopyParams"] + + +class FileCopyParams(TypedDict, total=False): + destination_path: Required[Annotated[str, PropertyInfo(alias="destinationPath")]] + """Full path to the folder you want to copy the above file into.""" + + source_file_path: Required[Annotated[str, PropertyInfo(alias="sourceFilePath")]] + """The full path of the file you want to copy.""" + + include_file_versions: Annotated[bool, PropertyInfo(alias="includeFileVersions")] + """Option to copy all versions of a file. + + By default, only the current version of the file is copied. When set to true, + all versions of the file will be copied. Default value - `false`. + """ diff --git a/src/imagekitio/types/file_copy_response.py b/src/imagekitio/types/file_copy_response.py new file mode 100644 index 00000000..81267b0d --- /dev/null +++ b/src/imagekitio/types/file_copy_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
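# --- Editor's illustrative sketch (not part of the generated diff) ---
# Copying a file with the FileCopyParams defined above. The method path
# `client.files.copy(...)` is an assumption; the keyword arguments map to the
# `sourceFilePath`, `destinationPath`, and `includeFileVersions` aliases.
from imagekitio import ImageKit  # assumed client class name

client = ImageKit()
client.files.copy(
    source_file_path="/marketing-assets/new-banner.jpg",
    destination_path="/archive",
    include_file_versions=True,  # copy every version instead of only the current one
)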
+ +from .._models import BaseModel + +__all__ = ["FileCopyResponse"] + + +class FileCopyResponse(BaseModel): + pass diff --git a/src/imagekitio/types/file_move_params.py b/src/imagekitio/types/file_move_params.py new file mode 100644 index 00000000..1fc2a9e6 --- /dev/null +++ b/src/imagekitio/types/file_move_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FileMoveParams"] + + +class FileMoveParams(TypedDict, total=False): + destination_path: Required[Annotated[str, PropertyInfo(alias="destinationPath")]] + """Full path to the folder you want to move the above file into.""" + + source_file_path: Required[Annotated[str, PropertyInfo(alias="sourceFilePath")]] + """The full path of the file you want to move.""" diff --git a/src/imagekitio/types/file_move_response.py b/src/imagekitio/types/file_move_response.py new file mode 100644 index 00000000..b92b949f --- /dev/null +++ b/src/imagekitio/types/file_move_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["FileMoveResponse"] + + +class FileMoveResponse(BaseModel): + pass diff --git a/src/imagekitio/types/file_rename_params.py b/src/imagekitio/types/file_rename_params.py new file mode 100644 index 00000000..22ada893 --- /dev/null +++ b/src/imagekitio/types/file_rename_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FileRenameParams"] + + +class FileRenameParams(TypedDict, total=False): + file_path: Required[Annotated[str, PropertyInfo(alias="filePath")]] + """The full path of the file you want to rename.""" + + new_file_name: Required[Annotated[str, PropertyInfo(alias="newFileName")]] + """The new name of the file. A filename can contain: + + Alphanumeric Characters: `a-z`, `A-Z`, `0-9` (including Unicode letters, marks, + and numerals in other languages). Special Characters: `.`, `_`, and `-`. + + Any other character, including space, will be replaced by `_`. + """ + + purge_cache: Annotated[bool, PropertyInfo(alias="purgeCache")] + """Option to purge cache for the old file and its versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove cached content of old file and its versions. This purge request is + counted against your monthly purge quota. + + Note: If the old file were accessible at + `https://ik.imagekit.io/demo/old-filename.jpg`, a purge cache request would be + issued against `https://ik.imagekit.io/demo/old-filename.jpg*` (with a wildcard + at the end). It will remove the file and its versions' URLs and any + transformations made using query parameters on this file or its versions. + However, the cache for file transformations made using path parameters will + persist. You can purge them using the purge API. For more details, refer to the + purge API documentation. 
+ + Default value - `false` + """ diff --git a/src/imagekitio/types/file_rename_response.py b/src/imagekitio/types/file_rename_response.py new file mode 100644 index 00000000..2ef8d821 --- /dev/null +++ b/src/imagekitio/types/file_rename_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["FileRenameResponse"] + + +class FileRenameResponse(BaseModel): + purge_request_id: Optional[str] = FieldInfo(alias="purgeRequestId", default=None) + """Unique identifier of the purge request. + + This can be used to check the status of the purge request. + """ diff --git a/src/imagekitio/types/file_update_params.py b/src/imagekitio/types/file_update_params.py new file mode 100644 index 00000000..503bafe5 --- /dev/null +++ b/src/imagekitio/types/file_update_params.py @@ -0,0 +1,85 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from .._types import SequenceNotStr +from .._utils import PropertyInfo +from .shared_params.extensions import Extensions + +__all__ = ["FileUpdateParams", "UpdateFileDetails", "ChangePublicationStatus", "ChangePublicationStatusPublish"] + + +class UpdateFileDetails(TypedDict, total=False): + custom_coordinates: Annotated[Optional[str], PropertyInfo(alias="customCoordinates")] + """Define an important area in the image in the format `x,y,width,height` e.g. + + `10,10,100,100`. Send `null` to unset this value. + """ + + custom_metadata: Annotated[Dict[str, object], PropertyInfo(alias="customMetadata")] + """A key-value data to be associated with the asset. + + To unset a key, send `null` value for that key. Before setting any custom + metadata on an asset you have to create the field using custom metadata fields + API. + """ + + description: str + """Optional text to describe the contents of the file.""" + + extensions: Extensions + """Array of extensions to be applied to the asset. + + Each extension can be configured with specific parameters based on the extension + type. + """ + + remove_ai_tags: Annotated[Union[SequenceNotStr[str], Literal["all"]], PropertyInfo(alias="removeAITags")] + """An array of AITags associated with the file that you want to remove, e.g. + + `["car", "vehicle", "motorsports"]`. + + If you want to remove all AITags associated with the file, send a string - + "all". + + Note: The remove operation for `AITags` executes before any of the `extensions` + are processed. + """ + + tags: Optional[SequenceNotStr[str]] + """An array of tags associated with the file, such as `["tag1", "tag2"]`. + + Send `null` to unset all tags associated with the file. + """ + + webhook_url: Annotated[str, PropertyInfo(alias="webhookUrl")] + """ + The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. 
+ """ + + +class ChangePublicationStatus(TypedDict, total=False): + publish: ChangePublicationStatusPublish + """Configure the publication status of a file and its versions.""" + + +class ChangePublicationStatusPublish(TypedDict, total=False): + """Configure the publication status of a file and its versions.""" + + is_published: Required[Annotated[bool, PropertyInfo(alias="isPublished")]] + """Set to `true` to publish the file. Set to `false` to unpublish the file.""" + + include_file_versions: Annotated[bool, PropertyInfo(alias="includeFileVersions")] + """Set to `true` to publish/unpublish all versions of the file. + + Set to `false` to publish/unpublish only the current version of the file. + """ + + +FileUpdateParams: TypeAlias = Union[UpdateFileDetails, ChangePublicationStatus] diff --git a/src/imagekitio/types/file_update_response.py b/src/imagekitio/types/file_update_response.py new file mode 100644 index 00000000..936e3361 --- /dev/null +++ b/src/imagekitio/types/file_update_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .file import File +from .._models import BaseModel + +__all__ = ["FileUpdateResponse", "FileUpdateResponseExtensionStatus"] + + +class FileUpdateResponseExtensionStatus(BaseModel): + ai_auto_description: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="ai-auto-description", default=None + ) + + aws_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="aws-auto-tagging", default=None + ) + + google_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="google-auto-tagging", default=None + ) + + remove_bg: Optional[Literal["success", "pending", "failed"]] = FieldInfo(alias="remove-bg", default=None) + + +class FileUpdateResponse(File): + """Object containing details of a file or file version.""" + + extension_status: Optional[FileUpdateResponseExtensionStatus] = FieldInfo(alias="extensionStatus", default=None) diff --git a/src/imagekitio/types/file_upload_params.py b/src/imagekitio/types/file_upload_params.py new file mode 100644 index 00000000..1dd9a23d --- /dev/null +++ b/src/imagekitio/types/file_upload_params.py @@ -0,0 +1,305 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from .._types import FileTypes, SequenceNotStr +from .._utils import PropertyInfo +from .shared_params.extensions import Extensions + +__all__ = [ + "FileUploadParams", + "Transformation", + "TransformationPost", + "TransformationPostTransformation", + "TransformationPostGifToVideo", + "TransformationPostThumbnail", + "TransformationPostAbs", +] + + +class FileUploadParams(TypedDict, total=False): + file: Required[FileTypes] + """The API accepts any of the following: + + - **Binary data** – send the raw bytes as `multipart/form-data`. + - **HTTP / HTTPS URL** – a publicly reachable URL that ImageKit’s servers can + fetch. + - **Base64 string** – the file encoded as a Base64 data URI or plain Base64. + + When supplying a URL, the server must receive the response headers within 8 + seconds; otherwise the request fails with 400 Bad Request. 
+ """ + + file_name: Required[Annotated[str, PropertyInfo(alias="fileName")]] + """The name with which the file has to be uploaded. The file name can contain: + + - Alphanumeric Characters: `a-z`, `A-Z`, `0-9`. + - Special Characters: `.`, `-` + + Any other character including space will be replaced by `_` + """ + + token: str + """ + A unique value that the ImageKit.io server will use to recognize and prevent + subsequent retries for the same request. We suggest using V4 UUIDs, or another + random string with enough entropy to avoid collisions. This field is only + required for authentication when uploading a file from the client side. + + **Note**: Sending a value that has been used in the past will result in a + validation error. Even if your previous request resulted in an error, you should + always send a new value for this field. + """ + + checks: str + """ + Server-side checks to run on the asset. Read more about + [Upload API checks](/docs/api-reference/upload-file/upload-file#upload-api-checks). + """ + + custom_coordinates: Annotated[str, PropertyInfo(alias="customCoordinates")] + """Define an important area in the image. + + This is only relevant for image type files. + + - To be passed as a string with the x and y coordinates of the top-left corner, + and width and height of the area of interest in the format `x,y,width,height`. + For example - `10,10,100,100` + - Can be used with fo-customtransformation. + - If this field is not specified and the file is overwritten, then + customCoordinates will be removed. + """ + + custom_metadata: Annotated[Dict[str, object], PropertyInfo(alias="customMetadata")] + """JSON key-value pairs to associate with the asset. + + Create the custom metadata fields before setting these values. + """ + + description: str + """Optional text to describe the contents of the file.""" + + expire: int + """The time until your signature is valid. + + It must be a [Unix time](https://en.wikipedia.org/wiki/Unix_time) in less than 1 + hour into the future. It should be in seconds. This field is only required for + authentication when uploading a file from the client side. + """ + + extensions: Extensions + """Array of extensions to be applied to the asset. + + Each extension can be configured with specific parameters based on the extension + type. + """ + + folder: str + """The folder path in which the image has to be uploaded. + + If the folder(s) didn't exist before, a new folder(s) is created. + + The folder name can contain: + + - Alphanumeric Characters: `a-z` , `A-Z` , `0-9` + - Special Characters: `/` , `_` , `-` + + Using multiple `/` creates a nested folder. + """ + + is_private_file: Annotated[bool, PropertyInfo(alias="isPrivateFile")] + """Whether to mark the file as private or not. + + If `true`, the file is marked as private and is accessible only using named + transformation or signed URL. + """ + + is_published: Annotated[bool, PropertyInfo(alias="isPublished")] + """Whether to upload file as published or not. + + If `false`, the file is marked as unpublished, which restricts access to the + file only via the media library. Files in draft or unpublished state can only be + publicly accessed after being published. + + The option to upload in draft state is only available in custom enterprise + pricing plans. + """ + + overwrite_ai_tags: Annotated[bool, PropertyInfo(alias="overwriteAITags")] + """ + If set to `true` and a file already exists at the exact location, its AITags + will be removed. Set `overwriteAITags` to `false` to preserve AITags. 
+ """ + + overwrite_custom_metadata: Annotated[bool, PropertyInfo(alias="overwriteCustomMetadata")] + """ + If the request does not have `customMetadata`, and a file already exists at the + exact location, existing customMetadata will be removed. + """ + + overwrite_file: Annotated[bool, PropertyInfo(alias="overwriteFile")] + """ + If `false` and `useUniqueFileName` is also `false`, and a file already exists at + the exact location, upload API will return an error immediately. + """ + + overwrite_tags: Annotated[bool, PropertyInfo(alias="overwriteTags")] + """ + If the request does not have `tags`, and a file already exists at the exact + location, existing tags will be removed. + """ + + public_key: Annotated[str, PropertyInfo(alias="publicKey")] + """Your ImageKit.io public key. + + This field is only required for authentication when uploading a file from the + client side. + """ + + response_fields: Annotated[ + List[ + Literal[ + "tags", + "customCoordinates", + "isPrivateFile", + "embeddedMetadata", + "isPublished", + "customMetadata", + "metadata", + "selectedFieldsSchema", + ] + ], + PropertyInfo(alias="responseFields"), + ] + """Array of response field keys to include in the API response body.""" + + signature: str + """ + HMAC-SHA1 digest of the token+expire using your ImageKit.io private API key as a + key. Learn how to create a signature on the page below. This should be in + lowercase. + + Signature must be calculated on the server-side. This field is only required for + authentication when uploading a file from the client side. + """ + + tags: SequenceNotStr[str] + """Set the tags while uploading the file. Provide an array of tag strings (e.g. + + `["tag1", "tag2", "tag3"]`). The combined length of all tag characters must not + exceed 500, and the `%` character is not allowed. If this field is not specified + and the file is overwritten, the existing tags will be removed. + """ + + transformation: Transformation + """Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., + resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) + in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + """ + + use_unique_file_name: Annotated[bool, PropertyInfo(alias="useUniqueFileName")] + """Whether to use a unique filename for this file or not. + + If `true`, ImageKit.io will add a unique suffix to the filename parameter to get + a unique filename. + + If `false`, then the image is uploaded with the provided filename parameter, and + any existing file with the same name is replaced. + """ + + webhook_url: Annotated[str, PropertyInfo(alias="webhookUrl")] + """ + The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. + """ + + +class TransformationPostTransformation(TypedDict, total=False): + type: Required[Literal["transformation"]] + """Transformation type.""" + + value: Required[str] + """Transformation string (e.g. + + `w-200,h-200`). + Same syntax as ImageKit URL-based transformations. 
+ """ + + +class TransformationPostGifToVideo(TypedDict, total=False): + type: Required[Literal["gif-to-video"]] + """Converts an animated GIF into an MP4.""" + + value: str + """Optional transformation string to apply to the output video. + + **Example**: `q-80` + """ + + +class TransformationPostThumbnail(TypedDict, total=False): + type: Required[Literal["thumbnail"]] + """Generates a thumbnail image.""" + + value: str + """Optional transformation string. + + **Example**: `w-150,h-150` + """ + + +class TransformationPostAbs(TypedDict, total=False): + protocol: Required[Literal["hls", "dash"]] + """Streaming protocol to use (`hls` or `dash`).""" + + type: Required[Literal["abs"]] + """Adaptive Bitrate Streaming (ABS) setup.""" + + value: Required[str] + """ + List of different representations you want to create separated by an underscore. + """ + + +TransformationPost: TypeAlias = Union[ + TransformationPostTransformation, TransformationPostGifToVideo, TransformationPostThumbnail, TransformationPostAbs +] + + +class Transformation(TypedDict, total=False): + """Configure pre-processing (`pre`) and post-processing (`post`) transformations. + + - `pre` — applied before the file is uploaded to the Media Library. + Useful for reducing file size or applying basic optimizations upfront (e.g., resize, compress). + + - `post` — applied immediately after upload. + Ideal for generating transformed versions (like video encodes or thumbnails) in advance, so they're ready for delivery without delay. + + You can mix and match any combination of post-processing types. + """ + + post: Iterable[TransformationPost] + """List of transformations to apply _after_ the file is uploaded. + + Each item must match one of the following types: `transformation`, + `gif-to-video`, `thumbnail`, `abs`. + """ + + pre: str + """Transformation string to apply before uploading the file to the Media Library. + + Useful for optimizing files at ingestion. + """ diff --git a/src/imagekitio/types/file_upload_response.py b/src/imagekitio/types/file_upload_response.py new file mode 100644 index 00000000..e99dc77a --- /dev/null +++ b/src/imagekitio/types/file_upload_response.py @@ -0,0 +1,257 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .metadata import Metadata + +__all__ = ["FileUploadResponse", "AITag", "ExtensionStatus", "SelectedFieldsSchema", "VersionInfo"] + + +class AITag(BaseModel): + confidence: Optional[float] = None + """Confidence score of the tag.""" + + name: Optional[str] = None + """Name of the tag.""" + + source: Optional[str] = None + """Array of `AITags` associated with the image. + + If no `AITags` are set, it will be null. These tags can be added using the + `google-auto-tagging` or `aws-auto-tagging` extensions. + """ + + +class ExtensionStatus(BaseModel): + """ + Extension names with their processing status at the time of completion of the request. It could have one of the following status values: + + `success`: The extension has been successfully applied. + `failed`: The extension has failed and will not be retried. + `pending`: The extension will finish processing in some time. On completion, the final status (success / failed) will be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. 
+ """ + + ai_auto_description: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="ai-auto-description", default=None + ) + + aws_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="aws-auto-tagging", default=None + ) + + google_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="google-auto-tagging", default=None + ) + + remove_bg: Optional[Literal["success", "pending", "failed"]] = FieldInfo(alias="remove-bg", default=None) + + +class SelectedFieldsSchema(BaseModel): + type: Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"] + """Type of the custom metadata field.""" + + default_value: Union[str, float, bool, List[Union[str, float, bool]], None] = FieldInfo( + alias="defaultValue", default=None + ) + """The default value for this custom metadata field. + + The value should match the `type` of custom metadata field. + """ + + is_value_required: Optional[bool] = FieldInfo(alias="isValueRequired", default=None) + """Specifies if the custom metadata field is required or not.""" + + max_length: Optional[float] = FieldInfo(alias="maxLength", default=None) + """Maximum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + max_value: Union[str, float, None] = FieldInfo(alias="maxValue", default=None) + """Maximum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + min_length: Optional[float] = FieldInfo(alias="minLength", default=None) + """Minimum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + min_value: Union[str, float, None] = FieldInfo(alias="minValue", default=None) + """Minimum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + read_only: Optional[bool] = FieldInfo(alias="readOnly", default=None) + """Indicates whether the custom metadata field is read only. + + A read only field cannot be modified after being set. This field is configurable + only via the **Path policy** feature. + """ + + select_options: Optional[List[Union[str, float, bool]]] = FieldInfo(alias="selectOptions", default=None) + """An array of allowed values when field type is `SingleSelect` or `MultiSelect`.""" + + select_options_truncated: Optional[bool] = FieldInfo(alias="selectOptionsTruncated", default=None) + """Specifies if the selectOptions array is truncated. + + It is truncated when number of options are > 100. 
+ """ + + +class VersionInfo(BaseModel): + """An object containing the file or file version's `id` (versionId) and `name`.""" + + id: Optional[str] = None + """Unique identifier of the file version.""" + + name: Optional[str] = None + """Name of the file version.""" + + +class FileUploadResponse(BaseModel): + """Object containing details of a successful upload.""" + + ai_tags: Optional[List[AITag]] = FieldInfo(alias="AITags", default=None) + """An array of tags assigned to the uploaded file by auto tagging.""" + + audio_codec: Optional[str] = FieldInfo(alias="audioCodec", default=None) + """The audio codec used in the video (only for video).""" + + bit_rate: Optional[int] = FieldInfo(alias="bitRate", default=None) + """The bit rate of the video in kbps (only for video).""" + + custom_coordinates: Optional[str] = FieldInfo(alias="customCoordinates", default=None) + """ + Value of custom coordinates associated with the image in the format + `x,y,width,height`. If `customCoordinates` are not defined, then it is `null`. + Send `customCoordinates` in `responseFields` in API request to get the value of + this field. + """ + + custom_metadata: Optional[Dict[str, object]] = FieldInfo(alias="customMetadata", default=None) + """A key-value data associated with the asset. + + Use `responseField` in API request to get `customMetadata` in the upload API + response. Before setting any custom metadata on an asset, you have to create the + field using custom metadata fields API. Send `customMetadata` in + `responseFields` in API request to get the value of this field. + """ + + description: Optional[str] = None + """Optional text to describe the contents of the file. + + Can be set by the user or the ai-auto-description extension. + """ + + duration: Optional[int] = None + """The duration of the video in seconds (only for video).""" + + embedded_metadata: Optional[Dict[str, object]] = FieldInfo(alias="embeddedMetadata", default=None) + """Consolidated embedded metadata associated with the file. + + It includes exif, iptc, and xmp data. Send `embeddedMetadata` in + `responseFields` in API request to get embeddedMetadata in the upload API + response. + """ + + extension_status: Optional[ExtensionStatus] = FieldInfo(alias="extensionStatus", default=None) + """ + Extension names with their processing status at the time of completion of the + request. It could have one of the following status values: + + `success`: The extension has been successfully applied. `failed`: The extension + has failed and will not be retried. `pending`: The extension will finish + processing in some time. On completion, the final status (success / failed) will + be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. + """ + + file_id: Optional[str] = FieldInfo(alias="fileId", default=None) + """Unique fileId. + + Store this fileld in your database, as this will be used to perform update + action on this file. + """ + + file_path: Optional[str] = FieldInfo(alias="filePath", default=None) + """The relative path of the file in the media library e.g. + + `/marketing-assets/new-banner.jpg`. + """ + + file_type: Optional[str] = FieldInfo(alias="fileType", default=None) + """Type of the uploaded file. Possible values are `image`, `non-image`.""" + + height: Optional[float] = None + """Height of the image in pixels (Only for images)""" + + is_private_file: Optional[bool] = FieldInfo(alias="isPrivateFile", default=None) + """Is the file marked as private. 
+ + It can be either `true` or `false`. Send `isPrivateFile` in `responseFields` in + API request to get the value of this field. + """ + + is_published: Optional[bool] = FieldInfo(alias="isPublished", default=None) + """Is the file published or in draft state. + + It can be either `true` or `false`. Send `isPublished` in `responseFields` in + API request to get the value of this field. + """ + + metadata: Optional[Metadata] = None + """Legacy metadata. + + Send `metadata` in `responseFields` in API request to get metadata in the upload + API response. + """ + + name: Optional[str] = None + """Name of the asset.""" + + selected_fields_schema: Optional[Dict[str, SelectedFieldsSchema]] = FieldInfo( + alias="selectedFieldsSchema", default=None + ) + """ + This field is included in the response only if the Path policy feature is + available in the plan. It contains schema definitions for the custom metadata + fields selected for the specified file path. Field selection can only be done + when the Path policy feature is enabled. + + Keys are the names of the custom metadata fields; the value object has details + about the custom metadata schema. + """ + + size: Optional[float] = None + """Size of the image file in Bytes.""" + + tags: Optional[List[str]] = None + """The array of tags associated with the asset. + + If no tags are set, it will be `null`. Send `tags` in `responseFields` in API + request to get the value of this field. + """ + + thumbnail_url: Optional[str] = FieldInfo(alias="thumbnailUrl", default=None) + """In the case of an image, a small thumbnail URL.""" + + url: Optional[str] = None + """A publicly accessible URL of the file.""" + + version_info: Optional[VersionInfo] = FieldInfo(alias="versionInfo", default=None) + """An object containing the file or file version's `id` (versionId) and `name`.""" + + video_codec: Optional[str] = FieldInfo(alias="videoCodec", default=None) + """The video codec used in the video (only for video).""" + + width: Optional[float] = None + """Width of the image in pixels (Only for Images)""" diff --git a/src/imagekitio/types/files/__init__.py b/src/imagekitio/types/files/__init__.py new file mode 100644 index 00000000..b46129a5 --- /dev/null +++ b/src/imagekitio/types/files/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
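Because `FileUploadResponse` extends the SDK's pydantic-based `BaseModel`, a raw API payload can be loaded into it and read back through the snake_case attributes. A minimal sketch, assuming pydantic v2 semantics (`model_validate` with alias-based population); the payload is trimmed and entirely illustrative:

from imagekitio.types.file_upload_response import FileUploadResponse

payload = {
    "fileId": "file_id_placeholder",
    "url": "https://ik.imagekit.io/your_imagekit_id/new-banner.jpg",  # placeholder URL
    "versionInfo": {"id": "version_id_placeholder", "name": "Version 1"},
    "extensionStatus": {"google-auto-tagging": "pending"},
}
response = FileUploadResponse.model_validate(payload)
if response.extension_status and response.extension_status.google_auto_tagging == "pending":
    print("auto-tagging finishes asynchronously; the final status arrives at the webhookUrl")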
+ +from __future__ import annotations + +from .bulk_delete_params import BulkDeleteParams as BulkDeleteParams +from .bulk_add_tags_params import BulkAddTagsParams as BulkAddTagsParams +from .bulk_delete_response import BulkDeleteResponse as BulkDeleteResponse +from .version_list_response import VersionListResponse as VersionListResponse +from .bulk_add_tags_response import BulkAddTagsResponse as BulkAddTagsResponse +from .bulk_remove_tags_params import BulkRemoveTagsParams as BulkRemoveTagsParams +from .version_delete_response import VersionDeleteResponse as VersionDeleteResponse +from .bulk_remove_tags_response import BulkRemoveTagsResponse as BulkRemoveTagsResponse +from .bulk_remove_ai_tags_params import BulkRemoveAITagsParams as BulkRemoveAITagsParams +from .bulk_remove_ai_tags_response import BulkRemoveAITagsResponse as BulkRemoveAITagsResponse +from .metadata_get_from_url_params import MetadataGetFromURLParams as MetadataGetFromURLParams diff --git a/src/imagekitio/types/files/bulk_add_tags_params.py b/src/imagekitio/types/files/bulk_add_tags_params.py new file mode 100644 index 00000000..f83f5776 --- /dev/null +++ b/src/imagekitio/types/files/bulk_add_tags_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["BulkAddTagsParams"] + + +class BulkAddTagsParams(TypedDict, total=False): + file_ids: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="fileIds")]] + """An array of fileIds to which you want to add tags.""" + + tags: Required[SequenceNotStr[str]] + """An array of tags that you want to add to the files.""" diff --git a/src/imagekitio/types/files/bulk_add_tags_response.py b/src/imagekitio/types/files/bulk_add_tags_response.py new file mode 100644 index 00000000..059ae528 --- /dev/null +++ b/src/imagekitio/types/files/bulk_add_tags_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["BulkAddTagsResponse"] + + +class BulkAddTagsResponse(BaseModel): + successfully_updated_file_ids: Optional[List[str]] = FieldInfo(alias="successfullyUpdatedFileIds", default=None) + """An array of fileIds to which tags were successfully added.""" diff --git a/src/imagekitio/types/files/bulk_delete_params.py b/src/imagekitio/types/files/bulk_delete_params.py new file mode 100644 index 00000000..2df1ee5a --- /dev/null +++ b/src/imagekitio/types/files/bulk_delete_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
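The bulk-tagging request body is just the two arrays declared in `BulkAddTagsParams`. A minimal sketch with placeholder IDs and tags:

from imagekitio.types.files.bulk_add_tags_params import BulkAddTagsParams

bulk_add_tags: BulkAddTagsParams = {
    "file_ids": ["file_id_1", "file_id_2"],  # placeholder fileIds
    "tags": ["campaign-2024", "hero-image"],
}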
+ +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["BulkDeleteParams"] + + +class BulkDeleteParams(TypedDict, total=False): + file_ids: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="fileIds")]] + """An array of fileIds which you want to delete.""" diff --git a/src/imagekitio/types/files/bulk_delete_response.py b/src/imagekitio/types/files/bulk_delete_response.py new file mode 100644 index 00000000..af431f3b --- /dev/null +++ b/src/imagekitio/types/files/bulk_delete_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["BulkDeleteResponse"] + + +class BulkDeleteResponse(BaseModel): + successfully_deleted_file_ids: Optional[List[str]] = FieldInfo(alias="successfullyDeletedFileIds", default=None) + """An array of fileIds that were successfully deleted.""" diff --git a/src/imagekitio/types/files/bulk_remove_ai_tags_params.py b/src/imagekitio/types/files/bulk_remove_ai_tags_params.py new file mode 100644 index 00000000..7d69ed3e --- /dev/null +++ b/src/imagekitio/types/files/bulk_remove_ai_tags_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["BulkRemoveAITagsParams"] + + +class BulkRemoveAITagsParams(TypedDict, total=False): + ai_tags: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="AITags")]] + """An array of AITags that you want to remove from the files.""" + + file_ids: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="fileIds")]] + """An array of fileIds from which you want to remove AITags.""" diff --git a/src/imagekitio/types/files/bulk_remove_ai_tags_response.py b/src/imagekitio/types/files/bulk_remove_ai_tags_response.py new file mode 100644 index 00000000..155d03ca --- /dev/null +++ b/src/imagekitio/types/files/bulk_remove_ai_tags_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["BulkRemoveAITagsResponse"] + + +class BulkRemoveAITagsResponse(BaseModel): + successfully_updated_file_ids: Optional[List[str]] = FieldInfo(alias="successfullyUpdatedFileIds", default=None) + """An array of fileIds from which AITags were successfully removed.""" diff --git a/src/imagekitio/types/files/bulk_remove_tags_params.py b/src/imagekitio/types/files/bulk_remove_tags_params.py new file mode 100644 index 00000000..02e7f241 --- /dev/null +++ b/src/imagekitio/types/files/bulk_remove_tags_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._types import SequenceNotStr +from ..._utils import PropertyInfo + +__all__ = ["BulkRemoveTagsParams"] + + +class BulkRemoveTagsParams(TypedDict, total=False): + file_ids: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="fileIds")]] + """An array of fileIds from which you want to remove tags.""" + + tags: Required[SequenceNotStr[str]] + """An array of tags that you want to remove from the files.""" diff --git a/src/imagekitio/types/files/bulk_remove_tags_response.py b/src/imagekitio/types/files/bulk_remove_tags_response.py new file mode 100644 index 00000000..37c1c671 --- /dev/null +++ b/src/imagekitio/types/files/bulk_remove_tags_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["BulkRemoveTagsResponse"] + + +class BulkRemoveTagsResponse(BaseModel): + successfully_updated_file_ids: Optional[List[str]] = FieldInfo(alias="successfullyUpdatedFileIds", default=None) + """An array of fileIds from which tags were successfully removed.""" diff --git a/src/imagekitio/types/files/metadata_get_from_url_params.py b/src/imagekitio/types/files/metadata_get_from_url_params.py new file mode 100644 index 00000000..6fdbb631 --- /dev/null +++ b/src/imagekitio/types/files/metadata_get_from_url_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["MetadataGetFromURLParams"] + + +class MetadataGetFromURLParams(TypedDict, total=False): + url: Required[str] + """Should be a valid file URL. + + It should be accessible using your ImageKit.io account. + """ diff --git a/src/imagekitio/types/files/version_delete_response.py b/src/imagekitio/types/files/version_delete_response.py new file mode 100644 index 00000000..51b121fd --- /dev/null +++ b/src/imagekitio/types/files/version_delete_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["VersionDeleteResponse"] + + +class VersionDeleteResponse(BaseModel): + pass diff --git a/src/imagekitio/types/files/version_list_response.py b/src/imagekitio/types/files/version_list_response.py new file mode 100644 index 00000000..6d0c3f02 --- /dev/null +++ b/src/imagekitio/types/files/version_list_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from ..file import File + +__all__ = ["VersionListResponse"] + +VersionListResponse: TypeAlias = List[File] diff --git a/src/imagekitio/types/folder.py b/src/imagekitio/types/folder.py new file mode 100644 index 00000000..17b72874 --- /dev/null +++ b/src/imagekitio/types/folder.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["Folder"] + + +class Folder(BaseModel): + created_at: Optional[datetime] = FieldInfo(alias="createdAt", default=None) + """Date and time when the folder was created. + + The date and time is in ISO8601 format. + """ + + folder_id: Optional[str] = FieldInfo(alias="folderId", default=None) + """Unique identifier of the asset.""" + + folder_path: Optional[str] = FieldInfo(alias="folderPath", default=None) + """Path of the folder. + + This is the path you would use in the URL to access the folder. For example, if + the folder is at the root of the media library, the path will be /folder. If the + folder is inside another folder named images, the path will be /images/folder. + """ + + name: Optional[str] = None + """Name of the asset.""" + + type: Optional[Literal["folder"]] = None + """Type of the asset.""" + + updated_at: Optional[datetime] = FieldInfo(alias="updatedAt", default=None) + """Date and time when the folder was last updated. + + The date and time is in ISO8601 format. + """ diff --git a/src/imagekitio/types/folder_copy_params.py b/src/imagekitio/types/folder_copy_params.py new file mode 100644 index 00000000..4ccc7fa7 --- /dev/null +++ b/src/imagekitio/types/folder_copy_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FolderCopyParams"] + + +class FolderCopyParams(TypedDict, total=False): + destination_path: Required[Annotated[str, PropertyInfo(alias="destinationPath")]] + """ + Full path to the destination folder where you want to copy the source folder + into. + """ + + source_folder_path: Required[Annotated[str, PropertyInfo(alias="sourceFolderPath")]] + """The full path to the source folder you want to copy.""" + + include_versions: Annotated[bool, PropertyInfo(alias="includeVersions")] + """Option to copy all versions of files that are nested inside the selected folder. + + By default, only the current version of each file will be copied. When set to + true, all versions of each file will be copied. Default value - `false`. + """ diff --git a/src/imagekitio/types/folder_copy_response.py b/src/imagekitio/types/folder_copy_response.py new file mode 100644 index 00000000..69c34c1e --- /dev/null +++ b/src/imagekitio/types/folder_copy_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["FolderCopyResponse"] + + +class FolderCopyResponse(BaseModel): + """Job submitted successfully. A `jobId` will be returned.""" + + job_id: str = FieldInfo(alias="jobId") + """Unique identifier of the bulk job. + + This can be used to check the status of the bulk job. + """ diff --git a/src/imagekitio/types/folder_create_params.py b/src/imagekitio/types/folder_create_params.py new file mode 100644 index 00000000..82863b56 --- /dev/null +++ b/src/imagekitio/types/folder_create_params.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FolderCreateParams"] + + +class FolderCreateParams(TypedDict, total=False): + folder_name: Required[Annotated[str, PropertyInfo(alias="folderName")]] + """The folder will be created with this name. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) will be replaced by an underscore i.e. + `_`. + """ + + parent_folder_path: Required[Annotated[str, PropertyInfo(alias="parentFolderPath")]] + """ + The folder where the new folder should be created, for root use `/` else the + path e.g. `containing/folder/`. + + Note: If any folder(s) is not present in the parentFolderPath parameter, it will + be automatically created. For example, if you pass `/product/images/summer`, + then `product`, `images`, and `summer` folders will be created if they don't + already exist. + """ diff --git a/src/imagekitio/types/folder_create_response.py b/src/imagekitio/types/folder_create_response.py new file mode 100644 index 00000000..1f10670c --- /dev/null +++ b/src/imagekitio/types/folder_create_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["FolderCreateResponse"] + + +class FolderCreateResponse(BaseModel): + pass diff --git a/src/imagekitio/types/folder_delete_params.py b/src/imagekitio/types/folder_delete_params.py new file mode 100644 index 00000000..8b5ff529 --- /dev/null +++ b/src/imagekitio/types/folder_delete_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FolderDeleteParams"] + + +class FolderDeleteParams(TypedDict, total=False): + folder_path: Required[Annotated[str, PropertyInfo(alias="folderPath")]] + """Full path to the folder you want to delete. For example `/folder/to/delete/`.""" diff --git a/src/imagekitio/types/folder_delete_response.py b/src/imagekitio/types/folder_delete_response.py new file mode 100644 index 00000000..40686cb2 --- /dev/null +++ b/src/imagekitio/types/folder_delete_response.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["FolderDeleteResponse"] + + +class FolderDeleteResponse(BaseModel): + pass diff --git a/src/imagekitio/types/folder_move_params.py b/src/imagekitio/types/folder_move_params.py new file mode 100644 index 00000000..59f63cdf --- /dev/null +++ b/src/imagekitio/types/folder_move_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FolderMoveParams"] + + +class FolderMoveParams(TypedDict, total=False): + destination_path: Required[Annotated[str, PropertyInfo(alias="destinationPath")]] + """ + Full path to the destination folder where you want to move the source folder + into. 
+ """ + + source_folder_path: Required[Annotated[str, PropertyInfo(alias="sourceFolderPath")]] + """The full path to the source folder you want to move.""" diff --git a/src/imagekitio/types/folder_move_response.py b/src/imagekitio/types/folder_move_response.py new file mode 100644 index 00000000..d3fe1d83 --- /dev/null +++ b/src/imagekitio/types/folder_move_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["FolderMoveResponse"] + + +class FolderMoveResponse(BaseModel): + """Job submitted successfully. A `jobId` will be returned.""" + + job_id: str = FieldInfo(alias="jobId") + """Unique identifier of the bulk job. + + This can be used to check the status of the bulk job. + """ diff --git a/src/imagekitio/types/folder_rename_params.py b/src/imagekitio/types/folder_rename_params.py new file mode 100644 index 00000000..8c9caba1 --- /dev/null +++ b/src/imagekitio/types/folder_rename_params.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["FolderRenameParams"] + + +class FolderRenameParams(TypedDict, total=False): + folder_path: Required[Annotated[str, PropertyInfo(alias="folderPath")]] + """The full path to the folder you want to rename.""" + + new_folder_name: Required[Annotated[str, PropertyInfo(alias="newFolderName")]] + """The new name for the folder. + + All characters except alphabets and numbers (inclusive of unicode letters, + marks, and numerals in other languages) and `-` will be replaced by an + underscore i.e. `_`. + """ + + purge_cache: Annotated[bool, PropertyInfo(alias="purgeCache")] + """Option to purge cache for the old nested files and their versions' URLs. + + When set to true, it will internally issue a purge cache request on CDN to + remove the cached content of the old nested files and their versions. There will + only be one purge request for all the nested files, which will be counted + against your monthly purge quota. + + Note: A purge cache request will be issued against + `https://ik.imagekit.io/old/folder/path*` (with a wildcard at the end). This + will remove all nested files, their versions' URLs, and any transformations made + using query parameters on these files or their versions. However, the cache for + file transformations made using path parameters will persist. You can purge them + using the purge API. For more details, refer to the purge API documentation. + + Default value - `false` + """ diff --git a/src/imagekitio/types/folder_rename_response.py b/src/imagekitio/types/folder_rename_response.py new file mode 100644 index 00000000..d3319b76 --- /dev/null +++ b/src/imagekitio/types/folder_rename_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["FolderRenameResponse"] + + +class FolderRenameResponse(BaseModel): + """Job submitted successfully. A `jobId` will be returned.""" + + job_id: str = FieldInfo(alias="jobId") + """Unique identifier of the bulk job. + + This can be used to check the status of the bulk job. 
+ """ diff --git a/src/imagekitio/types/folders/__init__.py b/src/imagekitio/types/folders/__init__.py new file mode 100644 index 00000000..8c60dd28 --- /dev/null +++ b/src/imagekitio/types/folders/__init__.py @@ -0,0 +1,5 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .job_get_response import JobGetResponse as JobGetResponse diff --git a/src/imagekitio/types/folders/job_get_response.py b/src/imagekitio/types/folders/job_get_response.py new file mode 100644 index 00000000..17a11efc --- /dev/null +++ b/src/imagekitio/types/folders/job_get_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["JobGetResponse"] + + +class JobGetResponse(BaseModel): + job_id: Optional[str] = FieldInfo(alias="jobId", default=None) + """Unique identifier of the bulk job.""" + + purge_request_id: Optional[str] = FieldInfo(alias="purgeRequestId", default=None) + """Unique identifier of the purge request. + + This will be present only if `purgeCache` is set to `true` in the rename folder + API request. + """ + + status: Optional[Literal["Pending", "Completed"]] = None + """Status of the bulk job.""" + + type: Optional[Literal["COPY_FOLDER", "MOVE_FOLDER", "RENAME_FOLDER"]] = None + """Type of the bulk job.""" diff --git a/src/imagekitio/types/metadata.py b/src/imagekitio/types/metadata.py new file mode 100644 index 00000000..87ac3341 --- /dev/null +++ b/src/imagekitio/types/metadata.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Dict, List, Optional + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = ["Metadata", "Exif", "ExifExif", "ExifGps", "ExifImage", "ExifInteroperability", "ExifThumbnail"] + + +class ExifExif(BaseModel): + """Object containing Exif details.""" + + aperture_value: Optional[float] = FieldInfo(alias="ApertureValue", default=None) + + color_space: Optional[int] = FieldInfo(alias="ColorSpace", default=None) + + create_date: Optional[str] = FieldInfo(alias="CreateDate", default=None) + + custom_rendered: Optional[int] = FieldInfo(alias="CustomRendered", default=None) + + date_time_original: Optional[str] = FieldInfo(alias="DateTimeOriginal", default=None) + + exif_image_height: Optional[int] = FieldInfo(alias="ExifImageHeight", default=None) + + exif_image_width: Optional[int] = FieldInfo(alias="ExifImageWidth", default=None) + + exif_version: Optional[str] = FieldInfo(alias="ExifVersion", default=None) + + exposure_compensation: Optional[float] = FieldInfo(alias="ExposureCompensation", default=None) + + exposure_mode: Optional[int] = FieldInfo(alias="ExposureMode", default=None) + + exposure_program: Optional[int] = FieldInfo(alias="ExposureProgram", default=None) + + exposure_time: Optional[float] = FieldInfo(alias="ExposureTime", default=None) + + flash: Optional[int] = FieldInfo(alias="Flash", default=None) + + flashpix_version: Optional[str] = FieldInfo(alias="FlashpixVersion", default=None) + + f_number: Optional[float] = FieldInfo(alias="FNumber", default=None) + + focal_length: Optional[int] = FieldInfo(alias="FocalLength", default=None) + + focal_plane_resolution_unit: Optional[int] = FieldInfo(alias="FocalPlaneResolutionUnit", default=None) + + focal_plane_x_resolution: Optional[float] = FieldInfo(alias="FocalPlaneXResolution", default=None) + + focal_plane_y_resolution: Optional[float] = FieldInfo(alias="FocalPlaneYResolution", default=None) + + interop_offset: Optional[int] = FieldInfo(alias="InteropOffset", default=None) + + iso: Optional[int] = FieldInfo(alias="ISO", default=None) + + metering_mode: Optional[int] = FieldInfo(alias="MeteringMode", default=None) + + scene_capture_type: Optional[int] = FieldInfo(alias="SceneCaptureType", default=None) + + shutter_speed_value: Optional[float] = FieldInfo(alias="ShutterSpeedValue", default=None) + + sub_sec_time: Optional[str] = FieldInfo(alias="SubSecTime", default=None) + + white_balance: Optional[int] = FieldInfo(alias="WhiteBalance", default=None) + + +class ExifGps(BaseModel): + """Object containing GPS information.""" + + gps_version_id: Optional[List[int]] = FieldInfo(alias="GPSVersionID", default=None) + + +class ExifImage(BaseModel): + """Object containing EXIF image information.""" + + exif_offset: Optional[int] = FieldInfo(alias="ExifOffset", default=None) + + gps_info: Optional[int] = FieldInfo(alias="GPSInfo", default=None) + + make: Optional[str] = FieldInfo(alias="Make", default=None) + + model: Optional[str] = FieldInfo(alias="Model", default=None) + + modify_date: Optional[str] = FieldInfo(alias="ModifyDate", default=None) + + orientation: Optional[int] = FieldInfo(alias="Orientation", default=None) + + resolution_unit: Optional[int] = FieldInfo(alias="ResolutionUnit", default=None) + + software: Optional[str] = FieldInfo(alias="Software", default=None) + + x_resolution: Optional[int] = FieldInfo(alias="XResolution", default=None) + + y_cb_cr_positioning: Optional[int] = FieldInfo(alias="YCbCrPositioning", default=None) + + y_resolution: Optional[int] = 
FieldInfo(alias="YResolution", default=None) + + +class ExifInteroperability(BaseModel): + """JSON object.""" + + interop_index: Optional[str] = FieldInfo(alias="InteropIndex", default=None) + + interop_version: Optional[str] = FieldInfo(alias="InteropVersion", default=None) + + +class ExifThumbnail(BaseModel): + """Object containing Thumbnail information.""" + + compression: Optional[int] = FieldInfo(alias="Compression", default=None) + + resolution_unit: Optional[int] = FieldInfo(alias="ResolutionUnit", default=None) + + thumbnail_length: Optional[int] = FieldInfo(alias="ThumbnailLength", default=None) + + thumbnail_offset: Optional[int] = FieldInfo(alias="ThumbnailOffset", default=None) + + x_resolution: Optional[int] = FieldInfo(alias="XResolution", default=None) + + y_resolution: Optional[int] = FieldInfo(alias="YResolution", default=None) + + +class Exif(BaseModel): + exif: Optional[ExifExif] = None + """Object containing Exif details.""" + + gps: Optional[ExifGps] = None + """Object containing GPS information.""" + + image: Optional[ExifImage] = None + """Object containing EXIF image information.""" + + interoperability: Optional[ExifInteroperability] = None + """JSON object.""" + + makernote: Optional[Dict[str, object]] = None + + thumbnail: Optional[ExifThumbnail] = None + """Object containing Thumbnail information.""" + + +class Metadata(BaseModel): + """JSON object containing metadata.""" + + audio_codec: Optional[str] = FieldInfo(alias="audioCodec", default=None) + """The audio codec used in the video (only for video).""" + + bit_rate: Optional[int] = FieldInfo(alias="bitRate", default=None) + """The bit rate of the video in kbps (only for video).""" + + density: Optional[int] = None + """The density of the image in DPI.""" + + duration: Optional[int] = None + """The duration of the video in seconds (only for video).""" + + exif: Optional[Exif] = None + + format: Optional[str] = None + """The format of the file (e.g., 'jpg', 'mp4').""" + + has_color_profile: Optional[bool] = FieldInfo(alias="hasColorProfile", default=None) + """Indicates if the image has a color profile.""" + + has_transparency: Optional[bool] = FieldInfo(alias="hasTransparency", default=None) + """Indicates if the image contains transparent areas.""" + + height: Optional[int] = None + """The height of the image or video in pixels.""" + + p_hash: Optional[str] = FieldInfo(alias="pHash", default=None) + """Perceptual hash of the image.""" + + quality: Optional[int] = None + """The quality indicator of the image.""" + + size: Optional[int] = None + """The file size in bytes.""" + + video_codec: Optional[str] = FieldInfo(alias="videoCodec", default=None) + """The video codec used in the video (only for video).""" + + width: Optional[int] = None + """The width of the image or video in pixels.""" diff --git a/src/imagekitio/types/shared/__init__.py b/src/imagekitio/types/shared/__init__.py new file mode 100644 index 00000000..49f3e91b --- /dev/null +++ b/src/imagekitio/types/shared/__init__.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .overlay import Overlay as Overlay +from .extensions import Extensions as Extensions +from .src_options import SrcOptions as SrcOptions +from .base_overlay import BaseOverlay as BaseOverlay +from .text_overlay import TextOverlay as TextOverlay +from .image_overlay import ImageOverlay as ImageOverlay +from .video_overlay import VideoOverlay as VideoOverlay +from .overlay_timing import OverlayTiming as OverlayTiming +from .transformation import Transformation as Transformation +from .overlay_position import OverlayPosition as OverlayPosition +from .subtitle_overlay import SubtitleOverlay as SubtitleOverlay +from .solid_color_overlay import SolidColorOverlay as SolidColorOverlay +from .streaming_resolution import StreamingResolution as StreamingResolution +from .transformation_position import TransformationPosition as TransformationPosition +from .responsive_image_attributes import ResponsiveImageAttributes as ResponsiveImageAttributes +from .text_overlay_transformation import TextOverlayTransformation as TextOverlayTransformation +from .get_image_attributes_options import GetImageAttributesOptions as GetImageAttributesOptions +from .subtitle_overlay_transformation import SubtitleOverlayTransformation as SubtitleOverlayTransformation +from .solid_color_overlay_transformation import SolidColorOverlayTransformation as SolidColorOverlayTransformation diff --git a/src/imagekitio/types/shared/base_overlay.py b/src/imagekitio/types/shared/base_overlay.py new file mode 100644 index 00000000..fa490a4c --- /dev/null +++ b/src/imagekitio/types/shared/base_overlay.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .overlay_timing import OverlayTiming +from .overlay_position import OverlayPosition + +__all__ = ["BaseOverlay"] + + +class BaseOverlay(BaseModel): + position: Optional[OverlayPosition] = None + + timing: Optional[OverlayTiming] = None diff --git a/src/imagekitio/types/shared/extensions.py b/src/imagekitio/types/shared/extensions.py new file mode 100644 index 00000000..36d0a051 --- /dev/null +++ b/src/imagekitio/types/shared/extensions.py @@ -0,0 +1,78 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from pydantic import Field as FieldInfo + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "Extensions", + "ExtensionItem", + "ExtensionItemRemoveBg", + "ExtensionItemRemoveBgOptions", + "ExtensionItemAutoTaggingExtension", + "ExtensionItemAIAutoDescription", +] + + +class ExtensionItemRemoveBgOptions(BaseModel): + add_shadow: Optional[bool] = None + """Whether to add an artificial shadow to the result. + + Default is false. Note: Adding shadows is currently only supported for car + photos. + """ + + bg_color: Optional[str] = None + """ + Specifies a solid color background using hex code (e.g., "81d4fa", "fff") or + color name (e.g., "green"). If this parameter is set, `bg_image_url` must be + empty. + """ + + bg_image_url: Optional[str] = None + """Sets a background image from a URL. + + If this parameter is set, `bg_color` must be empty. + """ + + semitransparency: Optional[bool] = None + """Allows semi-transparent regions in the result. + + Default is true. Note: Semitransparency is currently only supported for car + windows. 
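The background-removal options above map one-to-one onto the request-side extension payload. An illustrative sketch as a plain dict; the request TypedDict lives in `shared_params.extensions` (not shown in this hunk) and is assumed to mirror these field names:

# Hypothetical request-side payload mirroring ExtensionItemRemoveBgOptions above.
remove_bg_extension = {
    "name": "remove-bg",
    "options": {
        "bg_color": "FFFFFF",  # mutually exclusive with bg_image_url
        "add_shadow": False,   # shadows are currently only supported for car photos
    },
}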
+ """ + + +class ExtensionItemRemoveBg(BaseModel): + name: Literal["remove-bg"] + """Specifies the background removal extension.""" + + options: Optional[ExtensionItemRemoveBgOptions] = None + + +class ExtensionItemAutoTaggingExtension(BaseModel): + max_tags: int = FieldInfo(alias="maxTags") + """Maximum number of tags to attach to the asset.""" + + min_confidence: int = FieldInfo(alias="minConfidence") + """Minimum confidence level for tags to be considered valid.""" + + name: Literal["google-auto-tagging", "aws-auto-tagging"] + """Specifies the auto-tagging extension used.""" + + +class ExtensionItemAIAutoDescription(BaseModel): + name: Literal["ai-auto-description"] + """Specifies the auto description extension.""" + + +ExtensionItem: TypeAlias = Annotated[ + Union[ExtensionItemRemoveBg, ExtensionItemAutoTaggingExtension, ExtensionItemAIAutoDescription], + PropertyInfo(discriminator="name"), +] + +Extensions: TypeAlias = List[ExtensionItem] diff --git a/src/imagekitio/types/shared/get_image_attributes_options.py b/src/imagekitio/types/shared/get_image_attributes_options.py new file mode 100644 index 00000000..2203f5f5 --- /dev/null +++ b/src/imagekitio/types/shared/get_image_attributes_options.py @@ -0,0 +1,59 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from pydantic import Field as FieldInfo + +from .src_options import SrcOptions + +__all__ = ["GetImageAttributesOptions"] + + +class GetImageAttributesOptions(SrcOptions): + """ + Options for generating responsive image attributes including `src`, `srcSet`, and `sizes` for HTML `` elements. + This schema extends `SrcOptions` to add support for responsive image generation with breakpoints. + """ + + device_breakpoints: Optional[List[float]] = FieldInfo(alias="deviceBreakpoints", default=None) + """ + Custom list of **device-width breakpoints** in pixels. These define common + screen widths for responsive image generation. + + Defaults to `[640, 750, 828, 1080, 1200, 1920, 2048, 3840]`. Sorted + automatically. + """ + + image_breakpoints: Optional[List[float]] = FieldInfo(alias="imageBreakpoints", default=None) + """ + Custom list of **image-specific breakpoints** in pixels. Useful for generating + small variants (e.g., placeholders or thumbnails). + + Merged with `deviceBreakpoints` before calculating `srcSet`. Defaults to + `[16, 32, 48, 64, 96, 128, 256, 384]`. Sorted automatically. + """ + + sizes: Optional[str] = None + """ + The value for the HTML `sizes` attribute (e.g., `"100vw"` or + `"(min-width:768px) 50vw, 100vw"`). + + - If it includes one or more `vw` units, breakpoints smaller than the + corresponding percentage of the smallest device width are excluded. + - If it contains no `vw` units, the full breakpoint list is used. + + Enables a width-based strategy and generates `w` descriptors in `srcSet`. + """ + + width: Optional[float] = None + """ + The intended display width of the image in pixels, used **only when the `sizes` + attribute is not provided**. + + Triggers a DPR-based strategy (1x and 2x variants) and generates `x` descriptors + in `srcSet`. + + Ignored if `sizes` is present. + """ diff --git a/src/imagekitio/types/shared/image_overlay.py b/src/imagekitio/types/shared/image_overlay.py new file mode 100644 index 00000000..178864c1 --- /dev/null +++ b/src/imagekitio/types/shared/image_overlay.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal + +from .base_overlay import BaseOverlay + +__all__ = ["ImageOverlay"] + + +class ImageOverlay(BaseOverlay): + input: str + """Specifies the relative path to the image used as an overlay.""" + + type: Literal["image"] + + encoding: Optional[Literal["auto", "plain", "base64"]] = None + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Optional[List["Transformation"]] = None + """Array of transformations to be applied to the overlay image. + + Supported transformations depends on the base/parent asset. See overlays on + [Images](https://imagekit.io/docs/add-overlays-on-images#list-of-supported-image-transformations-in-image-layers) + and + [Videos](https://imagekit.io/docs/add-overlays-on-videos#list-of-transformations-supported-on-image-overlay). + """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/shared/overlay.py b/src/imagekitio/types/shared/overlay.py new file mode 100644 index 00000000..f7c120d5 --- /dev/null +++ b/src/imagekitio/types/shared/overlay.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import TYPE_CHECKING, Union +from typing_extensions import Annotated, TypeAlias, TypeAliasType + +from ..._utils import PropertyInfo +from ..._compat import PYDANTIC_V1 +from .text_overlay import TextOverlay +from .subtitle_overlay import SubtitleOverlay +from .solid_color_overlay import SolidColorOverlay + +__all__ = ["Overlay"] + +if TYPE_CHECKING or not PYDANTIC_V1: + Overlay = TypeAliasType( + "Overlay", + Annotated[ + Union[TextOverlay, "ImageOverlay", "VideoOverlay", SubtitleOverlay, SolidColorOverlay], + PropertyInfo(discriminator="type"), + ], + ) +else: + Overlay: TypeAlias = Annotated[ + Union[TextOverlay, "ImageOverlay", "VideoOverlay", SubtitleOverlay, SolidColorOverlay], + PropertyInfo(discriminator="type"), + ] + +from .image_overlay import ImageOverlay +from .video_overlay import VideoOverlay diff --git a/src/imagekitio/types/shared/overlay_position.py b/src/imagekitio/types/shared/overlay_position.py new file mode 100644 index 00000000..a6fe5f89 --- /dev/null +++ b/src/imagekitio/types/shared/overlay_position.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OverlayPosition"] + + +class OverlayPosition(BaseModel): + focus: Optional[ + Literal["center", "top", "left", "bottom", "right", "top_left", "top_right", "bottom_left", "bottom_right"] + ] = None + """ + Specifies the position of the overlay relative to the parent image or video. + Maps to `lfo` in the URL. + """ + + x: Union[float, str, None] = None + """ + Specifies the x-coordinate of the top-left corner of the base asset where the + overlay's top-left corner will be positioned. It also accepts arithmetic + expressions such as `bw_mul_0.4` or `bw_sub_cw`. Maps to `lx` in the URL. 
Learn + about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ + + y: Union[float, str, None] = None + """ + Specifies the y-coordinate of the top-left corner of the base asset where the + overlay's top-left corner will be positioned. It also accepts arithmetic + expressions such as `bh_mul_0.4` or `bh_sub_ch`. Maps to `ly` in the URL. Learn + about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ diff --git a/src/imagekitio/types/shared/overlay_timing.py b/src/imagekitio/types/shared/overlay_timing.py new file mode 100644 index 00000000..f4d9c67a --- /dev/null +++ b/src/imagekitio/types/shared/overlay_timing.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union + +from ..._models import BaseModel + +__all__ = ["OverlayTiming"] + + +class OverlayTiming(BaseModel): + duration: Union[float, str, None] = None + """ + Specifies the duration (in seconds) during which the overlay should appear on + the base video. Accepts a positive number up to two decimal places (e.g., `20` + or `20.50`) and arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. + Applies only if the base asset is a video. Maps to `ldu` in the URL. + """ + + end: Union[float, str, None] = None + """ + Specifies the end time (in seconds) for when the overlay should disappear from + the base video. If both end and duration are provided, duration is ignored. + Accepts a positive number up to two decimal places (e.g., `20` or `20.50`) and + arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. Applies only if + the base asset is a video. Maps to `leo` in the URL. + """ + + start: Union[float, str, None] = None + """ + Specifies the start time (in seconds) for when the overlay should appear on the + base video. Accepts a positive number up to two decimal places (e.g., `20` or + `20.50`) and arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. + Applies only if the base asset is a video. Maps to `lso` in the URL. + """ diff --git a/src/imagekitio/types/shared/responsive_image_attributes.py b/src/imagekitio/types/shared/responsive_image_attributes.py new file mode 100644 index 00000000..3e9c0ed4 --- /dev/null +++ b/src/imagekitio/types/shared/responsive_image_attributes.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["ResponsiveImageAttributes"] + + +class ResponsiveImageAttributes(BaseModel): + """ + Resulting set of attributes suitable for an HTML `<img>` element. + Useful for enabling responsive image loading with `srcSet` and `sizes`. + """ + + src: str + """URL for the _largest_ candidate (assigned to plain `src`).""" + + sizes: Optional[str] = None + """`sizes` returned (or synthesised as `100vw`). + + The value for the HTML `sizes` attribute. + """ + + src_set: Optional[str] = FieldInfo(alias="srcSet", default=None) + """Candidate set with `w` or `x` descriptors. + + Multiple image URLs separated by commas, each with a descriptor.
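# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated diff). It shows one
# way the `ResponsiveImageAttributes` fields documented above (`src`,
# `src_set`, `sizes`, `width`) could be turned into an HTML <img> tag string.
# The helper function itself is an assumption for the example, not SDK code.
def render_img_tag(attrs) -> str:
    # Start with the required `src`, then append the optional responsive attributes.
    parts = [f'src="{attrs.src}"']
    if attrs.src_set:
        parts.append(f'srcset="{attrs.src_set}"')
    if attrs.sizes:
        parts.append(f'sizes="{attrs.sizes}"')
    if attrs.width is not None:
        parts.append(f'width="{int(attrs.width)}"')
    return f"<img {' '.join(parts)} />"
# ---------------------------------------------------------------------------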
+ """ + + width: Optional[float] = None + """Width as a number (if `width` was provided in the input options).""" diff --git a/src/imagekitio/types/shared/solid_color_overlay.py b/src/imagekitio/types/shared/solid_color_overlay.py new file mode 100644 index 00000000..49c11a9d --- /dev/null +++ b/src/imagekitio/types/shared/solid_color_overlay.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .base_overlay import BaseOverlay +from .solid_color_overlay_transformation import SolidColorOverlayTransformation + +__all__ = ["SolidColorOverlay"] + + +class SolidColorOverlay(BaseOverlay): + color: str + """ + Specifies the color of the block using an RGB hex code (e.g., `FF0000`), an RGBA + code (e.g., `FFAABB50`), or a color name (e.g., `red`). If an 8-character value + is provided, the last two characters represent the opacity level (from `00` for + 0.00 to `99` for 0.99). + """ + + type: Literal["solidColor"] + + transformation: Optional[List[SolidColorOverlayTransformation]] = None + """Control width and height of the solid color overlay. + + Supported transformations depend on the base/parent asset. See overlays on + [Images](https://imagekit.io/docs/add-overlays-on-images#apply-transformation-on-solid-color-overlay) + and + [Videos](https://imagekit.io/docs/add-overlays-on-videos#apply-transformations-on-solid-color-block-overlay). + """ diff --git a/src/imagekitio/types/shared/solid_color_overlay_transformation.py b/src/imagekitio/types/shared/solid_color_overlay_transformation.py new file mode 100644 index 00000000..4e0f1733 --- /dev/null +++ b/src/imagekitio/types/shared/solid_color_overlay_transformation.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["SolidColorOverlayTransformation"] + + +class SolidColorOverlayTransformation(BaseModel): + alpha: Optional[float] = None + """Specifies the transparency level of the solid color overlay. + + Accepts integers from `1` to `9`. + """ + + background: Optional[str] = None + """Specifies the background color of the solid color overlay. + + Accepts an RGB hex code (e.g., `FF0000`), an RGBA code (e.g., `FFAABB50`), or a + color name. + """ + + gradient: Union[Literal[True], str, None] = None + """Creates a linear gradient with two colors. + + Pass `true` for a default gradient, or provide a string for a custom gradient. + Only works if the base asset is an image. See + [gradient](https://imagekit.io/docs/effects-and-enhancements#gradient---e-gradient). + """ + + height: Union[float, str, None] = None + """Controls the height of the solid color overlay. + + Accepts a numeric value or an arithmetic expression. Learn about + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ + + radius: Union[float, Literal["max"], None] = None + """Specifies the corner radius of the solid color overlay. + + Set to `max` for circular or oval shape. See + [radius](https://imagekit.io/docs/effects-and-enhancements#radius---r). + """ + + width: Union[float, str, None] = None + """Controls the width of the solid color overlay. + + Accepts a numeric value or an arithmetic expression (e.g., `bw_mul_0.2` or + `bh_div_2`). 
Learn about + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ diff --git a/src/imagekitio/types/shared/src_options.py b/src/imagekitio/types/shared/src_options.py new file mode 100644 index 00000000..7f833284 --- /dev/null +++ b/src/imagekitio/types/shared/src_options.py @@ -0,0 +1,82 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Optional + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel +from .transformation_position import TransformationPosition + +__all__ = ["SrcOptions"] + + +class SrcOptions(BaseModel): + """Options for generating ImageKit URLs with transformations. + + See the [Transformations guide](https://imagekit.io/docs/transformations). + """ + + src: str + """Accepts a relative or absolute path of the resource. + + If a relative path is provided, it is appended to the `urlEndpoint`. If an + absolute path is provided, `urlEndpoint` is ignored. + """ + + url_endpoint: str = FieldInfo(alias="urlEndpoint") + """ + Get your urlEndpoint from the + [ImageKit dashboard](https://imagekit.io/dashboard/url-endpoints). + """ + + expires_in: Optional[float] = FieldInfo(alias="expiresIn", default=None) + """When you want the signed URL to expire, specified in seconds. + + If `expiresIn` is anything above 0, the URL will always be signed even if + `signed` is set to false. If not specified and `signed` is `true`, the signed + URL will not expire (valid indefinitely). + + Example: Setting `expiresIn: 3600` will make the URL expire 1 hour from + generation time. After the expiry time, the signed URL will no longer be valid + and ImageKit will return a 401 Unauthorized status code. + + [Learn more](https://imagekit.io/docs/media-delivery-basic-security#how-to-generate-signed-urls). + """ + + query_parameters: Optional[Dict[str, str]] = FieldInfo(alias="queryParameters", default=None) + """ + These are additional query parameters that you want to add to the final URL. + They can be any query parameters and not necessarily related to ImageKit. This + is especially useful if you want to add a versioning parameter to your URLs. + """ + + signed: Optional[bool] = None + """Whether to sign the URL or not. + + Set this to `true` if you want to generate a signed URL. If `signed` is `true` + and `expiresIn` is not specified, the signed URL will not expire (valid + indefinitely). Note: If `expiresIn` is set to any value above 0, the URL will + always be signed regardless of this setting. + [Learn more](https://imagekit.io/docs/media-delivery-basic-security#how-to-generate-signed-urls). + """ + + transformation: Optional[List["Transformation"]] = None + """An array of objects specifying the transformations to be applied in the URL. + + If more than one transformation is specified, they are applied in the order they + are specified as chained transformations. See + [Chained transformations](https://imagekit.io/docs/transformations#chained-transformations). + """ + + transformation_position: Optional[TransformationPosition] = FieldInfo(alias="transformationPosition", default=None) + """ + By default, the transformation string is added as a query parameter in the URL, + e.g., `?tr=w-100,h-100`. If you want to add the transformation string in the + path of the URL, set this to `path`. Learn more in the + [Transformations guide](https://imagekit.io/docs/transformations). 
+ """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/shared/streaming_resolution.py b/src/imagekitio/types/shared/streaming_resolution.py new file mode 100644 index 00000000..6eb30085 --- /dev/null +++ b/src/imagekitio/types/shared/streaming_resolution.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["StreamingResolution"] + +StreamingResolution: TypeAlias = Literal["240", "360", "480", "720", "1080", "1440", "2160"] diff --git a/src/imagekitio/types/shared/subtitle_overlay.py b/src/imagekitio/types/shared/subtitle_overlay.py new file mode 100644 index 00000000..f44f3c4e --- /dev/null +++ b/src/imagekitio/types/shared/subtitle_overlay.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .base_overlay import BaseOverlay +from .subtitle_overlay_transformation import SubtitleOverlayTransformation + +__all__ = ["SubtitleOverlay"] + + +class SubtitleOverlay(BaseOverlay): + input: str + """Specifies the relative path to the subtitle file used as an overlay.""" + + type: Literal["subtitle"] + + encoding: Optional[Literal["auto", "plain", "base64"]] = None + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Optional[List[SubtitleOverlayTransformation]] = None + """Control styling of the subtitle. + + See + [Styling subtitles](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer). + """ diff --git a/src/imagekitio/types/shared/subtitle_overlay_transformation.py b/src/imagekitio/types/shared/subtitle_overlay_transformation.py new file mode 100644 index 00000000..2f7c739d --- /dev/null +++ b/src/imagekitio/types/shared/subtitle_overlay_transformation.py @@ -0,0 +1,80 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["SubtitleOverlayTransformation"] + + +class SubtitleOverlayTransformation(BaseModel): + """Subtitle styling options. + + [Learn more](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) from the docs. + """ + + background: Optional[str] = None + """ + Specifies the subtitle background color using a standard color name, an RGB + color code (e.g., FF0000), or an RGBA color code (e.g., FFAABB50). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + color: Optional[str] = None + """ + Sets the font color of the subtitle text using a standard color name, an RGB + color code (e.g., FF0000), or an RGBA color code (e.g., FFAABB50). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_family: Optional[str] = FieldInfo(alias="fontFamily", default=None) + """Font family for subtitles. + + Refer to the + [supported fonts](https://imagekit.io/docs/add-overlays-on-images#supported-text-font-list). 
+ """ + + font_outline: Optional[str] = FieldInfo(alias="fontOutline", default=None) + """Sets the font outline of the subtitle text. + + Requires the outline width (an integer) and the outline color (as an RGB color + code, RGBA color code, or standard web color name) separated by an underscore. + Example: `fol-2_blue` (outline width of 2px and outline color blue), + `fol-2_A1CCDD` (outline width of 2px and outline color `#A1CCDD`) and + `fol-2_A1CCDD50` (outline width of 2px and outline color `#A1CCDD` at 50% + opacity). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_shadow: Optional[str] = FieldInfo(alias="fontShadow", default=None) + """Sets the font shadow for the subtitle text. + + Requires the shadow color (as an RGB color code, RGBA color code, or standard + web color name) and shadow indent (an integer) separated by an underscore. + Example: `fsh-blue_2` (shadow color blue, indent of 2px), `fsh-A1CCDD_3` (shadow + color `#A1CCDD`, indent of 3px), `fsh-A1CCDD50_3` (shadow color `#A1CCDD` at 50% + opacity, indent of 3px). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_size: Optional[float] = FieldInfo(alias="fontSize", default=None) + """Sets the font size of subtitle text. + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + typography: Optional[Literal["b", "i", "b_i"]] = None + """Sets the typography style of the subtitle text. + + Supports values are `b` for bold, `i` for italics, and `b_i` for bold with + italics. + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ diff --git a/src/imagekitio/types/shared/text_overlay.py b/src/imagekitio/types/shared/text_overlay.py new file mode 100644 index 00000000..b1568340 --- /dev/null +++ b/src/imagekitio/types/shared/text_overlay.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .base_overlay import BaseOverlay +from .text_overlay_transformation import TextOverlayTransformation + +__all__ = ["TextOverlay"] + + +class TextOverlay(BaseOverlay): + text: str + """Specifies the text to be displayed in the overlay. + + The SDK automatically handles special characters and encoding. + """ + + type: Literal["text"] + + encoding: Optional[Literal["auto", "plain", "base64"]] = None + """ + Text can be included in the layer as either `i-{input}` (plain text) or + `ie-{base64_encoded_input}` (base64). By default, the SDK selects the + appropriate format based on the input text. To always use base64 + (`ie-{base64}`), set this parameter to `base64`. To always use plain text + (`i-{input}`), set it to `plain`. + """ + + transformation: Optional[List[TextOverlayTransformation]] = None + """Control styling of the text overlay. + + See + [Text overlays](https://imagekit.io/docs/add-overlays-on-images#text-overlay). + """ diff --git a/src/imagekitio/types/shared/text_overlay_transformation.py b/src/imagekitio/types/shared/text_overlay_transformation.py new file mode 100644 index 00000000..8aa07111 --- /dev/null +++ b/src/imagekitio/types/shared/text_overlay_transformation.py @@ -0,0 +1,99 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["TextOverlayTransformation"] + + +class TextOverlayTransformation(BaseModel): + alpha: Optional[float] = None + """Specifies the transparency level of the text overlay. + + Accepts integers from `1` to `9`. + """ + + background: Optional[str] = None + """ + Specifies the background color of the text overlay. Accepts an RGB hex code, an + RGBA code, or a color name. + """ + + flip: Optional[Literal["h", "v", "h_v", "v_h"]] = None + """Flip the text overlay horizontally, vertically, or both.""" + + font_color: Optional[str] = FieldInfo(alias="fontColor", default=None) + """Specifies the font color of the overlaid text. + + Accepts an RGB hex code (e.g., `FF0000`), an RGBA code (e.g., `FFAABB50`), or a + color name. + """ + + font_family: Optional[str] = FieldInfo(alias="fontFamily", default=None) + """Specifies the font family of the overlaid text. + + Choose from the supported fonts list or use a custom font. See + [Supported fonts](https://imagekit.io/docs/add-overlays-on-images#supported-text-font-list) + and + [Custom font](https://imagekit.io/docs/add-overlays-on-images#change-font-family-in-text-overlay). + """ + + font_size: Union[float, str, None] = FieldInfo(alias="fontSize", default=None) + """Specifies the font size of the overlaid text. + + Accepts a numeric value or an arithmetic expression. + """ + + inner_alignment: Optional[Literal["left", "right", "center"]] = FieldInfo(alias="innerAlignment", default=None) + """ + Specifies the inner alignment of the text when width is more than the text + length. + """ + + line_height: Union[float, str, None] = FieldInfo(alias="lineHeight", default=None) + """Specifies the line height of the text overlay. + + Accepts integer values representing line height in points. It can also accept + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations) + such as `bw_mul_0.2`, or `bh_div_20`. + """ + + padding: Union[float, str, None] = None + """ + Specifies the padding around the overlaid text. Can be provided as a single + positive integer or multiple values separated by underscores (following CSS + shorthand order). Arithmetic expressions are also accepted. + """ + + radius: Union[float, Literal["max"], None] = None + """ + Specifies the corner radius of the text overlay. Set to `max` to achieve a + circular or oval shape. + """ + + rotation: Union[float, str, None] = None + """ + Specifies the rotation angle of the text overlay. Accepts a numeric value for + clockwise rotation or a string prefixed with "N" for counter-clockwise rotation. + """ + + typography: Optional[str] = None + """Specifies the typography style of the text. Supported values: + + - Single styles: `b` (bold), `i` (italic), `strikethrough`. + - Combinations: Any combination separated by underscores, e.g., `b_i`, + `b_i_strikethrough`. + """ + + width: Union[float, str, None] = None + """Specifies the maximum width (in pixels) of the overlaid text. + + The text wraps automatically, and arithmetic expressions (e.g., `bw_mul_0.2` or + `bh_div_2`) are supported. Useful when used in conjunction with the + `background`. Learn about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). 
+ """ diff --git a/src/imagekitio/types/shared/transformation.py b/src/imagekitio/types/shared/transformation.py new file mode 100644 index 00000000..c0f42d1b --- /dev/null +++ b/src/imagekitio/types/shared/transformation.py @@ -0,0 +1,434 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel +from .streaming_resolution import StreamingResolution + +__all__ = ["Transformation"] + + +class Transformation(BaseModel): + """The SDK provides easy-to-use names for transformations. + + These names are converted to the corresponding transformation string before being added to the URL. + SDKs are updated regularly to support new transformations. If you want to use a transformation that is not supported by the SDK, + You can use the `raw` parameter to pass the transformation string directly. + See the [Transformations documentation](https://imagekit.io/docs/transformations). + """ + + ai_change_background: Optional[str] = FieldInfo(alias="aiChangeBackground", default=None) + """Uses AI to change the background. + + Provide a text prompt or a base64-encoded prompt, e.g., `prompt-snow road` or + `prompte-[urlencoded_base64_encoded_text]`. Not supported inside overlay. See + [AI Change Background](https://imagekit.io/docs/ai-transformations#change-background-e-changebg). + """ + + ai_drop_shadow: Union[Literal[True], str, None] = FieldInfo(alias="aiDropShadow", default=None) + """ + Adds an AI-based drop shadow around a foreground object on a transparent or + removed background. Optionally, control the direction, elevation, and saturation + of the light source (e.g., `az-45` to change light direction). Pass `true` for + the default drop shadow, or provide a string for a custom drop shadow. Supported + inside overlay. See + [AI Drop Shadow](https://imagekit.io/docs/ai-transformations#ai-drop-shadow-e-dropshadow). + """ + + ai_edit: Optional[str] = FieldInfo(alias="aiEdit", default=None) + """Uses AI to edit images based on a text prompt. + + Provide a text prompt or a base64-encoded prompt, e.g., `prompt-snow road` or + `prompte-[urlencoded_base64_encoded_text]`. Not supported inside overlay. + See [AI Edit](https://imagekit.io/docs/ai-transformations#edit-image-e-edit). + """ + + ai_remove_background: Optional[Literal[True]] = FieldInfo(alias="aiRemoveBackground", default=None) + """Applies ImageKit's in-house background removal. + + Supported inside overlay. See + [AI Background Removal](https://imagekit.io/docs/ai-transformations#imagekit-background-removal-e-bgremove). + """ + + ai_remove_background_external: Optional[Literal[True]] = FieldInfo(alias="aiRemoveBackgroundExternal", default=None) + """Uses third-party background removal. + + Note: It is recommended to use aiRemoveBackground, ImageKit's in-house solution, + which is more cost-effective. Supported inside overlay. See + [External Background Removal](https://imagekit.io/docs/ai-transformations#background-removal-e-removedotbg). + """ + + ai_retouch: Optional[Literal[True]] = FieldInfo(alias="aiRetouch", default=None) + """Performs AI-based retouching to improve faces or product shots. + + Not supported inside overlay. See + [AI Retouch](https://imagekit.io/docs/ai-transformations#retouch-e-retouch). 
+ """ + + ai_upscale: Optional[Literal[True]] = FieldInfo(alias="aiUpscale", default=None) + """Upscales images beyond their original dimensions using AI. + + Not supported inside overlay. See + [AI Upscale](https://imagekit.io/docs/ai-transformations#upscale-e-upscale). + """ + + ai_variation: Optional[Literal[True]] = FieldInfo(alias="aiVariation", default=None) + """Generates a variation of an image using AI. + + This produces a new image with slight variations from the original, such as + changes in color, texture, and other visual elements, while preserving the + structure and essence of the original image. Not supported inside overlay. See + [AI Generate Variations](https://imagekit.io/docs/ai-transformations#generate-variations-of-an-image-e-genvar). + """ + + aspect_ratio: Union[float, str, None] = FieldInfo(alias="aspectRatio", default=None) + """Specifies the aspect ratio for the output, e.g., "ar-4-3". + + Typically used with either width or height (but not both). For example: + aspectRatio = `4:3`, `4_3`, or an expression like `iar_div_2`. See + [Image resize and crop – Aspect ratio](https://imagekit.io/docs/image-resize-and-crop#aspect-ratio---ar). + """ + + audio_codec: Optional[Literal["aac", "opus", "none"]] = FieldInfo(alias="audioCodec", default=None) + """Specifies the audio codec, e.g., `aac`, `opus`, or `none`. + + See [Audio codec](https://imagekit.io/docs/video-optimization#audio-codec---ac). + """ + + background: Optional[str] = None + """ + Specifies the background to be used in conjunction with certain cropping + strategies when resizing an image. + + - A solid color: e.g., `red`, `F3F3F3`, `AAFF0010`. See + [Solid color background](https://imagekit.io/docs/effects-and-enhancements#solid-color-background). + - A blurred background: e.g., `blurred`, `blurred_25_N15`, etc. See + [Blurred background](https://imagekit.io/docs/effects-and-enhancements#blurred-background). + - Expand the image boundaries using generative fill: `genfill`. Not supported + inside overlay. Optionally, control the background scene by passing a text + prompt: `genfill[:-prompt-${text}]` or + `genfill[:-prompte-${urlencoded_base64_encoded_text}]`. See + [Generative fill background](https://imagekit.io/docs/ai-transformations#generative-fill-bg-genfill). + """ + + blur: Optional[float] = None + """Specifies the Gaussian blur level. + + Accepts an integer value between 1 and 100, or an expression like `bl-10`. See + [Blur](https://imagekit.io/docs/effects-and-enhancements#blur---bl). + """ + + border: Optional[str] = None + """Adds a border to the output media. + + Accepts a string in the format `_` (e.g., `5_FFF000` for + a 5px yellow border), or an expression like `ih_div_20_FF00FF`. See + [Border](https://imagekit.io/docs/effects-and-enhancements#border---b). + """ + + color_profile: Optional[bool] = FieldInfo(alias="colorProfile", default=None) + """ + Indicates whether the output image should retain the original color profile. See + [Color profile](https://imagekit.io/docs/image-optimization#color-profile---cp). + """ + + contrast_stretch: Optional[Literal[True]] = FieldInfo(alias="contrastStretch", default=None) + """ + Automatically enhances the contrast of an image (contrast stretch). See + [Contrast Stretch](https://imagekit.io/docs/effects-and-enhancements#contrast-stretch---e-contrast). + """ + + crop: Optional[Literal["force", "at_max", "at_max_enlarge", "at_least", "maintain_ratio"]] = None + """Crop modes for image resizing. 
+ + See + [Crop modes & focus](https://imagekit.io/docs/image-resize-and-crop#crop-crop-modes--focus). + """ + + crop_mode: Optional[Literal["pad_resize", "extract", "pad_extract"]] = FieldInfo(alias="cropMode", default=None) + """Additional crop modes for image resizing. + + See + [Crop modes & focus](https://imagekit.io/docs/image-resize-and-crop#crop-crop-modes--focus). + """ + + default_image: Optional[str] = FieldInfo(alias="defaultImage", default=None) + """ + Specifies a fallback image if the resource is not found, e.g., a URL or file + path. See + [Default image](https://imagekit.io/docs/image-transformation#default-image---di). + """ + + dpr: Optional[float] = None + """ + Accepts values between 0.1 and 5, or `auto` for automatic device pixel ratio + (DPR) calculation. See + [DPR](https://imagekit.io/docs/image-resize-and-crop#dpr---dpr). + """ + + duration: Union[float, str, None] = None + """Specifies the duration (in seconds) for trimming videos, e.g., `5` or `10.5`. + + Typically used with startOffset to indicate the length from the start offset. + Arithmetic expressions are supported. See + [Trim videos – Duration](https://imagekit.io/docs/trim-videos#duration---du). + """ + + end_offset: Union[float, str, None] = FieldInfo(alias="endOffset", default=None) + """Specifies the end offset (in seconds) for trimming videos, e.g., `5` or `10.5`. + + Typically used with startOffset to define a time window. Arithmetic expressions + are supported. See + [Trim videos – End offset](https://imagekit.io/docs/trim-videos#end-offset---eo). + """ + + flip: Optional[Literal["h", "v", "h_v", "v_h"]] = None + """Flips or mirrors an image either horizontally, vertically, or both. + + Acceptable values: `h` (horizontal), `v` (vertical), `h_v` (horizontal and + vertical), or `v_h`. See + [Flip](https://imagekit.io/docs/effects-and-enhancements#flip---fl). + """ + + focus: Optional[str] = None + """ + Refines padding and cropping behavior for pad resize, maintain ratio, and + extract crop modes. Supports manual positions and coordinate-based focus. With + AI-based cropping, you can automatically keep key subjects in frame—such as + faces or detected objects (e.g., `fo-face`, `fo-person`, `fo-car`)— while + resizing. + + - See [Focus](https://imagekit.io/docs/image-resize-and-crop#focus---fo). + - [Object aware cropping](https://imagekit.io/docs/image-resize-and-crop#object-aware-cropping---fo-object-name) + """ + + format: Optional[Literal["auto", "webp", "jpg", "jpeg", "png", "gif", "svg", "mp4", "webm", "avif", "orig"]] = None + """ + Specifies the output format for images or videos, e.g., `jpg`, `png`, `webp`, + `mp4`, or `auto`. You can also pass `orig` for images to return the original + format. ImageKit automatically delivers images and videos in the optimal format + based on device support unless overridden by the dashboard settings or the + format parameter. See + [Image format](https://imagekit.io/docs/image-optimization#format---f) and + [Video format](https://imagekit.io/docs/video-optimization#format---f). + """ + + gradient: Union[Literal[True], str, None] = None + """Creates a linear gradient with two colors. + + Pass `true` for a default gradient, or provide a string for a custom gradient. + See + [Gradient](https://imagekit.io/docs/effects-and-enhancements#gradient---e-gradient). + """ + + grayscale: Optional[Literal[True]] = None + """Enables a grayscale effect for images. + + See + [Grayscale](https://imagekit.io/docs/effects-and-enhancements#grayscale---e-grayscale). 
+ """ + + height: Union[float, str, None] = None + """Specifies the height of the output. + + If a value between 0 and 1 is provided, it is treated as a percentage (e.g., + `0.5` represents 50% of the original height). You can also supply arithmetic + expressions (e.g., `ih_mul_0.5`). Height transformation – + [Images](https://imagekit.io/docs/image-resize-and-crop#height---h) · + [Videos](https://imagekit.io/docs/video-resize-and-crop#height---h) + """ + + lossless: Optional[bool] = None + """ + Specifies whether the output image (in JPEG or PNG) should be compressed + losslessly. See + [Lossless compression](https://imagekit.io/docs/image-optimization#lossless-webp-and-png---lo). + """ + + metadata: Optional[bool] = None + """By default, ImageKit removes all metadata during automatic image compression. + + Set this to true to preserve metadata. See + [Image metadata](https://imagekit.io/docs/image-optimization#image-metadata---md). + """ + + named: Optional[str] = None + """Named transformation reference. + + See + [Named transformations](https://imagekit.io/docs/transformations#named-transformations). + """ + + opacity: Optional[float] = None + """Specifies the opacity level of the output image. + + See [Opacity](https://imagekit.io/docs/effects-and-enhancements#opacity---o). + """ + + original: Optional[bool] = None + """ + If set to true, serves the original file without applying any transformations. + See + [Deliver original file as-is](https://imagekit.io/docs/core-delivery-features#deliver-original-file-as-is---orig-true). + """ + + overlay: Optional["Overlay"] = None + """Specifies an overlay to be applied on the parent image or video. + + ImageKit supports overlays including images, text, videos, subtitles, and solid + colors. See + [Overlay using layers](https://imagekit.io/docs/transformations#overlay-using-layers). + """ + + page: Union[float, str, None] = None + """ + Extracts a specific page or frame from multi-page or layered files (PDF, PSD, + AI). For example, specify by number (e.g., `2`), a range (e.g., `3-4` for the + 2nd and 3rd layers), or by name (e.g., `name-layer-4` for a PSD layer). See + [Thumbnail extraction](https://imagekit.io/docs/vector-and-animated-images#get-thumbnail-from-psd-pdf-ai-eps-and-animated-files). + """ + + progressive: Optional[bool] = None + """Specifies whether the output JPEG image should be rendered progressively. + + Progressive loading begins with a low-quality, pixelated version of the full + image, which gradually improves to provide a faster perceived load time. See + [Progressive images](https://imagekit.io/docs/image-optimization#progressive-image---pr). + """ + + quality: Optional[float] = None + """ + Specifies the quality of the output image for lossy formats such as JPEG, WebP, + and AVIF. A higher quality value results in a larger file size with better + quality, while a lower value produces a smaller file size with reduced quality. + See [Quality](https://imagekit.io/docs/image-optimization#quality---q). + """ + + radius: Union[float, Literal["max"], None] = None + """ + Specifies the corner radius for rounded corners (e.g., 20) or `max` for circular + or oval shape. See + [Radius](https://imagekit.io/docs/effects-and-enhancements#radius---r). + """ + + raw: Optional[str] = None + """Pass any transformation not directly supported by the SDK. + + This transformation string is appended to the URL as provided. + """ + + rotation: Union[float, str, None] = None + """Specifies the rotation angle in degrees. 
+ + Positive values rotate the image clockwise; you can also use, for example, `N40` + for counterclockwise rotation or `auto` to use the orientation specified in the + image's EXIF data. For videos, only the following values are supported: 0, 90, + 180, 270, or 360. See + [Rotate](https://imagekit.io/docs/effects-and-enhancements#rotate---rt). + """ + + shadow: Union[Literal[True], str, None] = None + """Adds a shadow beneath solid objects in an image with a transparent background. + + For AI-based drop shadows, refer to aiDropShadow. Pass `true` for a default + shadow, or provide a string for a custom shadow. See + [Shadow](https://imagekit.io/docs/effects-and-enhancements#shadow---e-shadow). + """ + + sharpen: Union[Literal[True], float, None] = None + """Sharpens the input image, highlighting edges and finer details. + + Pass `true` for default sharpening, or provide a numeric value for custom + sharpening. See + [Sharpen](https://imagekit.io/docs/effects-and-enhancements#sharpen---e-sharpen). + """ + + start_offset: Union[float, str, None] = FieldInfo(alias="startOffset", default=None) + """Specifies the start offset (in seconds) for trimming videos, e.g., `5` or + `10.5`. + + Arithmetic expressions are also supported. See + [Trim videos – Start offset](https://imagekit.io/docs/trim-videos#start-offset---so). + """ + + streaming_resolutions: Optional[List[StreamingResolution]] = FieldInfo(alias="streamingResolutions", default=None) + """ + An array of resolutions for adaptive bitrate streaming, e.g., [`240`, `360`, + `480`, `720`, `1080`]. See + [Adaptive Bitrate Streaming](https://imagekit.io/docs/adaptive-bitrate-streaming). + """ + + trim: Union[Literal[True], float, None] = None + """Useful for images with a solid or nearly solid background and a central object. + + This parameter trims the background, leaving only the central object in the + output image. See + [Trim edges](https://imagekit.io/docs/effects-and-enhancements#trim-edges---t). + """ + + unsharp_mask: Union[Literal[True], str, None] = FieldInfo(alias="unsharpMask", default=None) + """Applies Unsharp Masking (USM), an image sharpening technique. + + Pass `true` for a default unsharp mask, or provide a string for a custom unsharp + mask. See + [Unsharp Mask](https://imagekit.io/docs/effects-and-enhancements#unsharp-mask---e-usm). + """ + + video_codec: Optional[Literal["h264", "vp9", "av1", "none"]] = FieldInfo(alias="videoCodec", default=None) + """Specifies the video codec, e.g., `h264`, `vp9`, `av1`, or `none`. + + See [Video codec](https://imagekit.io/docs/video-optimization#video-codec---vc). + """ + + width: Union[float, str, None] = None + """Specifies the width of the output. + + If a value between 0 and 1 is provided, it is treated as a percentage (e.g., + `0.4` represents 40% of the original width). You can also supply arithmetic + expressions (e.g., `iw_div_2`). Width transformation – + [Images](https://imagekit.io/docs/image-resize-and-crop#width---w) · + [Videos](https://imagekit.io/docs/video-resize-and-crop#width---w) + """ + + x: Union[float, str, None] = None + """Focus using cropped image coordinates - X coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + x_center: Union[float, str, None] = FieldInfo(alias="xCenter", default=None) + """Focus using cropped image coordinates - X center coordinate. 
+ + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + y: Union[float, str, None] = None + """Focus using cropped image coordinates - Y coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + y_center: Union[float, str, None] = FieldInfo(alias="yCenter", default=None) + """Focus using cropped image coordinates - Y center coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + zoom: Optional[float] = None + """ + Accepts a numeric value that determines how much to zoom in or out of the + cropped area. It should be used in conjunction with fo-face or fo-<object_name>. + See [Zoom](https://imagekit.io/docs/image-resize-and-crop#zoom---z). + """ + + +from .overlay import Overlay diff --git a/src/imagekitio/types/shared/transformation_position.py b/src/imagekitio/types/shared/transformation_position.py new file mode 100644 index 00000000..bded9e81 --- /dev/null +++ b/src/imagekitio/types/shared/transformation_position.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["TransformationPosition"] + +TransformationPosition: TypeAlias = Literal["path", "query"] diff --git a/src/imagekitio/types/shared/video_overlay.py b/src/imagekitio/types/shared/video_overlay.py new file mode 100644 index 00000000..3cc64c64 --- /dev/null +++ b/src/imagekitio/types/shared/video_overlay.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal + +from .base_overlay import BaseOverlay + +__all__ = ["VideoOverlay"] + + +class VideoOverlay(BaseOverlay): + input: str + """Specifies the relative path to the video used as an overlay.""" + + type: Literal["video"] + + encoding: Optional[Literal["auto", "plain", "base64"]] = None + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Optional[List["Transformation"]] = None + """Array of transformations to be applied to the overlay video. + + Except `streamingResolutions`, all other video transformations are supported. + See [Video transformations](https://imagekit.io/docs/video-transformation). + """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/shared_params/__init__.py b/src/imagekitio/types/shared_params/__init__.py new file mode 100644 index 00000000..49f3e91b --- /dev/null +++ b/src/imagekitio/types/shared_params/__init__.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
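# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated diff). The params
# variants re-exported below are TypedDicts, so plain dicts with the declared
# keys type-check. Assuming the params `Transformation` mirrors the pydantic
# model defined above (snake_case field names), a chained transformation that
# resizes the base image and then places a text overlay could be written as:
example_transformations = [
    {"width": 800, "aspect_ratio": "4_3", "crop": "maintain_ratio"},
    {
        "overlay": {
            "type": "text",  # `type` is the discriminator used by the Overlay union
            "text": "Hello from ImageKit",
            "position": {"focus": "bottom"},
        }
    },
]
# ---------------------------------------------------------------------------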
+ +from .overlay import Overlay as Overlay +from .extensions import Extensions as Extensions +from .src_options import SrcOptions as SrcOptions +from .base_overlay import BaseOverlay as BaseOverlay +from .text_overlay import TextOverlay as TextOverlay +from .image_overlay import ImageOverlay as ImageOverlay +from .video_overlay import VideoOverlay as VideoOverlay +from .overlay_timing import OverlayTiming as OverlayTiming +from .transformation import Transformation as Transformation +from .overlay_position import OverlayPosition as OverlayPosition +from .subtitle_overlay import SubtitleOverlay as SubtitleOverlay +from .solid_color_overlay import SolidColorOverlay as SolidColorOverlay +from .streaming_resolution import StreamingResolution as StreamingResolution +from .transformation_position import TransformationPosition as TransformationPosition +from .responsive_image_attributes import ResponsiveImageAttributes as ResponsiveImageAttributes +from .text_overlay_transformation import TextOverlayTransformation as TextOverlayTransformation +from .get_image_attributes_options import GetImageAttributesOptions as GetImageAttributesOptions +from .subtitle_overlay_transformation import SubtitleOverlayTransformation as SubtitleOverlayTransformation +from .solid_color_overlay_transformation import SolidColorOverlayTransformation as SolidColorOverlayTransformation diff --git a/src/imagekitio/types/shared_params/base_overlay.py b/src/imagekitio/types/shared_params/base_overlay.py new file mode 100644 index 00000000..bf3bf1eb --- /dev/null +++ b/src/imagekitio/types/shared_params/base_overlay.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from .overlay_timing import OverlayTiming +from .overlay_position import OverlayPosition + +__all__ = ["BaseOverlay"] + + +class BaseOverlay(TypedDict, total=False): + position: OverlayPosition + + timing: OverlayTiming diff --git a/src/imagekitio/types/shared_params/extensions.py b/src/imagekitio/types/shared_params/extensions.py new file mode 100644 index 00000000..f2ab9d14 --- /dev/null +++ b/src/imagekitio/types/shared_params/extensions.py @@ -0,0 +1,76 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._utils import PropertyInfo + +__all__ = [ + "Extensions", + "ExtensionItem", + "ExtensionItemRemoveBg", + "ExtensionItemRemoveBgOptions", + "ExtensionItemAutoTaggingExtension", + "ExtensionItemAIAutoDescription", +] + + +class ExtensionItemRemoveBgOptions(TypedDict, total=False): + add_shadow: bool + """Whether to add an artificial shadow to the result. + + Default is false. Note: Adding shadows is currently only supported for car + photos. + """ + + bg_color: str + """ + Specifies a solid color background using hex code (e.g., "81d4fa", "fff") or + color name (e.g., "green"). If this parameter is set, `bg_image_url` must be + empty. + """ + + bg_image_url: str + """Sets a background image from a URL. + + If this parameter is set, `bg_color` must be empty. + """ + + semitransparency: bool + """Allows semi-transparent regions in the result. + + Default is true. Note: Semitransparency is currently only supported for car + windows. 
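# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated diff). An
# `Extensions` value is a list of the TypedDicts defined in this module, so a
# background-removal step plus Google auto-tagging could be expressed as plain
# dicts; the numeric values below are arbitrary example values.
example_extensions = [
    {"name": "remove-bg", "options": {"bg_color": "F3F3F3"}},
    {"name": "google-auto-tagging", "max_tags": 5, "min_confidence": 80},
]
# How this list is passed to an upload or update call is outside this diff.
# ---------------------------------------------------------------------------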
+ """ + + +class ExtensionItemRemoveBg(TypedDict, total=False): + name: Required[Literal["remove-bg"]] + """Specifies the background removal extension.""" + + options: ExtensionItemRemoveBgOptions + + +class ExtensionItemAutoTaggingExtension(TypedDict, total=False): + max_tags: Required[Annotated[int, PropertyInfo(alias="maxTags")]] + """Maximum number of tags to attach to the asset.""" + + min_confidence: Required[Annotated[int, PropertyInfo(alias="minConfidence")]] + """Minimum confidence level for tags to be considered valid.""" + + name: Required[Literal["google-auto-tagging", "aws-auto-tagging"]] + """Specifies the auto-tagging extension used.""" + + +class ExtensionItemAIAutoDescription(TypedDict, total=False): + name: Required[Literal["ai-auto-description"]] + """Specifies the auto description extension.""" + + +ExtensionItem: TypeAlias = Union[ + ExtensionItemRemoveBg, ExtensionItemAutoTaggingExtension, ExtensionItemAIAutoDescription +] + +Extensions: TypeAlias = List[ExtensionItem] diff --git a/src/imagekitio/types/shared_params/get_image_attributes_options.py b/src/imagekitio/types/shared_params/get_image_attributes_options.py new file mode 100644 index 00000000..e8bf8b6b --- /dev/null +++ b/src/imagekitio/types/shared_params/get_image_attributes_options.py @@ -0,0 +1,59 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Annotated + +from ..._utils import PropertyInfo +from .src_options import SrcOptions + +__all__ = ["GetImageAttributesOptions"] + + +class GetImageAttributesOptions(SrcOptions, total=False): + """ + Options for generating responsive image attributes including `src`, `srcSet`, and `sizes` for HTML `` elements. + This schema extends `SrcOptions` to add support for responsive image generation with breakpoints. + """ + + device_breakpoints: Annotated[Iterable[float], PropertyInfo(alias="deviceBreakpoints")] + """ + Custom list of **device-width breakpoints** in pixels. These define common + screen widths for responsive image generation. + + Defaults to `[640, 750, 828, 1080, 1200, 1920, 2048, 3840]`. Sorted + automatically. + """ + + image_breakpoints: Annotated[Iterable[float], PropertyInfo(alias="imageBreakpoints")] + """ + Custom list of **image-specific breakpoints** in pixels. Useful for generating + small variants (e.g., placeholders or thumbnails). + + Merged with `deviceBreakpoints` before calculating `srcSet`. Defaults to + `[16, 32, 48, 64, 96, 128, 256, 384]`. Sorted automatically. + """ + + sizes: str + """ + The value for the HTML `sizes` attribute (e.g., `"100vw"` or + `"(min-width:768px) 50vw, 100vw"`). + + - If it includes one or more `vw` units, breakpoints smaller than the + corresponding percentage of the smallest device width are excluded. + - If it contains no `vw` units, the full breakpoint list is used. + + Enables a width-based strategy and generates `w` descriptors in `srcSet`. + """ + + width: float + """ + The intended display width of the image in pixels, used **only when the `sizes` + attribute is not provided**. + + Triggers a DPR-based strategy (1x and 2x variants) and generates `x` descriptors + in `srcSet`. + + Ignored if `sizes` is present. 
+ """ diff --git a/src/imagekitio/types/shared_params/image_overlay.py b/src/imagekitio/types/shared_params/image_overlay.py new file mode 100644 index 00000000..3b7d74e0 --- /dev/null +++ b/src/imagekitio/types/shared_params/image_overlay.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required + +from .base_overlay import BaseOverlay + +__all__ = ["ImageOverlay"] + + +class ImageOverlay(BaseOverlay, total=False): + input: Required[str] + """Specifies the relative path to the image used as an overlay.""" + + type: Required[Literal["image"]] + + encoding: Literal["auto", "plain", "base64"] + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Iterable["Transformation"] + """Array of transformations to be applied to the overlay image. + + Supported transformations depends on the base/parent asset. See overlays on + [Images](https://imagekit.io/docs/add-overlays-on-images#list-of-supported-image-transformations-in-image-layers) + and + [Videos](https://imagekit.io/docs/add-overlays-on-videos#list-of-transformations-supported-on-image-overlay). + """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/shared_params/overlay.py b/src/imagekitio/types/shared_params/overlay.py new file mode 100644 index 00000000..ed4e79a8 --- /dev/null +++ b/src/imagekitio/types/shared_params/overlay.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import TYPE_CHECKING, Union +from typing_extensions import TypeAlias, TypeAliasType + +from ..._compat import PYDANTIC_V1 +from .text_overlay import TextOverlay +from .subtitle_overlay import SubtitleOverlay +from .solid_color_overlay import SolidColorOverlay + +__all__ = ["Overlay"] + +if TYPE_CHECKING or not PYDANTIC_V1: + Overlay = TypeAliasType( + "Overlay", Union[TextOverlay, "ImageOverlay", "VideoOverlay", SubtitleOverlay, SolidColorOverlay] + ) +else: + Overlay: TypeAlias = Union[TextOverlay, "ImageOverlay", "VideoOverlay", SubtitleOverlay, SolidColorOverlay] + +from .image_overlay import ImageOverlay +from .video_overlay import VideoOverlay diff --git a/src/imagekitio/types/shared_params/overlay_position.py b/src/imagekitio/types/shared_params/overlay_position.py new file mode 100644 index 00000000..f74e3e1b --- /dev/null +++ b/src/imagekitio/types/shared_params/overlay_position.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypedDict + +__all__ = ["OverlayPosition"] + + +class OverlayPosition(TypedDict, total=False): + focus: Literal["center", "top", "left", "bottom", "right", "top_left", "top_right", "bottom_left", "bottom_right"] + """ + Specifies the position of the overlay relative to the parent image or video. + Maps to `lfo` in the URL. + """ + + x: Union[float, str] + """ + Specifies the x-coordinate of the top-left corner of the base asset where the + overlay's top-left corner will be positioned. 
It also accepts arithmetic + expressions such as `bw_mul_0.4` or `bw_sub_cw`. Maps to `lx` in the URL. Learn + about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ + + y: Union[float, str] + """ + Specifies the y-coordinate of the top-left corner of the base asset where the + overlay's top-left corner will be positioned. It also accepts arithmetic + expressions such as `bh_mul_0.4` or `bh_sub_ch`. Maps to `ly` in the URL. Learn + about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ diff --git a/src/imagekitio/types/shared_params/overlay_timing.py b/src/imagekitio/types/shared_params/overlay_timing.py new file mode 100644 index 00000000..4f766d1a --- /dev/null +++ b/src/imagekitio/types/shared_params/overlay_timing.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypedDict + +__all__ = ["OverlayTiming"] + + +class OverlayTiming(TypedDict, total=False): + duration: Union[float, str] + """ + Specifies the duration (in seconds) during which the overlay should appear on + the base video. Accepts a positive number up to two decimal places (e.g., `20` + or `20.50`) and arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. + Applies only if the base asset is a video. Maps to `ldu` in the URL. + """ + + end: Union[float, str] + """ + Specifies the end time (in seconds) for when the overlay should disappear from + the base video. If both end and duration are provided, duration is ignored. + Accepts a positive number up to two decimal places (e.g., `20` or `20.50`) and + arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. Applies only if + the base asset is a video. Maps to `leo` in the URL. + """ + + start: Union[float, str] + """ + Specifies the start time (in seconds) for when the overlay should appear on the + base video. Accepts a positive number up to two decimal places (e.g., `20` or + `20.50`) and arithmetic expressions such as `bdu_mul_0.4` or `bdu_sub_idu`. + Applies only if the base asset is a video. Maps to `lso` in the URL. + """ diff --git a/src/imagekitio/types/shared_params/responsive_image_attributes.py b/src/imagekitio/types/shared_params/responsive_image_attributes.py new file mode 100644 index 00000000..fbf901ca --- /dev/null +++ b/src/imagekitio/types/shared_params/responsive_image_attributes.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["ResponsiveImageAttributes"] + + +class ResponsiveImageAttributes(TypedDict, total=False): + """ + Resulting set of attributes suitable for an HTML `` element. + Useful for enabling responsive image loading with `srcSet` and `sizes`. + """ + + src: Required[str] + """URL for the _largest_ candidate (assigned to plain `src`).""" + + sizes: str + """`sizes` returned (or synthesised as `100vw`). + + The value for the HTML `sizes` attribute. + """ + + src_set: Annotated[str, PropertyInfo(alias="srcSet")] + """Candidate set with `w` or `x` descriptors. + + Multiple image URLs separated by commas, each with a descriptor. 
+ """ + + width: float + """Width as a number (if `width` was provided in the input options).""" diff --git a/src/imagekitio/types/shared_params/solid_color_overlay.py b/src/imagekitio/types/shared_params/solid_color_overlay.py new file mode 100644 index 00000000..35f36b14 --- /dev/null +++ b/src/imagekitio/types/shared_params/solid_color_overlay.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required + +from .base_overlay import BaseOverlay +from .solid_color_overlay_transformation import SolidColorOverlayTransformation + +__all__ = ["SolidColorOverlay"] + + +class SolidColorOverlay(BaseOverlay, total=False): + color: Required[str] + """ + Specifies the color of the block using an RGB hex code (e.g., `FF0000`), an RGBA + code (e.g., `FFAABB50`), or a color name (e.g., `red`). If an 8-character value + is provided, the last two characters represent the opacity level (from `00` for + 0.00 to `99` for 0.99). + """ + + type: Required[Literal["solidColor"]] + + transformation: Iterable[SolidColorOverlayTransformation] + """Control width and height of the solid color overlay. + + Supported transformations depend on the base/parent asset. See overlays on + [Images](https://imagekit.io/docs/add-overlays-on-images#apply-transformation-on-solid-color-overlay) + and + [Videos](https://imagekit.io/docs/add-overlays-on-videos#apply-transformations-on-solid-color-block-overlay). + """ diff --git a/src/imagekitio/types/shared_params/solid_color_overlay_transformation.py b/src/imagekitio/types/shared_params/solid_color_overlay_transformation.py new file mode 100644 index 00000000..8bfcca71 --- /dev/null +++ b/src/imagekitio/types/shared_params/solid_color_overlay_transformation.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypedDict + +__all__ = ["SolidColorOverlayTransformation"] + + +class SolidColorOverlayTransformation(TypedDict, total=False): + alpha: float + """Specifies the transparency level of the solid color overlay. + + Accepts integers from `1` to `9`. + """ + + background: str + """Specifies the background color of the solid color overlay. + + Accepts an RGB hex code (e.g., `FF0000`), an RGBA code (e.g., `FFAABB50`), or a + color name. + """ + + gradient: Union[Literal[True], str] + """Creates a linear gradient with two colors. + + Pass `true` for a default gradient, or provide a string for a custom gradient. + Only works if the base asset is an image. See + [gradient](https://imagekit.io/docs/effects-and-enhancements#gradient---e-gradient). + """ + + height: Union[float, str] + """Controls the height of the solid color overlay. + + Accepts a numeric value or an arithmetic expression. Learn about + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ + + radius: Union[float, Literal["max"]] + """Specifies the corner radius of the solid color overlay. + + Set to `max` for circular or oval shape. See + [radius](https://imagekit.io/docs/effects-and-enhancements#radius---r). + """ + + width: Union[float, str] + """Controls the width of the solid color overlay. + + Accepts a numeric value or an arithmetic expression (e.g., `bw_mul_0.2` or + `bh_div_2`). 
Learn about + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ diff --git a/src/imagekitio/types/shared_params/src_options.py b/src/imagekitio/types/shared_params/src_options.py new file mode 100644 index 00000000..cd262e53 --- /dev/null +++ b/src/imagekitio/types/shared_params/src_options.py @@ -0,0 +1,81 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo +from ..shared.transformation_position import TransformationPosition + +__all__ = ["SrcOptions"] + + +class SrcOptions(TypedDict, total=False): + """Options for generating ImageKit URLs with transformations. + + See the [Transformations guide](https://imagekit.io/docs/transformations). + """ + + src: Required[str] + """Accepts a relative or absolute path of the resource. + + If a relative path is provided, it is appended to the `urlEndpoint`. If an + absolute path is provided, `urlEndpoint` is ignored. + """ + + url_endpoint: Required[Annotated[str, PropertyInfo(alias="urlEndpoint")]] + """ + Get your urlEndpoint from the + [ImageKit dashboard](https://imagekit.io/dashboard/url-endpoints). + """ + + expires_in: Annotated[float, PropertyInfo(alias="expiresIn")] + """When you want the signed URL to expire, specified in seconds. + + If `expiresIn` is anything above 0, the URL will always be signed even if + `signed` is set to false. If not specified and `signed` is `true`, the signed + URL will not expire (valid indefinitely). + + Example: Setting `expiresIn: 3600` will make the URL expire 1 hour from + generation time. After the expiry time, the signed URL will no longer be valid + and ImageKit will return a 401 Unauthorized status code. + + [Learn more](https://imagekit.io/docs/media-delivery-basic-security#how-to-generate-signed-urls). + """ + + query_parameters: Annotated[Dict[str, str], PropertyInfo(alias="queryParameters")] + """ + These are additional query parameters that you want to add to the final URL. + They can be any query parameters and not necessarily related to ImageKit. This + is especially useful if you want to add a versioning parameter to your URLs. + """ + + signed: bool + """Whether to sign the URL or not. + + Set this to `true` if you want to generate a signed URL. If `signed` is `true` + and `expiresIn` is not specified, the signed URL will not expire (valid + indefinitely). Note: If `expiresIn` is set to any value above 0, the URL will + always be signed regardless of this setting. + [Learn more](https://imagekit.io/docs/media-delivery-basic-security#how-to-generate-signed-urls). + """ + + transformation: Iterable["Transformation"] + """An array of objects specifying the transformations to be applied in the URL. + + If more than one transformation is specified, they are applied in the order they + are specified as chained transformations. See + [Chained transformations](https://imagekit.io/docs/transformations#chained-transformations). + """ + + transformation_position: Annotated[TransformationPosition, PropertyInfo(alias="transformationPosition")] + """ + By default, the transformation string is added as a query parameter in the URL, + e.g., `?tr=w-100,h-100`. If you want to add the transformation string in the + path of the URL, set this to `path`. Learn more in the + [Transformations guide](https://imagekit.io/docs/transformations). 
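
Putting `SrcOptions` together: a minimal sketch of an options dict, assuming the module path added in this diff. The URL endpoint and asset path are placeholders, and the helper that consumes these options is not part of this excerpt.

```python
from imagekitio.types.shared_params.src_options import SrcOptions

options: SrcOptions = {
    "src": "/default-image.jpg",                 # relative path, appended to url_endpoint
    "url_endpoint": "https://ik.imagekit.io/your_id",
    "signed": True,
    "expires_in": 3600,                          # signed URL stops working one hour after generation
    "query_parameters": {"v": "2"},              # arbitrary extra query params, e.g. for versioning
    "transformation": [
        {"width": 400, "aspect_ratio": "4_3"},   # step 1 of the chain
        {"rotation": 90},                        # step 2, applied after step 1
    ],
    "transformation_position": "query",          # emit ?tr=... instead of placing tr in the path
}
```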
+ """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/shared_params/streaming_resolution.py b/src/imagekitio/types/shared_params/streaming_resolution.py new file mode 100644 index 00000000..ac32902e --- /dev/null +++ b/src/imagekitio/types/shared_params/streaming_resolution.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypeAlias + +__all__ = ["StreamingResolution"] + +StreamingResolution: TypeAlias = Literal["240", "360", "480", "720", "1080", "1440", "2160"] diff --git a/src/imagekitio/types/shared_params/subtitle_overlay.py b/src/imagekitio/types/shared_params/subtitle_overlay.py new file mode 100644 index 00000000..71e885ee --- /dev/null +++ b/src/imagekitio/types/shared_params/subtitle_overlay.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required + +from .base_overlay import BaseOverlay +from .subtitle_overlay_transformation import SubtitleOverlayTransformation + +__all__ = ["SubtitleOverlay"] + + +class SubtitleOverlay(BaseOverlay, total=False): + input: Required[str] + """Specifies the relative path to the subtitle file used as an overlay.""" + + type: Required[Literal["subtitle"]] + + encoding: Literal["auto", "plain", "base64"] + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Iterable[SubtitleOverlayTransformation] + """Control styling of the subtitle. + + See + [Styling subtitles](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer). + """ diff --git a/src/imagekitio/types/shared_params/subtitle_overlay_transformation.py b/src/imagekitio/types/shared_params/subtitle_overlay_transformation.py new file mode 100644 index 00000000..08b8de57 --- /dev/null +++ b/src/imagekitio/types/shared_params/subtitle_overlay_transformation.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["SubtitleOverlayTransformation"] + + +class SubtitleOverlayTransformation(TypedDict, total=False): + """Subtitle styling options. + + [Learn more](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) from the docs. + """ + + background: str + """ + Specifies the subtitle background color using a standard color name, an RGB + color code (e.g., FF0000), or an RGBA color code (e.g., FFAABB50). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + color: str + """ + Sets the font color of the subtitle text using a standard color name, an RGB + color code (e.g., FF0000), or an RGBA color code (e.g., FFAABB50). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_family: Annotated[str, PropertyInfo(alias="fontFamily")] + """Font family for subtitles. 
+ + Refer to the + [supported fonts](https://imagekit.io/docs/add-overlays-on-images#supported-text-font-list). + """ + + font_outline: Annotated[str, PropertyInfo(alias="fontOutline")] + """Sets the font outline of the subtitle text. + + Requires the outline width (an integer) and the outline color (as an RGB color + code, RGBA color code, or standard web color name) separated by an underscore. + Example: `fol-2_blue` (outline width of 2px and outline color blue), + `fol-2_A1CCDD` (outline width of 2px and outline color `#A1CCDD`) and + `fol-2_A1CCDD50` (outline width of 2px and outline color `#A1CCDD` at 50% + opacity). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_shadow: Annotated[str, PropertyInfo(alias="fontShadow")] + """Sets the font shadow for the subtitle text. + + Requires the shadow color (as an RGB color code, RGBA color code, or standard + web color name) and shadow indent (an integer) separated by an underscore. + Example: `fsh-blue_2` (shadow color blue, indent of 2px), `fsh-A1CCDD_3` (shadow + color `#A1CCDD`, indent of 3px), `fsh-A1CCDD50_3` (shadow color `#A1CCDD` at 50% + opacity, indent of 3px). + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + font_size: Annotated[float, PropertyInfo(alias="fontSize")] + """Sets the font size of subtitle text. + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ + + typography: Literal["b", "i", "b_i"] + """Sets the typography style of the subtitle text. + + Supports values are `b` for bold, `i` for italics, and `b_i` for bold with + italics. + + [Subtitle styling options](https://imagekit.io/docs/add-overlays-on-videos#styling-controls-for-subtitles-layer) + """ diff --git a/src/imagekitio/types/shared_params/text_overlay.py b/src/imagekitio/types/shared_params/text_overlay.py new file mode 100644 index 00000000..62ebe4cc --- /dev/null +++ b/src/imagekitio/types/shared_params/text_overlay.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required + +from .base_overlay import BaseOverlay +from .text_overlay_transformation import TextOverlayTransformation + +__all__ = ["TextOverlay"] + + +class TextOverlay(BaseOverlay, total=False): + text: Required[str] + """Specifies the text to be displayed in the overlay. + + The SDK automatically handles special characters and encoding. + """ + + type: Required[Literal["text"]] + + encoding: Literal["auto", "plain", "base64"] + """ + Text can be included in the layer as either `i-{input}` (plain text) or + `ie-{base64_encoded_input}` (base64). By default, the SDK selects the + appropriate format based on the input text. To always use base64 + (`ie-{base64}`), set this parameter to `base64`. To always use plain text + (`i-{input}`), set it to `plain`. + """ + + transformation: Iterable[TextOverlayTransformation] + """Control styling of the text overlay. + + See + [Text overlays](https://imagekit.io/docs/add-overlays-on-images#text-overlay). 
+ """ diff --git a/src/imagekitio/types/shared_params/text_overlay_transformation.py b/src/imagekitio/types/shared_params/text_overlay_transformation.py new file mode 100644 index 00000000..5f05fbd2 --- /dev/null +++ b/src/imagekitio/types/shared_params/text_overlay_transformation.py @@ -0,0 +1,99 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["TextOverlayTransformation"] + + +class TextOverlayTransformation(TypedDict, total=False): + alpha: float + """Specifies the transparency level of the text overlay. + + Accepts integers from `1` to `9`. + """ + + background: str + """ + Specifies the background color of the text overlay. Accepts an RGB hex code, an + RGBA code, or a color name. + """ + + flip: Literal["h", "v", "h_v", "v_h"] + """Flip the text overlay horizontally, vertically, or both.""" + + font_color: Annotated[str, PropertyInfo(alias="fontColor")] + """Specifies the font color of the overlaid text. + + Accepts an RGB hex code (e.g., `FF0000`), an RGBA code (e.g., `FFAABB50`), or a + color name. + """ + + font_family: Annotated[str, PropertyInfo(alias="fontFamily")] + """Specifies the font family of the overlaid text. + + Choose from the supported fonts list or use a custom font. See + [Supported fonts](https://imagekit.io/docs/add-overlays-on-images#supported-text-font-list) + and + [Custom font](https://imagekit.io/docs/add-overlays-on-images#change-font-family-in-text-overlay). + """ + + font_size: Annotated[Union[float, str], PropertyInfo(alias="fontSize")] + """Specifies the font size of the overlaid text. + + Accepts a numeric value or an arithmetic expression. + """ + + inner_alignment: Annotated[Literal["left", "right", "center"], PropertyInfo(alias="innerAlignment")] + """ + Specifies the inner alignment of the text when width is more than the text + length. + """ + + line_height: Annotated[Union[float, str], PropertyInfo(alias="lineHeight")] + """Specifies the line height of the text overlay. + + Accepts integer values representing line height in points. It can also accept + [arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations) + such as `bw_mul_0.2`, or `bh_div_20`. + """ + + padding: Union[float, str] + """ + Specifies the padding around the overlaid text. Can be provided as a single + positive integer or multiple values separated by underscores (following CSS + shorthand order). Arithmetic expressions are also accepted. + """ + + radius: Union[float, Literal["max"]] + """ + Specifies the corner radius of the text overlay. Set to `max` to achieve a + circular or oval shape. + """ + + rotation: Union[float, str] + """ + Specifies the rotation angle of the text overlay. Accepts a numeric value for + clockwise rotation or a string prefixed with "N" for counter-clockwise rotation. + """ + + typography: str + """Specifies the typography style of the text. Supported values: + + - Single styles: `b` (bold), `i` (italic), `strikethrough`. + - Combinations: Any combination separated by underscores, e.g., `b_i`, + `b_i_strikethrough`. + """ + + width: Union[float, str] + """Specifies the maximum width (in pixels) of the overlaid text. + + The text wraps automatically, and arithmetic expressions (e.g., `bw_mul_0.2` or + `bh_div_2`) are supported. Useful when used in conjunction with the + `background`. 
Learn about + [Arithmetic expressions](https://imagekit.io/docs/arithmetic-expressions-in-transformations). + """ diff --git a/src/imagekitio/types/shared_params/transformation.py b/src/imagekitio/types/shared_params/transformation.py new file mode 100644 index 00000000..a48ddf8e --- /dev/null +++ b/src/imagekitio/types/shared_params/transformation.py @@ -0,0 +1,432 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo +from ..shared.streaming_resolution import StreamingResolution + +__all__ = ["Transformation"] + + +class Transformation(TypedDict, total=False): + """The SDK provides easy-to-use names for transformations. + + These names are converted to the corresponding transformation string before being added to the URL. + SDKs are updated regularly to support new transformations. If you want to use a transformation that is not supported by the SDK, + You can use the `raw` parameter to pass the transformation string directly. + See the [Transformations documentation](https://imagekit.io/docs/transformations). + """ + + ai_change_background: Annotated[str, PropertyInfo(alias="aiChangeBackground")] + """Uses AI to change the background. + + Provide a text prompt or a base64-encoded prompt, e.g., `prompt-snow road` or + `prompte-[urlencoded_base64_encoded_text]`. Not supported inside overlay. See + [AI Change Background](https://imagekit.io/docs/ai-transformations#change-background-e-changebg). + """ + + ai_drop_shadow: Annotated[Union[Literal[True], str], PropertyInfo(alias="aiDropShadow")] + """ + Adds an AI-based drop shadow around a foreground object on a transparent or + removed background. Optionally, control the direction, elevation, and saturation + of the light source (e.g., `az-45` to change light direction). Pass `true` for + the default drop shadow, or provide a string for a custom drop shadow. Supported + inside overlay. See + [AI Drop Shadow](https://imagekit.io/docs/ai-transformations#ai-drop-shadow-e-dropshadow). + """ + + ai_edit: Annotated[str, PropertyInfo(alias="aiEdit")] + """Uses AI to edit images based on a text prompt. + + Provide a text prompt or a base64-encoded prompt, e.g., `prompt-snow road` or + `prompte-[urlencoded_base64_encoded_text]`. Not supported inside overlay. + See [AI Edit](https://imagekit.io/docs/ai-transformations#edit-image-e-edit). + """ + + ai_remove_background: Annotated[Literal[True], PropertyInfo(alias="aiRemoveBackground")] + """Applies ImageKit's in-house background removal. + + Supported inside overlay. See + [AI Background Removal](https://imagekit.io/docs/ai-transformations#imagekit-background-removal-e-bgremove). + """ + + ai_remove_background_external: Annotated[Literal[True], PropertyInfo(alias="aiRemoveBackgroundExternal")] + """Uses third-party background removal. + + Note: It is recommended to use aiRemoveBackground, ImageKit's in-house solution, + which is more cost-effective. Supported inside overlay. See + [External Background Removal](https://imagekit.io/docs/ai-transformations#background-removal-e-removedotbg). + """ + + ai_retouch: Annotated[Literal[True], PropertyInfo(alias="aiRetouch")] + """Performs AI-based retouching to improve faces or product shots. + + Not supported inside overlay. See + [AI Retouch](https://imagekit.io/docs/ai-transformations#retouch-e-retouch). 
+ """ + + ai_upscale: Annotated[Literal[True], PropertyInfo(alias="aiUpscale")] + """Upscales images beyond their original dimensions using AI. + + Not supported inside overlay. See + [AI Upscale](https://imagekit.io/docs/ai-transformations#upscale-e-upscale). + """ + + ai_variation: Annotated[Literal[True], PropertyInfo(alias="aiVariation")] + """Generates a variation of an image using AI. + + This produces a new image with slight variations from the original, such as + changes in color, texture, and other visual elements, while preserving the + structure and essence of the original image. Not supported inside overlay. See + [AI Generate Variations](https://imagekit.io/docs/ai-transformations#generate-variations-of-an-image-e-genvar). + """ + + aspect_ratio: Annotated[Union[float, str], PropertyInfo(alias="aspectRatio")] + """Specifies the aspect ratio for the output, e.g., "ar-4-3". + + Typically used with either width or height (but not both). For example: + aspectRatio = `4:3`, `4_3`, or an expression like `iar_div_2`. See + [Image resize and crop – Aspect ratio](https://imagekit.io/docs/image-resize-and-crop#aspect-ratio---ar). + """ + + audio_codec: Annotated[Literal["aac", "opus", "none"], PropertyInfo(alias="audioCodec")] + """Specifies the audio codec, e.g., `aac`, `opus`, or `none`. + + See [Audio codec](https://imagekit.io/docs/video-optimization#audio-codec---ac). + """ + + background: str + """ + Specifies the background to be used in conjunction with certain cropping + strategies when resizing an image. + + - A solid color: e.g., `red`, `F3F3F3`, `AAFF0010`. See + [Solid color background](https://imagekit.io/docs/effects-and-enhancements#solid-color-background). + - A blurred background: e.g., `blurred`, `blurred_25_N15`, etc. See + [Blurred background](https://imagekit.io/docs/effects-and-enhancements#blurred-background). + - Expand the image boundaries using generative fill: `genfill`. Not supported + inside overlay. Optionally, control the background scene by passing a text + prompt: `genfill[:-prompt-${text}]` or + `genfill[:-prompte-${urlencoded_base64_encoded_text}]`. See + [Generative fill background](https://imagekit.io/docs/ai-transformations#generative-fill-bg-genfill). + """ + + blur: float + """Specifies the Gaussian blur level. + + Accepts an integer value between 1 and 100, or an expression like `bl-10`. See + [Blur](https://imagekit.io/docs/effects-and-enhancements#blur---bl). + """ + + border: str + """Adds a border to the output media. + + Accepts a string in the format `_` (e.g., `5_FFF000` for + a 5px yellow border), or an expression like `ih_div_20_FF00FF`. See + [Border](https://imagekit.io/docs/effects-and-enhancements#border---b). + """ + + color_profile: Annotated[bool, PropertyInfo(alias="colorProfile")] + """ + Indicates whether the output image should retain the original color profile. See + [Color profile](https://imagekit.io/docs/image-optimization#color-profile---cp). + """ + + contrast_stretch: Annotated[Literal[True], PropertyInfo(alias="contrastStretch")] + """ + Automatically enhances the contrast of an image (contrast stretch). See + [Contrast Stretch](https://imagekit.io/docs/effects-and-enhancements#contrast-stretch---e-contrast). + """ + + crop: Literal["force", "at_max", "at_max_enlarge", "at_least", "maintain_ratio"] + """Crop modes for image resizing. + + See + [Crop modes & focus](https://imagekit.io/docs/image-resize-and-crop#crop-crop-modes--focus). 
+ """ + + crop_mode: Annotated[Literal["pad_resize", "extract", "pad_extract"], PropertyInfo(alias="cropMode")] + """Additional crop modes for image resizing. + + See + [Crop modes & focus](https://imagekit.io/docs/image-resize-and-crop#crop-crop-modes--focus). + """ + + default_image: Annotated[str, PropertyInfo(alias="defaultImage")] + """ + Specifies a fallback image if the resource is not found, e.g., a URL or file + path. See + [Default image](https://imagekit.io/docs/image-transformation#default-image---di). + """ + + dpr: float + """ + Accepts values between 0.1 and 5, or `auto` for automatic device pixel ratio + (DPR) calculation. See + [DPR](https://imagekit.io/docs/image-resize-and-crop#dpr---dpr). + """ + + duration: Union[float, str] + """Specifies the duration (in seconds) for trimming videos, e.g., `5` or `10.5`. + + Typically used with startOffset to indicate the length from the start offset. + Arithmetic expressions are supported. See + [Trim videos – Duration](https://imagekit.io/docs/trim-videos#duration---du). + """ + + end_offset: Annotated[Union[float, str], PropertyInfo(alias="endOffset")] + """Specifies the end offset (in seconds) for trimming videos, e.g., `5` or `10.5`. + + Typically used with startOffset to define a time window. Arithmetic expressions + are supported. See + [Trim videos – End offset](https://imagekit.io/docs/trim-videos#end-offset---eo). + """ + + flip: Literal["h", "v", "h_v", "v_h"] + """Flips or mirrors an image either horizontally, vertically, or both. + + Acceptable values: `h` (horizontal), `v` (vertical), `h_v` (horizontal and + vertical), or `v_h`. See + [Flip](https://imagekit.io/docs/effects-and-enhancements#flip---fl). + """ + + focus: str + """ + Refines padding and cropping behavior for pad resize, maintain ratio, and + extract crop modes. Supports manual positions and coordinate-based focus. With + AI-based cropping, you can automatically keep key subjects in frame—such as + faces or detected objects (e.g., `fo-face`, `fo-person`, `fo-car`)— while + resizing. + + - See [Focus](https://imagekit.io/docs/image-resize-and-crop#focus---fo). + - [Object aware cropping](https://imagekit.io/docs/image-resize-and-crop#object-aware-cropping---fo-object-name) + """ + + format: Literal["auto", "webp", "jpg", "jpeg", "png", "gif", "svg", "mp4", "webm", "avif", "orig"] + """ + Specifies the output format for images or videos, e.g., `jpg`, `png`, `webp`, + `mp4`, or `auto`. You can also pass `orig` for images to return the original + format. ImageKit automatically delivers images and videos in the optimal format + based on device support unless overridden by the dashboard settings or the + format parameter. See + [Image format](https://imagekit.io/docs/image-optimization#format---f) and + [Video format](https://imagekit.io/docs/video-optimization#format---f). + """ + + gradient: Union[Literal[True], str] + """Creates a linear gradient with two colors. + + Pass `true` for a default gradient, or provide a string for a custom gradient. + See + [Gradient](https://imagekit.io/docs/effects-and-enhancements#gradient---e-gradient). + """ + + grayscale: Literal[True] + """Enables a grayscale effect for images. + + See + [Grayscale](https://imagekit.io/docs/effects-and-enhancements#grayscale---e-grayscale). + """ + + height: Union[float, str] + """Specifies the height of the output. + + If a value between 0 and 1 is provided, it is treated as a percentage (e.g., + `0.5` represents 50% of the original height). 
You can also supply arithmetic + expressions (e.g., `ih_mul_0.5`). Height transformation – + [Images](https://imagekit.io/docs/image-resize-and-crop#height---h) · + [Videos](https://imagekit.io/docs/video-resize-and-crop#height---h) + """ + + lossless: bool + """ + Specifies whether the output image (in JPEG or PNG) should be compressed + losslessly. See + [Lossless compression](https://imagekit.io/docs/image-optimization#lossless-webp-and-png---lo). + """ + + metadata: bool + """By default, ImageKit removes all metadata during automatic image compression. + + Set this to true to preserve metadata. See + [Image metadata](https://imagekit.io/docs/image-optimization#image-metadata---md). + """ + + named: str + """Named transformation reference. + + See + [Named transformations](https://imagekit.io/docs/transformations#named-transformations). + """ + + opacity: float + """Specifies the opacity level of the output image. + + See [Opacity](https://imagekit.io/docs/effects-and-enhancements#opacity---o). + """ + + original: bool + """ + If set to true, serves the original file without applying any transformations. + See + [Deliver original file as-is](https://imagekit.io/docs/core-delivery-features#deliver-original-file-as-is---orig-true). + """ + + overlay: "Overlay" + """Specifies an overlay to be applied on the parent image or video. + + ImageKit supports overlays including images, text, videos, subtitles, and solid + colors. See + [Overlay using layers](https://imagekit.io/docs/transformations#overlay-using-layers). + """ + + page: Union[float, str] + """ + Extracts a specific page or frame from multi-page or layered files (PDF, PSD, + AI). For example, specify by number (e.g., `2`), a range (e.g., `3-4` for the + 2nd and 3rd layers), or by name (e.g., `name-layer-4` for a PSD layer). See + [Thumbnail extraction](https://imagekit.io/docs/vector-and-animated-images#get-thumbnail-from-psd-pdf-ai-eps-and-animated-files). + """ + + progressive: bool + """Specifies whether the output JPEG image should be rendered progressively. + + Progressive loading begins with a low-quality, pixelated version of the full + image, which gradually improves to provide a faster perceived load time. See + [Progressive images](https://imagekit.io/docs/image-optimization#progressive-image---pr). + """ + + quality: float + """ + Specifies the quality of the output image for lossy formats such as JPEG, WebP, + and AVIF. A higher quality value results in a larger file size with better + quality, while a lower value produces a smaller file size with reduced quality. + See [Quality](https://imagekit.io/docs/image-optimization#quality---q). + """ + + radius: Union[float, Literal["max"]] + """ + Specifies the corner radius for rounded corners (e.g., 20) or `max` for circular + or oval shape. See + [Radius](https://imagekit.io/docs/effects-and-enhancements#radius---r). + """ + + raw: str + """Pass any transformation not directly supported by the SDK. + + This transformation string is appended to the URL as provided. + """ + + rotation: Union[float, str] + """Specifies the rotation angle in degrees. + + Positive values rotate the image clockwise; you can also use, for example, `N40` + for counterclockwise rotation or `auto` to use the orientation specified in the + image's EXIF data. For videos, only the following values are supported: 0, 90, + 180, 270, or 360. See + [Rotate](https://imagekit.io/docs/effects-and-enhancements#rotate---rt). 
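
As a quick illustration of how several of the fields above combine, here is a hedged sketch of a chained transformation list; each dict is one step, applied in order. The import path is assumed from the file location in this diff and the values are illustrative only.

```python
from typing import List

from imagekitio.types.shared_params.transformation import Transformation

chain: List[Transformation] = [
    {
        "width": 800,
        "aspect_ratio": "4_3",
        "crop": "maintain_ratio",
        "focus": "face",                       # AI-based focus, rendered as fo-face
    },
    {"format": "webp", "quality": 80},         # optimise the already-resized result
    {"radius": "max", "border": "5_FFF000"},   # circular crop with a 5px yellow border
]
```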
+ """ + + shadow: Union[Literal[True], str] + """Adds a shadow beneath solid objects in an image with a transparent background. + + For AI-based drop shadows, refer to aiDropShadow. Pass `true` for a default + shadow, or provide a string for a custom shadow. See + [Shadow](https://imagekit.io/docs/effects-and-enhancements#shadow---e-shadow). + """ + + sharpen: Union[Literal[True], float] + """Sharpens the input image, highlighting edges and finer details. + + Pass `true` for default sharpening, or provide a numeric value for custom + sharpening. See + [Sharpen](https://imagekit.io/docs/effects-and-enhancements#sharpen---e-sharpen). + """ + + start_offset: Annotated[Union[float, str], PropertyInfo(alias="startOffset")] + """Specifies the start offset (in seconds) for trimming videos, e.g., `5` or + `10.5`. + + Arithmetic expressions are also supported. See + [Trim videos – Start offset](https://imagekit.io/docs/trim-videos#start-offset---so). + """ + + streaming_resolutions: Annotated[List[StreamingResolution], PropertyInfo(alias="streamingResolutions")] + """ + An array of resolutions for adaptive bitrate streaming, e.g., [`240`, `360`, + `480`, `720`, `1080`]. See + [Adaptive Bitrate Streaming](https://imagekit.io/docs/adaptive-bitrate-streaming). + """ + + trim: Union[Literal[True], float] + """Useful for images with a solid or nearly solid background and a central object. + + This parameter trims the background, leaving only the central object in the + output image. See + [Trim edges](https://imagekit.io/docs/effects-and-enhancements#trim-edges---t). + """ + + unsharp_mask: Annotated[Union[Literal[True], str], PropertyInfo(alias="unsharpMask")] + """Applies Unsharp Masking (USM), an image sharpening technique. + + Pass `true` for a default unsharp mask, or provide a string for a custom unsharp + mask. See + [Unsharp Mask](https://imagekit.io/docs/effects-and-enhancements#unsharp-mask---e-usm). + """ + + video_codec: Annotated[Literal["h264", "vp9", "av1", "none"], PropertyInfo(alias="videoCodec")] + """Specifies the video codec, e.g., `h264`, `vp9`, `av1`, or `none`. + + See [Video codec](https://imagekit.io/docs/video-optimization#video-codec---vc). + """ + + width: Union[float, str] + """Specifies the width of the output. + + If a value between 0 and 1 is provided, it is treated as a percentage (e.g., + `0.4` represents 40% of the original width). You can also supply arithmetic + expressions (e.g., `iw_div_2`). Width transformation – + [Images](https://imagekit.io/docs/image-resize-and-crop#width---w) · + [Videos](https://imagekit.io/docs/video-resize-and-crop#width---w) + """ + + x: Union[float, str] + """Focus using cropped image coordinates - X coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + x_center: Annotated[Union[float, str], PropertyInfo(alias="xCenter")] + """Focus using cropped image coordinates - X center coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + y: Union[float, str] + """Focus using cropped image coordinates - Y coordinate. + + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + y_center: Annotated[Union[float, str], PropertyInfo(alias="yCenter")] + """Focus using cropped image coordinates - Y center coordinate. 
+ + See + [Focus using cropped coordinates](https://imagekit.io/docs/image-resize-and-crop#example---focus-using-cropped-image-coordinates). + """ + + zoom: float + """ + Accepts a numeric value that determines how much to zoom in or out of the + cropped area. It should be used in conjunction with fo-face or fo-. + See [Zoom](https://imagekit.io/docs/image-resize-and-crop#zoom---z). + """ + + +from .overlay import Overlay diff --git a/src/imagekitio/types/shared_params/transformation_position.py b/src/imagekitio/types/shared_params/transformation_position.py new file mode 100644 index 00000000..3959993f --- /dev/null +++ b/src/imagekitio/types/shared_params/transformation_position.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypeAlias + +__all__ = ["TransformationPosition"] + +TransformationPosition: TypeAlias = Literal["path", "query"] diff --git a/src/imagekitio/types/shared_params/video_overlay.py b/src/imagekitio/types/shared_params/video_overlay.py new file mode 100644 index 00000000..6c020fa2 --- /dev/null +++ b/src/imagekitio/types/shared_params/video_overlay.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required + +from .base_overlay import BaseOverlay + +__all__ = ["VideoOverlay"] + + +class VideoOverlay(BaseOverlay, total=False): + input: Required[str] + """Specifies the relative path to the video used as an overlay.""" + + type: Required[Literal["video"]] + + encoding: Literal["auto", "plain", "base64"] + """ + The input path can be included in the layer as either `i-{input}` or + `ie-{base64_encoded_input}`. By default, the SDK determines the appropriate + format automatically. To always use base64 encoding (`ie-{base64}`), set this + parameter to `base64`. To always use plain text (`i-{input}`), set it to + `plain`. + """ + + transformation: Iterable["Transformation"] + """Array of transformation to be applied to the overlay video. + + Except `streamingResolutions`, all other video transformations are supported. + See [Video transformations](https://imagekit.io/docs/video-transformation). + """ + + +from .transformation import Transformation diff --git a/src/imagekitio/types/unsafe_unwrap_webhook_event.py b/src/imagekitio/types/unsafe_unwrap_webhook_event.py new file mode 100644 index 00000000..9ed05b32 --- /dev/null +++ b/src/imagekitio/types/unsafe_unwrap_webhook_event.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .upload_pre_transform_error_event import UploadPreTransformErrorEvent +from .video_transformation_error_event import VideoTransformationErrorEvent +from .video_transformation_ready_event import VideoTransformationReadyEvent +from .upload_post_transform_error_event import UploadPostTransformErrorEvent +from .upload_pre_transform_success_event import UploadPreTransformSuccessEvent +from .upload_post_transform_success_event import UploadPostTransformSuccessEvent +from .video_transformation_accepted_event import VideoTransformationAcceptedEvent + +__all__ = ["UnsafeUnwrapWebhookEvent"] + +UnsafeUnwrapWebhookEvent: TypeAlias = Annotated[ + Union[ + VideoTransformationAcceptedEvent, + VideoTransformationReadyEvent, + VideoTransformationErrorEvent, + UploadPreTransformSuccessEvent, + UploadPreTransformErrorEvent, + UploadPostTransformSuccessEvent, + UploadPostTransformErrorEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/imagekitio/types/unwrap_webhook_event.py b/src/imagekitio/types/unwrap_webhook_event.py new file mode 100644 index 00000000..e67355f5 --- /dev/null +++ b/src/imagekitio/types/unwrap_webhook_event.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .upload_pre_transform_error_event import UploadPreTransformErrorEvent +from .video_transformation_error_event import VideoTransformationErrorEvent +from .video_transformation_ready_event import VideoTransformationReadyEvent +from .upload_post_transform_error_event import UploadPostTransformErrorEvent +from .upload_pre_transform_success_event import UploadPreTransformSuccessEvent +from .upload_post_transform_success_event import UploadPostTransformSuccessEvent +from .video_transformation_accepted_event import VideoTransformationAcceptedEvent + +__all__ = ["UnwrapWebhookEvent"] + +UnwrapWebhookEvent: TypeAlias = Annotated[ + Union[ + VideoTransformationAcceptedEvent, + VideoTransformationReadyEvent, + VideoTransformationErrorEvent, + UploadPreTransformSuccessEvent, + UploadPreTransformErrorEvent, + UploadPostTransformSuccessEvent, + UploadPostTransformErrorEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/imagekitio/types/update_file_request_param.py b/src/imagekitio/types/update_file_request_param.py new file mode 100644 index 00000000..8b90827f --- /dev/null +++ b/src/imagekitio/types/update_file_request_param.py @@ -0,0 +1,85 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from .._types import SequenceNotStr +from .._utils import PropertyInfo +from .shared_params.extensions import Extensions + +__all__ = ["UpdateFileRequestParam", "UpdateFileDetails", "ChangePublicationStatus", "ChangePublicationStatusPublish"] + + +class UpdateFileDetails(TypedDict, total=False): + custom_coordinates: Annotated[Optional[str], PropertyInfo(alias="customCoordinates")] + """Define an important area in the image in the format `x,y,width,height` e.g. + + `10,10,100,100`. Send `null` to unset this value. 
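
Stepping back to the webhook unions defined above: a hedged sketch of dispatching on the discriminated `type` field of an already-parsed `UnwrapWebhookEvent`. Signature verification and parsing of the raw webhook body are out of scope here and assumed to have happened upstream.

```python
from imagekitio.types.unwrap_webhook_event import UnwrapWebhookEvent


def handle_event(event: UnwrapWebhookEvent) -> None:
    # `type` is the discriminator declared via PropertyInfo(discriminator="type")
    if event.type == "upload.pre-transform.error":
        print("pre-transform failed:", event.data.transformation.error.reason)
    elif event.type == "upload.post-transform.success":
        print("post-transform ready at:", event.data.url)
    elif event.type == "video.transformation.accepted":
        print("video transformation queued for:", event.data.asset.url)
    else:
        print("unhandled webhook event:", event.type)
```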
+ """ + + custom_metadata: Annotated[Dict[str, object], PropertyInfo(alias="customMetadata")] + """A key-value data to be associated with the asset. + + To unset a key, send `null` value for that key. Before setting any custom + metadata on an asset you have to create the field using custom metadata fields + API. + """ + + description: str + """Optional text to describe the contents of the file.""" + + extensions: Extensions + """Array of extensions to be applied to the asset. + + Each extension can be configured with specific parameters based on the extension + type. + """ + + remove_ai_tags: Annotated[Union[SequenceNotStr[str], Literal["all"]], PropertyInfo(alias="removeAITags")] + """An array of AITags associated with the file that you want to remove, e.g. + + `["car", "vehicle", "motorsports"]`. + + If you want to remove all AITags associated with the file, send a string - + "all". + + Note: The remove operation for `AITags` executes before any of the `extensions` + are processed. + """ + + tags: Optional[SequenceNotStr[str]] + """An array of tags associated with the file, such as `["tag1", "tag2"]`. + + Send `null` to unset all tags associated with the file. + """ + + webhook_url: Annotated[str, PropertyInfo(alias="webhookUrl")] + """ + The final status of extensions after they have completed execution will be + delivered to this endpoint as a POST request. + [Learn more](/docs/api-reference/digital-asset-management-dam/managing-assets/update-file-details#webhook-payload-structure) + about the webhook payload structure. + """ + + +class ChangePublicationStatusPublish(TypedDict, total=False): + """Configure the publication status of a file and its versions.""" + + is_published: Required[Annotated[bool, PropertyInfo(alias="isPublished")]] + """Set to `true` to publish the file. Set to `false` to unpublish the file.""" + + include_file_versions: Annotated[bool, PropertyInfo(alias="includeFileVersions")] + """Set to `true` to publish/unpublish all versions of the file. + + Set to `false` to publish/unpublish only the current version of the file. + """ + + +class ChangePublicationStatus(TypedDict, total=False): + publish: ChangePublicationStatusPublish + """Configure the publication status of a file and its versions.""" + + +UpdateFileRequestParam: TypeAlias = Union[UpdateFileDetails, ChangePublicationStatus] diff --git a/src/imagekitio/types/upload_post_transform_error_event.py b/src/imagekitio/types/upload_post_transform_error_event.py new file mode 100644 index 00000000..8f0a4ce3 --- /dev/null +++ b/src/imagekitio/types/upload_post_transform_error_event.py @@ -0,0 +1,78 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "UploadPostTransformErrorEvent", + "UploadPostTransformErrorEventData", + "UploadPostTransformErrorEventDataTransformation", + "UploadPostTransformErrorEventDataTransformationError", + "UploadPostTransformErrorEventRequest", + "UploadPostTransformErrorEventRequestTransformation", +] + + +class UploadPostTransformErrorEventDataTransformationError(BaseModel): + reason: str + """Reason for the post-transformation failure.""" + + +class UploadPostTransformErrorEventDataTransformation(BaseModel): + error: UploadPostTransformErrorEventDataTransformationError + + +class UploadPostTransformErrorEventData(BaseModel): + file_id: str = FieldInfo(alias="fileId") + """Unique identifier of the originally uploaded file.""" + + name: str + """Name of the file.""" + + path: str + """Path of the file.""" + + transformation: UploadPostTransformErrorEventDataTransformation + + url: str + """URL of the attempted post-transformation.""" + + +class UploadPostTransformErrorEventRequestTransformation(BaseModel): + type: Literal["transformation", "abs", "gif-to-video", "thumbnail"] + """Type of the requested post-transformation.""" + + protocol: Optional[Literal["hls", "dash"]] = None + """Only applicable if transformation type is 'abs'. Streaming protocol used.""" + + value: Optional[str] = None + """Value for the requested transformation type.""" + + +class UploadPostTransformErrorEventRequest(BaseModel): + transformation: UploadPostTransformErrorEventRequestTransformation + + x_request_id: str + """Unique identifier for the originating request.""" + + +class UploadPostTransformErrorEvent(BaseWebhookEvent): + """Triggered when a post-transformation fails. + + The original file remains available, but the requested transformation could not be generated. + """ + + created_at: datetime + """Timestamp of when the event occurred in ISO8601 format.""" + + data: UploadPostTransformErrorEventData + + request: UploadPostTransformErrorEventRequest + + type: Literal["upload.post-transform.error"] # type: ignore diff --git a/src/imagekitio/types/upload_post_transform_success_event.py b/src/imagekitio/types/upload_post_transform_success_event.py new file mode 100644 index 00000000..10e4ad7f --- /dev/null +++ b/src/imagekitio/types/upload_post_transform_success_event.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
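
A hedged sketch of reading fields from a parsed `UploadPostTransformErrorEvent` as defined above; how the event is verified and deserialised from the webhook payload is not shown here.

```python
from imagekitio.types.upload_post_transform_error_event import UploadPostTransformErrorEvent


def log_post_transform_failure(event: UploadPostTransformErrorEvent) -> None:
    print("file:", event.data.file_id, event.data.name)
    print("attempted transformation URL:", event.data.url)
    print("failure reason:", event.data.transformation.error.reason)
    print("requested type:", event.request.transformation.type)
    print("originating request id:", event.request.x_request_id)
```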
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "UploadPostTransformSuccessEvent", + "UploadPostTransformSuccessEventData", + "UploadPostTransformSuccessEventRequest", + "UploadPostTransformSuccessEventRequestTransformation", +] + + +class UploadPostTransformSuccessEventData(BaseModel): + file_id: str = FieldInfo(alias="fileId") + """Unique identifier of the originally uploaded file.""" + + name: str + """Name of the file.""" + + url: str + """URL of the generated post-transformation.""" + + +class UploadPostTransformSuccessEventRequestTransformation(BaseModel): + type: Literal["transformation", "abs", "gif-to-video", "thumbnail"] + """Type of the requested post-transformation.""" + + protocol: Optional[Literal["hls", "dash"]] = None + """Only applicable if transformation type is 'abs'. Streaming protocol used.""" + + value: Optional[str] = None + """Value for the requested transformation type.""" + + +class UploadPostTransformSuccessEventRequest(BaseModel): + transformation: UploadPostTransformSuccessEventRequestTransformation + + x_request_id: str + """Unique identifier for the originating request.""" + + +class UploadPostTransformSuccessEvent(BaseWebhookEvent): + """Triggered when a post-transformation completes successfully. + + The transformed version of the file is now ready and can be accessed via the provided URL. Note that each post-transformation generates a separate webhook event. + """ + + created_at: datetime + """Timestamp of when the event occurred in ISO8601 format.""" + + data: UploadPostTransformSuccessEventData + + request: UploadPostTransformSuccessEventRequest + + type: Literal["upload.post-transform.success"] # type: ignore diff --git a/src/imagekitio/types/upload_pre_transform_error_event.py b/src/imagekitio/types/upload_pre_transform_error_event.py new file mode 100644 index 00000000..de907898 --- /dev/null +++ b/src/imagekitio/types/upload_pre_transform_error_event.py @@ -0,0 +1,58 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "UploadPreTransformErrorEvent", + "UploadPreTransformErrorEventData", + "UploadPreTransformErrorEventDataTransformation", + "UploadPreTransformErrorEventDataTransformationError", + "UploadPreTransformErrorEventRequest", +] + + +class UploadPreTransformErrorEventDataTransformationError(BaseModel): + reason: str + """Reason for the pre-transformation failure.""" + + +class UploadPreTransformErrorEventDataTransformation(BaseModel): + error: UploadPreTransformErrorEventDataTransformationError + + +class UploadPreTransformErrorEventData(BaseModel): + name: str + """Name of the file.""" + + path: str + """Path of the file.""" + + transformation: UploadPreTransformErrorEventDataTransformation + + +class UploadPreTransformErrorEventRequest(BaseModel): + transformation: str + """The requested pre-transformation string.""" + + x_request_id: str + """Unique identifier for the originating request.""" + + +class UploadPreTransformErrorEvent(BaseWebhookEvent): + """Triggered when a pre-transformation fails. + + The file upload may have been accepted, but the requested transformation could not be applied. 
+ """ + + created_at: datetime + """Timestamp of when the event occurred in ISO8601 format.""" + + data: UploadPreTransformErrorEventData + + request: UploadPreTransformErrorEventRequest + + type: Literal["upload.pre-transform.error"] # type: ignore diff --git a/src/imagekitio/types/upload_pre_transform_success_event.py b/src/imagekitio/types/upload_pre_transform_success_event.py new file mode 100644 index 00000000..8584f867 --- /dev/null +++ b/src/imagekitio/types/upload_pre_transform_success_event.py @@ -0,0 +1,294 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .metadata import Metadata +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "UploadPreTransformSuccessEvent", + "UploadPreTransformSuccessEventData", + "UploadPreTransformSuccessEventDataAITag", + "UploadPreTransformSuccessEventDataExtensionStatus", + "UploadPreTransformSuccessEventDataSelectedFieldsSchema", + "UploadPreTransformSuccessEventDataVersionInfo", + "UploadPreTransformSuccessEventRequest", +] + + +class UploadPreTransformSuccessEventDataAITag(BaseModel): + confidence: Optional[float] = None + """Confidence score of the tag.""" + + name: Optional[str] = None + """Name of the tag.""" + + source: Optional[str] = None + """Array of `AITags` associated with the image. + + If no `AITags` are set, it will be null. These tags can be added using the + `google-auto-tagging` or `aws-auto-tagging` extensions. + """ + + +class UploadPreTransformSuccessEventDataExtensionStatus(BaseModel): + """ + Extension names with their processing status at the time of completion of the request. It could have one of the following status values: + + `success`: The extension has been successfully applied. + `failed`: The extension has failed and will not be retried. + `pending`: The extension will finish processing in some time. On completion, the final status (success / failed) will be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. + """ + + ai_auto_description: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="ai-auto-description", default=None + ) + + aws_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="aws-auto-tagging", default=None + ) + + google_auto_tagging: Optional[Literal["success", "pending", "failed"]] = FieldInfo( + alias="google-auto-tagging", default=None + ) + + remove_bg: Optional[Literal["success", "pending", "failed"]] = FieldInfo(alias="remove-bg", default=None) + + +class UploadPreTransformSuccessEventDataSelectedFieldsSchema(BaseModel): + type: Literal["Text", "Textarea", "Number", "Date", "Boolean", "SingleSelect", "MultiSelect"] + """Type of the custom metadata field.""" + + default_value: Union[str, float, bool, List[Union[str, float, bool]], None] = FieldInfo( + alias="defaultValue", default=None + ) + """The default value for this custom metadata field. + + The value should match the `type` of custom metadata field. + """ + + is_value_required: Optional[bool] = FieldInfo(alias="isValueRequired", default=None) + """Specifies if the custom metadata field is required or not.""" + + max_length: Optional[float] = FieldInfo(alias="maxLength", default=None) + """Maximum length of string. 
Only set if `type` is set to `Text` or `Textarea`.""" + + max_value: Union[str, float, None] = FieldInfo(alias="maxValue", default=None) + """Maximum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + min_length: Optional[float] = FieldInfo(alias="minLength", default=None) + """Minimum length of string. Only set if `type` is set to `Text` or `Textarea`.""" + + min_value: Union[str, float, None] = FieldInfo(alias="minValue", default=None) + """Minimum value of the field. + + Only set if field type is `Date` or `Number`. For `Date` type field, the value + will be in ISO8601 string format. For `Number` type field, it will be a numeric + value. + """ + + read_only: Optional[bool] = FieldInfo(alias="readOnly", default=None) + """Indicates whether the custom metadata field is read only. + + A read only field cannot be modified after being set. This field is configurable + only via the **Path policy** feature. + """ + + select_options: Optional[List[Union[str, float, bool]]] = FieldInfo(alias="selectOptions", default=None) + """An array of allowed values when field type is `SingleSelect` or `MultiSelect`.""" + + select_options_truncated: Optional[bool] = FieldInfo(alias="selectOptionsTruncated", default=None) + """Specifies if the selectOptions array is truncated. + + It is truncated when number of options are > 100. + """ + + +class UploadPreTransformSuccessEventDataVersionInfo(BaseModel): + """An object containing the file or file version's `id` (versionId) and `name`.""" + + id: Optional[str] = None + """Unique identifier of the file version.""" + + name: Optional[str] = None + """Name of the file version.""" + + +class UploadPreTransformSuccessEventData(BaseModel): + """Object containing details of a successful upload.""" + + ai_tags: Optional[List[UploadPreTransformSuccessEventDataAITag]] = FieldInfo(alias="AITags", default=None) + """An array of tags assigned to the uploaded file by auto tagging.""" + + audio_codec: Optional[str] = FieldInfo(alias="audioCodec", default=None) + """The audio codec used in the video (only for video).""" + + bit_rate: Optional[int] = FieldInfo(alias="bitRate", default=None) + """The bit rate of the video in kbps (only for video).""" + + custom_coordinates: Optional[str] = FieldInfo(alias="customCoordinates", default=None) + """ + Value of custom coordinates associated with the image in the format + `x,y,width,height`. If `customCoordinates` are not defined, then it is `null`. + Send `customCoordinates` in `responseFields` in API request to get the value of + this field. + """ + + custom_metadata: Optional[Dict[str, object]] = FieldInfo(alias="customMetadata", default=None) + """A key-value data associated with the asset. + + Use `responseField` in API request to get `customMetadata` in the upload API + response. Before setting any custom metadata on an asset, you have to create the + field using custom metadata fields API. Send `customMetadata` in + `responseFields` in API request to get the value of this field. + """ + + description: Optional[str] = None + """Optional text to describe the contents of the file. + + Can be set by the user or the ai-auto-description extension. 
+ """ + + duration: Optional[int] = None + """The duration of the video in seconds (only for video).""" + + embedded_metadata: Optional[Dict[str, object]] = FieldInfo(alias="embeddedMetadata", default=None) + """Consolidated embedded metadata associated with the file. + + It includes exif, iptc, and xmp data. Send `embeddedMetadata` in + `responseFields` in API request to get embeddedMetadata in the upload API + response. + """ + + extension_status: Optional[UploadPreTransformSuccessEventDataExtensionStatus] = FieldInfo( + alias="extensionStatus", default=None + ) + """ + Extension names with their processing status at the time of completion of the + request. It could have one of the following status values: + + `success`: The extension has been successfully applied. `failed`: The extension + has failed and will not be retried. `pending`: The extension will finish + processing in some time. On completion, the final status (success / failed) will + be sent to the `webhookUrl` provided. + + If no extension was requested, then this parameter is not returned. + """ + + file_id: Optional[str] = FieldInfo(alias="fileId", default=None) + """Unique fileId. + + Store this fileld in your database, as this will be used to perform update + action on this file. + """ + + file_path: Optional[str] = FieldInfo(alias="filePath", default=None) + """The relative path of the file in the media library e.g. + + `/marketing-assets/new-banner.jpg`. + """ + + file_type: Optional[str] = FieldInfo(alias="fileType", default=None) + """Type of the uploaded file. Possible values are `image`, `non-image`.""" + + height: Optional[float] = None + """Height of the image in pixels (Only for images)""" + + is_private_file: Optional[bool] = FieldInfo(alias="isPrivateFile", default=None) + """Is the file marked as private. + + It can be either `true` or `false`. Send `isPrivateFile` in `responseFields` in + API request to get the value of this field. + """ + + is_published: Optional[bool] = FieldInfo(alias="isPublished", default=None) + """Is the file published or in draft state. + + It can be either `true` or `false`. Send `isPublished` in `responseFields` in + API request to get the value of this field. + """ + + metadata: Optional[Metadata] = None + """Legacy metadata. + + Send `metadata` in `responseFields` in API request to get metadata in the upload + API response. + """ + + name: Optional[str] = None + """Name of the asset.""" + + selected_fields_schema: Optional[Dict[str, UploadPreTransformSuccessEventDataSelectedFieldsSchema]] = FieldInfo( + alias="selectedFieldsSchema", default=None + ) + """ + This field is included in the response only if the Path policy feature is + available in the plan. It contains schema definitions for the custom metadata + fields selected for the specified file path. Field selection can only be done + when the Path policy feature is enabled. + + Keys are the names of the custom metadata fields; the value object has details + about the custom metadata schema. + """ + + size: Optional[float] = None + """Size of the image file in Bytes.""" + + tags: Optional[List[str]] = None + """The array of tags associated with the asset. + + If no tags are set, it will be `null`. Send `tags` in `responseFields` in API + request to get the value of this field. 
+ """ + + thumbnail_url: Optional[str] = FieldInfo(alias="thumbnailUrl", default=None) + """In the case of an image, a small thumbnail URL.""" + + url: Optional[str] = None + """A publicly accessible URL of the file.""" + + version_info: Optional[UploadPreTransformSuccessEventDataVersionInfo] = FieldInfo(alias="versionInfo", default=None) + """An object containing the file or file version's `id` (versionId) and `name`.""" + + video_codec: Optional[str] = FieldInfo(alias="videoCodec", default=None) + """The video codec used in the video (only for video).""" + + width: Optional[float] = None + """Width of the image in pixels (Only for Images)""" + + +class UploadPreTransformSuccessEventRequest(BaseModel): + transformation: str + """The requested pre-transformation string.""" + + x_request_id: str + """Unique identifier for the originating request.""" + + +class UploadPreTransformSuccessEvent(BaseWebhookEvent): + """Triggered when a pre-transformation completes successfully. + + The file has been processed with the requested transformation and is now available in the Media Library. + """ + + created_at: datetime + """Timestamp of when the event occurred in ISO8601 format.""" + + data: UploadPreTransformSuccessEventData + """Object containing details of a successful upload.""" + + request: UploadPreTransformSuccessEventRequest + + type: Literal["upload.pre-transform.success"] # type: ignore diff --git a/src/imagekitio/types/video_transformation_accepted_event.py b/src/imagekitio/types/video_transformation_accepted_event.py new file mode 100644 index 00000000..4ddb83ca --- /dev/null +++ b/src/imagekitio/types/video_transformation_accepted_event.py @@ -0,0 +1,103 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "VideoTransformationAcceptedEvent", + "VideoTransformationAcceptedEventData", + "VideoTransformationAcceptedEventDataAsset", + "VideoTransformationAcceptedEventDataTransformation", + "VideoTransformationAcceptedEventDataTransformationOptions", + "VideoTransformationAcceptedEventRequest", +] + + +class VideoTransformationAcceptedEventDataAsset(BaseModel): + """Information about the source video asset being transformed.""" + + url: str + """URL to download or access the source video file.""" + + +class VideoTransformationAcceptedEventDataTransformationOptions(BaseModel): + """Configuration options for video transformations.""" + + audio_codec: Optional[Literal["aac", "opus"]] = None + """Audio codec used for encoding (aac or opus).""" + + auto_rotate: Optional[bool] = None + """Whether to automatically rotate the video based on metadata.""" + + format: Optional[Literal["mp4", "webm", "jpg", "png", "webp"]] = None + """Output format for the transformed video or thumbnail.""" + + quality: Optional[int] = None + """Quality setting for the output video.""" + + stream_protocol: Optional[Literal["HLS", "DASH"]] = None + """Streaming protocol for adaptive bitrate streaming.""" + + variants: Optional[List[str]] = None + """Array of quality representations for adaptive bitrate streaming.""" + + video_codec: Optional[Literal["h264", "vp9", "av1"]] = None + """Video codec used for encoding (h264, vp9, or av1).""" + + +class VideoTransformationAcceptedEventDataTransformation(BaseModel): + """Base information about a video transformation request.""" + + 
type: Literal["video-transformation", "gif-to-video", "video-thumbnail"] + """Type of video transformation: + + - `video-transformation`: Standard video processing (resize, format conversion, + etc.) + - `gif-to-video`: Convert animated GIF to video format + - `video-thumbnail`: Generate thumbnail image from video + """ + + options: Optional[VideoTransformationAcceptedEventDataTransformationOptions] = None + """Configuration options for video transformations.""" + + +class VideoTransformationAcceptedEventData(BaseModel): + asset: VideoTransformationAcceptedEventDataAsset + """Information about the source video asset being transformed.""" + + transformation: VideoTransformationAcceptedEventDataTransformation + """Base information about a video transformation request.""" + + +class VideoTransformationAcceptedEventRequest(BaseModel): + """Information about the original request that triggered the video transformation.""" + + url: str + """Full URL of the transformation request that was submitted.""" + + x_request_id: str + """Unique identifier for the originating transformation request.""" + + user_agent: Optional[str] = None + """User-Agent header from the original request that triggered the transformation.""" + + +class VideoTransformationAcceptedEvent(BaseWebhookEvent): + """Triggered when a new video transformation request is accepted for processing. + + This event confirms that ImageKit has received and queued your transformation request. Use this for debugging and tracking transformation lifecycle. + """ + + created_at: datetime + """Timestamp when the event was created in ISO8601 format.""" + + data: VideoTransformationAcceptedEventData + + request: VideoTransformationAcceptedEventRequest + """Information about the original request that triggered the video transformation.""" + + type: Literal["video.transformation.accepted"] # type: ignore diff --git a/src/imagekitio/types/video_transformation_error_event.py b/src/imagekitio/types/video_transformation_error_event.py new file mode 100644 index 00000000..788142fe --- /dev/null +++ b/src/imagekitio/types/video_transformation_error_event.py @@ -0,0 +1,116 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
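The `VideoTransformationAcceptedEvent` model above is informational: it confirms that ImageKit has queued the transformation. A minimal sketch of a webhook handler for it, assuming the raw JSON payload uses the same keys as the model fields shown above; `handle_accepted` and the surrounding wiring are hypothetical and not part of this SDK:

```python
import json


def handle_accepted(raw_body: bytes) -> None:
    # Hypothetical handler; key names assume the payload mirrors the model fields above.
    event = json.loads(raw_body)
    if event.get("type") != "video.transformation.accepted":
        return

    data = event["data"]
    # transformation type is one of: video-transformation, gif-to-video, video-thumbnail
    print(
        "queued", data["transformation"]["type"],
        "for", data["asset"]["url"],
        "request", event["request"]["x_request_id"],
    )
```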
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "VideoTransformationErrorEvent", + "VideoTransformationErrorEventData", + "VideoTransformationErrorEventDataAsset", + "VideoTransformationErrorEventDataTransformation", + "VideoTransformationErrorEventDataTransformationError", + "VideoTransformationErrorEventDataTransformationOptions", + "VideoTransformationErrorEventRequest", +] + + +class VideoTransformationErrorEventDataAsset(BaseModel): + """Information about the source video asset being transformed.""" + + url: str + """URL to download or access the source video file.""" + + +class VideoTransformationErrorEventDataTransformationError(BaseModel): + """Details about the transformation error.""" + + reason: Literal["encoding_failed", "download_failed", "internal_server_error"] + """Specific reason for the transformation failure: + + - `encoding_failed`: Error during video encoding process + - `download_failed`: Could not download source video + - `internal_server_error`: Unexpected server error + """ + + +class VideoTransformationErrorEventDataTransformationOptions(BaseModel): + """Configuration options for video transformations.""" + + audio_codec: Optional[Literal["aac", "opus"]] = None + """Audio codec used for encoding (aac or opus).""" + + auto_rotate: Optional[bool] = None + """Whether to automatically rotate the video based on metadata.""" + + format: Optional[Literal["mp4", "webm", "jpg", "png", "webp"]] = None + """Output format for the transformed video or thumbnail.""" + + quality: Optional[int] = None + """Quality setting for the output video.""" + + stream_protocol: Optional[Literal["HLS", "DASH"]] = None + """Streaming protocol for adaptive bitrate streaming.""" + + variants: Optional[List[str]] = None + """Array of quality representations for adaptive bitrate streaming.""" + + video_codec: Optional[Literal["h264", "vp9", "av1"]] = None + """Video codec used for encoding (h264, vp9, or av1).""" + + +class VideoTransformationErrorEventDataTransformation(BaseModel): + type: Literal["video-transformation", "gif-to-video", "video-thumbnail"] + """Type of video transformation: + + - `video-transformation`: Standard video processing (resize, format conversion, + etc.) 
+ - `gif-to-video`: Convert animated GIF to video format + - `video-thumbnail`: Generate thumbnail image from video + """ + + error: Optional[VideoTransformationErrorEventDataTransformationError] = None + """Details about the transformation error.""" + + options: Optional[VideoTransformationErrorEventDataTransformationOptions] = None + """Configuration options for video transformations.""" + + +class VideoTransformationErrorEventData(BaseModel): + asset: VideoTransformationErrorEventDataAsset + """Information about the source video asset being transformed.""" + + transformation: VideoTransformationErrorEventDataTransformation + + +class VideoTransformationErrorEventRequest(BaseModel): + """Information about the original request that triggered the video transformation.""" + + url: str + """Full URL of the transformation request that was submitted.""" + + x_request_id: str + """Unique identifier for the originating transformation request.""" + + user_agent: Optional[str] = None + """User-Agent header from the original request that triggered the transformation.""" + + +class VideoTransformationErrorEvent(BaseWebhookEvent): + """Triggered when an error occurs during video encoding. + + Listen to this webhook to log error reasons and debug issues. Check your origin and URL endpoint settings if the reason is related to download failure. For other errors, contact ImageKit support. + """ + + created_at: datetime + """Timestamp when the event was created in ISO8601 format.""" + + data: VideoTransformationErrorEventData + + request: VideoTransformationErrorEventRequest + """Information about the original request that triggered the video transformation.""" + + type: Literal["video.transformation.error"] # type: ignore diff --git a/src/imagekitio/types/video_transformation_ready_event.py b/src/imagekitio/types/video_transformation_ready_event.py new file mode 100644 index 00000000..a711a9e9 --- /dev/null +++ b/src/imagekitio/types/video_transformation_ready_event.py @@ -0,0 +1,147 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
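For `video.transformation.error`, the docstrings above suggest branching on `error.reason`: check your origin and URL-endpoint settings when the reason is `download_failed`, and contact ImageKit support for `internal_server_error`. A rough sketch under the same payload-shape assumption as before; the handler and logger name are illustrative only:

```python
import json
import logging

logger = logging.getLogger("imagekit.webhooks")


def handle_error(raw_body: bytes) -> None:
    # Hypothetical handler; assumes payload keys mirror the model fields above.
    event = json.loads(raw_body)
    if event.get("type") != "video.transformation.error":
        return

    asset_url = event["data"]["asset"]["url"]
    reason = (event["data"]["transformation"].get("error") or {}).get("reason")

    if reason == "download_failed":
        # Source video could not be fetched: check origin and URL-endpoint settings.
        logger.error("Could not download source video %s", asset_url)
    elif reason == "encoding_failed":
        logger.error("Encoding failed for %s", asset_url)
    else:
        # internal_server_error or an unrecognized reason: worth raising with support.
        logger.error("Transformation failed for %s: %s", asset_url, reason)
```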
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .base_webhook_event import BaseWebhookEvent + +__all__ = [ + "VideoTransformationReadyEvent", + "VideoTransformationReadyEventData", + "VideoTransformationReadyEventDataAsset", + "VideoTransformationReadyEventDataTransformation", + "VideoTransformationReadyEventDataTransformationOptions", + "VideoTransformationReadyEventDataTransformationOutput", + "VideoTransformationReadyEventDataTransformationOutputVideoMetadata", + "VideoTransformationReadyEventRequest", + "VideoTransformationReadyEventTimings", +] + + +class VideoTransformationReadyEventDataAsset(BaseModel): + """Information about the source video asset being transformed.""" + + url: str + """URL to download or access the source video file.""" + + +class VideoTransformationReadyEventDataTransformationOptions(BaseModel): + """Configuration options for video transformations.""" + + audio_codec: Optional[Literal["aac", "opus"]] = None + """Audio codec used for encoding (aac or opus).""" + + auto_rotate: Optional[bool] = None + """Whether to automatically rotate the video based on metadata.""" + + format: Optional[Literal["mp4", "webm", "jpg", "png", "webp"]] = None + """Output format for the transformed video or thumbnail.""" + + quality: Optional[int] = None + """Quality setting for the output video.""" + + stream_protocol: Optional[Literal["HLS", "DASH"]] = None + """Streaming protocol for adaptive bitrate streaming.""" + + variants: Optional[List[str]] = None + """Array of quality representations for adaptive bitrate streaming.""" + + video_codec: Optional[Literal["h264", "vp9", "av1"]] = None + """Video codec used for encoding (h264, vp9, or av1).""" + + +class VideoTransformationReadyEventDataTransformationOutputVideoMetadata(BaseModel): + """Metadata of the output video file.""" + + bitrate: int + """Bitrate of the output video in bits per second.""" + + duration: float + """Duration of the output video in seconds.""" + + height: int + """Height of the output video in pixels.""" + + width: int + """Width of the output video in pixels.""" + + +class VideoTransformationReadyEventDataTransformationOutput(BaseModel): + """Information about the transformed output video.""" + + url: str + """URL to access the transformed video.""" + + video_metadata: Optional[VideoTransformationReadyEventDataTransformationOutputVideoMetadata] = None + """Metadata of the output video file.""" + + +class VideoTransformationReadyEventDataTransformation(BaseModel): + type: Literal["video-transformation", "gif-to-video", "video-thumbnail"] + """Type of video transformation: + + - `video-transformation`: Standard video processing (resize, format conversion, + etc.) 
+ - `gif-to-video`: Convert animated GIF to video format + - `video-thumbnail`: Generate thumbnail image from video + """ + + options: Optional[VideoTransformationReadyEventDataTransformationOptions] = None + """Configuration options for video transformations.""" + + output: Optional[VideoTransformationReadyEventDataTransformationOutput] = None + """Information about the transformed output video.""" + + +class VideoTransformationReadyEventData(BaseModel): + asset: VideoTransformationReadyEventDataAsset + """Information about the source video asset being transformed.""" + + transformation: VideoTransformationReadyEventDataTransformation + + +class VideoTransformationReadyEventRequest(BaseModel): + """Information about the original request that triggered the video transformation.""" + + url: str + """Full URL of the transformation request that was submitted.""" + + x_request_id: str + """Unique identifier for the originating transformation request.""" + + user_agent: Optional[str] = None + """User-Agent header from the original request that triggered the transformation.""" + + +class VideoTransformationReadyEventTimings(BaseModel): + """Performance metrics for the transformation process.""" + + download_duration: Optional[int] = None + """ + Time spent downloading the source video from your origin or media library, in + milliseconds. + """ + + encoding_duration: Optional[int] = None + """Time spent encoding the video, in milliseconds.""" + + +class VideoTransformationReadyEvent(BaseWebhookEvent): + """ + Triggered when video encoding is finished and the transformed resource is ready to be served. This is the key event to listen for - update your database or CMS flags when you receive this so your application can start showing the transformed video to users. + """ + + created_at: datetime + """Timestamp when the event was created in ISO8601 format.""" + + data: VideoTransformationReadyEventData + + request: VideoTransformationReadyEventRequest + """Information about the original request that triggered the video transformation.""" + + type: Literal["video.transformation.ready"] # type: ignore + + timings: Optional[VideoTransformationReadyEventTimings] = None + """Performance metrics for the transformation process.""" diff --git a/tests/__init__.py b/tests/__init__.py index e69de29b..fd8019a9 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/__init__.py b/tests/api_resources/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/accounts/__init__.py b/tests/api_resources/accounts/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/accounts/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/accounts/test_origins.py b/tests/api_resources/accounts/test_origins.py new file mode 100644 index 00000000..95b52147 --- /dev/null +++ b/tests/api_resources/accounts/test_origins.py @@ -0,0 +1,2432 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
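`video.transformation.ready` is the event to act on: per the docstring above, flip whatever flag your application checks before serving the transformed video. A sketch, again assuming the payload keys mirror the model fields; `mark_video_ready` stands in for your own persistence layer and is not part of this SDK:

```python
import json
from typing import Callable


def handle_ready(raw_body: bytes, mark_video_ready: Callable[[str, str], None]) -> None:
    # Hypothetical handler; mark_video_ready is your own DB/CMS update function.
    event = json.loads(raw_body)
    if event.get("type") != "video.transformation.ready":
        return

    output = event["data"]["transformation"].get("output") or {}
    mark_video_ready(event["data"]["asset"]["url"], output.get("url", ""))

    # Optional performance metrics in milliseconds, when present.
    timings = event.get("timings") or {}
    print("download ms:", timings.get("download_duration"), "encoding ms:", timings.get("encoding_duration"))
```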
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.accounts import OriginResponse, OriginListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestOrigins: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_1(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_1(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_1(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_1(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_2(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_2(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + 
base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + s3_force_path_style=True, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_2(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_2(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_3(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_3(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_3(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_3(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_4(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_4(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + base_url_for_canonical_header="https://cdn.example.com", + forward_host_header_to_origin=False, + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_4(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_4(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_5(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + name="US S3 Storage", + type="WEB_PROXY", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_5(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + name="US S3 Storage", + type="WEB_PROXY", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_5(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + name="US S3 Storage", + type="WEB_PROXY", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_5(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + 
name="US S3 Storage", + type="WEB_PROXY", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_6(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_6(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="products", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_6(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_6(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_7(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_7(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="uploads", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") 
+ @parametrize + def test_raw_response_create_overload_7(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_7(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_8(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_8(self, client: ImageKit) -> None: + origin = client.accounts.origins.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_8(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_8(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
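The generated tests above exercise three calling conventions that application code can use as well: the plain method, `with_raw_response`, and `with_streaming_response`. A minimal sketch mirroring those calls, assuming `client` is an already-configured `ImageKit` instance (credentials omitted here):

```python
from imagekitio import ImageKit


def create_and_list_origins(client: ImageKit):
    # Plain call: returns the parsed OriginResponse model directly.
    origin = client.accounts.origins.create(
        access_key="AKIAIOSFODNN7EXAMPLE",
        bucket="product-images",
        name="US S3 Storage",
        secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
        type="S3",
    )

    # Raw-response call: exposes the underlying HTTP response; parse() yields the same model.
    response = client.accounts.origins.with_raw_response.list()
    origins = response.parse()

    return origin, origins
```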
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_1(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_1(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_1(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_1(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_1(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_2(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_2(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + 
secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + s3_force_path_style=True, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_2(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_2(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_2(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_3(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_3(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_3(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + 
secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_3(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_3(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_4(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_4(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + base_url_for_canonical_header="https://cdn.example.com", + forward_host_header_to_origin=False, + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_4(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_4(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_4(self, client: ImageKit) -> 
None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_5(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_5(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_5(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_5(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_5(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + name="US S3 Storage", + type="WEB_PROXY", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_6(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_6(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="products", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_6(self, client: ImageKit) -> None: + response = 
client.accounts.origins.with_raw_response.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_6(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_6(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_7(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_7(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="uploads", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_7(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_7(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) as response: + assert not response.is_closed + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_7(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_8(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_8(self, client: ImageKit) -> None: + origin = client.accounts.origins.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_8(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_8(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_8(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.update( + id="", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + 
password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: ImageKit) -> None: + origin = client.accounts.origins.list() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + origin = client.accounts.origins.delete( + "id", + ) + assert origin is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert origin is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + assert origin is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + origin = client.accounts.origins.get( + "id", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.accounts.origins.with_raw_response.get( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.accounts.origins.with_streaming_response.get( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = response.parse() + 
assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.origins.with_raw_response.get( + "", + ) + + +class TestAsyncOrigins: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncImageKit) 
-> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + s3_force_path_style=True, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_3(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_3(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_3(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + 
assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_3(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_4(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_4(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + base_url_for_canonical_header="https://cdn.example.com", + forward_host_header_to_origin=False, + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_4(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_4(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_5(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + name="US S3 Storage", + type="WEB_PROXY", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_5(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + name="US S3 Storage", + type="WEB_PROXY", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, 
path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_5(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + name="US S3 Storage", + type="WEB_PROXY", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_5(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + name="US S3 Storage", + type="WEB_PROXY", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_6(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_6(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="products", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_6(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_6(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + 
async def test_method_create_overload_7(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_7(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="uploads", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_7(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_7(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_8(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_8(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_8(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.create( + 
base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_8(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.create( + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_1(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_1(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_1(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_1(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_2(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + s3_force_path_style=True, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_2(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_2(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_2(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + 
endpoint="https://s3.eu-central-1.wasabisys.com", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="S3_COMPATIBLE", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_3(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_3(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="raw-assets", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_3(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_3(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_3(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + access_key="AKIAIOSFODNN7EXAMPLE", + bucket="product-images", + name="US S3 Storage", + secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + type="CLOUDINARY_BACKUP", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_4(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_method_update_with_all_params_overload_4(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + base_url_for_canonical_header="https://cdn.example.com", + forward_host_header_to_origin=False, + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_4(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_4(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_4(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + base_url="https://images.example.com/assets", + name="US S3 Storage", + type="WEB_FOLDER", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_5(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_5(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_5(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_5(self, async_client: AsyncImageKit) -> None: + async with 
async_client.accounts.origins.with_streaming_response.update( + id="id", + name="US S3 Storage", + type="WEB_PROXY", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_5(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + name="US S3 Storage", + type="WEB_PROXY", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_6(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_6(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="products", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_6(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_6(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_6(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + 
bucket="gcs-media", + client_email="service-account@project.iam.gserviceaccount.com", + name="US S3 Storage", + private_key="-----BEGIN PRIVATE KEY-----\\nMIIEv...", + type="GCS", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_7(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_7(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + prefix="uploads", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_7(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_7(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_7(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + account_name="account123", + container="images", + name="US S3 Storage", + sas_token="?sv=2023-01-03&sr=c&sig=abc123", + type="AZURE_BLOB", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_8(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_method_update_with_all_params_overload_8(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + base_url_for_canonical_header="https://cdn.example.com", + include_canonical_header=False, + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_8(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_8(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.update( + id="id", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_8(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.update( + id="", + base_url="https://akeneo.company.com", + client_id="akeneo-client-id", + client_secret="akeneo-client-secret", + name="US S3 Storage", + password="strongpassword123", + type="AKENEO_PIM", + username="integration-user", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.list() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.list() as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginListResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.delete( + "id", + ) + assert origin is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert origin is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert origin is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + origin = await async_client.accounts.origins.get( + "id", + ) + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.origins.with_raw_response.get( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.origins.with_streaming_response.get( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + origin = await response.parse() + assert_matches_type(OriginResponse, origin, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.origins.with_raw_response.get( + "", + ) diff --git a/tests/api_resources/accounts/test_url_endpoints.py b/tests/api_resources/accounts/test_url_endpoints.py new file mode 100644 index 00000000..954f6309 --- /dev/null +++ b/tests/api_resources/accounts/test_url_endpoints.py @@ -0,0 +1,469 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.accounts import ( + URLEndpointResponse, + URLEndpointListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestURLEndpoints: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.create( + description="My custom URL endpoint", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.create( + description="My custom URL endpoint", + origins=["origin-id-1"], + url_prefix="product-images", + url_rewriter={ + "type": "CLOUDINARY", + "preserve_asset_delivery_types": True, + }, + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: ImageKit) -> None: + response = client.accounts.url_endpoints.with_raw_response.create( + description="My custom URL endpoint", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: ImageKit) -> None: + with client.accounts.url_endpoints.with_streaming_response.create( + description="My custom URL endpoint", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.update( + id="id", + description="My custom URL endpoint", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.update( + id="id", + description="My custom URL endpoint", + origins=["origin-id-1"], + url_prefix="product-images", + url_rewriter={ + "type": "CLOUDINARY", + "preserve_asset_delivery_types": True, + }, + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update(self, client: ImageKit) -> None: + response = client.accounts.url_endpoints.with_raw_response.update( + id="id", + description="My custom URL endpoint", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update(self, client: ImageKit) -> None: + with client.accounts.url_endpoints.with_streaming_response.update( + id="id", + description="My custom URL endpoint", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.url_endpoints.with_raw_response.update( + id="", + description="My custom URL endpoint", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.list() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: ImageKit) -> None: + response = client.accounts.url_endpoints.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = response.parse() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: ImageKit) -> None: + with client.accounts.url_endpoints.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = response.parse() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.delete( + "id", + ) + assert url_endpoint is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.accounts.url_endpoints.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = response.parse() + assert url_endpoint is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.accounts.url_endpoints.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = response.parse() + assert url_endpoint is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.url_endpoints.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: 
ImageKit) -> None: + url_endpoint = client.accounts.url_endpoints.get( + "id", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.accounts.url_endpoints.with_raw_response.get( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.accounts.url_endpoints.with_streaming_response.get( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.accounts.url_endpoints.with_raw_response.get( + "", + ) + + +class TestAsyncURLEndpoints: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.create( + description="My custom URL endpoint", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.create( + description="My custom URL endpoint", + origins=["origin-id-1"], + url_prefix="product-images", + url_rewriter={ + "type": "CLOUDINARY", + "preserve_asset_delivery_types": True, + }, + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.url_endpoints.with_raw_response.create( + description="My custom URL endpoint", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.url_endpoints.with_streaming_response.create( + description="My custom URL endpoint", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + 
async def test_method_update(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.update( + id="id", + description="My custom URL endpoint", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.update( + id="id", + description="My custom URL endpoint", + origins=["origin-id-1"], + url_prefix="product-images", + url_rewriter={ + "type": "CLOUDINARY", + "preserve_asset_delivery_types": True, + }, + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.url_endpoints.with_raw_response.update( + id="id", + description="My custom URL endpoint", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.url_endpoints.with_streaming_response.update( + id="id", + description="My custom URL endpoint", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.url_endpoints.with_raw_response.update( + id="", + description="My custom URL endpoint", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.list() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.url_endpoints.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = await response.parse() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.url_endpoints.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = await response.parse() + assert_matches_type(URLEndpointListResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.delete( + "id", + ) + assert url_endpoint is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.url_endpoints.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = await response.parse() + assert url_endpoint is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.url_endpoints.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = await response.parse() + assert url_endpoint is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.url_endpoints.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + url_endpoint = await async_client.accounts.url_endpoints.get( + "id", + ) + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.url_endpoints.with_raw_response.get( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.url_endpoints.with_streaming_response.get( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + url_endpoint = await response.parse() + assert_matches_type(URLEndpointResponse, url_endpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.accounts.url_endpoints.with_raw_response.get( + "", + ) diff --git a/tests/api_resources/accounts/test_usage.py b/tests/api_resources/accounts/test_usage.py new file mode 100644 index 00000000..f4776931 --- /dev/null +++ b/tests/api_resources/accounts/test_usage.py @@ -0,0 +1,99 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio._utils import parse_date +from imagekitio.types.accounts import UsageGetResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestUsage: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + usage = client.accounts.usage.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.accounts.usage.with_raw_response.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.accounts.usage.with_streaming_response.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncUsage: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + usage = await async_client.accounts.usage.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.accounts.usage.with_raw_response.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.accounts.usage.with_streaming_response.get( + end_date=parse_date("2019-12-27"), + start_date=parse_date("2019-12-27"), + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageGetResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/beta/__init__.py b/tests/api_resources/beta/__init__.py new file mode 100644 index 
00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/beta/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/beta/v2/__init__.py b/tests/api_resources/beta/v2/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/beta/v2/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/beta/v2/test_files.py b/tests/api_resources/beta/v2/test_files.py new file mode 100644 index 00000000..d5f6bbdb --- /dev/null +++ b/tests/api_resources/beta/v2/test_files.py @@ -0,0 +1,216 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.beta.v2 import FileUploadResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFiles: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_upload(self, client: ImageKit) -> None: + file = client.beta.v2.files.upload( + file=b"raw file contents", + file_name="fileName", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_upload_with_all_params(self, client: ImageKit) -> None: + file = client.beta.v2.files.upload( + file=b"raw file contents", + file_name="fileName", + token="token", + checks='"request.folder" : "marketing/"\n', + custom_coordinates="customCoordinates", + custom_metadata={ + "brand": "bar", + "color": "bar", + }, + description="Running shoes", + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + folder="folder", + is_private_file=True, + is_published=True, + overwrite_ai_tags=True, + overwrite_custom_metadata=True, + overwrite_file=True, + overwrite_tags=True, + response_fields=["tags", "customCoordinates", "isPrivateFile"], + tags=["t-shirt", "round-neck", "men"], + transformation={ + "post": [ + { + "type": "thumbnail", + "value": "w-150,h-150", + }, + { + "protocol": "dash", + "type": "abs", + "value": "sr-240_360_480_720_1080", + }, + ], + "pre": "w-300,h-300,q-80", + }, + use_unique_file_name=True, + webhook_url="https://example.com", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_upload(self, client: ImageKit) -> None: + response = client.beta.v2.files.with_raw_response.upload( + file=b"raw file contents", + file_name="fileName", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_upload(self, client: ImageKit) -> None: + with 
client.beta.v2.files.with_streaming_response.upload( + file=b"raw file contents", + file_name="fileName", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncFiles: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_upload(self, async_client: AsyncImageKit) -> None: + file = await async_client.beta.v2.files.upload( + file=b"raw file contents", + file_name="fileName", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_upload_with_all_params(self, async_client: AsyncImageKit) -> None: + file = await async_client.beta.v2.files.upload( + file=b"raw file contents", + file_name="fileName", + token="token", + checks='"request.folder" : "marketing/"\n', + custom_coordinates="customCoordinates", + custom_metadata={ + "brand": "bar", + "color": "bar", + }, + description="Running shoes", + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + folder="folder", + is_private_file=True, + is_published=True, + overwrite_ai_tags=True, + overwrite_custom_metadata=True, + overwrite_file=True, + overwrite_tags=True, + response_fields=["tags", "customCoordinates", "isPrivateFile"], + tags=["t-shirt", "round-neck", "men"], + transformation={ + "post": [ + { + "type": "thumbnail", + "value": "w-150,h-150", + }, + { + "protocol": "dash", + "type": "abs", + "value": "sr-240_360_480_720_1080", + }, + ], + "pre": "w-300,h-300,q-80", + }, + use_unique_file_name=True, + webhook_url="https://example.com", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_upload(self, async_client: AsyncImageKit) -> None: + response = await async_client.beta.v2.files.with_raw_response.upload( + file=b"raw file contents", + file_name="fileName", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_upload(self, async_client: AsyncImageKit) -> None: + async with async_client.beta.v2.files.with_streaming_response.upload( + file=b"raw file contents", + file_name="fileName", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/cache/__init__.py b/tests/api_resources/cache/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/cache/__init__.py @@ -0,0 +1 @@ +# File generated from 
our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/cache/test_invalidation.py b/tests/api_resources/cache/test_invalidation.py new file mode 100644 index 00000000..e68c2a63 --- /dev/null +++ b/tests/api_resources/cache/test_invalidation.py @@ -0,0 +1,176 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.cache import InvalidationGetResponse, InvalidationCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestInvalidation: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: ImageKit) -> None: + invalidation = client.cache.invalidation.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: ImageKit) -> None: + response = client.cache.invalidation.with_raw_response.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invalidation = response.parse() + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: ImageKit) -> None: + with client.cache.invalidation.with_streaming_response.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invalidation = response.parse() + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + invalidation = client.cache.invalidation.get( + "requestId", + ) + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.cache.invalidation.with_raw_response.get( + "requestId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invalidation = response.parse() + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.cache.invalidation.with_streaming_response.get( + "requestId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invalidation = response.parse() + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are 
disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `request_id` but received ''"): + client.cache.invalidation.with_raw_response.get( + "", + ) + + +class TestAsyncInvalidation: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncImageKit) -> None: + invalidation = await async_client.cache.invalidation.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncImageKit) -> None: + response = await async_client.cache.invalidation.with_raw_response.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invalidation = await response.parse() + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncImageKit) -> None: + async with async_client.cache.invalidation.with_streaming_response.create( + url="https://ik.imagekit.io/your_imagekit_id/default-image.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invalidation = await response.parse() + assert_matches_type(InvalidationCreateResponse, invalidation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + invalidation = await async_client.cache.invalidation.get( + "requestId", + ) + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.cache.invalidation.with_raw_response.get( + "requestId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invalidation = await response.parse() + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.cache.invalidation.with_streaming_response.get( + "requestId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invalidation = await response.parse() + assert_matches_type(InvalidationGetResponse, invalidation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `request_id` but received ''"): + await 
async_client.cache.invalidation.with_raw_response.get( + "", + ) diff --git a/tests/api_resources/files/__init__.py b/tests/api_resources/files/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/files/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/files/test_bulk.py b/tests/api_resources/files/test_bulk.py new file mode 100644 index 00000000..150b5b24 --- /dev/null +++ b/tests/api_resources/files/test_bulk.py @@ -0,0 +1,319 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.files import ( + BulkDeleteResponse, + BulkAddTagsResponse, + BulkRemoveTagsResponse, + BulkRemoveAITagsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestBulk: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + bulk = client.files.bulk.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.files.bulk.with_raw_response.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = response.parse() + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.files.bulk.with_streaming_response.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = response.parse() + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_add_tags(self, client: ImageKit) -> None: + bulk = client.files.bulk.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_add_tags(self, client: ImageKit) -> None: + response = client.files.bulk.with_raw_response.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = response.parse() + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_add_tags(self, client: ImageKit) -> None: + with 
client.files.bulk.with_streaming_response.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = response.parse() + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_remove_ai_tags(self, client: ImageKit) -> None: + bulk = client.files.bulk.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_remove_ai_tags(self, client: ImageKit) -> None: + response = client.files.bulk.with_raw_response.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = response.parse() + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_remove_ai_tags(self, client: ImageKit) -> None: + with client.files.bulk.with_streaming_response.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = response.parse() + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_remove_tags(self, client: ImageKit) -> None: + bulk = client.files.bulk.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_remove_tags(self, client: ImageKit) -> None: + response = client.files.bulk.with_raw_response.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = response.parse() + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_remove_tags(self, client: ImageKit) -> None: + with client.files.bulk.with_streaming_response.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = response.parse() + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncBulk: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, 
{"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + bulk = await async_client.files.bulk.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.bulk.with_raw_response.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = await response.parse() + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.files.bulk.with_streaming_response.delete( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = await response.parse() + assert_matches_type(BulkDeleteResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_add_tags(self, async_client: AsyncImageKit) -> None: + bulk = await async_client.files.bulk.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_add_tags(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.bulk.with_raw_response.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = await response.parse() + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_add_tags(self, async_client: AsyncImageKit) -> None: + async with async_client.files.bulk.with_streaming_response.add_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = await response.parse() + assert_matches_type(BulkAddTagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_remove_ai_tags(self, async_client: AsyncImageKit) -> None: + bulk = await async_client.files.bulk.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_raw_response_remove_ai_tags(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.bulk.with_raw_response.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = await response.parse() + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_remove_ai_tags(self, async_client: AsyncImageKit) -> None: + async with async_client.files.bulk.with_streaming_response.remove_ai_tags( + ai_tags=["t-shirt", "round-neck", "sale2019"], + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = await response.parse() + assert_matches_type(BulkRemoveAITagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_remove_tags(self, async_client: AsyncImageKit) -> None: + bulk = await async_client.files.bulk.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_remove_tags(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.bulk.with_raw_response.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + bulk = await response.parse() + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_remove_tags(self, async_client: AsyncImageKit) -> None: + async with async_client.files.bulk.with_streaming_response.remove_tags( + file_ids=["598821f949c0a938d57563bd", "598821f949c0a938d57563be"], + tags=["t-shirt", "round-neck", "sale2019"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + bulk = await response.parse() + assert_matches_type(BulkRemoveTagsResponse, bulk, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/files/test_metadata.py b/tests/api_resources/files/test_metadata.py new file mode 100644 index 00000000..eb4007c1 --- /dev/null +++ b/tests/api_resources/files/test_metadata.py @@ -0,0 +1,176 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import Metadata + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestMetadata: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + metadata = client.files.metadata.get( + "fileId", + ) + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.files.metadata.with_raw_response.get( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + metadata = response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.files.metadata.with_streaming_response.get( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + metadata = response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.metadata.with_raw_response.get( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get_from_url(self, client: ImageKit) -> None: + metadata = client.files.metadata.get_from_url( + url="https://example.com", + ) + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get_from_url(self, client: ImageKit) -> None: + response = client.files.metadata.with_raw_response.get_from_url( + url="https://example.com", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + metadata = response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get_from_url(self, client: ImageKit) -> None: + with client.files.metadata.with_streaming_response.get_from_url( + url="https://example.com", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + metadata = response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncMetadata: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + metadata = await async_client.files.metadata.get( + "fileId", + ) + assert_matches_type(Metadata, metadata, path=["response"]) + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.metadata.with_raw_response.get( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + metadata = await response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.files.metadata.with_streaming_response.get( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + metadata = await response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.metadata.with_raw_response.get( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get_from_url(self, async_client: AsyncImageKit) -> None: + metadata = await async_client.files.metadata.get_from_url( + url="https://example.com", + ) + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get_from_url(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.metadata.with_raw_response.get_from_url( + url="https://example.com", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + metadata = await response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get_from_url(self, async_client: AsyncImageKit) -> None: + async with async_client.files.metadata.with_streaming_response.get_from_url( + url="https://example.com", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + metadata = await response.parse() + assert_matches_type(Metadata, metadata, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/files/test_versions.py b/tests/api_resources/files/test_versions.py new file mode 100644 index 00000000..13f3c51a --- /dev/null +++ b/tests/api_resources/files/test_versions.py @@ -0,0 +1,421 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import File +from imagekitio.types.files import VersionListResponse, VersionDeleteResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVersions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: ImageKit) -> None: + version = client.files.versions.list( + "fileId", + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: ImageKit) -> None: + response = client.files.versions.with_raw_response.list( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: ImageKit) -> None: + with client.files.versions.with_streaming_response.list( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_list(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.versions.with_raw_response.list( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + version = client.files.versions.delete( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.files.versions.with_raw_response.delete( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.files.versions.with_streaming_response.delete( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.versions.with_raw_response.delete( + 
version_id="versionId", + file_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + client.files.versions.with_raw_response.delete( + version_id="", + file_id="fileId", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + version = client.files.versions.get( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.files.versions.with_raw_response.get( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.files.versions.with_streaming_response.get( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(File, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.versions.with_raw_response.get( + version_id="versionId", + file_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + client.files.versions.with_raw_response.get( + version_id="", + file_id="fileId", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_restore(self, client: ImageKit) -> None: + version = client.files.versions.restore( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_restore(self, client: ImageKit) -> None: + response = client.files.versions.with_raw_response.restore( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_restore(self, client: ImageKit) -> None: + with client.files.versions.with_streaming_response.restore( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(File, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_restore(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.versions.with_raw_response.restore( + version_id="versionId", + file_id="", + ) + + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + client.files.versions.with_raw_response.restore( + version_id="", + file_id="fileId", + ) + + +class TestAsyncVersions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncImageKit) -> None: + version = await async_client.files.versions.list( + "fileId", + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.versions.with_raw_response.list( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncImageKit) -> None: + async with async_client.files.versions.with_streaming_response.list( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_list(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.versions.with_raw_response.list( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + version = await async_client.files.versions.delete( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.versions.with_raw_response.delete( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.files.versions.with_streaming_response.delete( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(VersionDeleteResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty 
value for `file_id` but received ''"): + await async_client.files.versions.with_raw_response.delete( + version_id="versionId", + file_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + await async_client.files.versions.with_raw_response.delete( + version_id="", + file_id="fileId", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + version = await async_client.files.versions.get( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.versions.with_raw_response.get( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.files.versions.with_streaming_response.get( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(File, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.versions.with_raw_response.get( + version_id="versionId", + file_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + await async_client.files.versions.with_raw_response.get( + version_id="", + file_id="fileId", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_restore(self, async_client: AsyncImageKit) -> None: + version = await async_client.files.versions.restore( + version_id="versionId", + file_id="fileId", + ) + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_restore(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.versions.with_raw_response.restore( + version_id="versionId", + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + assert_matches_type(File, version, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_restore(self, async_client: AsyncImageKit) -> None: + async with async_client.files.versions.with_streaming_response.restore( + version_id="versionId", + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(File, version, path=["response"]) + + assert cast(Any, response.is_closed) 
is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_restore(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.versions.with_raw_response.restore( + version_id="versionId", + file_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `version_id` but received ''"): + await async_client.files.versions.with_raw_response.restore( + version_id="", + file_id="fileId", + ) diff --git a/tests/api_resources/folders/__init__.py b/tests/api_resources/folders/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/folders/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/folders/test_job.py b/tests/api_resources/folders/test_job.py new file mode 100644 index 00000000..2bbc1cf5 --- /dev/null +++ b/tests/api_resources/folders/test_job.py @@ -0,0 +1,108 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types.folders import JobGetResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestJob: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + job = client.folders.job.get( + "jobId", + ) + assert_matches_type(JobGetResponse, job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.folders.job.with_raw_response.get( + "jobId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = response.parse() + assert_matches_type(JobGetResponse, job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.folders.job.with_streaming_response.get( + "jobId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = response.parse() + assert_matches_type(JobGetResponse, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + client.folders.job.with_raw_response.get( + "", + ) + + +class TestAsyncJob: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + job = await async_client.folders.job.get( + "jobId", + ) + assert_matches_type(JobGetResponse, job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.job.with_raw_response.get( + "jobId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = await response.parse() + assert_matches_type(JobGetResponse, job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.job.with_streaming_response.get( + "jobId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = await response.parse() + assert_matches_type(JobGetResponse, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + await async_client.folders.job.with_raw_response.get( + "", + ) diff --git a/tests/api_resources/test_assets.py b/tests/api_resources/test_assets.py new file mode 100644 index 00000000..6958ecd3 --- /dev/null +++ b/tests/api_resources/test_assets.py @@ -0,0 +1,108 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import AssetListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAssets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: ImageKit) -> None: + asset = client.assets.list() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list_with_all_params(self, client: ImageKit) -> None: + asset = client.assets.list( + file_type="all", + limit=1, + path="path", + search_query="searchQuery", + skip=0, + sort="ASC_NAME", + type="file", + ) + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: ImageKit) -> None: + response = client.assets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + asset = response.parse() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: ImageKit) -> None: + with client.assets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + asset = response.parse() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncAssets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncImageKit) -> None: + asset = await async_client.assets.list() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncImageKit) -> None: + asset = await async_client.assets.list( + file_type="all", + limit=1, + path="path", + search_query="searchQuery", + skip=0, + sort="ASC_NAME", + type="file", + ) + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncImageKit) -> None: + response = await async_client.assets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + asset = await response.parse() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncImageKit) -> None: + async with async_client.assets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + asset = await response.parse() + assert_matches_type(AssetListResponse, asset, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_custom_metadata_fields.py b/tests/api_resources/test_custom_metadata_fields.py new file mode 100644 index 00000000..4c9fed68 --- /dev/null +++ b/tests/api_resources/test_custom_metadata_fields.py @@ -0,0 +1,424 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import ( + CustomMetadataField, + CustomMetadataFieldListResponse, + CustomMetadataFieldDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCustomMetadataFields: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.create( + label="price", + name="price", + schema={"type": "Number"}, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.create( + label="price", + name="price", + schema={ + "type": "Number", + "default_value": "string", + "is_value_required": True, + "max_length": 0, + "max_value": 3000, + "min_length": 0, + "min_value": 1000, + "select_options": ["small", "medium", "large", 30, 40, True], + }, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: ImageKit) -> None: + response = client.custom_metadata_fields.with_raw_response.create( + label="price", + name="price", + schema={"type": "Number"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: ImageKit) -> None: + with client.custom_metadata_fields.with_streaming_response.create( + label="price", + name="price", + schema={"type": "Number"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.update( + id="id", + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.update( + id="id", + label="price", + schema={ + "default_value": "string", + "is_value_required": True, + "max_length": 0, + "max_value": 3000, + "min_length": 0, + "min_value": 1000, + "select_options": ["small", "medium", "large", 30, 40, True], + }, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update(self, client: ImageKit) -> None: + response = 
client.custom_metadata_fields.with_raw_response.update( + id="id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update(self, client: ImageKit) -> None: + with client.custom_metadata_fields.with_streaming_response.update( + id="id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.custom_metadata_fields.with_raw_response.update( + id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.list() + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list_with_all_params(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.list( + folder_path="folderPath", + include_deleted=True, + ) + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: ImageKit) -> None: + response = client.custom_metadata_fields.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: ImageKit) -> None: + with client.custom_metadata_fields.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + custom_metadata_field = client.custom_metadata_fields.delete( + "id", + ) + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.custom_metadata_fields.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + 
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.custom_metadata_fields.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = response.parse() + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + client.custom_metadata_fields.with_raw_response.delete( + "", + ) + + +class TestAsyncCustomMetadataFields: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.create( + label="price", + name="price", + schema={"type": "Number"}, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.create( + label="price", + name="price", + schema={ + "type": "Number", + "default_value": "string", + "is_value_required": True, + "max_length": 0, + "max_value": 3000, + "min_length": 0, + "min_value": 1000, + "select_options": ["small", "medium", "large", 30, 40, True], + }, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncImageKit) -> None: + response = await async_client.custom_metadata_fields.with_raw_response.create( + label="price", + name="price", + schema={"type": "Number"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncImageKit) -> None: + async with async_client.custom_metadata_fields.with_streaming_response.create( + label="price", + name="price", + schema={"type": "Number"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.update( + id="id", + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize 
+ async def test_method_update_with_all_params(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.update( + id="id", + label="price", + schema={ + "default_value": "string", + "is_value_required": True, + "max_length": 0, + "max_value": 3000, + "min_length": 0, + "min_value": 1000, + "select_options": ["small", "medium", "large", 30, 40, True], + }, + ) + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update(self, async_client: AsyncImageKit) -> None: + response = await async_client.custom_metadata_fields.with_raw_response.update( + id="id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update(self, async_client: AsyncImageKit) -> None: + async with async_client.custom_metadata_fields.with_streaming_response.update( + id="id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataField, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.custom_metadata_fields.with_raw_response.update( + id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.list() + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.list( + folder_path="folderPath", + include_deleted=True, + ) + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncImageKit) -> None: + response = await async_client.custom_metadata_fields.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataFieldListResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncImageKit) -> None: + async with async_client.custom_metadata_fields.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataFieldListResponse, 
custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + custom_metadata_field = await async_client.custom_metadata_fields.delete( + "id", + ) + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.custom_metadata_fields.with_raw_response.delete( + "id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.custom_metadata_fields.with_streaming_response.delete( + "id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + custom_metadata_field = await response.parse() + assert_matches_type(CustomMetadataFieldDeleteResponse, custom_metadata_field, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"): + await async_client.custom_metadata_fields.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_dummy.py b/tests/api_resources/test_dummy.py new file mode 100644 index 00000000..bf19fc3a --- /dev/null +++ b/tests/api_resources/test_dummy.py @@ -0,0 +1,1444 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDummy: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: ImageKit) -> None: + dummy = client.dummy.create() + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params(self, client: ImageKit) -> None: + dummy = client.dummy.create( + base_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + }, + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + get_image_attributes_options={ + "src": "/my-image.jpg", + "url_endpoint": "https://ik.imagekit.io/demo", + "expires_in": 0, + "query_parameters": {"foo": "string"}, + "signed": True, + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + "transformation_position": "path", + "device_breakpoints": [640, 750, 828, 1080, 1200, 1920, 2048, 3840], + "image_breakpoints": [16, 32, 48, 64, 96, 128, 256, 384], + "sizes": "(min-width: 768px) 50vw, 100vw", + "width": 400, + }, + image_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "image", + "encoding": "auto", + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + 
"ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + }, + overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + overlay_position={ + "focus": "center", + "x": 0, + "y": 0, + }, + overlay_timing={ + "duration": 0, + "end": 0, + "start": 0, + }, + responsive_image_attributes={ + "src": "https://ik.imagekit.io/demo/image.jpg?tr=w-3840", + "sizes": "100vw", + "src_set": "https://ik.imagekit.io/demo/image.jpg?tr=w-640 640w, https://ik.imagekit.io/demo/image.jpg?tr=w-1080 1080w, https://ik.imagekit.io/demo/image.jpg?tr=w-1920 1920w", + "width": 400, + }, + solid_color_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "color": "color", + "type": "solidColor", + "transformation": [ + { + "alpha": 1, + "background": "background", + "gradient": True, + "height": 0, + "radius": 0, + "width": 0, + } + ], + }, + solid_color_overlay_transformation={ + "alpha": 1, + "background": "background", + "gradient": True, + "height": 0, + "radius": 0, + "width": 0, + }, + src_options={ + "src": "/my-image.jpg", + "url_endpoint": "https://ik.imagekit.io/demo", + "expires_in": 0, + "query_parameters": {"foo": "string"}, + "signed": True, + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + 
"contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + "transformation_position": "path", + }, + streaming_resolution="240", + subtitle_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "subtitle", + "encoding": "auto", + "transformation": [ + { + "background": "background", + "color": "color", + "font_family": "fontFamily", + "font_outline": "fontOutline", + "font_shadow": "fontShadow", + "font_size": 0, + "typography": "b", + } + ], + }, + subtitle_overlay_transformation={ + "background": "background", + "color": "color", + "font_family": "fontFamily", + "font_outline": "fontOutline", + "font_shadow": "fontShadow", + "font_size": 0, + "typography": "b", + }, + text_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + text_overlay_transformation={ + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + }, + transformation={ + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": 
True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + }, + transformation_position="path", + video_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "video", + "encoding": "auto", + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + }, + ) + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: ImageKit) -> None: + response = client.dummy.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dummy = response.parse() + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: ImageKit) -> None: + with client.dummy.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dummy = response.parse() + assert dummy is None + + assert cast(Any, response.is_closed) is True + + 
+class TestAsyncDummy: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncImageKit) -> None: + dummy = await async_client.dummy.create() + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncImageKit) -> None: + dummy = await async_client.dummy.create( + base_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + }, + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + get_image_attributes_options={ + "src": "/my-image.jpg", + "url_endpoint": "https://ik.imagekit.io/demo", + "expires_in": 0, + "query_parameters": {"foo": "string"}, + "signed": True, + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + "transformation_position": "path", + "device_breakpoints": [640, 750, 828, 1080, 1200, 1920, 2048, 3840], + "image_breakpoints": [16, 32, 48, 64, 96, 128, 256, 384], + "sizes": "(min-width: 768px) 50vw, 100vw", + "width": 400, + }, + image_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "image", + "encoding": "auto", + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + 
"ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + }, + overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + overlay_position={ + "focus": "center", + "x": 0, + "y": 0, + }, + overlay_timing={ + "duration": 0, + "end": 0, + "start": 0, + }, + responsive_image_attributes={ + "src": "https://ik.imagekit.io/demo/image.jpg?tr=w-3840", + "sizes": "100vw", + "src_set": "https://ik.imagekit.io/demo/image.jpg?tr=w-640 640w, https://ik.imagekit.io/demo/image.jpg?tr=w-1080 1080w, https://ik.imagekit.io/demo/image.jpg?tr=w-1920 1920w", + "width": 400, + }, + solid_color_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "color": "color", + "type": "solidColor", + "transformation": [ + { + "alpha": 1, + "background": "background", + "gradient": True, + "height": 0, + "radius": 0, + "width": 0, + } + ], + }, + solid_color_overlay_transformation={ + "alpha": 1, + "background": "background", + "gradient": True, + "height": 0, + "radius": 0, + "width": 0, + }, + src_options={ + "src": "/my-image.jpg", + "url_endpoint": "https://ik.imagekit.io/demo", + "expires_in": 0, + "query_parameters": {"foo": "string"}, + "signed": True, + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 
0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + "transformation_position": "path", + }, + streaming_resolution="240", + subtitle_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "subtitle", + "encoding": "auto", + "transformation": [ + { + "background": "background", + "color": "color", + "font_family": "fontFamily", + "font_outline": "fontOutline", + "font_shadow": "fontShadow", + "font_size": 0, + "typography": "b", + } + ], + }, + subtitle_overlay_transformation={ + "background": "background", + "color": "color", + "font_family": "fontFamily", + "font_outline": "fontOutline", + "font_shadow": "fontShadow", + "font_size": 0, + "typography": "b", + }, + text_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + text_overlay_transformation={ + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + }, + transformation={ + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 
0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + }, + transformation_position="path", + video_overlay={ + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "input": "input", + "type": "video", + "encoding": "auto", + "transformation": [ + { + "ai_change_background": "aiChangeBackground", + "ai_drop_shadow": True, + "ai_edit": "aiEdit", + "ai_remove_background": True, + "ai_remove_background_external": True, + "ai_retouch": True, + "ai_upscale": True, + "ai_variation": True, + "aspect_ratio": "4:3", + "audio_codec": "aac", + "background": "red", + "blur": 10, + "border": "5_FF0000", + "color_profile": True, + "contrast_stretch": True, + "crop": "force", + "crop_mode": "pad_resize", + "default_image": "defaultImage", + "dpr": 2, + "duration": 0, + "end_offset": 0, + "flip": "h", + "focus": "center", + "format": "auto", + "gradient": True, + "grayscale": True, + "height": 200, + "lossless": True, + "metadata": True, + "named": "named", + "opacity": 0, + "original": True, + "overlay": { + "position": { + "focus": "center", + "x": 0, + "y": 0, + }, + "timing": { + "duration": 0, + "end": 0, + "start": 0, + }, + "text": "text", + "type": "text", + "encoding": "auto", + "transformation": [ + { + "alpha": 1, + "background": "background", + "flip": "h", + "font_color": "fontColor", + "font_family": "fontFamily", + "font_size": 0, + "inner_alignment": "left", + "line_height": 0, + "padding": 0, + "radius": 0, + "rotation": 0, + "typography": "typography", + "width": 0, + } + ], + }, + "page": 0, + "progressive": True, + "quality": 80, + "radius": 20, + "raw": "raw", + "rotation": 90, + "shadow": True, + "sharpen": True, + "start_offset": 0, + "streaming_resolutions": ["240"], + "trim": True, + "unsharp_mask": True, + "video_codec": "h264", + "width": 300, + "x": 0, + "x_center": 0, + "y": 0, + "y_center": 0, + "zoom": 0, + } + ], + }, + ) + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncImageKit) -> None: + response = await async_client.dummy.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dummy = await response.parse() + assert dummy is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncImageKit) -> None: + async with async_client.dummy.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dummy = await response.parse() + assert dummy is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_files.py 
b/tests/api_resources/test_files.py new file mode 100644 index 00000000..4d3d4aad --- /dev/null +++ b/tests/api_resources/test_files.py @@ -0,0 +1,913 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import ( + File, + FileCopyResponse, + FileMoveResponse, + FileRenameResponse, + FileUpdateResponse, + FileUploadResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFiles: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_1(self, client: ImageKit) -> None: + file = client.files.update( + file_id="fileId", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_1(self, client: ImageKit) -> None: + file = client.files.update( + file_id="fileId", + custom_coordinates="customCoordinates", + custom_metadata={"foo": "bar"}, + description="description", + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + remove_ai_tags=["string"], + tags=["tag1", "tag2"], + webhook_url="https://example.com", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_1(self, client: ImageKit) -> None: + response = client.files.with_raw_response.update( + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_1(self, client: ImageKit) -> None: + with client.files.with_streaming_response.update( + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_1(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.update( + file_id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_overload_2(self, client: ImageKit) -> None: + file = client.files.update( + file_id="fileId", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_update_with_all_params_overload_2(self, client: ImageKit) -> None: + file = client.files.update( + file_id="fileId", + publish={ + "is_published": True, 
+ "include_file_versions": True, + }, + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_update_overload_2(self, client: ImageKit) -> None: + response = client.files.with_raw_response.update( + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_update_overload_2(self, client: ImageKit) -> None: + with client.files.with_streaming_response.update( + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_update_overload_2(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.update( + file_id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + file = client.files.delete( + "fileId", + ) + assert file is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.files.with_raw_response.delete( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert file is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.files.with_streaming_response.delete( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert file is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_copy(self, client: ImageKit) -> None: + file = client.files.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_copy_with_all_params(self, client: ImageKit) -> None: + file = client.files.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + include_file_versions=False, + ) + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_copy(self, client: ImageKit) -> None: + response = client.files.with_raw_response.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) + 
+ assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_copy(self, client: ImageKit) -> None: + with client.files.with_streaming_response.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileCopyResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_get(self, client: ImageKit) -> None: + file = client.files.get( + "fileId", + ) + assert_matches_type(File, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_get(self, client: ImageKit) -> None: + response = client.files.with_raw_response.get( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(File, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_get(self, client: ImageKit) -> None: + with client.files.with_streaming_response.get( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(File, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_get(self, client: ImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.get( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_move(self, client: ImageKit) -> None: + file = client.files.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) + assert_matches_type(FileMoveResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_move(self, client: ImageKit) -> None: + response = client.files.with_raw_response.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileMoveResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_move(self, client: ImageKit) -> None: + with client.files.with_streaming_response.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileMoveResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_rename(self, client: ImageKit) -> None: + file = 
client.files.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_rename_with_all_params(self, client: ImageKit) -> None: + file = client.files.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + purge_cache=True, + ) + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_rename(self, client: ImageKit) -> None: + response = client.files.with_raw_response.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_rename(self, client: ImageKit) -> None: + with client.files.with_streaming_response.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileRenameResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_upload(self, client: ImageKit) -> None: + file = client.files.upload( + file=b"raw file contents", + file_name="fileName", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_upload_with_all_params(self, client: ImageKit) -> None: + file = client.files.upload( + file=b"raw file contents", + file_name="fileName", + token="token", + checks='"request.folder" : "marketing/"\n', + custom_coordinates="customCoordinates", + custom_metadata={ + "brand": "bar", + "color": "bar", + }, + description="Running shoes", + expire=0, + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + folder="folder", + is_private_file=True, + is_published=True, + overwrite_ai_tags=True, + overwrite_custom_metadata=True, + overwrite_file=True, + overwrite_tags=True, + public_key="publicKey", + response_fields=["tags", "customCoordinates", "isPrivateFile"], + signature="signature", + tags=["t-shirt", "round-neck", "men"], + transformation={ + "post": [ + { + "type": "thumbnail", + "value": "w-150,h-150", + }, + { + "protocol": "dash", + "type": "abs", + "value": "sr-240_360_480_720_1080", + }, + ], + "pre": "w-300,h-300,q-80", + }, + use_unique_file_name=True, + webhook_url="https://example.com", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_upload(self, client: ImageKit) -> None: + response = client.files.with_raw_response.upload( + file=b"raw file contents", + file_name="fileName", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
file = response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_upload(self, client: ImageKit) -> None: + with client.files.with_streaming_response.upload( + file=b"raw file contents", + file_name="fileName", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncFiles: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_1(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.update( + file_id="fileId", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.update( + file_id="fileId", + custom_coordinates="customCoordinates", + custom_metadata={"foo": "bar"}, + description="description", + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + remove_ai_tags=["string"], + tags=["tag1", "tag2"], + webhook_url="https://example.com", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_1(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.update( + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_1(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.update( + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_1(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.update( + file_id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_update_overload_2(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.update( + file_id="fileId", + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are 
disabled") + @parametrize + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.update( + file_id="fileId", + publish={ + "is_published": True, + "include_file_versions": True, + }, + ) + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_update_overload_2(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.update( + file_id="fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_update_overload_2(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.update( + file_id="fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileUpdateResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_update_overload_2(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.update( + file_id="", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.delete( + "fileId", + ) + assert file is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.delete( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert file is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.delete( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert file is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.delete( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_copy(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_copy_with_all_params(self, async_client: 
AsyncImageKit) -> None: + file = await async_client.files.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + include_file_versions=False, + ) + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_copy(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileCopyResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_copy(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.copy( + destination_path="/folder/to/copy/into/", + source_file_path="/path/to/file.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileCopyResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_get(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.get( + "fileId", + ) + assert_matches_type(File, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_get(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.get( + "fileId", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(File, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_get(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.get( + "fileId", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(File, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_get(self, async_client: AsyncImageKit) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.get( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_move(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) + assert_matches_type(FileMoveResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_move(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + file = await response.parse() + assert_matches_type(FileMoveResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_move(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.move( + destination_path="/folder/to/move/into/", + source_file_path="/path/to/file.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileMoveResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_rename(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_rename_with_all_params(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + purge_cache=True, + ) + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_rename(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileRenameResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_rename(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.rename( + file_path="/path/to/file.jpg", + new_file_name="newFileName.jpg", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileRenameResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_upload(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.upload( + file=b"raw file contents", + file_name="fileName", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_upload_with_all_params(self, async_client: AsyncImageKit) -> None: + file = await async_client.files.upload( + file=b"raw file contents", + file_name="fileName", + token="token", + checks='"request.folder" : "marketing/"\n', + custom_coordinates="customCoordinates", + custom_metadata={ + "brand": "bar", + "color": "bar", + }, + description="Running shoes", + expire=0, + extensions=[ + { + "name": "remove-bg", + "options": { + "add_shadow": True, + "bg_color": "bg_color", + "bg_image_url": "bg_image_url", + "semitransparency": True, + }, + }, + { + "max_tags": 5, + "min_confidence": 95, + "name": "google-auto-tagging", + }, + {"name": "ai-auto-description"}, + ], + folder="folder", + 
is_private_file=True, + is_published=True, + overwrite_ai_tags=True, + overwrite_custom_metadata=True, + overwrite_file=True, + overwrite_tags=True, + public_key="publicKey", + response_fields=["tags", "customCoordinates", "isPrivateFile"], + signature="signature", + tags=["t-shirt", "round-neck", "men"], + transformation={ + "post": [ + { + "type": "thumbnail", + "value": "w-150,h-150", + }, + { + "protocol": "dash", + "type": "abs", + "value": "sr-240_360_480_720_1080", + }, + ], + "pre": "w-300,h-300,q-80", + }, + use_unique_file_name=True, + webhook_url="https://example.com", + ) + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_upload(self, async_client: AsyncImageKit) -> None: + response = await async_client.files.with_raw_response.upload( + file=b"raw file contents", + file_name="fileName", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_upload(self, async_client: AsyncImageKit) -> None: + async with async_client.files.with_streaming_response.upload( + file=b"raw file contents", + file_name="fileName", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileUploadResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_folders.py b/tests/api_resources/test_folders.py new file mode 100644 index 00000000..ad5d83f0 --- /dev/null +++ b/tests/api_resources/test_folders.py @@ -0,0 +1,434 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
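
For orientation before the generated `test_folders.py` body that follows: a minimal, hedged sketch of the folders calls those tests exercise. The method and parameter names mirror the tests in this diff; the client credentials are placeholders, not values from the diff.

```python
# Hedged sketch of the folders resource exercised by the tests below.
# Credentials are placeholders; parameter values are copied from the generated tests.
from imagekitio import ImageKit

client = ImageKit(private_key="My Private Key", password="My Password")

client.folders.create(folder_name="summer", parent_folder_path="/product/images/")
client.folders.copy(
    destination_path="/path/of/destination/folder",
    source_folder_path="/path/of/source/folder",
    include_versions=True,  # optional, as in test_method_copy_with_all_params
)
client.folders.move(
    destination_path="/path/of/destination/folder",
    source_folder_path="/path/of/source/folder",
)
client.folders.rename(
    folder_path="/path/of/folder",
    new_folder_name="new-folder-name",
    purge_cache=True,  # optional, as in test_method_rename_with_all_params
)
client.folders.delete(folder_path="/folder/to/delete/")
```
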
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from tests.utils import assert_matches_type +from imagekitio.types import ( + FolderCopyResponse, + FolderMoveResponse, + FolderCreateResponse, + FolderDeleteResponse, + FolderRenameResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFolders: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: ImageKit) -> None: + folder = client.folders.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: ImageKit) -> None: + response = client.folders.with_raw_response.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = response.parse() + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: ImageKit) -> None: + with client.folders.with_streaming_response.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = response.parse() + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: ImageKit) -> None: + folder = client.folders.delete( + folder_path="/folder/to/delete/", + ) + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: ImageKit) -> None: + response = client.folders.with_raw_response.delete( + folder_path="/folder/to/delete/", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = response.parse() + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: ImageKit) -> None: + with client.folders.with_streaming_response.delete( + folder_path="/folder/to/delete/", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = response.parse() + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_copy(self, client: ImageKit) -> None: + folder = client.folders.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_copy_with_all_params(self, client: ImageKit) -> None: 
+ folder = client.folders.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + include_versions=True, + ) + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_copy(self, client: ImageKit) -> None: + response = client.folders.with_raw_response.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = response.parse() + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_copy(self, client: ImageKit) -> None: + with client.folders.with_streaming_response.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = response.parse() + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_move(self, client: ImageKit) -> None: + folder = client.folders.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_move(self, client: ImageKit) -> None: + response = client.folders.with_raw_response.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = response.parse() + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_move(self, client: ImageKit) -> None: + with client.folders.with_streaming_response.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = response.parse() + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_rename(self, client: ImageKit) -> None: + folder = client.folders.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + ) + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_rename_with_all_params(self, client: ImageKit) -> None: + folder = client.folders.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + purge_cache=True, + ) + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_rename(self, client: ImageKit) -> None: + response = client.folders.with_raw_response.rename( + folder_path="/path/of/folder", + 
new_folder_name="new-folder-name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = response.parse() + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_rename(self, client: ImageKit) -> None: + with client.folders.with_streaming_response.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = response.parse() + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncFolders: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.with_raw_response.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = await response.parse() + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.with_streaming_response.create( + folder_name="summer", + parent_folder_path="/product/images/", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = await response.parse() + assert_matches_type(FolderCreateResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.delete( + folder_path="/folder/to/delete/", + ) + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.with_raw_response.delete( + folder_path="/folder/to/delete/", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = await response.parse() + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.with_streaming_response.delete( + folder_path="/folder/to/delete/", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + 
folder = await response.parse() + assert_matches_type(FolderDeleteResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_copy(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_copy_with_all_params(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + include_versions=True, + ) + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_copy(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.with_raw_response.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = await response.parse() + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_copy(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.with_streaming_response.copy( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = await response.parse() + assert_matches_type(FolderCopyResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_move(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_move(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.with_raw_response.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = await response.parse() + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_move(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.with_streaming_response.move( + destination_path="/path/of/destination/folder", + source_folder_path="/path/of/source/folder", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = await response.parse() + assert_matches_type(FolderMoveResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
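
The assertions around this point exercise the three response-access patterns used throughout these generated tests: a plain call, `.with_raw_response`, and `.with_streaming_response`. A hedged usage sketch follows; the method and parameter names are taken from the surrounding folder tests, while the client construction and event loop wiring are illustrative only.

```python
# Hedged sketch of the three response-access patterns these tests assert,
# using folders.move as in the surrounding methods. Async variant shown; the
# sync client mirrors it without await / async with.
import asyncio

from imagekitio import AsyncImageKit


async def main() -> None:
    client = AsyncImageKit(private_key="My Private Key", password="My Password")

    # 1. Plain call: returns the parsed response model directly.
    folder = await client.folders.move(
        destination_path="/path/of/destination/folder",
        source_folder_path="/path/of/source/folder",
    )

    # 2. Raw response: inspect status/headers first, then parse() the body.
    response = await client.folders.with_raw_response.move(
        destination_path="/path/of/destination/folder",
        source_folder_path="/path/of/source/folder",
    )
    folder = await response.parse()

    # 3. Streaming response: a context manager that closes the connection on exit.
    async with client.folders.with_streaming_response.move(
        destination_path="/path/of/destination/folder",
        source_folder_path="/path/of/source/folder",
    ) as response:
        folder = await response.parse()


asyncio.run(main())
```
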
@pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_rename(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + ) + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_rename_with_all_params(self, async_client: AsyncImageKit) -> None: + folder = await async_client.folders.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + purge_cache=True, + ) + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_rename(self, async_client: AsyncImageKit) -> None: + response = await async_client.folders.with_raw_response.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + folder = await response.parse() + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_rename(self, async_client: AsyncImageKit) -> None: + async with async_client.folders.with_streaming_response.rename( + folder_path="/path/of/folder", + new_folder_name="new-folder-name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + folder = await response.parse() + assert_matches_type(FolderRenameResponse, folder, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_webhooks.py b/tests/api_resources/test_webhooks.py new file mode 100644 index 00000000..35fbbd13 --- /dev/null +++ b/tests/api_resources/test_webhooks.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
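
The `test_webhooks.py` body that follows signs a payload with `standardwebhooks` and verifies it via `client.webhooks.unwrap`. As a hedged sketch of how that verification might look in application code: the `unwrap()` call, header names, and `WebhookVerificationError` are taken from the test below, while `handle_webhook`, its inputs, and the secret are hypothetical placeholders.

```python
# Hedged sketch of server-side webhook verification mirroring test_webhooks.py below.
# `raw_body`, `request_headers`, and the secret are hypothetical inputs.
import standardwebhooks

from imagekitio import ImageKit

client = ImageKit(private_key="My Private Key", password="My Password")
secret = b"secret"  # placeholder webhook secret


def handle_webhook(raw_body: str, request_headers: dict) -> object:
    headers = {
        "webhook-id": request_headers["webhook-id"],
        "webhook-timestamp": request_headers["webhook-timestamp"],
        "webhook-signature": request_headers["webhook-signature"],
    }
    try:
        # Raises standardwebhooks.WebhookVerificationError on a bad signature,
        # id, or timestamp, exactly as the negative cases in the test assert.
        return client.webhooks.unwrap(raw_body, headers=headers, key=secret)
    except standardwebhooks.WebhookVerificationError:
        raise
```
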
+ +from __future__ import annotations + +import os +from datetime import datetime, timezone + +import pytest +import standardwebhooks + +from imagekitio import ImageKit + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestWebhooks: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + def test_method_unwrap(self, client: ImageKit) -> None: + key = b"secret" + hook = standardwebhooks.Webhook(key) + + data = """{"id":"id","type":"video.transformation.accepted","created_at":"2019-12-27T18:11:19.117Z","data":{"asset":{"url":"https://example.com"},"transformation":{"type":"video-transformation","options":{"audio_codec":"aac","auto_rotate":true,"format":"mp4","quality":0,"stream_protocol":"HLS","variants":["string"],"video_codec":"h264"}}},"request":{"url":"https://example.com","x_request_id":"x_request_id","user_agent":"user_agent"}}""" + msg_id = "1" + timestamp = datetime.now(tz=timezone.utc) + sig = hook.sign(msg_id=msg_id, timestamp=timestamp, data=data) + headers = { + "webhook-id": msg_id, + "webhook-timestamp": str(int(timestamp.timestamp())), + "webhook-signature": sig, + } + + try: + _ = client.webhooks.unwrap(data, headers=headers, key=key) + except standardwebhooks.WebhookVerificationError as e: + raise AssertionError("Failed to unwrap valid webhook") from e + + bad_headers = [ + {**headers, "webhook-signature": hook.sign(msg_id=msg_id, timestamp=timestamp, data="xxx")}, + {**headers, "webhook-id": "bad"}, + {**headers, "webhook-timestamp": "0"}, + ] + for bad_header in bad_headers: + with pytest.raises(standardwebhooks.WebhookVerificationError): + _ = client.webhooks.unwrap(data, headers=bad_header, key=key) + + +class TestAsyncWebhooks: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + def test_method_unwrap(self, client: ImageKit) -> None: + key = b"secret" + hook = standardwebhooks.Webhook(key) + + data = """{"id":"id","type":"video.transformation.accepted","created_at":"2019-12-27T18:11:19.117Z","data":{"asset":{"url":"https://example.com"},"transformation":{"type":"video-transformation","options":{"audio_codec":"aac","auto_rotate":true,"format":"mp4","quality":0,"stream_protocol":"HLS","variants":["string"],"video_codec":"h264"}}},"request":{"url":"https://example.com","x_request_id":"x_request_id","user_agent":"user_agent"}}""" + msg_id = "1" + timestamp = datetime.now(tz=timezone.utc) + sig = hook.sign(msg_id=msg_id, timestamp=timestamp, data=data) + headers = { + "webhook-id": msg_id, + "webhook-timestamp": str(int(timestamp.timestamp())), + "webhook-signature": sig, + } + + try: + _ = client.webhooks.unwrap(data, headers=headers, key=key) + except standardwebhooks.WebhookVerificationError as e: + raise AssertionError("Failed to unwrap valid webhook") from e + + bad_headers = [ + {**headers, "webhook-signature": hook.sign(msg_id=msg_id, timestamp=timestamp, data="xxx")}, + {**headers, "webhook-id": "bad"}, + {**headers, "webhook-timestamp": "0"}, + ] + for bad_header in bad_headers: + with pytest.raises(standardwebhooks.WebhookVerificationError): + _ = client.webhooks.unwrap(data, headers=bad_header, key=key) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..d64313cd --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,91 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
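
The conftest body that follows defines the session-scoped `client` and `async_client` fixtures that every generated test file parametrizes indirectly. A hedged sketch of how a test consumes them; the parametrize expressions are copied from the generated test files, while the test bodies and the `async_parametrize` name are illustrative only.

```python
# Hedged sketch of consuming the session-scoped fixtures defined in the conftest
# below; parametrize patterns are copied from the generated tests in this diff.
import pytest

from imagekitio import ImageKit, AsyncImageKit

parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
# Name `async_parametrize` is illustrative; the generated files call both attributes `parametrize`.
async_parametrize = pytest.mark.parametrize(
    "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)


@parametrize
def test_sync_example(client: ImageKit) -> None:
    # `client` is yielded by the conftest fixture, pointed at TEST_API_BASE_URL
    # (a local Prism mock at http://127.0.0.1:4010 by default).
    assert client is not None


@async_parametrize
async def test_async_example(async_client: AsyncImageKit) -> None:
    # The dict param swaps in DefaultAioHttpClient; the booleans toggle
    # strict response validation, as in the conftest below.
    assert async_client is not None
```
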
+ +from __future__ import annotations + +import os +import logging +from typing import TYPE_CHECKING, Iterator, AsyncIterator + +import httpx +import pytest +from pytest_asyncio import is_async_test + +from imagekitio import ImageKit, AsyncImageKit, DefaultAioHttpClient +from imagekitio._utils import is_dict + +if TYPE_CHECKING: + from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] + +pytest.register_assert_rewrite("tests.utils") + +logging.getLogger("imagekitio").setLevel(logging.DEBUG) + + +# automatically add `pytest.mark.asyncio()` to all of our async tests +# so we don't have to add that boilerplate everywhere +def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: + pytest_asyncio_tests = (item for item in items if is_async_test(item)) + session_scope_marker = pytest.mark.asyncio(loop_scope="session") + for async_test in pytest_asyncio_tests: + async_test.add_marker(session_scope_marker, append=False) + + # We skip tests that use both the aiohttp client and respx_mock as respx_mock + # doesn't support custom transports. + for item in items: + if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames: + continue + + if not hasattr(item, "callspec"): + continue + + async_client_param = item.callspec.params.get("async_client") + if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp": + item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock")) + + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + +private_key = "My Private Key" +password = "My Password" + + +@pytest.fixture(scope="session") +def client(request: FixtureRequest) -> Iterator[ImageKit]: + strict = getattr(request, "param", True) + if not isinstance(strict, bool): + raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") + + with ImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=strict + ) as client: + yield client + + +@pytest.fixture(scope="session") +async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncImageKit]: + param = getattr(request, "param", True) + + # defaults + strict = True + http_client: None | httpx.AsyncClient = None + + if isinstance(param, bool): + strict = param + elif is_dict(param): + strict = param.get("strict", True) + assert isinstance(strict, bool) + + http_client_type = param.get("http_client", "httpx") + if http_client_type == "aiohttp": + http_client = DefaultAioHttpClient() + else: + raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") + + async with AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=strict, + http_client=http_client, + ) as client: + yield client diff --git a/tests/custom/__init__.py b/tests/custom/__init__.py new file mode 100644 index 00000000..dad8a0a3 --- /dev/null +++ b/tests/custom/__init__.py @@ -0,0 +1,2 @@ +# Custom tests for manually created helper functions +# These tests are separate from auto-generated API tests diff --git a/tests/custom/test_helper_authentication.py b/tests/custom/test_helper_authentication.py new file mode 100644 index 00000000..a0a08efa --- /dev/null +++ b/tests/custom/test_helper_authentication.py @@ -0,0 +1,114 @@ +"""Helper authentication tests - converted from Ruby SDK.""" + +import re + +import pytest + +from imagekitio import ImageKit, ImageKitError + + +class 
TestHelperAuthentication: + """Test helper authentication parameter generation.""" + + def test_should_return_correct_authentication_parameters_with_provided_token_and_expire(self) -> None: + """Should return correct authentication parameters with provided token and expire.""" + private_key = "private_key_test" + client = ImageKit(private_key=private_key) + + token = "your_token" + expire = 1582269249 + + params = client.helper.get_authentication_parameters(token=token, expire=expire) + + # Expected exact match with Node.js output + expected_signature = "e71bcd6031016b060d349d212e23e85c791decdd" + + assert params["token"] == token + assert params["expire"] == expire + assert params["signature"] == expected_signature + + def test_should_return_authentication_parameters_with_required_properties_when_no_params_provided(self) -> None: + """Should return authentication parameters with required properties when no params provided.""" + private_key = "private_key_test" + client = ImageKit(private_key=private_key) + + params = client.helper.get_authentication_parameters() + + # Check that all required properties exist + assert "token" in params, "Expected token parameter" + assert "expire" in params, "Expected expire parameter" + assert "signature" in params, "Expected signature parameter" + + # Token should be a UUID v4 format (36 characters with dashes) + token = params["token"] + assert isinstance(token, str) + assert re.match( + r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", token, re.IGNORECASE + ), "Expected token to be UUID v4 format" + + # Expire should be a number greater than current time + expire = params["expire"] + assert isinstance(expire, int) + import time + + current_time = int(time.time()) + assert expire > current_time, f"Expected expire {expire} to be greater than current time {current_time}" + + # Signature should be a hex string (40 characters for HMAC-SHA1) + signature = params["signature"] + assert isinstance(signature, str) + assert re.match(r"^[a-f0-9]{40}$", signature), "Expected signature to be 40 character hex string" + + def test_should_handle_edge_case_with_expire_time_0(self) -> None: + """Should handle edge case with expire time 0.""" + private_key = "private_key_test" + client = ImageKit(private_key=private_key) + + token = "test_token" + expire = 0 + + params = client.helper.get_authentication_parameters(token=token, expire=expire) + + assert params["token"] == token + assert params["expire"] == expire + assert "signature" in params + # Signature should still be generated even with expire = 0 + assert isinstance(params["signature"], str) + assert len(params["signature"]) == 40 + + def test_should_handle_empty_string_token(self) -> None: + """Should handle empty string token.""" + private_key = "private_key_test" + client = ImageKit(private_key=private_key) + + token = "" # Empty string is falsy + expire = 1582269249 + + params = client.helper.get_authentication_parameters(token=token, expire=expire) + + # Since empty string is falsy, it should generate a token + token_result = params["token"] + assert isinstance(token_result, str) + assert len(token_result) > 0, "Expected token to be generated when empty string is provided" + assert re.match( + r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", token_result, re.IGNORECASE + ), "Expected generated token to be UUID v4 format" + + assert params["expire"] == expire + + # Signature should be a hex string (40 characters for HMAC-SHA1) + signature = params["signature"] + 
assert isinstance(signature, str) + assert re.match(r"^[a-f0-9]{40}$", signature), "Expected signature to be 40 character hex string" + + def test_should_raise_error_when_private_key_is_not_provided(self) -> None: + """Should raise error when private key is empty.""" + with pytest.raises(ValueError, match="Private key is required"): + client = ImageKit(private_key="") + client.helper.get_authentication_parameters(token="test", expire=123) + + def test_should_raise_error_when_private_key_is_nil(self) -> None: + """Should raise error when private key is None.""" + with pytest.raises(ImageKitError, match="private_key client option must be set"): + client = ImageKit(private_key=None) # type: ignore + client.helper.get_authentication_parameters(token="test", expire=123) diff --git a/tests/custom/test_serialization_utils.py b/tests/custom/test_serialization_utils.py new file mode 100644 index 00000000..b523aeb4 --- /dev/null +++ b/tests/custom/test_serialization_utils.py @@ -0,0 +1,228 @@ +"""Unit tests for serialization_utils module.""" + +import json +from typing import Any, Dict, List + +from imagekitio.lib.serialization_utils import serialize_upload_options + + +class TestSerializeUploadOptions: + """Test cases for serialize_upload_options function.""" + + def test_should_convert_tags_array_to_comma_separated_string(self): + """Test that tags array is converted to comma-separated string.""" + body = {"tags": ["tag1", "tag2", "tag3"]} + result = serialize_upload_options(body) + assert result["tags"] == "tag1,tag2,tag3" + + def test_should_convert_tags_tuple_to_comma_separated_string(self): + """Test that tags tuple is converted to comma-separated string.""" + body = {"tags": ("tag1", "tag2", "tag3")} + result = serialize_upload_options(body) + assert result["tags"] == "tag1,tag2,tag3" + + def test_should_convert_response_fields_array_to_comma_separated_string(self): + """Test that response_fields array is converted to comma-separated string.""" + body = {"response_fields": ["tags", "customCoordinates", "metadata"]} + result = serialize_upload_options(body) + assert result["response_fields"] == "tags,customCoordinates,metadata" + + def test_should_convert_response_fields_tuple_to_comma_separated_string(self): + """Test that response_fields tuple is converted to comma-separated string.""" + body = {"response_fields": ("tags", "customCoordinates")} + result = serialize_upload_options(body) + assert result["response_fields"] == "tags,customCoordinates" + + def test_should_json_stringify_extensions_array(self): + """Test that extensions array is JSON stringified.""" + body = {"extensions": [{"name": "remove-bg"}, {"name": "google-auto-tagging", "minConfidence": 80}]} + result = serialize_upload_options(body) + expected = json.dumps(body["extensions"]) + assert result["extensions"] == expected + # Verify it's valid JSON + assert json.loads(result["extensions"]) == body["extensions"] + + def test_should_json_stringify_custom_metadata_object(self): + """Test that custom_metadata object is JSON stringified.""" + body = {"custom_metadata": {"key1": "value1", "key2": 123, "key3": True}} + result = serialize_upload_options(body) + expected = json.dumps(body["custom_metadata"]) + assert result["custom_metadata"] == expected + # Verify it's valid JSON + assert json.loads(result["custom_metadata"]) == body["custom_metadata"] + + def test_should_json_stringify_transformation_object(self): + """Test that transformation object is JSON stringified.""" + body = { + "transformation": { + "pre": 
"l-image,i-logo.png,w-100,h-100", + "post": [{"type": "thumbnail", "value": "h-300"}], + } + } + result = serialize_upload_options(body) + expected = json.dumps(body["transformation"]) + assert result["transformation"] == expected + # Verify it's valid JSON + assert json.loads(result["transformation"]) == body["transformation"] + + def test_should_handle_all_serializable_fields_together(self): + """Test that all serializable fields are processed correctly together.""" + body = { + "file": "test.jpg", + "file_name": "test.jpg", + "tags": ["tag1", "tag2"], + "response_fields": ["tags", "metadata"], + "extensions": [{"name": "remove-bg"}], + "custom_metadata": {"key": "value"}, + "transformation": {"pre": "w-100"}, + "folder": "/images", + } + result = serialize_upload_options(body) + + assert result["tags"] == "tag1,tag2" + assert result["response_fields"] == "tags,metadata" + assert result["extensions"] == json.dumps([{"name": "remove-bg"}]) + assert result["custom_metadata"] == json.dumps({"key": "value"}) + assert result["transformation"] == json.dumps({"pre": "w-100"}) + # Non-serializable fields should remain unchanged + assert result["file"] == "test.jpg" + assert result["file_name"] == "test.jpg" + assert result["folder"] == "/images" + + def test_should_not_modify_original_body(self): + """Test that the original body is not modified.""" + body = { + "tags": ["tag1", "tag2"], + "response_fields": ["tags"], + "extensions": [{"name": "ext1"}], + } + original_tags = body["tags"].copy() + original_response_fields = body["response_fields"].copy() + original_extensions = body["extensions"].copy() + + serialize_upload_options(body) + + # Original should remain unchanged + assert body["tags"] == original_tags + assert body["response_fields"] == original_response_fields + assert body["extensions"] == original_extensions + + def test_should_handle_empty_arrays(self): + """Test that empty arrays are converted to empty strings.""" + body: Dict[str, List[str]] = {"tags": [], "response_fields": []} + result = serialize_upload_options(body) + assert result["tags"] == "" + assert result["response_fields"] == "" + + def test_should_handle_empty_extensions_array(self): + """Test that empty extensions array is JSON stringified.""" + body: Dict[str, List[Any]] = {"extensions": []} + result = serialize_upload_options(body) + assert result["extensions"] == "[]" + + def test_should_handle_none_values(self): + """Test that None values are not processed.""" + body = { + "tags": None, + "response_fields": None, + "extensions": None, + "custom_metadata": None, + "transformation": None, + } + result = serialize_upload_options(body) + # None values should remain None + assert result["tags"] is None + assert result["response_fields"] is None + assert result["extensions"] is None + assert result["custom_metadata"] is None + assert result["transformation"] is None + + def test_should_handle_empty_object(self): + """Test that an empty object is returned as is.""" + body: Dict[str, Any] = {} + result = serialize_upload_options(body) + assert result == {} + + def test_should_skip_non_matching_fields(self): + """Test that fields not in the serialization list are left unchanged.""" + body = { + "file_name": "test.jpg", + "folder": "/images", + "is_private_file": True, + "use_unique_file_name": False, + } + result = serialize_upload_options(body) + assert result == body + + def test_should_handle_single_tag(self): + """Test that a single tag array is handled correctly.""" + body = {"tags": ["single-tag"]} + result = 
serialize_upload_options(body) + assert result["tags"] == "single-tag" + + def test_should_handle_tags_with_empty_strings(self): + """Test that tags with empty strings are still joined.""" + body = {"tags": ["tag1", "", "tag2"]} + result = serialize_upload_options(body) + assert result["tags"] == "tag1,,tag2" + + def test_should_handle_complex_nested_extensions(self): + """Test that complex nested extensions are properly JSON stringified.""" + body = { + "extensions": [ + { + "name": "aws-auto-tagging", + "options": {"maxTags": 10, "minConfidence": 75}, + }, + { + "name": "remove-bg", + "options": {"add_shadow": True, "bg_color": "white"}, + }, + ] + } + result = serialize_upload_options(body) + expected = json.dumps(body["extensions"]) + assert result["extensions"] == expected + assert json.loads(result["extensions"]) == body["extensions"] + + def test_should_handle_nested_custom_metadata(self): + """Test that nested custom metadata is properly JSON stringified.""" + body = { + "custom_metadata": { + "product": {"name": "Test Product", "price": 99.99, "inStock": True}, + "category": "electronics", + } + } + result = serialize_upload_options(body) + expected = json.dumps(body["custom_metadata"]) + assert result["custom_metadata"] == expected + assert json.loads(result["custom_metadata"]) == body["custom_metadata"] + + def test_should_handle_transformation_with_both_pre_and_post(self): + """Test that transformation with both pre and post is properly handled.""" + body = { + "transformation": { + "pre": "w-200,h-200", + "post": [{"type": "transformation", "value": "w-100,h-100"}], + } + } + result = serialize_upload_options(body) + expected = json.dumps(body["transformation"]) + assert result["transformation"] == expected + assert json.loads(result["transformation"]) == body["transformation"] + + def test_should_not_modify_non_dict_custom_metadata(self): + """Test that custom_metadata is only serialized when it's a dict.""" + # This shouldn't happen in practice but testing edge case + body = {"custom_metadata": "string_value"} + result = serialize_upload_options(body) + # String value should remain unchanged + assert result["custom_metadata"] == "string_value" + + def test_should_not_modify_non_list_extensions(self): + """Test that extensions is only serialized when it's a list.""" + # This shouldn't happen in practice but testing edge case + body = {"extensions": "string_value"} + result = serialize_upload_options(body) + # String value should remain unchanged + assert result["extensions"] == "string_value" diff --git a/tests/custom/url_generation/__init__.py b/tests/custom/url_generation/__init__.py new file mode 100644 index 00000000..e0c071a8 --- /dev/null +++ b/tests/custom/url_generation/__init__.py @@ -0,0 +1 @@ +# URL generation test module diff --git a/tests/custom/url_generation/test_advanced_url_generation.py b/tests/custom/url_generation/test_advanced_url_generation.py new file mode 100644 index 00000000..6111f07d --- /dev/null +++ b/tests/custom/url_generation/test_advanced_url_generation.py @@ -0,0 +1,281 @@ +"""Advanced URL generation tests imported from Ruby SDK.""" + +import pytest + +from imagekitio import ImageKit + + +class TestAdvancedURLGeneration: + """Test advanced URL generation matching Ruby SDK advanced_url_generation_test.rb.""" + + @pytest.fixture(autouse=True) + def setup(self): + """Setup client for each test.""" + self.client = ImageKit(private_key="My Private API Key") + + # AI Transformation Tests + def 
test_should_generate_the_correct_url_for_ai_background_removal_when_set_to_true(self): + """Test AI background removal transformation.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"ai_remove_background": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-bgremove" + assert url == expected + + def test_should_generate_the_correct_url_for_external_ai_background_removal_when_set_to_true(self): + """Test external AI background removal transformation.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"ai_remove_background_external": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-removedotbg" + assert url == expected + + def test_should_generate_the_correct_url_when_ai_drop_shadow_transformation_is_set_to_true(self): + """Test AI drop shadow transformation.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"ai_drop_shadow": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-dropshadow" + assert url == expected + + def test_should_generate_the_correct_url_when_gradient_transformation_is_set_to_true(self): + """Test gradient transformation.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"gradient": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-gradient" + assert url == expected + + def test_should_not_apply_ai_background_removal_when_value_is_not_true(self): + """Test that AI background removal is not applied when not true.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg" + assert url == expected + + def test_should_not_apply_external_ai_background_removal_when_value_is_not_true(self): + """Test that external AI background removal is not applied when not true.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg" + assert url == expected + + def test_should_handle_ai_transformations_with_parameters(self): + """Test AI transformations with custom parameters.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"ai_drop_shadow": "custom-shadow-params"}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-dropshadow-custom-shadow-params" + assert url == expected + + def test_should_handle_gradient_with_parameters(self): + """Test gradient with custom parameters.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"gradient": 
"ld-top_from-green_to-00FF0010_sp-1"}], + ) + expected = ( + "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-gradient-ld-top_from-green_to-00FF0010_sp-1" + ) + assert url == expected + + def test_should_combine_ai_transformations_with_regular_transformations(self): + """Test combining AI and regular transformations.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"width": 300, "height": 200, "ai_remove_background": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=w-300,h-200,e-bgremove" + assert url == expected + + def test_should_handle_multiple_ai_transformations(self): + """Test multiple AI transformations.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"ai_remove_background": True, "ai_drop_shadow": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=e-bgremove,e-dropshadow" + assert url == expected + + # Parameter-specific tests + def test_should_generate_the_correct_url_for_width_transformation_when_provided_with_a_number_value(self): + """Test width transformation with number value.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"width": 400}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=w-400" + assert url == expected + + def test_should_generate_the_correct_url_for_height_transformation_when_provided_with_a_string_value(self): + """Test height transformation with string value.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": "300"}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=h-300" + assert url == expected + + def test_should_generate_the_correct_url_for_aspect_ratio_transformation_when_provided_with_colon_format(self): + """Test aspect ratio transformation with colon format.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"aspect_ratio": "4:3"}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=ar-4:3" + assert url == expected + + def test_should_generate_the_correct_url_for_quality_transformation_when_provided_with_a_number_value(self): + """Test quality transformation with number value.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"quality": 80}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=q-80" + assert url == expected + + # Additional parameter validation tests + def test_should_skip_transformation_parameters_that_are_undefined_or_empty(self): + """Test that undefined/empty parameters are skipped.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"width": 300}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=w-300" + assert 
url == expected + + def test_should_handle_boolean_transformation_values(self): + """Test boolean transformation values.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"trim": True}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=t-true" + assert url == expected + + def test_should_handle_transformation_parameter_with_empty_string_value(self): + """Test transformation with empty string value.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"default_image": ""}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg" + assert url == expected + + def test_should_handle_complex_transformation_combinations(self): + """Test complex transformation combinations.""" + url = self.client.helper.build_url( + src="/test_path1.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"width": 300, "height": 200, "quality": 85, "border": "5_FF0000"}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path1.jpg?tr=w-300,h-200,q-85,b-5_FF0000" + assert url == expected + + def test_should_generate_the_correct_url_with_many_transformations_including_video_and_ai_transforms(self): + """Test many transformations including video and AI.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[ + { + "height": 300, + "width": 400, + "aspect_ratio": "4-3", + "quality": 40, + "crop": "force", + "crop_mode": "extract", + "focus": "left", + "format": "jpeg", + "radius": 50, + "background": "A94D34", + "border": "5-A94D34", + "rotation": 90, + "blur": 10, + "named": "some_name", + "progressive": True, + "lossless": True, + "trim": 5, + "metadata": True, + "color_profile": True, + "default_image": "/folder/file.jpg/", + "dpr": 3, + "x": 10, + "y": 20, + "x_center": 30, + "y_center": 40, + "flip": "h", + "opacity": 0.8, + "zoom": 2, + "video_codec": "h264", + "audio_codec": "aac", + "start_offset": 5, + "end_offset": 15, + "duration": 10, + "streaming_resolutions": ["1440", "1080"], + "grayscale": True, + "ai_upscale": True, + "ai_retouch": True, + "ai_variation": True, + "ai_drop_shadow": True, + "ai_change_background": "prompt-car", + "ai_edit": "prompt-make it vintage", + "ai_remove_background": True, + "contrast_stretch": True, + "shadow": "bl-15_st-40_x-10_y-N5", + "sharpen": 10, + "unsharp_mask": "2-2-0.8-0.024", + "gradient": "from-red_to-white", + "original": True, + "page": "2_4", + "raw": "h-200,w-300,l-image,i-logo.png,l-end", + } + ], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400,ar-4-3,q-40,c-force,cm-extract,fo-left,f-jpeg,r-50,bg-A94D34,b-5-A94D34,rt-90,bl-10,n-some_name,pr-true,lo-true,t-5,md-true,cp-true,di-folder@@file.jpg,dpr-3,x-10,y-20,xc-30,yc-40,fl-h,o-0.8,z-2,vc-h264,ac-aac,so-5,eo-15,du-10,sr-1440_1080,e-grayscale,e-upscale,e-retouch,e-genvar,e-dropshadow,e-changebg-prompt-car,e-edit-prompt-make it vintage,e-bgremove,e-contrast,e-shadow-bl-15_st-40_x-10_y-N5,e-sharpen-10,e-usm-2-2-0.8-0.024,e-gradient-from-red_to-white,orig-true,pg-2_4,h-200,w-300,l-image,i-logo.png,l-end" + assert url == expected diff --git 
a/tests/custom/url_generation/test_basic_url_generation.py b/tests/custom/url_generation/test_basic_url_generation.py new file mode 100644 index 00000000..d91614b1 --- /dev/null +++ b/tests/custom/url_generation/test_basic_url_generation.py @@ -0,0 +1,261 @@ +"""Basic URL generation tests - converted from Ruby SDK.""" + +from typing import TYPE_CHECKING + +import pytest + +from imagekitio import ImageKit + +if TYPE_CHECKING: + from imagekitio._client import ImageKit as ImageKitType + + +class TestBasicURLGeneration: + """Test basic URL generation functionality.""" + + client: "ImageKitType" + + @pytest.fixture(autouse=True) + def setup(self) -> None: + """Set up test client.""" + self.client = ImageKit(private_key="My Private API Key") + + def test_should_return_an_empty_string_when_src_is_not_provided(self) -> None: + """Should return an empty string when src is not provided.""" + url = self.client.helper.build_url( + src="", url_endpoint="https://ik.imagekit.io/test_url_endpoint", transformation_position="query" + ) + + assert url == "" + + def test_should_generate_a_valid_url_when_src_is_slash(self) -> None: + """Should generate a valid URL when src is slash.""" + url = self.client.helper.build_url( + src="/", url_endpoint="https://ik.imagekit.io/test_url_endpoint", transformation_position="query" + ) + + expected = "https://ik.imagekit.io/test_url_endpoint" + assert url == expected + + def test_should_generate_a_valid_url_when_src_is_provided_with_transformation(self) -> None: + """Should generate a valid URL when src is provided with transformation.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg" + assert url == expected + + def test_should_generate_a_valid_url_when_a_src_is_provided_without_transformation(self) -> None: + """Should generate a valid URL when a src is provided without transformation.""" + url = self.client.helper.build_url( + src="https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg" + assert url == expected + + def test_should_generate_a_valid_url_when_undefined_transformation_parameters_are_provided_with_path(self) -> None: + """Should generate a valid URL when undefined transformation parameters are provided with path.""" + url = self.client.helper.build_url( + src="/test_path_alt.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg" + assert url == expected + + def test_by_default_transformation_position_should_be_query(self) -> None: + """By default transformation position should be query.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation=[{"height": 300, "width": 400}, {"rotation": 90}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400:rt-90" + assert url == expected + + def test_should_generate_the_url_without_sdk_version(self) -> None: + """Should generate the URL without SDK version.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation=[{"height": 300, 
"width": 400}], + transformation_position="path", + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/tr:h-300,w-400/test_path.jpg" + assert url == expected + + def test_should_generate_the_correct_url_with_a_valid_src_and_transformation(self) -> None: + """Should generate the correct URL with a valid src and transformation.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_add_transformation_as_query_when_src_has_absolute_url_even_if_transformation_position_is_path( + self, + ) -> None: + """Should add transformation as query when src has absolute URL even if transformation position is path.""" + url = self.client.helper.build_url( + src="https://my.custom.domain.com/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://my.custom.domain.com/test_path.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_generate_correct_url_when_src_has_query_params(self) -> None: + """Should generate correct URL when src has query params.""" + url = self.client.helper.build_url( + src="https://ik.imagekit.io/imagekit_id/new-endpoint/test_path.jpg?t1=v1", + url_endpoint="https://ik.imagekit.io/imagekit_id/new-endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/imagekit_id/new-endpoint/test_path.jpg?t1=v1&tr=h-300,w-400" + assert url == expected + + def test_should_generate_the_correct_url_when_the_provided_path_contains_multiple_leading_slashes(self) -> None: + """Should generate the correct URL when the provided path contains multiple leading slashes.""" + url = self.client.helper.build_url( + src="///test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_generate_the_correct_url_when_the_url_endpoint_is_overridden(self) -> None: + """Should generate the correct URL when the URL endpoint is overridden.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint_alt", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint_alt/test_path.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_generate_the_correct_url_with_transformation_position_as_query_parameter_when_src_is_provided( + self, + ) -> None: + """Should generate the correct URL with transformation position as query parameter when src is provided.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_generate_the_correct_url_with_a_valid_src_parameter_and_transformation(self) -> None: + """Should generate the correct URL with a valid src 
parameter and transformation.""" + url = self.client.helper.build_url( + src="https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg?tr=h-300,w-400" + assert url == expected + + def test_should_merge_query_parameters_correctly_in_the_generated_url(self) -> None: + """Should merge query parameters correctly in the generated URL.""" + url = self.client.helper.build_url( + src="https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg?t1=v1", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + query_parameters={"t2": "v2", "t3": "v3"}, + transformation=[{"height": 300, "width": 400}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path_alt.jpg?t1=v1&t2=v2&t3=v3&tr=h-300,w-400" + assert url == expected + + def test_should_generate_the_correct_url_with_chained_transformations(self) -> None: + """Should generate the correct URL with chained transformations.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}, {"rotation": 90}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400:rt-90" + assert url == expected + + def test_should_generate_the_correct_url_with_chained_transformations_including_raw_transformation(self) -> None: + """Should generate the correct URL with chained transformations including raw transformation.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400}, {"raw": "rndm_trnsf-abcd"}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400:rndm_trnsf-abcd" + assert url == expected + + def test_should_generate_the_correct_url_when_border_transformation_is_applied(self) -> None: + """Should generate the correct URL when border transformation is applied.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"height": 300, "width": 400, "border": "20_FF0000"}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg?tr=h-300,w-400,b-20_FF0000" + assert url == expected + + def test_should_generate_the_correct_url_when_transformation_has_empty_key_and_value(self) -> None: + """Should generate the correct URL when transformation has empty key and value.""" + url = self.client.helper.build_url( + src="/test_path.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="query", + transformation=[{"raw": ""}], + ) + + expected = "https://ik.imagekit.io/test_url_endpoint/test_path.jpg" + assert url == expected + + def test_should_generate_a_valid_url_when_cname_is_used(self) -> None: + """Should generate a valid URL when CNAME is used.""" + url = self.client.helper.build_url( + src="/test_path.jpg", url_endpoint="https://custom.domain.com", transformation_position="query" + ) + + expected = "https://custom.domain.com/test_path.jpg" + assert url == expected + + def test_should_generate_a_valid_url_when_cname_with_path_is_used(self) -> None: + """Should 
generate a valid URL when CNAME with path is used.""" + url = self.client.helper.build_url( + src="/test_path.jpg", url_endpoint="https://custom.domain.com/url-pattern", transformation_position="query" + ) + + expected = "https://custom.domain.com/url-pattern/test_path.jpg" + assert url == expected diff --git a/tests/custom/url_generation/test_build_transformation_string.py b/tests/custom/url_generation/test_build_transformation_string.py new file mode 100644 index 00000000..447bd94c --- /dev/null +++ b/tests/custom/url_generation/test_build_transformation_string.py @@ -0,0 +1,76 @@ +"""Build transformation string tests imported from Ruby SDK.""" + +import pytest + +from imagekitio import ImageKit + + +class TestBuildTransformationString: + """Test build_transformation_string matching Ruby SDK build_transformation_string_test.rb.""" + + @pytest.fixture(autouse=True) + def setup(self): + """Setup client for each test.""" + self.client = ImageKit(private_key="test-key") + + def test_should_return_empty_string_for_empty_transformation_array(self): + """Test empty transformation array returns empty string.""" + result = self.client.helper.build_transformation_string(None) + assert result == "" + + result = self.client.helper.build_transformation_string([]) + assert result == "" + + def test_should_generate_transformation_string_for_width_only(self): + """Test transformation string for width only.""" + result = self.client.helper.build_transformation_string([{"width": 300}]) + expected = "w-300" + assert result == expected + + def test_should_generate_transformation_string_for_multiple_parameters(self): + """Test transformation string for multiple parameters.""" + result = self.client.helper.build_transformation_string([{"width": 300, "height": 200}]) + expected = "w-300,h-200" + assert result == expected + + def test_should_generate_transformation_string_for_chained_transformations(self): + """Test transformation string for chained transformations.""" + result = self.client.helper.build_transformation_string([{"width": 300}, {"height": 200}]) + expected = "w-300:h-200" + assert result == expected + + def test_should_handle_empty_transformation_object(self): + """Test empty transformation object.""" + result = self.client.helper.build_transformation_string([{}]) + expected = "" + assert result == expected + + def test_should_handle_transformation_with_overlay(self): + """Test transformation with overlay.""" + result = self.client.helper.build_transformation_string([{"overlay": {"type": "text", "text": "Hello"}}]) + expected = "l-text,i-Hello,l-end" + assert result == expected + + def test_should_handle_raw_transformation_parameter(self): + """Test raw transformation parameter.""" + result = self.client.helper.build_transformation_string([{"raw": "custom-transform-123"}]) + expected = "custom-transform-123" + assert result == expected + + def test_should_handle_mixed_parameters_with_raw(self): + """Test mixed parameters with raw.""" + result = self.client.helper.build_transformation_string([{"width": 300, "raw": "custom-param-123"}]) + expected = "w-300,custom-param-123" + assert result == expected + + def test_should_handle_quality_parameter(self): + """Test quality parameter.""" + result = self.client.helper.build_transformation_string([{"quality": 80}]) + expected = "q-80" + assert result == expected + + def test_should_handle_aspect_ratio_parameter(self): + """Test aspect ratio parameter.""" + result = self.client.helper.build_transformation_string([{"aspect_ratio": "4:3"}]) + expected = 
"ar-4:3" + assert result == expected diff --git a/tests/custom/url_generation/test_overlay.py b/tests/custom/url_generation/test_overlay.py new file mode 100644 index 00000000..68d6200c --- /dev/null +++ b/tests/custom/url_generation/test_overlay.py @@ -0,0 +1,405 @@ +"""Overlay transformation tests imported from Ruby SDK.""" + +import pytest + +from imagekitio import ImageKit + + +class TestOverlay: + """Test overlay functionality matching Ruby SDK overlay_test.rb.""" + + @pytest.fixture(autouse=True) + def setup(self): + """Setup client for each test.""" + self.client = ImageKit(private_key="My Private API Key") + + # Basic overlay tests + def test_should_ignore_overlay_when_type_property_is_missing(self): + """Test that overlay is ignored when type is missing.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"width": 300}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:w-300/base-image.jpg" + assert url == expected + + def test_should_ignore_text_overlay_when_text_property_is_missing(self): + """Test that text overlay is ignored when text is empty.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": ""}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/base-image.jpg" + assert url == expected + + def test_should_ignore_image_overlay_when_input_property_is_missing(self): + """Test that image overlay is ignored when input is empty.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": ""}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/base-image.jpg" + assert url == expected + + def test_should_ignore_video_overlay_when_input_property_is_missing(self): + """Test that video overlay is ignored when input is empty.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "video", "input": ""}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/base-image.jpg" + assert url == expected + + def test_should_ignore_subtitle_overlay_when_input_property_is_missing(self): + """Test that subtitle overlay is ignored when input is empty.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "subtitle", "input": ""}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/base-image.jpg" + assert url == expected + + def test_should_ignore_solid_color_overlay_when_color_property_is_missing(self): + """Test that solid color overlay is ignored when color is empty.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "solidColor", "color": ""}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/base-image.jpg" + assert url == expected + + # Basic overlay functionality tests + def test_should_generate_url_with_text_overlay_using_url_encoding(self): + """Test text overlay with 
URL encoding.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "Minimal Text"}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-text,i-Minimal%20Text,l-end/base-image.jpg" + assert url == expected + + def test_should_generate_url_with_image_overlay_from_input_file(self): + """Test image overlay from input file.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": "logo.png"}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-image,i-logo.png,l-end/base-image.jpg" + assert url == expected + + def test_should_generate_url_with_video_overlay_from_input_file(self): + """Test video overlay from input file.""" + url = self.client.helper.build_url( + src="/base-video.mp4", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "video", "input": "play-pause-loop.mp4"}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-video,i-play-pause-loop.mp4,l-end/base-video.mp4" + assert url == expected + + def test_should_generate_url_with_subtitle_overlay_from_input_file(self): + """Test subtitle overlay from input file.""" + url = self.client.helper.build_url( + src="/base-video.mp4", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "subtitle", "input": "subtitle.srt"}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-subtitle,i-subtitle.srt,l-end/base-video.mp4" + assert url == expected + + def test_should_generate_url_with_solid_color_overlay_using_background_color(self): + """Test solid color overlay.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[{"overlay": {"type": "solidColor", "color": "FF0000"}}], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-image,i-ik_canvas,bg-FF0000,l-end/base-image.jpg" + assert url == expected + + def test_should_generate_url_with_multiple_complex_overlays_including_nested_transformations(self): + """Test complex overlays with nested transformations.""" + url = self.client.helper.build_url( + src="/base-image.jpg", + url_endpoint="https://ik.imagekit.io/test_url_endpoint", + transformation_position="path", + transformation=[ + # Text overlay + { + "overlay": { + "type": "text", + "text": "Every thing", + "position": {"x": "10", "y": "20", "focus": "center"}, + "timing": {"start": 5.0, "duration": "10", "end": 15.0}, + "transformation": [ + { + "width": "bw_mul_0.5", + "font_size": 20.0, + "font_family": "Arial", + "font_color": "0000ff", + "inner_alignment": "left", + "padding": 5.0, + "alpha": 7.0, + "typography": "b", + "background": "red", + "radius": 10.0, + "rotation": "N45", + "flip": "h", + "line_height": 20.0, + } + ], + } + }, + # Image overlay + { + "overlay": { + "type": "image", + "input": "logo.png", + "position": {"x": "10", "y": "20", "focus": "center"}, + "timing": {"start": 5.0, "duration": "10", "end": 15.0}, + "transformation": [ + { + "width": "bw_mul_0.5", + "height": "bh_mul_0.5", + "rotation": "N45", + "flip": "h", + "overlay": {"type": "text", "text": "Nested 
text overlay"}, + } + ], + } + }, + # Video overlay + { + "overlay": { + "type": "video", + "input": "play-pause-loop.mp4", + "position": {"x": "10", "y": "20", "focus": "center"}, + "timing": {"start": 5.0, "duration": "10", "end": 15.0}, + "transformation": [ + {"width": "bw_mul_0.5", "height": "bh_mul_0.5", "rotation": "N45", "flip": "h"} + ], + } + }, + # Subtitle overlay + { + "overlay": { + "type": "subtitle", + "input": "subtitle.srt", + "position": {"x": "10", "y": "20", "focus": "center"}, + "timing": {"start": 5.0, "duration": "10", "end": 15.0}, + "transformation": [ + { + "background": "red", + "color": "0000ff", + "font_family": "Arial", + "font_outline": "2_A1CCDD50", + "font_shadow": "A1CCDD_3", + } + ], + } + }, + # Solid color overlay + { + "overlay": { + "type": "solidColor", + "color": "FF0000", + "position": {"x": "10", "y": "20", "focus": "center"}, + "timing": {"start": 5.0, "duration": "10", "end": 15.0}, + "transformation": [ + { + "width": "bw_mul_0.5", + "height": "bh_mul_0.5", + "alpha": 0.5, + "background": "red", + "gradient": True, + "radius": "max", + } + ], + } + }, + ], + ) + expected = "https://ik.imagekit.io/test_url_endpoint/tr:l-text,i-Every%20thing,lx-10,ly-20,lfo-center,lso-5,leo-15,ldu-10,w-bw_mul_0.5,fs-20,ff-Arial,co-0000ff,ia-left,pa-5,al-7,tg-b,bg-red,r-10,rt-N45,fl-h,lh-20,l-end:l-image,i-logo.png,lx-10,ly-20,lfo-center,lso-5,leo-15,ldu-10,w-bw_mul_0.5,h-bh_mul_0.5,rt-N45,fl-h,l-text,i-Nested%20text%20overlay,l-end,l-end:l-video,i-play-pause-loop.mp4,lx-10,ly-20,lfo-center,lso-5,leo-15,ldu-10,w-bw_mul_0.5,h-bh_mul_0.5,rt-N45,fl-h,l-end:l-subtitle,i-subtitle.srt,lx-10,ly-20,lfo-center,lso-5,leo-15,ldu-10,bg-red,co-0000ff,ff-Arial,fol-2_A1CCDD50,fsh-A1CCDD_3,l-end:l-image,i-ik_canvas,bg-FF0000,lx-10,ly-20,lfo-center,lso-5,leo-15,ldu-10,w-bw_mul_0.5,h-bh_mul_0.5,al-0.5,bg-red,e-gradient,r-max,l-end/base-image.jpg" + assert url == expected + + # Overlay encoding tests + def test_should_use_plain_encoding_for_simple_image_paths_with_slashes_converted_to_double_at(self): + """Test plain encoding for simple image paths.""" + url = self.client.helper.build_url( + src="/medium_cafe_B1iTdD0C.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": "/customer_logo/nykaa.png"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-image,i-customer_logo@@nykaa.png,l-end/medium_cafe_B1iTdD0C.jpg" + assert url == expected + + def test_should_use_base64_encoding_for_image_paths_containing_special_characters(self): + """Test base64 encoding for image paths with special characters.""" + url = self.client.helper.build_url( + src="/medium_cafe_B1iTdD0C.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": "/customer_logo/Ñykaa.png"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-image,ie-Y3VzdG9tZXJfbG9nby%2FDkXlrYWEucG5n,l-end/medium_cafe_B1iTdD0C.jpg" + assert url == expected + + def test_should_use_plain_encoding_for_simple_text_overlays(self): + """Test plain encoding for simple text.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "HelloWorld"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,i-HelloWorld,l-end/sample.jpg" + assert url == expected + + def 
test_should_convert_slashes_to_double_at_in_font_family_paths_for_custom_fonts(self): + """Test font family path conversion.""" + url = self.client.helper.build_url( + src="/medium_cafe_B1iTdD0C.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[ + { + "overlay": { + "type": "text", + "text": "Manu", + "transformation": [{"font_family": "nested-path/Poppins-Regular_Q15GrYWmL.ttf"}], + } + } + ], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,i-Manu,ff-nested-path@@Poppins-Regular_Q15GrYWmL.ttf,l-end/medium_cafe_B1iTdD0C.jpg" + assert url == expected + + def test_should_use_url_encoding_for_text_overlays_with_spaces_and_safe_characters(self): + """Test URL encoding for text with spaces.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "Hello World"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,i-Hello%20World,l-end/sample.jpg" + assert url == expected + + def test_should_use_base64_encoding_for_text_overlays_with_special_unicode_characters(self): + """Test base64 encoding for Unicode text.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "हिन्दी"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,ie-4KS54KS%2F4KSo4KWN4KSm4KWA,l-end/sample.jpg" + assert url == expected + + def test_should_use_plain_encoding_when_explicitly_specified_for_text_overlay(self): + """Test explicit plain encoding for text.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "HelloWorld", "encoding": "plain"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,i-HelloWorld,l-end/sample.jpg" + assert url == expected + + def test_should_use_base64_encoding_when_explicitly_specified_for_text_overlay(self): + """Test explicit base64 encoding for text.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "text", "text": "HelloWorld", "encoding": "base64"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-text,ie-SGVsbG9Xb3JsZA%3D%3D,l-end/sample.jpg" + assert url == expected + + def test_should_use_plain_encoding_when_explicitly_specified_for_image_overlay(self): + """Test explicit plain encoding for image.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": "/customer/logo.png", "encoding": "plain"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-image,i-customer@@logo.png,l-end/sample.jpg" + assert url == expected + + def test_should_use_base64_encoding_when_explicitly_specified_for_image_overlay(self): + """Test explicit base64 encoding for image.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "image", "input": "/customer/logo.png", "encoding": "base64"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-image,ie-Y3VzdG9tZXIvbG9nby5wbmc%3D,l-end/sample.jpg" + assert url == expected 
+ + def test_should_use_base64_encoding_when_explicitly_specified_for_video_overlay(self): + """Test explicit base64 encoding for video.""" + url = self.client.helper.build_url( + src="/sample.mp4", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "video", "input": "/path/to/video.mp4", "encoding": "base64"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-video,ie-cGF0aC90by92aWRlby5tcDQ%3D,l-end/sample.mp4" + assert url == expected + + def test_should_use_base64_encoding_when_explicitly_specified_for_subtitle_overlay(self): + """Test explicit base64 encoding for subtitle.""" + url = self.client.helper.build_url( + src="/sample.mp4", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "subtitle", "input": "sub.srt", "encoding": "base64"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-subtitle,ie-c3ViLnNydA%3D%3D,l-end/sample.mp4" + assert url == expected + + def test_should_use_plain_encoding_when_explicitly_specified_for_subtitle_overlay(self): + """Test explicit plain encoding for subtitle overlay.""" + url = self.client.helper.build_url( + src="/sample.mp4", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="path", + transformation=[{"overlay": {"type": "subtitle", "input": "/sub.srt", "encoding": "plain"}}], + ) + expected = "https://ik.imagekit.io/demo/tr:l-subtitle,i-sub.srt,l-end/sample.mp4" + assert url == expected + + def test_should_properly_encode_overlay_text_when_transformations_are_in_query_parameters(self): + """Test text overlay encoding with query position.""" + url = self.client.helper.build_url( + src="/sample.jpg", + url_endpoint="https://ik.imagekit.io/demo", + transformation_position="query", + transformation=[{"overlay": {"type": "text", "text": "Minimal Text"}}], + ) + expected = "https://ik.imagekit.io/demo/sample.jpg?tr=l-text,i-Minimal%20Text,l-end" + assert url == expected diff --git a/tests/custom/url_generation/test_signing.py b/tests/custom/url_generation/test_signing.py new file mode 100644 index 00000000..8df8b6d3 --- /dev/null +++ b/tests/custom/url_generation/test_signing.py @@ -0,0 +1,168 @@ +"""Signing URL tests - converted from Ruby SDK.""" + +from typing import TYPE_CHECKING + +import pytest + +from imagekitio import ImageKit + +if TYPE_CHECKING: + from imagekitio._client import ImageKit as ImageKitType + + +class TestSigning: + """Test URL signing functionality.""" + + client: "ImageKitType" + + @pytest.fixture(autouse=True) + def setup(self) -> None: + """Set up test client.""" + self.client = ImageKit(private_key="dummy-key") + + def test_should_generate_a_signed_url_when_signed_is_true_without_expires_in(self) -> None: + """Should generate a signed URL when signed is true without expires_in.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", url_endpoint="https://ik.imagekit.io/demo/", signed=True + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/future-search.png?ik-s=32dbbbfc5f945c0403c71b54c38e76896ef2d6b0" + assert url == expected + + def test_should_generate_a_signed_url_when_signed_is_true_with_expires_in(self) -> None: + """Should generate a signed URL when signed is true with expires_in.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", + url_endpoint="https://ik.imagekit.io/demo/", + signed=True, + expires_in=3600, + ) + + # Expect ik-t to exist in the URL. 
We don't assert the signature because it will keep changing. + assert "ik-t" in url + + def test_should_generate_a_signed_url_when_expires_in_is_above_0_and_even_if_signed_is_false(self) -> None: + """Should generate a signed URL when expires_in is above 0 and even if signed is false.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", + url_endpoint="https://ik.imagekit.io/demo/", + signed=False, + expires_in=3600, + ) + + # Expect ik-t to exist in the URL. We don't assert the signature because it will keep changing. + assert "ik-t" in url + + def test_should_generate_signed_url_with_special_characters_in_filename(self) -> None: + """Should generate signed URL with special characters in filename.""" + url = self.client.helper.build_url( + src="sdk-testing-files/हिन्दी.png", url_endpoint="https://ik.imagekit.io/demo/", signed=True + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/%E0%A4%B9%E0%A4%BF%E0%A4%A8%E0%A5%8D%E0%A4%A6%E0%A5%80.png?ik-s=3fff2f31da1f45e007adcdbe95f88c8c330e743c" + assert url == expected + + def test_should_generate_signed_url_with_text_overlay_containing_special_characters(self) -> None: + """Should generate signed URL with text overlay containing special characters.""" + url = self.client.helper.build_url( + src="sdk-testing-files/हिन्दी.png", + url_endpoint="https://ik.imagekit.io/demo/", + transformation=[ + { + "overlay": { + "type": "text", + "text": "हिन्दी", + "transformation": [ + { + "font_color": "red", + "font_size": "32", + "font_family": "sdk-testing-files/Poppins-Regular_Q15GrYWmL.ttf", + } + ], + } + } + ], + signed=True, + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/%E0%A4%B9%E0%A4%BF%E0%A4%A8%E0%A5%8D%E0%A4%A6%E0%A5%80.png?tr=l-text,ie-4KS54KS%2F4KSo4KWN4KSm4KWA,co-red,fs-32,ff-sdk-testing-files@@Poppins-Regular_Q15GrYWmL.ttf,l-end&ik-s=ac9f24a03080102555e492185533c1ae6bd93fa7" + assert url == expected + + def test_should_generate_signed_url_with_text_overlay_and_special_characters_using_path_transformation_position( + self, + ) -> None: + """Should generate signed URL with text overlay and special characters using path transformation position.""" + url = self.client.helper.build_url( + src="sdk-testing-files/हिन्दी.png", + url_endpoint="https://ik.imagekit.io/demo/", + transformation_position="path", + transformation=[ + { + "overlay": { + "type": "text", + "text": "हिन्दी", + "transformation": [ + { + "font_color": "red", + "font_size": "32", + "font_family": "sdk-testing-files/Poppins-Regular_Q15GrYWmL.ttf", + } + ], + } + } + ], + signed=True, + ) + + expected = "https://ik.imagekit.io/demo/tr:l-text,ie-4KS54KS%2F4KSo4KWN4KSm4KWA,co-red,fs-32,ff-sdk-testing-files@@Poppins-Regular_Q15GrYWmL.ttf,l-end/sdk-testing-files/%E0%A4%B9%E0%A4%BF%E0%A4%A8%E0%A5%8D%E0%A4%A6%E0%A5%80.png?ik-s=69f2ecbb7364bbbad24616e1f7f1bac5a560fc71" + assert url == expected + + def test_should_generate_signed_url_with_query_parameters(self) -> None: + """Should generate signed URL with query parameters.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", + url_endpoint="https://ik.imagekit.io/demo/", + query_parameters={"version": "1.0", "cache": "false"}, + signed=True, + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/future-search.png?version=1.0&cache=false&ik-s=f2e5a1b8b6a0b03fd63789dfc6413a94acef9fd8" + assert url == expected + + def test_should_generate_signed_url_with_transformations_and_query_parameters(self) -> None: + """Should generate signed URL with 
transformations and query parameters.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", + url_endpoint="https://ik.imagekit.io/demo/", + transformation=[{"width": 300, "height": 200}], + query_parameters={"version": "2.0"}, + signed=True, + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/future-search.png?version=2.0&tr=w-300,h-200&ik-s=601d97a7834b7554f4dabf0d3fc3a219ceeb6b31" + assert url == expected + + def test_should_not_sign_url_when_signed_is_false(self) -> None: + """Should not sign URL when signed is false.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", url_endpoint="https://ik.imagekit.io/demo/", signed=False + ) + + expected = "https://ik.imagekit.io/demo/sdk-testing-files/future-search.png" + assert url == expected + assert "ik-s=" not in url + assert "ik-t=" not in url + + def test_should_generate_signed_url_with_transformations_in_path_position_and_query_parameters(self) -> None: + """Should generate signed URL with transformations in path position and query parameters.""" + url = self.client.helper.build_url( + src="sdk-testing-files/future-search.png", + url_endpoint="https://ik.imagekit.io/demo/", + transformation=[{"width": 300, "height": 200}], + transformation_position="path", + query_parameters={"version": "2.0"}, + signed=True, + ) + + expected = "https://ik.imagekit.io/demo/tr:w-300,h-200/sdk-testing-files/future-search.png?version=2.0&ik-s=dd1ee8f83d019bc59fd57a5fc4674a11eb8a3496" + assert url == expected diff --git a/tests/dummy_data/__init__.py b/tests/dummy_data/__init__.py deleted file mode 100644 index ab7fb752..00000000 --- a/tests/dummy_data/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . import file - -__all__ = ["file"] diff --git a/tests/dummy_data/file.py b/tests/dummy_data/file.py deleted file mode 100644 index 67be48ed..00000000 --- a/tests/dummy_data/file.py +++ /dev/null @@ -1,60 +0,0 @@ -FAILED_GENERIC_RESP = {"message": "Hi There is an error"} -SUCCESS_GENERIC_RESP = {"response": "Success"} -AUTHENTICATION_ERR_MSG = { - "message": "Your account cannot be authenticated.", - "help": "For support kindly contact us at support@imagekit.io .", -} -FAILED_DELETE_RESP = {"message": "Item Not Found"} - -SUCCESS_PURGE_CACHE_MSG = {"request_id": "fake_abc_xyz"} - -SUCCESS_PURGE_CACHE_STATUS_MSG = {"status": "pending"} - -SERVER_ERR_MSG = { - "message": "We have experienced an internal error while processing your request.", - "help": "For support kindly contact us at support@imagekit.io .", -} - -SUCCESS_LIST_RESP_MESSAGE = { - "response": [ - { - "type": "file", - "name": "default-image.jpg", - "fileId": "53dgd6023f28ft7fse488992c", - "tags": None, - "customCoordinates": None, - "isPrivateFile": None, - "url": "https://ik.imagekit.io/fakeid/default-image.jpg", - "thumbnail": "https://ik.imagekit.io/fakeid/tr:n-media_library_thumbnail/default-image.jpg", - "fileType": "image", - "filePath": "/default-image.jpg", - }, - { - "type": "file", - "name": "default-image.jpg", - "fileId": "53dgd6023f28ft7fse488992c", - "tags": None, - "customCoordinates": None, - "isPrivateFile": None, - "url": "https://ik.imagekit.io/fakeid/default-image.jpg", - "thumbnail": "https://ik.imagekit.io/fakeid/tr:n-media_library_thumbnail/default-image.jpg", - "fileType": "image", - "filePath": "/default-image.jpg", - }, - ], -} - -SUCCESS_DETAIL_MSG = { - "response": { - "type": "file", - "name": "default-image.jpg", - "fileId": "53dgd6023f28ft7fse488992c", - "tags": None, - "customCoordinates": 
None, - "isPrivateFile": None, - "url": "https://ik.imagekit.io/fakeid/default-image.jpg", - "thumbnail": "https://ik.imagekit.io/fakeid/tr:n-media_library_thumbnail/default-image.jpg", - "fileType": "image", - "filePath": "/default-image.jpg", - } -} diff --git a/tests/dummy_data/image.png b/tests/dummy_data/image.png deleted file mode 100644 index 4db4e065..00000000 Binary files a/tests/dummy_data/image.png and /dev/null differ diff --git a/tests/dummy_data/urls.py b/tests/dummy_data/urls.py deleted file mode 100644 index 2944b7ee..00000000 --- a/tests/dummy_data/urls.py +++ /dev/null @@ -1,4 +0,0 @@ -URL_ENDPOINT = "https://ik.imagekit.io/your_imagekit_id/endpoint/" -BASIC_GENERATED_URL = ( - "https://ik.imagekit.io/your_imagekit_id/endpoint/tr:h-300,w-400/default-image.jpg", -) diff --git a/tests/helpers.py b/tests/helpers.py deleted file mode 100644 index 59e7ed45..00000000 --- a/tests/helpers.py +++ /dev/null @@ -1,67 +0,0 @@ -import unittest -from typing import Any -from unittest.mock import Mock, patch - -from requests import Response - -from imagekitio.client import ImageKit -from tests.dummy_data.file import AUTHENTICATION_ERR_MSG, SUCCESS_GENERIC_RESP -try: - from simplejson.errors import JSONDecodeError -except ImportError: - from json import JSONDecodeError - - -class ClientTestCase(unittest.TestCase): - """ - Base TestCase for Client - """ - private_key="fake122" - - @patch("imagekitio.file.File") - @patch("imagekitio.resource.ImageKitRequest") - def setUp(self, mock_file, mock_req): - """ - Tests if list_files work with skip and limit - """ - self.options = { - "skip": "10", - "limit": "1", - } - self.client = ImageKit( - public_key="fake122", private_key=ClientTestCase.private_key, url_endpoint="fake122", - ) - - -def get_mocked_failed_resp(message=None, status=401): - """GET failed mocked response customized by parameter - """ - mocked_resp = Mock(spec=Response) - mocked_resp.status_code = status - if not message: - mocked_resp.json.return_value = AUTHENTICATION_ERR_MSG - else: - mocked_resp.json.return_value = message - return mocked_resp - - -def get_mocked_failed_resp_text(): - """GET failed mocked response returned as text not json - """ - mocked_resp = Mock(spec=Response) - mocked_resp.status_code = 502 - mocked_resp.text = 'Bad Gateway' - mocked_resp.json.side_effect = JSONDecodeError("Expecting value: ", "Bad Gateway", 0) - return mocked_resp - - -def get_mocked_success_resp(message: dict = None, status: int = 200): - """GET success mocked response customize by parameter - """ - mocked_resp = Mock(spec=Response) - mocked_resp.status_code = status - if not message: - mocked_resp.json.return_value = SUCCESS_GENERIC_RESP - else: - mocked_resp.json.return_value = message - return mocked_resp diff --git a/tests/sample.jpg b/tests/sample.jpg deleted file mode 100644 index 1db39e04..00000000 Binary files a/tests/sample.jpg and /dev/null differ diff --git a/tests/sample_file.txt b/tests/sample_file.txt new file mode 100644 index 00000000..af5626b4 --- /dev/null +++ b/tests/sample_file.txt @@ -0,0 +1 @@ +Hello, world! diff --git a/tests/test_client.py b/tests/test_client.py index 06afeb2e..73532a86 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,52 +1,1886 @@ -import unittest -from unittest.mock import MagicMock +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from imagekitio.client import ImageKit -from tests.dummy_data.file import SUCCESS_DETAIL_MSG -from tests.helpers import ClientTestCase, get_mocked_success_resp +from __future__ import annotations -imagekit_obj = ImageKit( - private_key="private_fake:", public_key="public_fake123:", url_endpoint="fake.com", +import gc +import os +import sys +import json +import asyncio +import inspect +import tracemalloc +from typing import Any, Union, cast +from unittest import mock +from typing_extensions import Literal + +import httpx +import pytest +from respx import MockRouter +from pydantic import ValidationError + +from imagekitio import ImageKit, AsyncImageKit, APIResponseValidationError +from imagekitio._types import Omit +from imagekitio._utils import asyncify +from imagekitio._models import BaseModel, FinalRequestOptions +from imagekitio._exceptions import ImageKitError, APIStatusError, APITimeoutError, APIResponseValidationError +from imagekitio._base_client import ( + DEFAULT_TIMEOUT, + HTTPX_DEFAULT_TIMEOUT, + BaseClient, + OtherPlatform, + DefaultHttpxClient, + DefaultAsyncHttpxClient, + get_platform, + make_request_options, ) +from .utils import update_env + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") +private_key = "My Private Key" +password = "My Password" + + +def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + return dict(url.params) + + +def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: + return 0.1 + + +def _get_open_connections(client: ImageKit | AsyncImageKit) -> int: + transport = client._client._transport + assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) + + pool = transport._pool + return len(pool._requests) + + +class TestImageKit: + @pytest.mark.respx(base_url=base_url) + def test_raw_response(self, respx_mock: MockRouter, client: ImageKit) -> None: + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + @pytest.mark.respx(base_url=base_url) + def test_raw_response_for_binary(self, respx_mock: MockRouter, client: ImageKit) -> None: + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') + ) + + response = client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + def test_copy(self, client: ImageKit) -> None: + copied = client.copy() + assert id(copied) != id(client) + + copied = client.copy(private_key="another My Private Key") + assert copied.private_key == "another My Private Key" + assert client.private_key == "My Private Key" + + copied = client.copy(password="another My Password") + assert copied.password == "another My Password" + assert client.password == "My Password" + + def test_copy_default_options(self, client: ImageKit) -> None: + # options that have a default are overridden correctly + copied = client.copy(max_retries=7) + assert copied.max_retries == 7 + assert client.max_retries == 2 + + copied2 = copied.copy(max_retries=6) + assert copied2.max_retries == 6 + assert copied.max_retries == 7 + + # timeout + assert 
isinstance(client.timeout, httpx.Timeout) + copied = client.copy(timeout=None) + assert copied.timeout is None + assert isinstance(client.timeout, httpx.Timeout) + + def test_copy_default_headers(self) -> None: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, + ) + assert client.default_headers["X-Foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert copied.default_headers["X-Foo"] == "bar" + + # merges already given headers + copied = client.copy(default_headers={"X-Bar": "stainless"}) + assert copied.default_headers["X-Foo"] == "bar" + assert copied.default_headers["X-Bar"] == "stainless" + + # uses new values for any already given headers + copied = client.copy(default_headers={"X-Foo": "stainless"}) + assert copied.default_headers["X-Foo"] == "stainless" + + # set_default_headers + + # completely overrides already set values + copied = client.copy(set_default_headers={}) + assert copied.default_headers.get("X-Foo") is None + + copied = client.copy(set_default_headers={"X-Bar": "Robert"}) + assert copied.default_headers["X-Bar"] == "Robert" + + with pytest.raises( + ValueError, + match="`default_headers` and `set_default_headers` arguments are mutually exclusive", + ): + client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + client.close() + + def test_copy_default_query(self) -> None: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_query={"foo": "bar"}, + ) + assert _get_params(client)["foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert _get_params(copied)["foo"] == "bar" + + # merges already given params + copied = client.copy(default_query={"bar": "stainless"}) + params = _get_params(copied) + assert params["foo"] == "bar" + assert params["bar"] == "stainless" + + # uses new values for any already given headers + copied = client.copy(default_query={"foo": "stainless"}) + assert _get_params(copied)["foo"] == "stainless" + + # set_default_query + + # completely overrides already set values + copied = client.copy(set_default_query={}) + assert _get_params(copied) == {} + + copied = client.copy(set_default_query={"bar": "Robert"}) + assert _get_params(copied)["bar"] == "Robert" + + with pytest.raises( + ValueError, + # TODO: update + match="`default_query` and `set_default_query` arguments are mutually exclusive", + ): + client.copy(set_default_query={}, default_query={"foo": "Bar"}) + + client.close() + + def test_copy_signature(self, client: ImageKit) -> None: + # ensure the same parameters that can be passed to the client are defined in the `.copy()` method + init_signature = inspect.signature( + # mypy doesn't like that we access the `__init__` property. 
+ client.__init__, # type: ignore[misc] + ) + copy_signature = inspect.signature(client.copy) + exclude_params = {"transport", "proxies", "_strict_response_validation"} + + for name in init_signature.parameters.keys(): + if name in exclude_params: + continue + + copy_param = copy_signature.parameters.get(name) + assert copy_param is not None, f"copy() signature is missing the {name} param" + + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") + def test_copy_build_request(self, client: ImageKit) -> None: + options = FinalRequestOptions(method="get", url="/foo") + + def build_request(options: FinalRequestOptions) -> None: + client_copy = client.copy() + client_copy._build_request(options) + + # ensure that the machinery is warmed up before tracing starts. + build_request(options) + gc.collect() + + tracemalloc.start(1000) + + snapshot_before = tracemalloc.take_snapshot() + + ITERATIONS = 10 + for _ in range(ITERATIONS): + build_request(options) + + gc.collect() + snapshot_after = tracemalloc.take_snapshot() + + tracemalloc.stop() + + def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: + if diff.count == 0: + # Avoid false positives by considering only leaks (i.e. allocations that persist). + return + + if diff.count % ITERATIONS != 0: + # Avoid false positives by considering only leaks that appear per iteration. + return + + for frame in diff.traceback: + if any( + frame.filename.endswith(fragment) + for fragment in [ + # to_raw_response_wrapper leaks through the @functools.wraps() decorator. + # + # removing the decorator fixes the leak for reasons we don't understand. + "imagekitio/_legacy_response.py", + "imagekitio/_response.py", + # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. + "imagekitio/_compat.py", + # Standard library leaks we don't care about. 
+ "/logging/__init__.py", + ] + ): + return + + leaks.append(diff) + + leaks: list[tracemalloc.StatisticDiff] = [] + for diff in snapshot_after.compare_to(snapshot_before, "traceback"): + add_leak(leaks, diff) + if leaks: + for leak in leaks: + print("MEMORY LEAK:", leak) + for frame in leak.traceback: + print(frame) + raise AssertionError() + + def test_request_timeout(self, client: ImageKit) -> None: + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + request = client._build_request(FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(100.0) + + def test_client_timeout_option(self) -> None: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + timeout=httpx.Timeout(0), + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(0) + + client.close() + + def test_http_client_timeout_option(self) -> None: + # custom timeout given to the httpx client should be used + with httpx.Client(timeout=None) as http_client: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(None) + + client.close() + + # no timeout given to the httpx client should not use the httpx default + with httpx.Client() as http_client: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + client.close() + + # explicitly passing the default timeout currently results in it being ignored + with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT # our default + + client.close() + + async def test_invalid_http_client(self) -> None: + with pytest.raises(TypeError, match="Invalid `http_client` arg"): + async with httpx.AsyncClient() as http_client: + ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=cast(Any, http_client), + ) + + def test_default_headers_option(self) -> None: + test_client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, + ) + request = test_client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "bar" + assert request.headers.get("x-stainless-lang") == "python" + + 
test_client2 = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={ + "X-Foo": "stainless", + "X-Stainless-Lang": "my-overriding-header", + }, + ) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "stainless" + assert request.headers.get("x-stainless-lang") == "my-overriding-header" + + test_client.close() + test_client2.close() + + def test_validate_headers(self) -> None: + client = ImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert "Basic" in request.headers.get("Authorization") + + with pytest.raises(ImageKitError): + with update_env( + **{ + "IMAGEKIT_PRIVATE_KEY": Omit(), + "OPTIONAL_IMAGEKIT_IGNORES_THIS": Omit(), + } + ): + client2 = ImageKit(base_url=base_url, private_key=None, password=None, _strict_response_validation=True) + _ = client2 + + def test_default_query_option(self) -> None: + client = ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_query={"query_param": "bar"}, + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + assert dict(url.params) == {"query_param": "bar"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/foo", + params={"foo": "baz", "query_param": "overridden"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} + + client.close() + + def test_request_extra_json(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": False} + + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"baz": False} + + # `extra_json` takes priority over `json_data` when keys clash + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar", "baz": True}, + extra_json={"baz": None}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": None} -class TestPHashDistance(unittest.TestCase): - def test_phash_distance(self): - """Tests if phash_distance working properly + def test_request_extra_headers(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options(extra_headers={"X-Foo": "Foo"}), + ), + ) + assert request.headers.get("X-Foo") == "Foo" + + # `extra_headers` takes priority over `default_headers` when keys clash + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_headers={"X-Bar": "false"}, + ), + ), + ) + assert request.headers.get("X-Bar") == "false" + + def test_request_extra_query(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_query={"my_query_param": "Foo"}, + ), + ), 
+ ) + params = dict(request.url.params) + assert params == {"my_query_param": "Foo"} + + # if both `query` and `extra_query` are given, they are merged + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"bar": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"bar": "1", "foo": "2"} + + # `extra_query` takes priority over `query` when keys clash + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"foo": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"foo": "2"} + + def test_multipart_repeating_array(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions.construct( + method="post", + url="/foo", + headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + json_data={"array": ["foo", "bar"]}, + files=[("foo.txt", b"hello world")], + ) + ) + + assert request.read().split(b"\r\n") == [ + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"foo", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"bar", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', + b"Content-Type: application/octet-stream", + b"", + b"hello world", + b"--6b7ba517decee4a450543ea6ae821c82--", + b"", + ] + + @pytest.mark.respx(base_url=base_url) + def test_basic_union_response(self, respx_mock: MockRouter, client: ImageKit) -> None: + class Model1(BaseModel): + name: str + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + @pytest.mark.respx(base_url=base_url) + def test_union_response_different_types(self, respx_mock: MockRouter, client: ImageKit) -> None: + """Union of objects with the same field name using a different type""" + + class Model1(BaseModel): + foo: int + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) + + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model1) + assert response.foo == 1 + + @pytest.mark.respx(base_url=base_url) + def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter, client: ImageKit) -> None: + """ + Response that sets Content-Type to something other than application/json but returns json data """ - a, b = ("33699c96619cc69e", "968e978414fe04ea") - c, d = ("33699c96619cc69e", "33699c96619cc69e") - e, f = ("a4a65595ac94518b", "7838873e791f8400") - self.assertEqual(imagekit_obj.phash_distance(a, b), 30) - self.assertEqual(imagekit_obj.phash_distance(c, d), 0) - self.assertEqual(imagekit_obj.phash_distance(e, f), 37) - self.assertRaises(TypeError, imagekit_obj.phash_distance, "", "dkf90") - self.assertRaises(TypeError, imagekit_obj.phash_distance, 1234, 111) + class Model(BaseModel): + foo: int + + 
respx_mock.get("/foo").mock( + return_value=httpx.Response( + 200, + content=json.dumps({"foo": 2}), + headers={"Content-Type": "application/text"}, + ) + ) + + response = client.get("/foo", cast_to=Model) + assert isinstance(response, Model) + assert response.foo == 2 + + def test_base_url_setter(self) -> None: + client = ImageKit( + base_url="https://example.com/from_init", + private_key=private_key, + password=password, + _strict_response_validation=True, + ) + assert client.base_url == "https://example.com/from_init/" + + client.base_url = "https://example.com/from_setter" # type: ignore[assignment] + + assert client.base_url == "https://example.com/from_setter/" + + client.close() + + def test_base_url_env(self) -> None: + with update_env(IMAGE_KIT_BASE_URL="http://localhost:5000/from/env"): + client = ImageKit(private_key=private_key, password=password, _strict_response_validation=True) + assert client.base_url == "http://localhost:5000/from/env/" + + @pytest.mark.parametrize( + "client", + [ + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_base_url_trailing_slash(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + client.close() + + @pytest.mark.parametrize( + "client", + [ + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_base_url_no_trailing_slash(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + client.close() + + @pytest.mark.parametrize( + "client", + [ + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + ImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_absolute_request_url(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="https://myapi.com/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "https://myapi.com/foo" + client.close() + + def test_copied_client_does_not_close_http(self) -> None: + test_client = ImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + assert not test_client.is_closed() + + copied = test_client.copy() + assert copied is not test_client + + del copied + + assert not test_client.is_closed() + + def test_client_context_manager(self) -> None: + test_client = ImageKit( + base_url=base_url, private_key=private_key, 
password=password, _strict_response_validation=True + ) + with test_client as c2: + assert c2 is test_client + assert not c2.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() + + @pytest.mark.respx(base_url=base_url) + def test_client_response_validation_error(self, respx_mock: MockRouter, client: ImageKit) -> None: + class Model(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) + + with pytest.raises(APIResponseValidationError) as exc: + client.get("/foo", cast_to=Model) + + assert isinstance(exc.value.__cause__, ValidationError) + + def test_client_max_retries_validation(self) -> None: + with pytest.raises(TypeError, match=r"max_retries cannot be None"): + ImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + max_retries=cast(Any, None), + ) + + @pytest.mark.respx(base_url=base_url) + def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + + strict_client = ImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + + with pytest.raises(APIResponseValidationError): + strict_client.get("/foo", cast_to=Model) + + non_strict_client = ImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=False + ) + + response = non_strict_client.get("/foo", cast_to=Model) + assert isinstance(response, str) # type: ignore[unreachable] + + strict_client.close() + non_strict_client.close() + + @pytest.mark.parametrize( + "remaining_retries,retry_after,timeout", + [ + [3, "20", 20], + [3, "0", 0.5], + [3, "-10", 0.5], + [3, "60", 60], + [3, "61", 0.5], + [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], + [3, "99999999999999999999999999999999999", 0.5], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "", 0.5], + [2, "", 0.5 * 2.0], + [1, "", 0.5 * 4.0], + [-1100, "", 8], # test large number potentially overflowing + ], + ) + @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) + def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float, client: ImageKit + ) -> None: + headers = httpx.Headers({"retry-after": retry_after}) + options = FinalRequestOptions(method="get", url="/foo", max_retries=3) + calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: ImageKit) -> None: + respx_mock.post("/api/v1/files/upload").mock(side_effect=httpx.TimeoutException("Test timeout error")) + + with pytest.raises(APITimeoutError): + client.files.with_streaming_response.upload(file=b"raw file contents", file_name="fileName").__enter__() + + assert _get_open_connections(client) == 0 + + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def 
test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: ImageKit) -> None: + respx_mock.post("/api/v1/files/upload").mock(return_value=httpx.Response(500)) + + with pytest.raises(APIStatusError): + client.files.with_streaming_response.upload(file=b"raw file contents", file_name="fileName").__enter__() + assert _get_open_connections(client) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) + def test_retries_taken( + self, + client: ImageKit, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = client.files.with_raw_response.upload(file=b"raw file contents", file_name="fileName") + + assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_omit_retry_count_header( + self, client: ImageKit, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = client.files.with_raw_response.upload( + file=b"raw file contents", file_name="fileName", extra_headers={"x-stainless-retry-count": Omit()} + ) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_overwrite_retry_count_header( + self, client: ImageKit, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = client.files.with_raw_response.upload( + file=b"raw file contents", file_name="fileName", extra_headers={"x-stainless-retry-count": "42"} + ) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + 
client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects(self, respx_mock: MockRouter, client: ImageKit) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: ImageKit) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + client.post("/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + + +class TestAsyncImageKit: + @pytest.mark.respx(base_url=base_url) + async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await async_client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + @pytest.mark.respx(base_url=base_url) + async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') + ) + + response = await async_client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + def test_copy(self, async_client: AsyncImageKit) -> None: + copied = async_client.copy() + assert id(copied) != id(async_client) + + copied = async_client.copy(private_key="another My Private Key") + assert copied.private_key == "another My Private Key" + assert async_client.private_key == "My Private Key" + + copied = async_client.copy(password="another My Password") + assert copied.password == "another My Password" + assert async_client.password == "My Password" + + def test_copy_default_options(self, async_client: AsyncImageKit) -> None: + # options that have a default are overridden correctly + copied = async_client.copy(max_retries=7) + assert copied.max_retries == 7 + assert async_client.max_retries == 2 + + copied2 = copied.copy(max_retries=6) + assert copied2.max_retries == 6 + assert copied.max_retries == 7 + + # timeout + assert 
isinstance(async_client.timeout, httpx.Timeout) + copied = async_client.copy(timeout=None) + assert copied.timeout is None + assert isinstance(async_client.timeout, httpx.Timeout) + + async def test_copy_default_headers(self) -> None: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, + ) + assert client.default_headers["X-Foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert copied.default_headers["X-Foo"] == "bar" + + # merges already given headers + copied = client.copy(default_headers={"X-Bar": "stainless"}) + assert copied.default_headers["X-Foo"] == "bar" + assert copied.default_headers["X-Bar"] == "stainless" + + # uses new values for any already given headers + copied = client.copy(default_headers={"X-Foo": "stainless"}) + assert copied.default_headers["X-Foo"] == "stainless" + + # set_default_headers + + # completely overrides already set values + copied = client.copy(set_default_headers={}) + assert copied.default_headers.get("X-Foo") is None + + copied = client.copy(set_default_headers={"X-Bar": "Robert"}) + assert copied.default_headers["X-Bar"] == "Robert" + + with pytest.raises( + ValueError, + match="`default_headers` and `set_default_headers` arguments are mutually exclusive", + ): + client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + await client.close() + + async def test_copy_default_query(self) -> None: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_query={"foo": "bar"}, + ) + assert _get_params(client)["foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert _get_params(copied)["foo"] == "bar" + + # merges already given params + copied = client.copy(default_query={"bar": "stainless"}) + params = _get_params(copied) + assert params["foo"] == "bar" + assert params["bar"] == "stainless" + + # uses new values for any already given headers + copied = client.copy(default_query={"foo": "stainless"}) + assert _get_params(copied)["foo"] == "stainless" + + # set_default_query + + # completely overrides already set values + copied = client.copy(set_default_query={}) + assert _get_params(copied) == {} + + copied = client.copy(set_default_query={"bar": "Robert"}) + assert _get_params(copied)["bar"] == "Robert" + + with pytest.raises( + ValueError, + # TODO: update + match="`default_query` and `set_default_query` arguments are mutually exclusive", + ): + client.copy(set_default_query={}, default_query={"foo": "Bar"}) + + await client.close() + + def test_copy_signature(self, async_client: AsyncImageKit) -> None: + # ensure the same parameters that can be passed to the client are defined in the `.copy()` method + init_signature = inspect.signature( + # mypy doesn't like that we access the `__init__` property. 
+ async_client.__init__, # type: ignore[misc] + ) + copy_signature = inspect.signature(async_client.copy) + exclude_params = {"transport", "proxies", "_strict_response_validation"} + + for name in init_signature.parameters.keys(): + if name in exclude_params: + continue + + copy_param = copy_signature.parameters.get(name) + assert copy_param is not None, f"copy() signature is missing the {name} param" + + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") + def test_copy_build_request(self, async_client: AsyncImageKit) -> None: + options = FinalRequestOptions(method="get", url="/foo") + + def build_request(options: FinalRequestOptions) -> None: + client_copy = async_client.copy() + client_copy._build_request(options) + + # ensure that the machinery is warmed up before tracing starts. + build_request(options) + gc.collect() + + tracemalloc.start(1000) + + snapshot_before = tracemalloc.take_snapshot() + ITERATIONS = 10 + for _ in range(ITERATIONS): + build_request(options) -class TestClientAndImageKitObjInit(ClientTestCase): - """ - Tests client and Imagekit classes object initialization - """ + gc.collect() + snapshot_after = tracemalloc.take_snapshot() + + tracemalloc.stop() + + def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: + if diff.count == 0: + # Avoid false positives by considering only leaks (i.e. allocations that persist). + return + + if diff.count % ITERATIONS != 0: + # Avoid false positives by considering only leaks that appear per iteration. + return + + for frame in diff.traceback: + if any( + frame.filename.endswith(fragment) + for fragment in [ + # to_raw_response_wrapper leaks through the @functools.wraps() decorator. + # + # removing the decorator fixes the leak for reasons we don't understand. + "imagekitio/_legacy_response.py", + "imagekitio/_response.py", + # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. + "imagekitio/_compat.py", + # Standard library leaks we don't care about. 
+ "/logging/__init__.py", + ] + ): + return + + leaks.append(diff) + + leaks: list[tracemalloc.StatisticDiff] = [] + for diff in snapshot_after.compare_to(snapshot_before, "traceback"): + add_leak(leaks, diff) + if leaks: + for leak in leaks: + print("MEMORY LEAK:", leak) + for frame in leak.traceback: + print(frame) + raise AssertionError() + + async def test_request_timeout(self, async_client: AsyncImageKit) -> None: + request = async_client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + request = async_client._build_request( + FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) + ) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(100.0) + + async def test_client_timeout_option(self) -> None: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + timeout=httpx.Timeout(0), + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(0) + + await client.close() + + async def test_http_client_timeout_option(self) -> None: + # custom timeout given to the httpx client should be used + async with httpx.AsyncClient(timeout=None) as http_client: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(None) + + await client.close() + + # no timeout given to the httpx client should not use the httpx default + async with httpx.AsyncClient() as http_client: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + await client.close() + + # explicitly passing the default timeout currently results in it being ignored + async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=http_client, + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT # our default + + await client.close() + + def test_invalid_http_client(self) -> None: + with pytest.raises(TypeError, match="Invalid `http_client` arg"): + with httpx.Client() as http_client: + AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=cast(Any, http_client), + ) + + async def test_default_headers_option(self) -> None: + test_client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, + ) + request = test_client._build_request(FinalRequestOptions(method="get", 
url="/foo")) + assert request.headers.get("x-foo") == "bar" + assert request.headers.get("x-stainless-lang") == "python" + + test_client2 = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_headers={ + "X-Foo": "stainless", + "X-Stainless-Lang": "my-overriding-header", + }, + ) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "stainless" + assert request.headers.get("x-stainless-lang") == "my-overriding-header" + + await test_client.close() + await test_client2.close() + + def test_validate_headers(self) -> None: + client = AsyncImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert "Basic" in request.headers.get("Authorization") + + with pytest.raises(ImageKitError): + with update_env( + **{ + "IMAGEKIT_PRIVATE_KEY": Omit(), + "OPTIONAL_IMAGEKIT_IGNORES_THIS": Omit(), + } + ): + client2 = AsyncImageKit( + base_url=base_url, private_key=None, password=None, _strict_response_validation=True + ) + _ = client2 + + async def test_default_query_option(self) -> None: + client = AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + default_query={"query_param": "bar"}, + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + assert dict(url.params) == {"query_param": "bar"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/foo", + params={"foo": "baz", "query_param": "overridden"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} + + await client.close() + + def test_request_extra_json(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": False} + + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"baz": False} + + # `extra_json` takes priority over `json_data` when keys clash + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar", "baz": True}, + extra_json={"baz": None}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": None} + + def test_request_extra_headers(self, client: ImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options(extra_headers={"X-Foo": "Foo"}), + ), + ) + assert request.headers.get("X-Foo") == "Foo" + + # `extra_headers` takes priority over `default_headers` when keys clash + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_headers={"X-Bar": "false"}, + ), + ), + ) + assert request.headers.get("X-Bar") == "false" - def test_all_variable_is_being_set_to_obj(self) -> None: + def test_request_extra_query(self, client: ImageKit) -> None: + request = client._build_request( + 
FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_query={"my_query_param": "Foo"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"my_query_param": "Foo"} + + # if both `query` and `extra_query` are given, they are merged + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"bar": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"bar": "1", "foo": "2"} + + # `extra_query` takes priority over `query` when keys clash + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"foo": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"foo": "2"} + + def test_multipart_repeating_array(self, async_client: AsyncImageKit) -> None: + request = async_client._build_request( + FinalRequestOptions.construct( + method="post", + url="/foo", + headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + json_data={"array": ["foo", "bar"]}, + files=[("foo.txt", b"hello world")], + ) + ) + + assert request.read().split(b"\r\n") == [ + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"foo", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"bar", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', + b"Content-Type: application/octet-stream", + b"", + b"hello world", + b"--6b7ba517decee4a450543ea6ae821c82--", + b"", + ] + + @pytest.mark.respx(base_url=base_url) + async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + class Model1(BaseModel): + name: str + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + @pytest.mark.respx(base_url=base_url) + async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + """Union of objects with the same field name using a different type""" + + class Model1(BaseModel): + foo: int + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) + + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model1) + assert response.foo == 1 + + @pytest.mark.respx(base_url=base_url) + async def test_non_application_json_content_type_for_json_data( + self, respx_mock: MockRouter, async_client: AsyncImageKit + ) -> None: """ - Tests if variables are properly being set when creating - an object from ImageKit class + Response that sets Content-Type to something other than application/json but returns json data """ - self.assertIsNotNone(self.client.ik_request) - self.assertIsNotNone(self.client.url_obj) - self.assertIsNotNone(self.client.file) + class Model(BaseModel): + foo: int + + 
respx_mock.get("/foo").mock( + return_value=httpx.Response( + 200, + content=json.dumps({"foo": 2}), + headers={"Content-Type": "application/text"}, + ) + ) + + response = await async_client.get("/foo", cast_to=Model) + assert isinstance(response, Model) + assert response.foo == 2 + + async def test_base_url_setter(self) -> None: + client = AsyncImageKit( + base_url="https://example.com/from_init", + private_key=private_key, + password=password, + _strict_response_validation=True, + ) + assert client.base_url == "https://example.com/from_init/" + + client.base_url = "https://example.com/from_setter" # type: ignore[assignment] + + assert client.base_url == "https://example.com/from_setter/" + + await client.close() + + async def test_base_url_env(self) -> None: + with update_env(IMAGE_KIT_BASE_URL="http://localhost:5000/from/env"): + client = AsyncImageKit(private_key=private_key, password=password, _strict_response_validation=True) + assert client.base_url == "http://localhost:5000/from/env/" + + @pytest.mark.parametrize( + "client", + [ + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.AsyncClient(), + ), + ], + ids=["standard", "custom http client"], + ) + async def test_base_url_trailing_slash(self, client: AsyncImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + await client.close() + + @pytest.mark.parametrize( + "client", + [ + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.AsyncClient(), + ), + ], + ids=["standard", "custom http client"], + ) + async def test_base_url_no_trailing_slash(self, client: AsyncImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + await client.close() + + @pytest.mark.parametrize( + "client", + [ + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + ), + AsyncImageKit( + base_url="http://localhost:5000/custom/path/", + private_key=private_key, + password=password, + _strict_response_validation=True, + http_client=httpx.AsyncClient(), + ), + ], + ids=["standard", "custom http client"], + ) + async def test_absolute_request_url(self, client: AsyncImageKit) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="https://myapi.com/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "https://myapi.com/foo" + await client.close() + + async def test_copied_client_does_not_close_http(self) -> None: + test_client = AsyncImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + assert not test_client.is_closed() + + copied = test_client.copy() + assert copied is not test_client + + del copied + + await 
asyncio.sleep(0.2) + assert not test_client.is_closed() + + async def test_client_context_manager(self) -> None: + test_client = AsyncImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + async with test_client as c2: + assert c2 is test_client + assert not c2.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() + + @pytest.mark.respx(base_url=base_url) + async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + class Model(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) + + with pytest.raises(APIResponseValidationError) as exc: + await async_client.get("/foo", cast_to=Model) + + assert isinstance(exc.value.__cause__, ValidationError) -class TestGetAuthenticationParameters(ClientTestCase): - def test_get_authentication_parameters_without_token(self) -> None: - result = self.client.get_authentication_parameters("", expire=444) - self.assertIsNotNone(result) + async def test_client_max_retries_validation(self) -> None: + with pytest.raises(TypeError, match=r"max_retries cannot be None"): + AsyncImageKit( + base_url=base_url, + private_key=private_key, + password=password, + _strict_response_validation=True, + max_retries=cast(Any, None), + ) - def test_get_authentication_param_with_token(self) -> None: - result = self.client.get_authentication_parameters( - "dc45da6e3286066265a09e", expire=4555 + @pytest.mark.respx(base_url=base_url) + async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + + strict_client = AsyncImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=True + ) + + with pytest.raises(APIResponseValidationError): + await strict_client.get("/foo", cast_to=Model) + + non_strict_client = AsyncImageKit( + base_url=base_url, private_key=private_key, password=password, _strict_response_validation=False + ) + + response = await non_strict_client.get("/foo", cast_to=Model) + assert isinstance(response, str) # type: ignore[unreachable] + + await strict_client.close() + await non_strict_client.close() + + @pytest.mark.parametrize( + "remaining_retries,retry_after,timeout", + [ + [3, "20", 20], + [3, "0", 0.5], + [3, "-10", 0.5], + [3, "60", 60], + [3, "61", 0.5], + [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], + [3, "99999999999999999999999999999999999", 0.5], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "", 0.5], + [2, "", 0.5 * 2.0], + [1, "", 0.5 * 4.0], + [-1100, "", 8], # test large number potentially overflowing + ], + ) + @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) + async def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncImageKit + ) -> None: + headers = httpx.Headers({"retry-after": retry_after}) + options = FinalRequestOptions(method="get", url="/foo", max_retries=3) + calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers) + assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + + 
@mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncImageKit + ) -> None: + respx_mock.post("/api/v1/files/upload").mock(side_effect=httpx.TimeoutException("Test timeout error")) + + with pytest.raises(APITimeoutError): + await async_client.files.with_streaming_response.upload( + file=b"raw file contents", file_name="fileName" + ).__aenter__() + + assert _get_open_connections(async_client) == 0 + + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_retrying_status_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncImageKit + ) -> None: + respx_mock.post("/api/v1/files/upload").mock(return_value=httpx.Response(500)) + + with pytest.raises(APIStatusError): + await async_client.files.with_streaming_response.upload( + file=b"raw file contents", file_name="fileName" + ).__aenter__() + assert _get_open_connections(async_client) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) + async def test_retries_taken( + self, + async_client: AsyncImageKit, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = await client.files.with_raw_response.upload(file=b"raw file contents", file_name="fileName") + + assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_omit_retry_count_header( + self, async_client: AsyncImageKit, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = await client.files.with_raw_response.upload( + file=b"raw file contents", file_name="fileName", extra_headers={"x-stainless-retry-count": Omit()} + ) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("imagekitio._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_overwrite_retry_count_header( + self, async_client: AsyncImageKit, 
failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/api/v1/files/upload").mock(side_effect=retry_handler) + + response = await client.files.with_raw_response.upload( + file=b"raw file contents", file_name="fileName", extra_headers={"x-stainless-retry-count": "42"} ) - self.assertIsNotNone(result) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + + async def test_get_platform(self) -> None: + platform = await asyncify(get_platform)() + assert isinstance(platform, (str, OtherPlatform)) + + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = await async_client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncImageKit) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + await async_client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py new file mode 100644 index 00000000..d0caf5c4 --- /dev/null +++ b/tests/test_deepcopy.py @@ -0,0 +1,58 @@ +from imagekitio._utils import deepcopy_minimal + + +def assert_different_identities(obj1: object, obj2: object) -> None: + assert obj1 == obj2 + assert id(obj1) != id(obj2) + + +def test_simple_dict() -> None: + obj1 = {"foo": "bar"} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + + +def test_nested_dict() -> None: + obj1 = {"foo": {"bar": True}} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1["foo"], obj2["foo"]) + + +def 
test_complex_nested_dict() -> None: + obj1 = {"foo": {"bar": [{"hello": "world"}]}} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1["foo"], obj2["foo"]) + assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"]) + assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0]) + + +def test_simple_list() -> None: + obj1 = ["a", "b", "c"] + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + + +def test_nested_list() -> None: + obj1 = ["a", [1, 2, 3]] + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1[1], obj2[1]) + + +class MyObject: ... + + +def test_ignores_other_types() -> None: + # custom classes + my_obj = MyObject() + obj1 = {"foo": my_obj} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert obj1["foo"] is my_obj + + # tuples + obj3 = ("a", "b") + obj4 = deepcopy_minimal(obj3) + assert obj3 is obj4 diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py new file mode 100644 index 00000000..396cbd05 --- /dev/null +++ b/tests/test_extract_files.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import Sequence + +import pytest + +from imagekitio._types import FileTypes +from imagekitio._utils import extract_files + + +def test_removes_files_from_input() -> None: + query = {"foo": "bar"} + assert extract_files(query, paths=[]) == [] + assert query == {"foo": "bar"} + + query2 = {"foo": b"Bar", "hello": "world"} + assert extract_files(query2, paths=[["foo"]]) == [("foo", b"Bar")] + assert query2 == {"hello": "world"} + + query3 = {"foo": {"foo": {"bar": b"Bar"}}, "hello": "world"} + assert extract_files(query3, paths=[["foo", "foo", "bar"]]) == [("foo[foo][bar]", b"Bar")] + assert query3 == {"foo": {"foo": {}}, "hello": "world"} + + query4 = {"foo": {"bar": b"Bar", "baz": "foo"}, "hello": "world"} + assert extract_files(query4, paths=[["foo", "bar"]]) == [("foo[bar]", b"Bar")] + assert query4 == {"hello": "world", "foo": {"baz": "foo"}} + + +def test_multiple_files() -> None: + query = {"documents": [{"file": b"My first file"}, {"file": b"My second file"}]} + assert extract_files(query, paths=[["documents", "", "file"]]) == [ + ("documents[][file]", b"My first file"), + ("documents[][file]", b"My second file"), + ] + assert query == {"documents": [{}, {}]} + + +@pytest.mark.parametrize( + "query,paths,expected", + [ + [ + {"foo": {"bar": "baz"}}, + [["foo", "", "bar"]], + [], + ], + [ + {"foo": ["bar", "baz"]}, + [["foo", "bar"]], + [], + ], + [ + {"foo": {"bar": "baz"}}, + [["foo", "foo"]], + [], + ], + ], + ids=["dict expecting array", "array expecting dict", "unknown keys"], +) +def test_ignores_incorrect_paths( + query: dict[str, object], + paths: Sequence[Sequence[str]], + expected: list[tuple[str, FileTypes]], +) -> None: + assert extract_files(query, paths=paths) == expected diff --git a/tests/test_files.py b/tests/test_files.py new file mode 100644 index 00000000..79dc2ef2 --- /dev/null +++ b/tests/test_files.py @@ -0,0 +1,51 @@ +from pathlib import Path + +import anyio +import pytest +from dirty_equals import IsDict, IsList, IsBytes, IsTuple + +from imagekitio._files import to_httpx_files, async_to_httpx_files + +readme_path = Path(__file__).parent.parent.joinpath("README.md") + + +def test_pathlib_includes_file_name() -> None: + result = to_httpx_files({"file": readme_path}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", 
IsBytes())}) + + +def test_tuple_input() -> None: + result = to_httpx_files([("file", readme_path)]) + print(result) + assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) + + +@pytest.mark.asyncio +async def test_async_pathlib_includes_file_name() -> None: + result = await async_to_httpx_files({"file": readme_path}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) + + +@pytest.mark.asyncio +async def test_async_supports_anyio_path() -> None: + result = await async_to_httpx_files({"file": anyio.Path(readme_path)}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) + + +@pytest.mark.asyncio +async def test_async_tuple_input() -> None: + result = await async_to_httpx_files([("file", readme_path)]) + print(result) + assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) + + +def test_string_not_allowed() -> None: + with pytest.raises(TypeError, match="Expected file types input to be a FileContent type or to be a tuple"): + to_httpx_files( + { + "file": "foo", # type: ignore + } + ) diff --git a/tests/test_files_ops.py b/tests/test_files_ops.py deleted file mode 100644 index 02f0a55a..00000000 --- a/tests/test_files_ops.py +++ /dev/null @@ -1,634 +0,0 @@ -import base64 -import json -import os -from unittest.mock import MagicMock - -from imagekitio.client import ImageKit -from imagekitio.constants.url import URL -from tests.dummy_data.file import ( - FAILED_DELETE_RESP, - SUCCESS_DETAIL_MSG, - SUCCESS_LIST_RESP_MESSAGE, - SUCCESS_PURGE_CACHE_MSG, - SUCCESS_PURGE_CACHE_STATUS_MSG, -) -from tests.helpers import ( - ClientTestCase, - get_mocked_failed_resp, - get_mocked_failed_resp_text, - get_mocked_success_resp, -) -from imagekitio.utils.formatter import request_formatter - - -imagekit_obj = ImageKit( - private_key="private_fake:", public_key="public_fake123:", url_endpoint="fake.com", -) - - -class TestUpload(ClientTestCase): - """ - TestUpload class used to test upload method - """ - - image = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "dummy_data/image.png" - ) - filename = "test" - - def test_upload_fails_on_unauthenticated_request(self): - """ - Tests if the unauthenticated request restricted - - """ - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.upload(file=self.image, file_name=self.filename) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_binary_upload_succeeds(self): - """ - Tests if upload succeeds - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - file = open(self.image, "rb") - file.close() - resp = self.client.upload(file=file, file_name=self.filename) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_base64_upload_succeeds(self): - """ - Tests if upload succeeds - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - with open(self.image, mode="rb") as img: - imgstr = base64.b64encode(img.read()) - - resp = self.client.upload(file=imgstr, file_name=self.filename) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_url_upload_succeeds(self): - """ - Tests if url upload succeeds - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.upload(file="example.com/abc.jpg", file_name=self.filename) - self.assertIsNone(resp["error"]) - 
self.assertIsNotNone(resp["response"]) - - def test_file_upload_succeeds(self): - """ - Tests if file upload succeeds - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - - # generate expected encoded private key for the auth headers - private_key_file_upload = ClientTestCase.private_key - if private_key_file_upload != ":": - private_key_file_upload += ":" - encoded_private_key = base64.b64encode(private_key_file_upload.encode()).decode( - "utf-8" - ) - - resp = self.client.upload_file(file=self.image, file_name=self.filename) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - self.client.ik_request.request.assert_called_once_with( - "Post", - url=URL.UPLOAD_URL.value, - files={ - 'file': (None, self.image), - 'fileName': (None, self.filename) - }, - data={}, - headers={'Accept-Encoding': 'gzip, deflate', 'Authorization': "Basic {}".format(encoded_private_key)} - ) - - - def test_upload_fails_without_file_or_file_name(self) -> None: - """Test upload raises error on missing required params - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.upload, file_name=self.filename) - self.assertRaises(TypeError, self.client.upload, file=self.image) - - def test_absence_of_params_gives_proper_resp(self) -> None: - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.upload( - file=self.image, - file_name="x", - options={ - "is_private_file": "", - "tags": None, - "custom_coordinates": None, - "use_unique_file_name": None, - "folder": None - - } - ) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_all_params_being_passed_on_upload(self) -> None: - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.upload( - file=self.image, - file_name="fileabc", - options={ - "is_private_file": True, - "tags": ["abc"], - "response_fields": ["is_private_file", "tags"], - "custom_coordinates": "10,10,100,100", - "use_unique_file_name": True, - "folder": "abc" - } - ) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_upload_file_fails_without_file_or_file_name(self) -> None: - """Test upload raises error on missing required params - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.upload_file, file_name=self.filename) - self.assertRaises(TypeError, self.client.upload_file, file=self.image) - - def test_upload_file_fails_without_json_response_from_server(self) -> None: - """Test upload raises error on non json response - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp_text() - ) - resp = self.client.upload( - file=self.image, - file_name="fileabc", - options={ - "is_private_file": True, - "tags": ["abc"], - "response_fields": ["is_private_file", "tags"], - "custom_coordinates": "10,10,100,100", - "use_unique_file_name": True, - "folder": "abc" - } - ) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - -class TestListFiles(ClientTestCase): - """ - TestListFiles class used to test list_files method - """ - - def test_list_files_fails_on_unauthenticated_request(self) -> None: - """ Tests unauthenticated request restricted for list_files method - """ - self.client.ik_request.request = MagicMock( - 
return_value=get_mocked_failed_resp() - ) - resp = self.client.list_files(self.options) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_list_files_succeeds_with_basic_request(self) -> None: - """ - Tests if list_files work with skip and limit - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_LIST_RESP_MESSAGE) - ) - - resp = self.client.list_files(self.options) - - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_list_accepting_all_parameter(self): - """ - checking if list accept all parameter - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.list_files( - options={ - "file_type": "image", - "tags": ["tag1", "tag2"], - "include_folder": True, - "name": "new-dir", - "limit": "1", - "skip": "1", - }, - ) - - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - -class TestGetFileDetails(ClientTestCase): - """ - TestGetFileDetails class used to test get_file_details method - """ - - file_id = "fake_file_id1234" - file_url = "https://example.com/default.jpg" - - def test_get_file_details_fails_on_unauthenticated_request(self) -> None: - """Tests if get_file_details raise error on unauthenticated request - """ - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.get_file_details(self.file_id) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_file_details_succeeds_with_id(self) -> None: - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_DETAIL_MSG) - ) - resp = self.client.get_file_details(self.file_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_file_details_succeeds_with_url(self) -> None: - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_DETAIL_MSG) - ) - resp = self.client.get_file_details(self.file_url) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - -class TestDeleteFile(ClientTestCase): - file_id = "fax_abx1223" - - bulk_delete_ids = ["fake_123", "fake_222"] - - def test_bulk_delete_fails_on_unauthenticated_request(self) -> None: - """Test bulk_delete on unauthenticated request - this function checks if raises error on unauthenticated request - to check if bulk_delete is only restricted to authenticated - requests - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.bulk_delete(self.bulk_delete_ids) - - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_bulk_file_delete_fails_on_unauthenticated_request(self) -> None: - """Test bulk_file_delete on unauthenticated request - this function checks if raises error on unauthenticated request - to check if bulk_delete is only restricted to authenticated - requests - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.bulk_file_delete(self.bulk_delete_ids) - - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_file_delete_fails_on_item_not_found(self): - """Test delete_file on unavailable content - this function raising expected error if the file - is not available - """ - self.client.ik_request.request = MagicMock( - 
return_value=get_mocked_failed_resp(message=FAILED_DELETE_RESP) - ) - resp = self.client.delete_file(self.file_id) - - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_file_delete_succeeds(self): - """Test delete file on authenticated request - this function tests if delete_file working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp({"error": None, "response": None}) - ) - resp = self.client.delete_file(self.file_id) - - self.assertIsNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_bulk_file_delete_succeeds(self): - """Test bulk_delete on authenticated request - this function tests if bulk_file_delete working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp({"error": None, "response": {'successfullyDeletedFileIds': ['5e785a03ed03082733b979ec', '5e787c4427dd2a6c2fc564a5']}}) - ) - resp = self.client.bulk_file_delete(self.bulk_delete_ids) - - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - -class TestPurgeCache(ClientTestCase): - fake_image_url = "https://example.com/fakeid/fakeimage.jpg" - - def test_purge_cache_fails_on_unauthenticated_request(self) -> None: - """Test purge_cache unauthenticated request - this function checks if raises error on unauthenticated request - to check if purge_cache is only restricted to authenticated request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.purge_cache(self.fake_image_url) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_purge_file_cache_fails_on_unauthenticated_request(self) -> None: - """Test purge_cache unauthenticated request - this function checks if raises error on unauthenticated request - to check if purge_cache is only restricted to authenticated request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.purge_file_cache(self.fake_image_url) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_purge_cache_fails_without_passing_file_url(self) -> None: - """Test purge_cache raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.purge_cache) - - def test_purge_file_cache_fails_without_passing_file_url(self) -> None: - """Test purge_file_cache raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.purge_file_cache) - - def test_purge_cache_succeeds(self) -> None: - """Test purge_cache working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_PURGE_CACHE_MSG) - ) - resp = self.client.purge_cache(self.fake_image_url) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - self.assertIn("request_id", resp["response"]) - - def test_purge_file_cache_succeeds(self) -> None: - """Test purge_file_cache working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_PURGE_CACHE_MSG) - ) - resp = self.client.purge_file_cache(self.fake_image_url) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - self.assertIn("request_id", resp["response"]) - - 
-class TestPurgeCacheStatus(ClientTestCase): - cache_request_id = "fake1234" - - def test_get_purge_cache_status_fails_on_unauthenticated_request(self) -> None: - """Test get_purge_cache_status unauthenticated request - this function checks if raises error on unauthenticated request - to check if get_purge_cache_status is only restricted to authenticated - user - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.get_purge_cache_status(self.cache_request_id) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_get_purge_file_cache_status_fails_on_unauthenticated_request(self) -> None: - """Test get_purge_file_cache_status unauthenticated request - this function checks if raises error on unauthenticated request - to check if get_purge_cache_status is only restricted to authenticated - user - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.get_purge_file_cache_status(self.cache_request_id) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_purge_cache_status_fails_without_passing_file_url(self) -> None: - """Test purge_cache raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.get_purge_cache_status) - - def test_purge_file_cache_status_fails_without_passing_file_url(self) -> None: - """Test purge_file_cache raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.get_purge_file_cache_status) - - def test_purge_cache_status_succeeds(self) -> None: - """Test get_purge_cache_status working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_PURGE_CACHE_STATUS_MSG) - ) - resp = self.client.get_purge_cache_status(self.cache_request_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_purge_cache_status_fails_without_passing_file_id(self) -> None: - """Test purge_cache raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(TypeError, self.client.get_metadata()) - - def test_purge_file_cache_status_succeeds(self) -> None: - """Test get_purge_file_cache_status working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp(message=SUCCESS_PURGE_CACHE_STATUS_MSG) - ) - resp = self.client.get_purge_file_cache_status(self.cache_request_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - -class TestGetMetaData(ClientTestCase): - file_id = "fake_file_xbc" - - def test_get_metadata_fails_on_unauthenticated_request(self) -> None: - """Tests get_metadata raise error on unauthenticated request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.get_metadata(file_id=self.file_id) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_get_file_metadata_fails_on_unauthenticated_request(self) -> None: - """Tests get_file_metadata raise error on unauthenticated request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = 
self.client.get_file_metadata(file_id=self.file_id) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_get_metadata_succeeds(self): - """Tests if get_metadata working properly - """ - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.get_metadata(file_id=self.file_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_get_file_metadata_succeeds(self): - """Tests if get_file_metadata working properly - """ - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.get_file_metadata(file_id=self.file_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_get_remote_url_metadata_file_url(self) -> None: - """Test get_remote_url_metadata_ raises error on invalid_body request - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - self.assertRaises(ValueError, self.client.get_remote_url_metadata) - - def test_get_remote_url_metadata_succeeds(self): - """Tests if get_remote_url_metadata working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.get_remote_url_metadata( - remote_file_url="http://imagekit.io/default.jpg" - ) - self.assertIsNone(resp["error"]) - self.assertIsNotNone("response") - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.get_metadata(file_id=self.file_id) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - - def test_get_remote_file_url_metadata_succeeds(self): - """Tests if get_remote_url_metadata working properly - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - resp = self.client.get_remote_file_url_metadata( - remote_file_url="http://imagekit.io/default.jpg" - ) - self.assertIsNone(resp["error"]) - self.assertIsNotNone("response") - - -class TestUpdateFileDetails(ClientTestCase): - """ - TestUpdateFileDetails class used to update file details method - """ - - file_id = "fake_123" - - valid_options = {"tags": ["tag1", "tag2"], "custom_coordinates": "10,10,100,100"} - - def test_update_file_details_fails_on_unauthenticated_request(self): - """ - Tests if the unauthenticated request restricted - - """ - - self.client.ik_request.request = MagicMock( - return_value=get_mocked_failed_resp() - ) - resp = self.client.update_file_details( - file_id=self.file_id, options=self.valid_options - ) - self.assertIsNotNone(resp["error"]) - self.assertIsNone(resp["response"]) - - def test_update_file_details_succeeds_with_id(self): - """ - Tests if update_file_details succeeds with file_url - """ - self.client.ik_request.request = MagicMock( - return_value=get_mocked_success_resp() - ) - - # generate expected encoded private key for the auth headers - private_key_file_upload = ClientTestCase.private_key - if private_key_file_upload != ":": - private_key_file_upload += ":" - encoded_private_key = base64.b64encode(private_key_file_upload.encode()).decode( - "utf-8" - ) - - resp = self.client.update_file_details( - file_id=self.file_id, options=self.valid_options - ) - self.assertIsNone(resp["error"]) - self.assertIsNotNone(resp["response"]) - self.client.ik_request.request.assert_called_once_with( - method="Patch", - url="{}/{}/details/".format(URL.BASE_URL.value, self.file_id), - headers={'Content-Type': 
'application/json', 'Authorization': "Basic {}".format(encoded_private_key)}, - data=json.dumps(request_formatter(self.valid_options)) - ) - - def test_file_details_succeeds_with_url(self): - self.client.ik_request = MagicMock(return_value=get_mocked_success_resp()) diff --git a/tests/test_generate_url.py b/tests/test_generate_url.py deleted file mode 100644 index bd0f9052..00000000 --- a/tests/test_generate_url.py +++ /dev/null @@ -1,521 +0,0 @@ -import unittest - -from imagekitio.client import ImageKit -from imagekitio.constants.defaults import Default - - -class TestGenerateURL(unittest.TestCase): - def setUp(self) -> None: - self.client = ImageKit( - private_key="private_key_test", - public_key="public_key_test", - url_endpoint="https://test-domain.com/test-endpoint", - ) - - def test_generate_url_with_path(self): - options = { - "path": "/default-image.jpg", - "transformation": [{"height": "300", "width": "400"}], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_url_contains_ik_sdk_version(self): - options = { - "path": "/default-image.jpg", - "transformation": [{"height": "300", "width": "400"}], - } - url = self.client.url(options) - self.assertIn("ik-sdk-version", url) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_without_leading_slash_in_path(self): - options = { - "path": "default-image.jpg", - "transformation": [{"height": "300", "width": "400"}], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_overriding_url_endpoint_generation_consists_new_url(self): - """ - Overriding urlEndpoint parameter. Passing a urlEndpoint value which is - different from what I've used during SDK initialization and see if the url - returned is using this new parameter - """ - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/new/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - } - - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/new/endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_overriding_url_endpoint_without_slash_generation_consists_new_url(self): - """ - Overriding urlEndpoint parameter. 
Passing a urlEndpoint value without slash - """ - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/new/endpoint", - "transformation": [{"height": "300", "width": "400"}], - } - - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/new/endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_query_parameters(self): - options = { - "path": "/default-image.jpg", - "query_parameters": { - "param1": "value1", - "param2": "value2" - }, - "transformation": [ - { - "height": "300", - "width": "400" - } - ], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400/default-image.jpg?param1=value1&param2=value2&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_with_src(self): - options = { - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg", - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?tr=h-300%2Cw-400%2Cf-jpg%2Cpr-true%2Ce-contrast-1%3Art-90&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_with_src_with_query_params_double(self): - options = { - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?queryparam1=value1", - "query_parameters": { - "param1": "value1" - }, - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - } - url = self.client.url(options) - # @TODO - adjust value of param1=value1 in test case but it should be there - self.assertEqual( - url, - "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?queryparam1=value1&param1=value1&tr=h-300%2Cw-400%2Cf-jpg%2Cpr-true%2Ce-contrast-1%3Art-90&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ) - ) - - def test_generate_url_with_path_and_signed(self): - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - "signed": True, - } - - url = self.client.url(options) - self.assertIsNot(url, "") - self.assertIn(options["url_endpoint"], url) - self.assertIn("300", url) - self.assertIn("300", url) - - self.assertNotIn("&&", url) - self.assertNotIn("??", url) - - url = self.client.url(options) - self.assertEqual(url.split("default-image.jpg")[1][:1], "?") - self.assertNotEqual(url.split("default-image.jpg")[0][-2:], "//") - - def test_generate_url_with_path_and_signed_in_proper_form(self): - """ - Check path param url generation doesn't contain double slash - """ - options = { - "path": "/test-signed-url.jpg", - "signed": True, - "transformation": [{"width": 100}], - } - - url = self.client.url(options) - self.assertIn(Default.SIGNATURE_PARAMETER.value, url) - - def test_generate_url_signed_without_expiry_does_not_have_timestamp_parameter(self): - """ - Check query params does not contain timestamp parameter if expire_seconds isn't specified.
- """ - options = { - "path": "/test-signed-url.jpg", - "signed": True, - "transformation": [{"width": 100}], - } - - url = self.client.url(options) - self.assertNotIn(Default.TIMESTAMP_PARAMETER.value, url) - - def test_url_with_new_transformation_returns_as_it_is(self): - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "fake_xxxx": "400"}], - "transformation_position": "query", - } - - url = self.client.url(options) - self.assertIn("fake_xxxx", url) - self.assertEqual( - url, - "https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg?tr=h-300%2Cfake_xxxx-400&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_query_url_generation_transformation_as_query_and_transformations_in_url( - self, - ): - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300"}], - "transformation_position": "query", - } - - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/your_imagekit_id/endpoint/default-image.jpg?tr=h-300&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_with_chained_transformations(self): - options = { - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg", - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?tr=h-300%2Cw-400%2Cf-jpg%2Cpr-true%2Ce-contrast-1%3Art-90&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_url_check_query_param_are_added_correctly(self): - options = { - "path": "/default-image.jpg?client=123&user=5", - "transformation": [{"height": "300", "width": "400"}], - "transformation_position": "query", - } - url = self.client.url(options) - self.assertEqual(url, - "https://test-domain.com/test-endpoint/default-image.jpg?client=123&user=5&tr=h-300%2Cw-400&ik-sdk-version={}".format( - Default.SDK_VERSION.value)) - - def test_generate_url_with_src_query_parameters_merge_correctly(self): - options = { - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?client=123&ab=c", - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?client=123&ab=c&tr=h-300%2Cw-400%2Cf-jpg%2Cpr-true%2Ce-contrast-1%3Art-90&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_with_src_and_transformation_position_path(self): - options = { - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg", - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - "transformation_position": "path", - } - url = self.client.url(options) - self.assertEqual( - url, - "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg?tr=h-300%2Cw-400%2Cf-jpg%2Cpr-true%2Ce-contrast-1%3Art-90&ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_url_with_invalid_trans_pos(self): - options = { - "path": "/default-image.jpg", - "url_endpoint": 
"https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - "signed": True, - "transformation_position": "fake", - } - self.assertRaises((KeyError, ValueError), self.client.url, options) - - def test_url_without_path_and_src(self): - options = { - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - "signed": True, - } - self.assertEqual(self.client.url(options), "") - - def test_url_contains_sdk_version(self): - options = { - "path": "/default-image.jpg", - "url_endpoint": "https://ik.imagekit.io/your_imagekit_id/endpoint/", - "transformation": [{"height": "300", "width": "400"}], - "signed": True, - "transformation_position": "query", - } - - self.assertIn("ik-sdk-version", self.client.url(options)) - - def test_url_contains_slash_if_transformation_position_is_path(self): - options = { - "path": "/default-image.jpg", - "transformation": [ - { - "height": "300", - "width": "400", - "format": "jpg", - "progressive": "true", - "effect_sharpen": "-", - "effect_contrast": "1", - }, - {"rotation": 90}, - ], - "transformation_position": "path", - } - url = self.client.url(options) - self.assertEqual(url.split("tr:h-300")[0][-1], "/") - self.assertNotEqual(url.split("default-image.jpg")[0][-2:], "//") - - def test_url_signed_with_expire_in_seconds(self): - options = { - "path": "/default-image.jpg", - "transformation": [ - { - "width": "400", - }, - ], - "signed": True, - "expire_seconds": 100, - } - url = self.client.url(options) - self.assertIn("ik-t", url) - - def test_generate_url_with_path_and_src_uses_path(self): - """ - In case when both path and src fields are provided, the `path` should be preferred - """ - options = { - "path": "/default-image.jpg", - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg", - "transformation": [{"height": "300", "width": "400"}], - } - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400/default-image.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_with_all_params(self): - """ - In case where all transformation parameters are passed - """ - options = { - "path": "/test_path.jpg", - "src": "https://ik.imagekit.io/ldt7znpgpjs/test_YhNhoRxWt.jpg", - "transformation": [{ - "height": 300, - "width": 400, - "aspect_ratio": '4-3', - "quality": 40, - "crop": 'force', - "crop_mode": 'extract', - "focus": 'left', - "format": 'jpeg', - "radius": 50, - "bg": "A94D34", - "border": "5-A94D34", - "rotation": 90, - "blur": 10, - "named": "some_name", - "overlay_x": 35, - "overlay_y": 35, - "overlay_focus": "bottom", - "overlay_height": 20, - "overlay_width": 20, - "overlay_image": "/folder/file.jpg", # leading slash case - "overlay_image_trim": False, - "overlay_image_aspect_ratio": "4:3", - "overlay_image_background": "0F0F0F", - "overlay_image_border": "10_0F0F0F", - "overlay_image_dpr": 2, - "overlay_image_quality": 50, - "overlay_image_cropping": "force", - "overlay_text": "two words", - "overlay_text_font_size": 20, - "overlay_text_font_family": "Open Sans", - "overlay_text_color": "00FFFF", - "overlay_text_transparency": 5, - "overlay_text_typography": "b", - "overlay_background": "00AAFF55", - "overlay_text_encoded": "b3ZlcmxheSBtYWRlIGVhc3k%3D", - "overlay_text_width": 50, - "overlay_text_background": "00AAFF55", - "overlay_text_padding": 40, - "overlay_text_inner_alignment": "left", - "overlay_radius": 10, - "progressive": 
"true", - "lossless": "true", - "trim": 5, - "metadata": "true", - "color_profile": "true", - "default_image": "folder/file.jpg/", # trailing slash case - "dpr": 3, - "effect_sharpen": 10, - "effect_usm": "2-2-0.8-0.024", - "effect_contrast": "true", - "effect_gray": "true", - "original": True, ## Boolean handling - }] - } - url = self.client.url(options) - print(url) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/tr:h-300,w-400,ar-4-3,q-40,c-force,cm-extract,fo-left,f-jpeg,r-50,bg-A94D34,b-5-A94D34,rt-90,bl-10,n-some_name,ox-35,oy-35,ofo-bottom,oh-20,ow-20,oi-folder@@file.jpg,oit-false,oiar-4:3,oibg-0F0F0F,oib-10_0F0F0F,oidpr-2,oiq-50,oic-force,ot-two words,ots-20,otf-Open Sans,otc-00FFFF,oa-5,ott-b,obg-00AAFF55,ote-b3ZlcmxheSBtYWRlIGVhc3k%3D,otw-50,otbg-00AAFF55,otp-40,otia-left,or-10,pr-true,lo-true,t-5,md-true,cp-true,di-folder@@file.jpg,dpr-3,e-sharpen-10,e-usm-2-2-0.8-0.024,e-contrast-true,e-grayscale-true,orig-true/test_path.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_get_signature_with_100_expire_seconds(self): - url = "https://test-domain.com/test-endpoint/tr:w-100/test-signed-url.png" - signature = self.client.url_obj.get_signature( - "private_key_test", url, "https://test-domain.com/test-endpoint/", 100) - self.assertEqual(signature, "5e5037a31a7121cbe2964e220b4338cc6e1ba66d") - - def test_get_signature_without_expire_seconds(self): - url = "https://test-domain.com/test-endpoint/tr:w-100/test-signed-url.png" - signature = self.client.url_obj.get_signature( - "private_key_test", url, "https://test-domain.com/test-endpoint/", 0) - self.assertEqual(signature, "41b3075c40bc84147eb71b8b49ae7fbf349d0f00") - - def test_get_signature_without_expire_seconds_without_slash(self): - url = "https://test-domain.com/test-endpoint/tr:w-100/test-signed-url.png" - signature = self.client.url_obj.get_signature( - "private_key_test", url, "https://test-domain.com/test-endpoint", 0) - self.assertEqual(signature, "41b3075c40bc84147eb71b8b49ae7fbf349d0f00") - - - def test_generate_url_without_transforms(self): - options = { - "path": "/coffee.jpg", - "signed": False, - "expire_seconds": 10 - } - - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/coffee.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) - - def test_generate_url_without_transforms_src(self): - options = { - "src": "https://test-domain.com/test-endpoint/coffee.jpg", - "signed": False, - "expire_seconds": 10 - } - - url = self.client.url(options) - self.assertEqual( - url, - "https://test-domain.com/test-endpoint/coffee.jpg?ik-sdk-version={}".format( - Default.SDK_VERSION.value - ), - ) \ No newline at end of file diff --git a/tests/test_models.py b/tests/test_models.py new file mode 100644 index 00000000..561b9831 --- /dev/null +++ b/tests/test_models.py @@ -0,0 +1,963 @@ +import json +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast +from datetime import datetime, timezone +from typing_extensions import Literal, Annotated, TypeAliasType + +import pytest +import pydantic +from pydantic import Field + +from imagekitio._utils import PropertyInfo +from imagekitio._compat import PYDANTIC_V1, parse_obj, model_dump, model_json +from imagekitio._models import DISCRIMINATOR_CACHE, BaseModel, construct_type + + +class BasicModel(BaseModel): + foo: str + + +@pytest.mark.parametrize("value", ["hello", 1], ids=["correct type", "mismatched"]) +def test_basic(value: object) -> None: + m = 
BasicModel.construct(foo=value) + assert m.foo == value + + +def test_directly_nested_model() -> None: + class NestedModel(BaseModel): + nested: BasicModel + + m = NestedModel.construct(nested={"foo": "Foo!"}) + assert m.nested.foo == "Foo!" + + # mismatched types + m = NestedModel.construct(nested="hello!") + assert cast(Any, m.nested) == "hello!" + + +def test_optional_nested_model() -> None: + class NestedModel(BaseModel): + nested: Optional[BasicModel] + + m1 = NestedModel.construct(nested=None) + assert m1.nested is None + + m2 = NestedModel.construct(nested={"foo": "bar"}) + assert m2.nested is not None + assert m2.nested.foo == "bar" + + # mismatched types + m3 = NestedModel.construct(nested={"foo"}) + assert isinstance(cast(Any, m3.nested), set) + assert cast(Any, m3.nested) == {"foo"} + + +def test_list_nested_model() -> None: + class NestedModel(BaseModel): + nested: List[BasicModel] + + m = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) + assert m.nested is not None + assert isinstance(m.nested, list) + assert len(m.nested) == 2 + assert m.nested[0].foo == "bar" + assert m.nested[1].foo == "2" + + # mismatched types + m = NestedModel.construct(nested=True) + assert cast(Any, m.nested) is True + + m = NestedModel.construct(nested=[False]) + assert cast(Any, m.nested) == [False] + + +def test_optional_list_nested_model() -> None: + class NestedModel(BaseModel): + nested: Optional[List[BasicModel]] + + m1 = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) + assert m1.nested is not None + assert isinstance(m1.nested, list) + assert len(m1.nested) == 2 + assert m1.nested[0].foo == "bar" + assert m1.nested[1].foo == "2" + + m2 = NestedModel.construct(nested=None) + assert m2.nested is None + + # mismatched types + m3 = NestedModel.construct(nested={1}) + assert cast(Any, m3.nested) == {1} + + m4 = NestedModel.construct(nested=[False]) + assert cast(Any, m4.nested) == [False] + + +def test_list_optional_items_nested_model() -> None: + class NestedModel(BaseModel): + nested: List[Optional[BasicModel]] + + m = NestedModel.construct(nested=[None, {"foo": "bar"}]) + assert m.nested is not None + assert isinstance(m.nested, list) + assert len(m.nested) == 2 + assert m.nested[0] is None + assert m.nested[1] is not None + assert m.nested[1].foo == "bar" + + # mismatched types + m3 = NestedModel.construct(nested="foo") + assert cast(Any, m3.nested) == "foo" + + m4 = NestedModel.construct(nested=[False]) + assert cast(Any, m4.nested) == [False] + + +def test_list_mismatched_type() -> None: + class NestedModel(BaseModel): + nested: List[str] + + m = NestedModel.construct(nested=False) + assert cast(Any, m.nested) is False + + +def test_raw_dictionary() -> None: + class NestedModel(BaseModel): + nested: Dict[str, str] + + m = NestedModel.construct(nested={"hello": "world"}) + assert m.nested == {"hello": "world"} + + # mismatched types + m = NestedModel.construct(nested=False) + assert cast(Any, m.nested) is False + + +def test_nested_dictionary_model() -> None: + class NestedModel(BaseModel): + nested: Dict[str, BasicModel] + + m = NestedModel.construct(nested={"hello": {"foo": "bar"}}) + assert isinstance(m.nested, dict) + assert m.nested["hello"].foo == "bar" + + # mismatched types + m = NestedModel.construct(nested={"hello": False}) + assert cast(Any, m.nested["hello"]) is False + + +def test_unknown_fields() -> None: + m1 = BasicModel.construct(foo="foo", unknown=1) + assert m1.foo == "foo" + assert cast(Any, m1).unknown == 1 + + m2 = BasicModel.construct(foo="foo", 
unknown={"foo_bar": True}) + assert m2.foo == "foo" + assert cast(Any, m2).unknown == {"foo_bar": True} + + assert model_dump(m2) == {"foo": "foo", "unknown": {"foo_bar": True}} + + +def test_strict_validation_unknown_fields() -> None: + class Model(BaseModel): + foo: str + + model = parse_obj(Model, dict(foo="hello!", user="Robert")) + assert model.foo == "hello!" + assert cast(Any, model).user == "Robert" + + assert model_dump(model) == {"foo": "hello!", "user": "Robert"} + + +def test_aliases() -> None: + class Model(BaseModel): + my_field: int = Field(alias="myField") + + m = Model.construct(myField=1) + assert m.my_field == 1 + + # mismatched types + m = Model.construct(myField={"hello": False}) + assert cast(Any, m.my_field) == {"hello": False} + + +def test_repr() -> None: + model = BasicModel(foo="bar") + assert str(model) == "BasicModel(foo='bar')" + assert repr(model) == "BasicModel(foo='bar')" + + +def test_repr_nested_model() -> None: + class Child(BaseModel): + name: str + age: int + + class Parent(BaseModel): + name: str + child: Child + + model = Parent(name="Robert", child=Child(name="Foo", age=5)) + assert str(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" + assert repr(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" + + +def test_optional_list() -> None: + class Submodel(BaseModel): + name: str + + class Model(BaseModel): + items: Optional[List[Submodel]] + + m = Model.construct(items=None) + assert m.items is None + + m = Model.construct(items=[]) + assert m.items == [] + + m = Model.construct(items=[{"name": "Robert"}]) + assert m.items is not None + assert len(m.items) == 1 + assert m.items[0].name == "Robert" + + +def test_nested_union_of_models() -> None: + class Submodel1(BaseModel): + bar: bool + + class Submodel2(BaseModel): + thing: str + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2] + + m = Model.construct(foo={"thing": "hello"}) + assert isinstance(m.foo, Submodel2) + assert m.foo.thing == "hello" + + +def test_nested_union_of_mixed_types() -> None: + class Submodel1(BaseModel): + bar: bool + + class Model(BaseModel): + foo: Union[Submodel1, Literal[True], Literal["CARD_HOLDER"]] + + m = Model.construct(foo=True) + assert m.foo is True + + m = Model.construct(foo="CARD_HOLDER") + assert m.foo == "CARD_HOLDER" + + m = Model.construct(foo={"bar": False}) + assert isinstance(m.foo, Submodel1) + assert m.foo.bar is False + + +def test_nested_union_multiple_variants() -> None: + class Submodel1(BaseModel): + bar: bool + + class Submodel2(BaseModel): + thing: str + + class Submodel3(BaseModel): + foo: int + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2, None, Submodel3] + + m = Model.construct(foo={"thing": "hello"}) + assert isinstance(m.foo, Submodel2) + assert m.foo.thing == "hello" + + m = Model.construct(foo=None) + assert m.foo is None + + m = Model.construct() + assert m.foo is None + + m = Model.construct(foo={"foo": "1"}) + assert isinstance(m.foo, Submodel3) + assert m.foo.foo == 1 + + +def test_nested_union_invalid_data() -> None: + class Submodel1(BaseModel): + level: int + + class Submodel2(BaseModel): + name: str + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2] + + m = Model.construct(foo=True) + assert cast(bool, m.foo) is True + + m = Model.construct(foo={"name": 3}) + if PYDANTIC_V1: + assert isinstance(m.foo, Submodel2) + assert m.foo.name == "3" + else: + assert isinstance(m.foo, Submodel1) + assert m.foo.name == 3 # type: ignore + + +def test_list_of_unions() -> None: 
+ class Submodel1(BaseModel): + level: int + + class Submodel2(BaseModel): + name: str + + class Model(BaseModel): + items: List[Union[Submodel1, Submodel2]] + + m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) + assert len(m.items) == 2 + assert isinstance(m.items[0], Submodel1) + assert m.items[0].level == 1 + assert isinstance(m.items[1], Submodel2) + assert m.items[1].name == "Robert" + + m = Model.construct(items=[{"level": -1}, 156]) + assert len(m.items) == 2 + assert isinstance(m.items[0], Submodel1) + assert m.items[0].level == -1 + assert cast(Any, m.items[1]) == 156 + + +def test_union_of_lists() -> None: + class SubModel1(BaseModel): + level: int + + class SubModel2(BaseModel): + name: str + + class Model(BaseModel): + items: Union[List[SubModel1], List[SubModel2]] + + # with one valid entry + m = Model.construct(items=[{"name": "Robert"}]) + assert len(m.items) == 1 + assert isinstance(m.items[0], SubModel2) + assert m.items[0].name == "Robert" + + # with two entries pointing to different types + m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) + assert len(m.items) == 2 + assert isinstance(m.items[0], SubModel1) + assert m.items[0].level == 1 + assert isinstance(m.items[1], SubModel1) + assert cast(Any, m.items[1]).name == "Robert" + + # with two entries pointing to *completely* different types + m = Model.construct(items=[{"level": -1}, 156]) + assert len(m.items) == 2 + assert isinstance(m.items[0], SubModel1) + assert m.items[0].level == -1 + assert cast(Any, m.items[1]) == 156 + + +def test_dict_of_union() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + foo: str + + class Model(BaseModel): + data: Dict[str, Union[SubModel1, SubModel2]] + + m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) + assert len(list(m.data.keys())) == 2 + assert isinstance(m.data["hello"], SubModel1) + assert m.data["hello"].name == "there" + assert isinstance(m.data["foo"], SubModel2) + assert m.data["foo"].foo == "bar" + + # TODO: test mismatched type + + +def test_double_nested_union() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + bar: str + + class Model(BaseModel): + data: Dict[str, List[Union[SubModel1, SubModel2]]] + + m = Model.construct(data={"foo": [{"bar": "baz"}, {"name": "Robert"}]}) + assert len(m.data["foo"]) == 2 + + entry1 = m.data["foo"][0] + assert isinstance(entry1, SubModel2) + assert entry1.bar == "baz" + + entry2 = m.data["foo"][1] + assert isinstance(entry2, SubModel1) + assert entry2.name == "Robert" + + # TODO: test mismatched type + + +def test_union_of_dict() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + foo: str + + class Model(BaseModel): + data: Union[Dict[str, SubModel1], Dict[str, SubModel2]] + + m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) + assert len(list(m.data.keys())) == 2 + assert isinstance(m.data["hello"], SubModel1) + assert m.data["hello"].name == "there" + assert isinstance(m.data["foo"], SubModel1) + assert cast(Any, m.data["foo"]).foo == "bar" + + +def test_iso8601_datetime() -> None: + class Model(BaseModel): + created_at: datetime + + expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) + + if PYDANTIC_V1: + expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' + else: + expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' + + model = Model.construct(created_at="2019-12-27T18:11:19.117Z") + assert 
model.created_at == expected + assert model_json(model) == expected_json + + model = parse_obj(Model, dict(created_at="2019-12-27T18:11:19.117Z")) + assert model.created_at == expected + assert model_json(model) == expected_json + + +def test_does_not_coerce_int() -> None: + class Model(BaseModel): + bar: int + + assert Model.construct(bar=1).bar == 1 + assert Model.construct(bar=10.9).bar == 10.9 + assert Model.construct(bar="19").bar == "19" # type: ignore[comparison-overlap] + assert Model.construct(bar=False).bar is False + + +def test_int_to_float_safe_conversion() -> None: + class Model(BaseModel): + float_field: float + + m = Model.construct(float_field=10) + assert m.float_field == 10.0 + assert isinstance(m.float_field, float) + + m = Model.construct(float_field=10.12) + assert m.float_field == 10.12 + assert isinstance(m.float_field, float) + + # number too big + m = Model.construct(float_field=2**53 + 1) + assert m.float_field == 2**53 + 1 + assert isinstance(m.float_field, int) + + +def test_deprecated_alias() -> None: + class Model(BaseModel): + resource_id: str = Field(alias="model_id") + + @property + def model_id(self) -> str: + return self.resource_id + + m = Model.construct(model_id="id") + assert m.model_id == "id" + assert m.resource_id == "id" + assert m.resource_id is m.model_id + + m = parse_obj(Model, {"model_id": "id"}) + assert m.model_id == "id" + assert m.resource_id == "id" + assert m.resource_id is m.model_id + + +def test_omitted_fields() -> None: + class Model(BaseModel): + resource_id: Optional[str] = None + + m = Model.construct() + assert m.resource_id is None + assert "resource_id" not in m.model_fields_set + + m = Model.construct(resource_id=None) + assert m.resource_id is None + assert "resource_id" in m.model_fields_set + + m = Model.construct(resource_id="foo") + assert m.resource_id == "foo" + assert "resource_id" in m.model_fields_set + + +def test_to_dict() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert m.to_dict() == {"FOO": "hello"} + assert m.to_dict(use_api_names=False) == {"foo": "hello"} + + m2 = Model() + assert m2.to_dict() == {} + assert m2.to_dict(exclude_unset=False) == {"FOO": None} + assert m2.to_dict(exclude_unset=False, exclude_none=True) == {} + assert m2.to_dict(exclude_unset=False, exclude_defaults=True) == {} + + m3 = Model(FOO=None) + assert m3.to_dict() == {"FOO": None} + assert m3.to_dict(exclude_none=True) == {} + assert m3.to_dict(exclude_defaults=True) == {} + + class Model2(BaseModel): + created_at: datetime + + time_str = "2024-03-21T11:39:01.275859" + m4 = Model2.construct(created_at=time_str) + assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} + assert m4.to_dict(mode="json") == {"created_at": time_str} + + if PYDANTIC_V1: + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.to_dict(warnings=False) + + +def test_forwards_compat_model_dump_method() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert m.model_dump() == {"foo": "hello"} + assert m.model_dump(include={"bar"}) == {} + assert m.model_dump(exclude={"foo"}) == {} + assert m.model_dump(by_alias=True) == {"FOO": "hello"} + + m2 = Model() + assert m2.model_dump() == {"foo": None} + assert m2.model_dump(exclude_unset=True) == {} + assert m2.model_dump(exclude_none=True) == {} + assert m2.model_dump(exclude_defaults=True) == {} + + m3 = 
Model(FOO=None) + assert m3.model_dump() == {"foo": None} + assert m3.model_dump(exclude_none=True) == {} + + if PYDANTIC_V1: + with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): + m.model_dump(round_trip=True) + + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.model_dump(warnings=False) + + +def test_compat_method_no_error_for_warnings() -> None: + class Model(BaseModel): + foo: Optional[str] + + m = Model(foo="hello") + assert isinstance(model_dump(m, warnings=False), dict) + + +def test_to_json() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert json.loads(m.to_json()) == {"FOO": "hello"} + assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"} + + if PYDANTIC_V1: + assert m.to_json(indent=None) == '{"FOO": "hello"}' + else: + assert m.to_json(indent=None) == '{"FOO":"hello"}' + + m2 = Model() + assert json.loads(m2.to_json()) == {} + assert json.loads(m2.to_json(exclude_unset=False)) == {"FOO": None} + assert json.loads(m2.to_json(exclude_unset=False, exclude_none=True)) == {} + assert json.loads(m2.to_json(exclude_unset=False, exclude_defaults=True)) == {} + + m3 = Model(FOO=None) + assert json.loads(m3.to_json()) == {"FOO": None} + assert json.loads(m3.to_json(exclude_none=True)) == {} + + if PYDANTIC_V1: + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.to_json(warnings=False) + + +def test_forwards_compat_model_dump_json_method() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert json.loads(m.model_dump_json()) == {"foo": "hello"} + assert json.loads(m.model_dump_json(include={"bar"})) == {} + assert json.loads(m.model_dump_json(include={"foo"})) == {"foo": "hello"} + assert json.loads(m.model_dump_json(by_alias=True)) == {"FOO": "hello"} + + assert m.model_dump_json(indent=2) == '{\n "foo": "hello"\n}' + + m2 = Model() + assert json.loads(m2.model_dump_json()) == {"foo": None} + assert json.loads(m2.model_dump_json(exclude_unset=True)) == {} + assert json.loads(m2.model_dump_json(exclude_none=True)) == {} + assert json.loads(m2.model_dump_json(exclude_defaults=True)) == {} + + m3 = Model(FOO=None) + assert json.loads(m3.model_dump_json()) == {"foo": None} + assert json.loads(m3.model_dump_json(exclude_none=True)) == {} + + if PYDANTIC_V1: + with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): + m.model_dump_json(round_trip=True) + + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.model_dump_json(warnings=False) + + +def test_type_compat() -> None: + # our model type can be assigned to Pydantic's model type + + def takes_pydantic(model: pydantic.BaseModel) -> None: # noqa: ARG001 + ... 
+ + class OurModel(BaseModel): + foo: Optional[str] = None + + takes_pydantic(OurModel()) + + +def test_annotated_types() -> None: + class Model(BaseModel): + value: str + + m = construct_type( + value={"value": "foo"}, + type_=cast(Any, Annotated[Model, "random metadata"]), + ) + assert isinstance(m, Model) + assert m.value == "foo" + + +def test_discriminated_unions_invalid_data() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "a", "data": 100}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, A) + assert m.type == "a" + if PYDANTIC_V1: + # pydantic v1 automatically converts inputs to strings + # if the expected type is a str + assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] + + +def test_discriminated_unions_unknown_variant() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + m = construct_type( + value={"type": "c", "data": None, "new_thing": "bar"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + + # just chooses the first variant + assert isinstance(m, A) + assert m.type == "c" # type: ignore[comparison-overlap] + assert m.data == None # type: ignore[unreachable] + assert m.new_thing == "bar" + + +def test_discriminated_unions_invalid_data_nested_unions() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + class C(BaseModel): + type: Literal["c"] + + data: bool + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "c", "data": "foo"}, + type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, C) + assert m.type == "c" + assert m.data == "foo" # type: ignore[comparison-overlap] + + +def test_discriminated_unions_with_aliases_invalid_data() -> None: + class A(BaseModel): + foo_type: Literal["a"] = Field(alias="type") + + data: str + + class B(BaseModel): + foo_type: Literal["b"] = Field(alias="type") + + data: int + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), + ) + assert isinstance(m, B) + assert m.foo_type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "a", "data": 100}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), + ) + assert isinstance(m, A) + assert m.foo_type == "a" + if PYDANTIC_V1: + # pydantic v1 automatically converts inputs to strings + # if the expected type is a str + assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] + + +def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + 
type: Literal["a"] + + data: int + + m = construct_type( + value={"type": "a", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "a" + assert m.data == "foo" # type: ignore[comparison-overlap] + + +def test_discriminated_unions_invalid_data_uses_cache() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + UnionType = cast(Any, Union[A, B]) + + assert not DISCRIMINATOR_CACHE.get(UnionType) + + m = construct_type( + value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + discriminator = DISCRIMINATOR_CACHE.get(UnionType) + assert discriminator is not None + + m = construct_type( + value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + # if the discriminator details object stays the same between invocations then + # we hit the cache + assert DISCRIMINATOR_CACHE.get(UnionType) is discriminator + + +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") +def test_type_alias_type() -> None: + Alias = TypeAliasType("Alias", str) # pyright: ignore + + class Model(BaseModel): + alias: Alias + union: Union[int, Alias] + + m = construct_type(value={"alias": "foo", "union": "bar"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.alias, str) + assert m.alias == "foo" + assert isinstance(m.union, str) + assert m.union == "bar" + + +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") +def test_field_named_cls() -> None: + class Model(BaseModel): + cls: str + + m = construct_type(value={"cls": "foo"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.cls, str) + + +def test_discriminated_union_case() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + type: Literal["b"] + + data: List[Union[A, object]] + + class ModelA(BaseModel): + type: Literal["modelA"] + + data: int + + class ModelB(BaseModel): + type: Literal["modelB"] + + required: str + + data: Union[A, B] + + # when constructing ModelA | ModelB, value data doesn't match ModelB exactly - missing `required` + m = construct_type( + value={"type": "modelB", "data": {"type": "a", "data": True}}, + type_=cast(Any, Annotated[Union[ModelA, ModelB], PropertyInfo(discriminator="type")]), + ) + + assert isinstance(m, ModelB) + + +def test_nested_discriminated_union() -> None: + class InnerType1(BaseModel): + type: Literal["type_1"] + + class InnerModel(BaseModel): + inner_value: str + + class InnerType2(BaseModel): + type: Literal["type_2"] + some_inner_model: InnerModel + + class Type1(BaseModel): + base_type: Literal["base_type_1"] + value: Annotated[ + Union[ + InnerType1, + InnerType2, + ], + PropertyInfo(discriminator="type"), + ] + + class Type2(BaseModel): + base_type: Literal["base_type_2"] + + T = Annotated[ + Union[ + Type1, + Type2, + ], + PropertyInfo(discriminator="base_type"), + ] + + model = construct_type( + type_=T, + value={ + "base_type": "base_type_1", + "value": { + "type": "type_2", + }, + }, + ) + assert isinstance(model, Type1) + assert isinstance(model.value, InnerType2) + + 
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now") +def test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... + + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo" diff --git a/tests/test_qs.py b/tests/test_qs.py new file mode 100644 index 00000000..cfc5985a --- /dev/null +++ b/tests/test_qs.py @@ -0,0 +1,78 @@ +from typing import Any, cast +from functools import partial +from urllib.parse import unquote + +import pytest + +from imagekitio._qs import Querystring, stringify + + +def test_empty() -> None: + assert stringify({}) == "" + assert stringify({"a": {}}) == "" + assert stringify({"a": {"b": {"c": {}}}}) == "" + + +def test_basic() -> None: + assert stringify({"a": 1}) == "a=1" + assert stringify({"a": "b"}) == "a=b" + assert stringify({"a": True}) == "a=true" + assert stringify({"a": False}) == "a=false" + assert stringify({"a": 1.23456}) == "a=1.23456" + assert stringify({"a": None}) == "" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_nested_dotted(method: str) -> None: + if method == "class": + serialise = Querystring(nested_format="dots").stringify + else: + serialise = partial(stringify, nested_format="dots") + + assert unquote(serialise({"a": {"b": "c"}})) == "a.b=c" + assert unquote(serialise({"a": {"b": "c", "d": "e", "f": "g"}})) == "a.b=c&a.d=e&a.f=g" + assert unquote(serialise({"a": {"b": {"c": {"d": "e"}}}})) == "a.b.c.d=e" + assert unquote(serialise({"a": {"b": True}})) == "a.b=true" + + +def test_nested_brackets() -> None: + assert unquote(stringify({"a": {"b": "c"}})) == "a[b]=c" + assert unquote(stringify({"a": {"b": "c", "d": "e", "f": "g"}})) == "a[b]=c&a[d]=e&a[f]=g" + assert unquote(stringify({"a": {"b": {"c": {"d": "e"}}}})) == "a[b][c][d]=e" + assert unquote(stringify({"a": {"b": True}})) == "a[b]=true" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_array_comma(method: str) -> None: + if method == "class": + serialise = Querystring(array_format="comma").stringify + else: + serialise = partial(stringify, array_format="comma") + + assert unquote(serialise({"in": ["foo", "bar"]})) == "in=foo,bar" + assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b]=true,false" + assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b]=true,false,true" + + +def test_array_repeat() -> None: + assert unquote(stringify({"in": ["foo", "bar"]})) == "in=foo&in=bar" + assert unquote(stringify({"a": {"b": [True, False]}})) == "a[b]=true&a[b]=false" + assert unquote(stringify({"a": {"b": [True, False, None, True]}})) == "a[b]=true&a[b]=false&a[b]=true" + assert unquote(stringify({"in": ["foo", {"b": {"c": ["d", "e"]}}]})) == "in=foo&in[b][c]=d&in[b][c]=e" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_array_brackets(method: str) -> None: + if method == "class": + serialise = Querystring(array_format="brackets").stringify + else: + serialise = partial(stringify, array_format="brackets") + + assert unquote(serialise({"in": ["foo", "bar"]})) == "in[]=foo&in[]=bar" + assert unquote(serialise({"a": {"b": [True, False]}})) == 
"a[b][]=true&a[b][]=false" + assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b][]=true&a[b][]=false&a[b][]=true" + + +def test_unknown_array_format() -> None: + with pytest.raises(NotImplementedError, match="Unknown array_format value: foo, choose from comma, repeat"): + stringify({"a": ["foo", "bar"]}, array_format=cast(Any, "foo")) diff --git a/tests/test_required_args.py b/tests/test_required_args.py new file mode 100644 index 00000000..e7cde47a --- /dev/null +++ b/tests/test_required_args.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import pytest + +from imagekitio._utils import required_args + + +def test_too_many_positional_params() -> None: + @required_args(["a"]) + def foo(a: str | None = None) -> str | None: + return a + + with pytest.raises(TypeError, match=r"foo\(\) takes 1 argument\(s\) but 2 were given"): + foo("a", "b") # type: ignore + + +def test_positional_param() -> None: + @required_args(["a"]) + def foo(a: str | None = None) -> str | None: + return a + + assert foo("a") == "a" + assert foo(None) is None + assert foo(a="b") == "b" + + with pytest.raises(TypeError, match="Missing required argument: 'a'"): + foo() + + +def test_keyword_only_param() -> None: + @required_args(["a"]) + def foo(*, a: str | None = None) -> str | None: + return a + + assert foo(a="a") == "a" + assert foo(a=None) is None + assert foo(a="b") == "b" + + with pytest.raises(TypeError, match="Missing required argument: 'a'"): + foo() + + +def test_multiple_params() -> None: + @required_args(["a", "b", "c"]) + def foo(a: str = "", *, b: str = "", c: str = "") -> str | None: + return f"{a} {b} {c}" + + assert foo(a="a", b="b", c="c") == "a b c" + + error_message = r"Missing required arguments.*" + + with pytest.raises(TypeError, match=error_message): + foo() + + with pytest.raises(TypeError, match=error_message): + foo(a="a") + + with pytest.raises(TypeError, match=error_message): + foo(b="b") + + with pytest.raises(TypeError, match=error_message): + foo(c="c") + + with pytest.raises(TypeError, match=r"Missing required argument: 'a'"): + foo(b="a", c="c") + + with pytest.raises(TypeError, match=r"Missing required argument: 'b'"): + foo("a", c="c") + + +def test_multiple_variants() -> None: + @required_args(["a"], ["b"]) + def foo(*, a: str | None = None, b: str | None = None) -> str | None: + return a if a is not None else b + + assert foo(a="foo") == "foo" + assert foo(b="bar") == "bar" + assert foo(a=None) is None + assert foo(b=None) is None + + # TODO: this error message could probably be improved + with pytest.raises( + TypeError, + match=r"Missing required arguments; Expected either \('a'\) or \('b'\) arguments to be given", + ): + foo() + + +def test_multiple_params_multiple_variants() -> None: + @required_args(["a", "b"], ["c"]) + def foo(*, a: str | None = None, b: str | None = None, c: str | None = None) -> str | None: + if a is not None: + return a + if b is not None: + return b + return c + + error_message = r"Missing required arguments; Expected either \('a' and 'b'\) or \('c'\) arguments to be given" + + with pytest.raises(TypeError, match=error_message): + foo(a="foo") + + with pytest.raises(TypeError, match=error_message): + foo(b="bar") + + with pytest.raises(TypeError, match=error_message): + foo() + + assert foo(a=None, b="bar") == "bar" + assert foo(c=None) is None + assert foo(c="foo") == "foo" diff --git a/tests/test_response.py b/tests/test_response.py new file mode 100644 index 00000000..30b86cc6 --- /dev/null +++ b/tests/test_response.py 
@@ -0,0 +1,277 @@ +import json +from typing import Any, List, Union, cast +from typing_extensions import Annotated + +import httpx +import pytest +import pydantic + +from imagekitio import ImageKit, BaseModel, AsyncImageKit +from imagekitio._response import ( + APIResponse, + BaseAPIResponse, + AsyncAPIResponse, + BinaryAPIResponse, + AsyncBinaryAPIResponse, + extract_response_type, +) +from imagekitio._streaming import Stream +from imagekitio._base_client import FinalRequestOptions + + +class ConcreteBaseAPIResponse(APIResponse[bytes]): ... + + +class ConcreteAPIResponse(APIResponse[List[str]]): ... + + +class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ... + + +def test_extract_response_type_direct_classes() -> None: + assert extract_response_type(BaseAPIResponse[str]) == str + assert extract_response_type(APIResponse[str]) == str + assert extract_response_type(AsyncAPIResponse[str]) == str + + +def test_extract_response_type_direct_class_missing_type_arg() -> None: + with pytest.raises( + RuntimeError, + match="Expected type to have a type argument at index 0 but it did not", + ): + extract_response_type(AsyncAPIResponse) + + +def test_extract_response_type_concrete_subclasses() -> None: + assert extract_response_type(ConcreteBaseAPIResponse) == bytes + assert extract_response_type(ConcreteAPIResponse) == List[str] + assert extract_response_type(ConcreteAsyncAPIResponse) == httpx.Response + + +def test_extract_response_type_binary_response() -> None: + assert extract_response_type(BinaryAPIResponse) == bytes + assert extract_response_type(AsyncBinaryAPIResponse) == bytes + + +class PydanticModel(pydantic.BaseModel): ... + + +def test_response_parse_mismatched_basemodel(client: ImageKit) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo"), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + with pytest.raises( + TypeError, + match="Pydantic models must subclass our base model type, e.g. `from imagekitio import BaseModel`", + ): + response.parse(to=PydanticModel) + + +@pytest.mark.asyncio +async def test_async_response_parse_mismatched_basemodel(async_client: AsyncImageKit) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo"), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + with pytest.raises( + TypeError, + match="Pydantic models must subclass our base model type, e.g. 
`from imagekitio import BaseModel`", + ): + await response.parse(to=PydanticModel) + + +def test_response_parse_custom_stream(client: ImageKit) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo"), + client=client, + stream=True, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + stream = response.parse(to=Stream[int]) + assert stream._cast_to == int + + +@pytest.mark.asyncio +async def test_async_response_parse_custom_stream(async_client: AsyncImageKit) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo"), + client=async_client, + stream=True, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + stream = await response.parse(to=Stream[int]) + assert stream._cast_to == int + + +class CustomModel(BaseModel): + foo: str + bar: int + + +def test_response_parse_custom_model(client: ImageKit) -> None: + response = APIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=CustomModel) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +@pytest.mark.asyncio +async def test_async_response_parse_custom_model(async_client: AsyncImageKit) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse(to=CustomModel) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +def test_response_parse_annotated_type(client: ImageKit) -> None: + response = APIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse( + to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]), + ) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +async def test_async_response_parse_annotated_type(async_client: AsyncImageKit) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse( + to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]), + ) + assert obj.foo == "hello!" 
+ assert obj.bar == 2 + + +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +def test_response_parse_bool(client: ImageKit, content: str, expected: bool) -> None: + response = APIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = response.parse(to=bool) + assert result is expected + + +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +async def test_async_response_parse_bool(client: AsyncImageKit, content: str, expected: bool) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = await response.parse(to=bool) + assert result is expected + + +class OtherModel(BaseModel): + a: str + + +@pytest.mark.parametrize("client", [False], indirect=True) # loose validation +def test_response_parse_expect_model_union_non_json_content(client: ImageKit) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation +async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncImageKit) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" diff --git a/tests/test_streaming.py b/tests/test_streaming.py new file mode 100644 index 00000000..7db128c2 --- /dev/null +++ b/tests/test_streaming.py @@ -0,0 +1,248 @@ +from __future__ import annotations + +from typing import Iterator, AsyncIterator + +import httpx +import pytest + +from imagekitio import ImageKit, AsyncImageKit +from imagekitio._streaming import Stream, AsyncStream, ServerSentEvent + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_basic(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: completion\n" + yield b'data: {"foo":true}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_data_missing_event(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"foo":true}\n' 
+ yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_event_missing_data(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.data == "" + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_events(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"\n" + yield b"event: completion\n" + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.data == "" + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.data == "" + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_events_with_data(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b'data: {"foo":true}\n' + yield b"\n" + yield b"event: completion\n" + yield b'data: {"bar":false}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.json() == {"bar": False} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_data_lines_with_empty_line(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"data: {\n" + yield b'data: "foo":\n' + yield b"data: \n" + yield b"data:\n" + yield b"data: true}\n" + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + assert sse.data == '{\n"foo":\n\n\ntrue}' + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_data_json_escaped_double_new_line(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b'data: {"foo": "my long\\n\\ncontent"}' + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": "my long\n\ncontent"} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def 
test_multiple_data_lines(sync: bool, client: ImageKit, async_client: AsyncImageKit) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"data: {\n" + yield b'data: "foo":\n' + yield b"data: true}\n" + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_special_new_line_character( + sync: bool, + client: ImageKit, + async_client: AsyncImageKit, +) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"content":" culpa"}\n' + yield b"\n" + yield b'data: {"content":" \xe2\x80\xa8"}\n' + yield b"\n" + yield b'data: {"content":"foo"}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": " culpa"} + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": " 
"} + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": "foo"} + + await assert_empty_iter(iterator) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multi_byte_character_multiple_chunks( + sync: bool, + client: ImageKit, + async_client: AsyncImageKit, +) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"content":"' + # bytes taken from the string 'известни' and arbitrarily split + # so that some multi-byte characters span multiple chunks + yield b"\xd0" + yield b"\xb8\xd0\xb7\xd0" + yield b"\xb2\xd0\xb5\xd1\x81\xd1\x82\xd0\xbd\xd0\xb8" + yield b'"}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": "известни"} + + +async def to_aiter(iter: Iterator[bytes]) -> AsyncIterator[bytes]: + for chunk in iter: + yield chunk + + +async def iter_next(iter: Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]) -> ServerSentEvent: + if isinstance(iter, AsyncIterator): + return await iter.__anext__() + + return next(iter) + + +async def assert_empty_iter(iter: Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]) -> None: + with pytest.raises((StopAsyncIteration, RuntimeError)): + await iter_next(iter) + + +def make_event_iterator( + content: Iterator[bytes], + *, + sync: bool, + client: ImageKit, + async_client: AsyncImageKit, +) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]: + if sync: + return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events() + + return AsyncStream( + cast_to=object, client=async_client, response=httpx.Response(200, content=to_aiter(content)) + )._iter_events() diff --git a/tests/test_transform.py b/tests/test_transform.py new file mode 100644 index 00000000..3520530e --- /dev/null +++ b/tests/test_transform.py @@ -0,0 +1,460 @@ +from __future__ import annotations + +import io +import pathlib +from typing import Any, Dict, List, Union, TypeVar, Iterable, Optional, cast +from datetime import date, datetime +from typing_extensions import Required, Annotated, TypedDict + +import pytest + +from imagekitio._types import Base64FileInput, omit, not_given +from imagekitio._utils import ( + PropertyInfo, + transform as _transform, + parse_datetime, + async_transform as _async_transform, +) +from imagekitio._compat import PYDANTIC_V1 +from imagekitio._models import BaseModel + +_T = TypeVar("_T") + +SAMPLE_FILE_PATH = pathlib.Path(__file__).parent.joinpath("sample_file.txt") + + +async def transform( + data: _T, + expected_type: object, + use_async: bool, +) -> _T: + if use_async: + return await _async_transform(data, expected_type=expected_type) + + return _transform(data, expected_type=expected_type) + + +parametrize = pytest.mark.parametrize("use_async", [False, True], ids=["sync", "async"]) + + +class Foo1(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +@parametrize +@pytest.mark.asyncio +async def test_top_level_alias(use_async: bool) -> None: + assert await transform({"foo_bar": "hello"}, expected_type=Foo1, use_async=use_async) == {"fooBar": "hello"} + + +class Foo2(TypedDict): + bar: Bar2 + + +class Bar2(TypedDict): + this_thing: Annotated[int, PropertyInfo(alias="this__thing")] + baz: Annotated[Baz2, PropertyInfo(alias="Baz")] + + +class Baz2(TypedDict): + my_baz: Annotated[str, PropertyInfo(alias="myBaz")] + + 
+@parametrize +@pytest.mark.asyncio +async def test_recursive_typeddict(use_async: bool) -> None: + assert await transform({"bar": {"this_thing": 1}}, Foo2, use_async) == {"bar": {"this__thing": 1}} + assert await transform({"bar": {"baz": {"my_baz": "foo"}}}, Foo2, use_async) == {"bar": {"Baz": {"myBaz": "foo"}}} + + +class Foo3(TypedDict): + things: List[Bar3] + + +class Bar3(TypedDict): + my_field: Annotated[str, PropertyInfo(alias="myField")] + + +@parametrize +@pytest.mark.asyncio +async def test_list_of_typeddict(use_async: bool) -> None: + result = await transform({"things": [{"my_field": "foo"}, {"my_field": "foo2"}]}, Foo3, use_async) + assert result == {"things": [{"myField": "foo"}, {"myField": "foo2"}]} + + +class Foo4(TypedDict): + foo: Union[Bar4, Baz4] + + +class Bar4(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz4(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_union_of_typeddict(use_async: bool) -> None: + assert await transform({"foo": {"foo_bar": "bar"}}, Foo4, use_async) == {"foo": {"fooBar": "bar"}} + assert await transform({"foo": {"foo_baz": "baz"}}, Foo4, use_async) == {"foo": {"fooBaz": "baz"}} + assert await transform({"foo": {"foo_baz": "baz", "foo_bar": "bar"}}, Foo4, use_async) == { + "foo": {"fooBaz": "baz", "fooBar": "bar"} + } + + +class Foo5(TypedDict): + foo: Annotated[Union[Bar4, List[Baz4]], PropertyInfo(alias="FOO")] + + +class Bar5(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz5(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_union_of_list(use_async: bool) -> None: + assert await transform({"foo": {"foo_bar": "bar"}}, Foo5, use_async) == {"FOO": {"fooBar": "bar"}} + assert await transform( + { + "foo": [ + {"foo_baz": "baz"}, + {"foo_baz": "baz"}, + ] + }, + Foo5, + use_async, + ) == {"FOO": [{"fooBaz": "baz"}, {"fooBaz": "baz"}]} + + +class Foo6(TypedDict): + bar: Annotated[str, PropertyInfo(alias="Bar")] + + +@parametrize +@pytest.mark.asyncio +async def test_includes_unknown_keys(use_async: bool) -> None: + assert await transform({"bar": "bar", "baz_": {"FOO": 1}}, Foo6, use_async) == { + "Bar": "bar", + "baz_": {"FOO": 1}, + } + + +class Foo7(TypedDict): + bar: Annotated[List[Bar7], PropertyInfo(alias="bAr")] + foo: Bar7 + + +class Bar7(TypedDict): + foo: str + + +@parametrize +@pytest.mark.asyncio +async def test_ignores_invalid_input(use_async: bool) -> None: + assert await transform({"bar": ""}, Foo7, use_async) == {"bAr": ""} + assert await transform({"foo": ""}, Foo7, use_async) == {"foo": ""} + + +class DatetimeDict(TypedDict, total=False): + foo: Annotated[datetime, PropertyInfo(format="iso8601")] + + bar: Annotated[Optional[datetime], PropertyInfo(format="iso8601")] + + required: Required[Annotated[Optional[datetime], PropertyInfo(format="iso8601")]] + + list_: Required[Annotated[Optional[List[datetime]], PropertyInfo(format="iso8601")]] + + union: Annotated[Union[int, datetime], PropertyInfo(format="iso8601")] + + +class DateDict(TypedDict, total=False): + foo: Annotated[date, PropertyInfo(format="iso8601")] + + +class DatetimeModel(BaseModel): + foo: datetime + + +class DateModel(BaseModel): + foo: Optional[date] + + +@parametrize +@pytest.mark.asyncio +async def test_iso8601_format(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + tz = "+00:00" if PYDANTIC_V1 else "Z" + 
assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap] + + dt = dt.replace(tzinfo=None) + assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] + + assert await transform({"foo": None}, DateDict, use_async) == {"foo": None} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=None), Any, use_async) == {"foo": None} # type: ignore + assert await transform({"foo": date.fromisoformat("2023-02-23")}, DateDict, use_async) == {"foo": "2023-02-23"} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=date.fromisoformat("2023-02-23")), DateDict, use_async) == { + "foo": "2023-02-23" + } # type: ignore[comparison-overlap] + + +@parametrize +@pytest.mark.asyncio +async def test_optional_iso8601_format(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"bar": dt}, DatetimeDict, use_async) == {"bar": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] + + assert await transform({"bar": None}, DatetimeDict, use_async) == {"bar": None} + + +@parametrize +@pytest.mark.asyncio +async def test_required_iso8601_format(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"required": dt}, DatetimeDict, use_async) == { + "required": "2023-02-23T14:16:36.337692+00:00" + } # type: ignore[comparison-overlap] + + assert await transform({"required": None}, DatetimeDict, use_async) == {"required": None} + + +@parametrize +@pytest.mark.asyncio +async def test_union_datetime(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"union": dt}, DatetimeDict, use_async) == { # type: ignore[comparison-overlap] + "union": "2023-02-23T14:16:36.337692+00:00" + } + + assert await transform({"union": "foo"}, DatetimeDict, use_async) == {"union": "foo"} + + +@parametrize +@pytest.mark.asyncio +async def test_nested_list_iso6801_format(use_async: bool) -> None: + dt1 = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + dt2 = parse_datetime("2022-01-15T06:34:23Z") + assert await transform({"list_": [dt1, dt2]}, DatetimeDict, use_async) == { # type: ignore[comparison-overlap] + "list_": ["2023-02-23T14:16:36.337692+00:00", "2022-01-15T06:34:23+00:00"] + } + + +@parametrize +@pytest.mark.asyncio +async def test_datetime_custom_format(use_async: bool) -> None: + dt = parse_datetime("2022-01-15T06:34:23Z") + + result = await transform(dt, Annotated[datetime, PropertyInfo(format="custom", format_template="%H")], use_async) + assert result == "06" # type: ignore[comparison-overlap] + + +class DateDictWithRequiredAlias(TypedDict, total=False): + required_prop: Required[Annotated[date, PropertyInfo(format="iso8601", alias="prop")]] + + +@parametrize +@pytest.mark.asyncio +async def test_datetime_with_alias(use_async: bool) -> None: + assert await transform({"required_prop": None}, DateDictWithRequiredAlias, use_async) == {"prop": None} # type: ignore[comparison-overlap] + assert await transform( + {"required_prop": 
date.fromisoformat("2023-02-23")}, DateDictWithRequiredAlias, use_async + ) == {"prop": "2023-02-23"} # type: ignore[comparison-overlap] + + +class MyModel(BaseModel): + foo: str + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_model_to_dictionary(use_async: bool) -> None: + assert cast(Any, await transform(MyModel(foo="hi!"), Any, use_async)) == {"foo": "hi!"} + assert cast(Any, await transform(MyModel.construct(foo="hi!"), Any, use_async)) == {"foo": "hi!"} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_empty_model(use_async: bool) -> None: + assert cast(Any, await transform(MyModel.construct(), Any, use_async)) == {} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_unknown_field(use_async: bool) -> None: + assert cast(Any, await transform(MyModel.construct(my_untyped_field=True), Any, use_async)) == { + "my_untyped_field": True + } + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_mismatched_types(use_async: bool) -> None: + model = MyModel.construct(foo=True) + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: + with pytest.warns(UserWarning): + params = await transform(model, Any, use_async) + assert cast(Any, params) == {"foo": True} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_mismatched_object_type(use_async: bool) -> None: + model = MyModel.construct(foo=MyModel.construct(hello="world")) + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: + with pytest.warns(UserWarning): + params = await transform(model, Any, use_async) + assert cast(Any, params) == {"foo": {"hello": "world"}} + + +class ModelNestedObjects(BaseModel): + nested: MyModel + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_nested_objects(use_async: bool) -> None: + model = ModelNestedObjects.construct(nested={"foo": "stainless"}) + assert isinstance(model.nested, MyModel) + assert cast(Any, await transform(model, Any, use_async)) == {"nested": {"foo": "stainless"}} + + +class ModelWithDefaultField(BaseModel): + foo: str + with_none_default: Union[str, None] = None + with_str_default: str = "foo" + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_default_field(use_async: bool) -> None: + # should be excluded when defaults are used + model = ModelWithDefaultField.construct() + assert model.with_none_default is None + assert model.with_str_default == "foo" + assert cast(Any, await transform(model, Any, use_async)) == {} + + # should be included when the default value is explicitly given + model = ModelWithDefaultField.construct(with_none_default=None, with_str_default="foo") + assert model.with_none_default is None + assert model.with_str_default == "foo" + assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": None, "with_str_default": "foo"} + + # should be included when a non-default value is explicitly given + model = ModelWithDefaultField.construct(with_none_default="bar", with_str_default="baz") + assert model.with_none_default == "bar" + assert model.with_str_default == "baz" + assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": "bar", "with_str_default": "baz"} + + +class TypedDictIterableUnion(TypedDict): + foo: Annotated[Union[Bar8, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +class Bar8(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz8(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def 
test_iterable_of_dictionaries(use_async: bool) -> None: + assert await transform({"foo": [{"foo_baz": "bar"}]}, TypedDictIterableUnion, use_async) == { + "FOO": [{"fooBaz": "bar"}] + } + assert cast(Any, await transform({"foo": ({"foo_baz": "bar"},)}, TypedDictIterableUnion, use_async)) == { + "FOO": [{"fooBaz": "bar"}] + } + + def my_iter() -> Iterable[Baz8]: + yield {"foo_baz": "hello"} + yield {"foo_baz": "world"} + + assert await transform({"foo": my_iter()}, TypedDictIterableUnion, use_async) == { + "FOO": [{"fooBaz": "hello"}, {"fooBaz": "world"}] + } + + +@parametrize +@pytest.mark.asyncio +async def test_dictionary_items(use_async: bool) -> None: + class DictItems(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + assert await transform({"foo": {"foo_baz": "bar"}}, Dict[str, DictItems], use_async) == {"foo": {"fooBaz": "bar"}} + + +class TypedDictIterableUnionStr(TypedDict): + foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +@parametrize +@pytest.mark.asyncio +async def test_iterable_union_str(use_async: bool) -> None: + assert await transform({"foo": "bar"}, TypedDictIterableUnionStr, use_async) == {"FOO": "bar"} + assert cast(Any, await transform(iter([{"foo_baz": "bar"}]), Union[str, Iterable[Baz8]], use_async)) == [ + {"fooBaz": "bar"} + ] + + +class TypedDictBase64Input(TypedDict): + foo: Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")] + + +@parametrize +@pytest.mark.asyncio +async def test_base64_file_input(use_async: bool) -> None: + # strings are left as-is + assert await transform({"foo": "bar"}, TypedDictBase64Input, use_async) == {"foo": "bar"} + + # pathlib.Path is automatically converted to base64 + assert await transform({"foo": SAMPLE_FILE_PATH}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQo=" + } # type: ignore[comparison-overlap] + + # io instances are automatically converted to base64 + assert await transform({"foo": io.StringIO("Hello, world!")}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQ==" + } # type: ignore[comparison-overlap] + assert await transform({"foo": io.BytesIO(b"Hello, world!")}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQ==" + } # type: ignore[comparison-overlap] + + +@parametrize +@pytest.mark.asyncio +async def test_transform_skipping(use_async: bool) -> None: + # lists of ints are left as-is + data = [1, 2, 3] + assert await transform(data, List[int], use_async) is data + + # iterables of ints are converted to a list + data = iter([1, 2, 3]) + assert await transform(data, Iterable[int], use_async) == [1, 2, 3] + + +@parametrize +@pytest.mark.asyncio +async def test_strips_notgiven(use_async: bool) -> None: + assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"} + assert await transform({"foo_bar": not_given}, Foo1, use_async) == {} + + +@parametrize +@pytest.mark.asyncio +async def test_strips_omit(use_async: bool) -> None: + assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"} + assert await transform({"foo_bar": omit}, Foo1, use_async) == {} diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py new file mode 100644 index 00000000..2e5023ad --- /dev/null +++ b/tests/test_utils/test_datetime_parse.py @@ -0,0 +1,110 @@ +""" +Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py +with modifications so it works without pydantic v1 imports. 
+""" + +from typing import Type, Union +from datetime import date, datetime, timezone, timedelta + +import pytest + +from imagekitio._utils import parse_date, parse_datetime + + +def create_tz(minutes: int) -> timezone: + return timezone(timedelta(minutes=minutes)) + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + ("1494012444.883309", date(2017, 5, 5)), + (b"1494012444.883309", date(2017, 5, 5)), + (1_494_012_444.883_309, date(2017, 5, 5)), + ("1494012444", date(2017, 5, 5)), + (1_494_012_444, date(2017, 5, 5)), + (0, date(1970, 1, 1)), + ("2012-04-23", date(2012, 4, 23)), + (b"2012-04-23", date(2012, 4, 23)), + ("2012-4-9", date(2012, 4, 9)), + (date(2012, 4, 9), date(2012, 4, 9)), + (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)), + # Invalid inputs + ("x20120423", ValueError), + ("2012-04-56", ValueError), + (19_999_999_999, date(2603, 10, 11)), # just before watershed + (20_000_000_001, date(1970, 8, 20)), # just after watershed + (1_549_316_052, date(2019, 2, 4)), # nowish in s + (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms + (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs + (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns + ("infinity", date(9999, 12, 31)), + ("inf", date(9999, 12, 31)), + (float("inf"), date(9999, 12, 31)), + ("infinity ", date(9999, 12, 31)), + (int("1" + "0" * 100), date(9999, 12, 31)), + (1e1000, date(9999, 12, 31)), + ("-infinity", date(1, 1, 1)), + ("-inf", date(1, 1, 1)), + ("nan", ValueError), + ], +) +def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_date(value) + else: + assert parse_date(value) == result + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + # values in seconds + ("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + # values in ms + ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)), + ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)), + (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)), + ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)), + ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)), + ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))), + ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))), + ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))), + ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (datetime(2017, 5, 5), datetime(2017, 5, 5)), + (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)), + # Invalid inputs + ("x20120423091500", ValueError), + ("2012-04-56T09:15:90", ValueError), + ("2012-04-23T11:05:00-25:00", ValueError), + (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, 
tzinfo=timezone.utc)), # just before watershed + (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed + (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s + (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms + (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs + (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns + ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)), + (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)), + (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("-infinity", datetime(1, 1, 1, 0, 0)), + ("-inf", datetime(1, 1, 1, 0, 0)), + ("nan", ValueError), + ], +) +def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_datetime(value) + else: + assert parse_datetime(value) == result diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py new file mode 100644 index 00000000..2c44f188 --- /dev/null +++ b/tests/test_utils/test_proxy.py @@ -0,0 +1,34 @@ +import operator +from typing import Any +from typing_extensions import override + +from imagekitio._utils import LazyProxy + + +class RecursiveLazyProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + return self + + def __call__(self, *_args: Any, **_kwds: Any) -> Any: + raise RuntimeError("This should never be called!") + + +def test_recursive_proxy() -> None: + proxy = RecursiveLazyProxy() + assert repr(proxy) == "RecursiveLazyProxy" + assert str(proxy) == "RecursiveLazyProxy" + assert dir(proxy) == [] + assert type(proxy).__name__ == "RecursiveLazyProxy" + assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" + + +def test_isinstance_does_not_error() -> None: + class AlwaysErrorProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + raise RuntimeError("Mocking missing dependency") + + proxy = AlwaysErrorProxy() + assert not isinstance(proxy, dict) + assert isinstance(proxy, LazyProxy) diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py new file mode 100644 index 00000000..b44f2904 --- /dev/null +++ b/tests/test_utils/test_typing.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from typing import Generic, TypeVar, cast + +from imagekitio._utils import extract_type_var_from_base + +_T = TypeVar("_T") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") + + +class BaseGeneric(Generic[_T]): ... + + +class SubclassGeneric(BaseGeneric[_T]): ... + + +class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ... + + +class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ... + + +class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ... 
+ + +def test_extract_type_var() -> None: + assert ( + extract_type_var_from_base( + BaseGeneric[int], + index=0, + generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), + ) + == int + ) + + +def test_extract_type_var_generic_subclass() -> None: + assert ( + extract_type_var_from_base( + SubclassGeneric[int], + index=0, + generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), + ) + == int + ) + + +def test_extract_type_var_multiple() -> None: + typ = BaseGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) + + +def test_extract_type_var_generic_subclass_multiple() -> None: + typ = SubclassGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) + + +def test_extract_type_var_generic_subclass_different_ordering_multiple() -> None: + typ = SubclassDifferentOrderGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) diff --git a/tests/test_utils_calculation.py b/tests/test_utils_calculation.py deleted file mode 100644 index 705c43f2..00000000 --- a/tests/test_utils_calculation.py +++ /dev/null @@ -1,15 +0,0 @@ -import unittest -from imagekitio.client import ImageKit -from imagekitio.utils.calculation import get_authenticated_params - - -class TestUtilCalculation(unittest.TestCase): - - def test_get_authenticated_params(self): - """Test authenticated_params returning proper value - :return: param dict - """ - result = get_authenticated_params(token='your_token', expire="1582269249", private_key="private_key_test") - self.assertEqual(result['token'], 'your_token') - self.assertEqual(result['expire'], '1582269249') - self.assertEqual(result['signature'], 'e71bcd6031016b060d349d212e23e85c791decdd') diff --git a/tests/test_utils_formatter.py b/tests/test_utils_formatter.py deleted file mode 100644 index 355a11d6..00000000 --- a/tests/test_utils_formatter.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest - -from imagekitio.utils.formatter import camel_to_snake, request_formatter - - -class TestFormatterClass(unittest.TestCase): - """ - TestFormatterClass tests if functions on formatter is working properly - """ - - def test_camel_to_snake(self) -> None: - """ - Test if CamelCase to snake_case is being converted - properly by camel_to_snake utility function - """ - self.assertEqual("abc", camel_to_snake("abc")) - self.assertEqual("_abc", camel_to_snake("_abc")) - self.assertEqual("", camel_to_snake("")) - self.assertEqual("my_name", camel_to_snake("myName")) - self.assertEqual("my_name_", camel_to_snake("myName_")) - self.assertEqual("url_endpoint", camel_to_snake("urlEndpoint")) diff --git a/tests/utils.py b/tests/utils.py new file 
mode 100644 index 00000000..d73dadfc --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import os +import inspect +import traceback +import contextlib +from typing import Any, TypeVar, Iterator, Sequence, cast +from datetime import date, datetime +from typing_extensions import Literal, get_args, get_origin, assert_type + +from imagekitio._types import Omit, NoneType +from imagekitio._utils import ( + is_dict, + is_list, + is_list_type, + is_union_type, + extract_type_arg, + is_sequence_type, + is_annotated_type, + is_type_alias_type, +) +from imagekitio._compat import PYDANTIC_V1, field_outer_type, get_model_fields +from imagekitio._models import BaseModel + +BaseModelT = TypeVar("BaseModelT", bound=BaseModel) + + +def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool: + for name, field in get_model_fields(model).items(): + field_value = getattr(value, name) + if PYDANTIC_V1: + # in v1 nullability was structured differently + # https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields + allow_none = getattr(field, "allow_none", False) + else: + allow_none = False + + assert_matches_type( + field_outer_type(field), + field_value, + path=[*path, name], + allow_none=allow_none, + ) + + return True + + +# Note: the `path` argument is only used to improve error messages when `--showlocals` is used +def assert_matches_type( + type_: Any, + value: object, + *, + path: list[str], + allow_none: bool = False, +) -> None: + if is_type_alias_type(type_): + type_ = type_.__value__ + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(type_): + type_ = extract_type_arg(type_, 0) + + if allow_none and value is None: + return + + if type_ is None or type_ is NoneType: + assert value is None + return + + origin = get_origin(type_) or type_ + + if is_list_type(type_): + return _assert_list_type(type_, value) + + if is_sequence_type(type_): + assert isinstance(value, Sequence) + inner_type = get_args(type_)[0] + for entry in value: # type: ignore + assert_type(inner_type, entry) # type: ignore + return + + if origin == str: + assert isinstance(value, str) + elif origin == int: + assert isinstance(value, int) + elif origin == bool: + assert isinstance(value, bool) + elif origin == float: + assert isinstance(value, float) + elif origin == bytes: + assert isinstance(value, bytes) + elif origin == datetime: + assert isinstance(value, datetime) + elif origin == date: + assert isinstance(value, date) + elif origin == object: + # nothing to do here, the expected type is unknown + pass + elif origin == Literal: + assert value in get_args(type_) + elif origin == dict: + assert is_dict(value) + + args = get_args(type_) + key_type = args[0] + items_type = args[1] + + for key, item in value.items(): + assert_matches_type(key_type, key, path=[*path, ""]) + assert_matches_type(items_type, item, path=[*path, ""]) + elif is_union_type(type_): + variants = get_args(type_) + + try: + none_index = variants.index(type(None)) + except ValueError: + pass + else: + # special case Optional[T] for better error messages + if len(variants) == 2: + if value is None: + # valid + return + + return assert_matches_type(type_=variants[not none_index], value=value, path=path) + + for i, variant in enumerate(variants): + try: + assert_matches_type(variant, value, path=[*path, f"variant {i}"]) + return + except AssertionError: + traceback.print_exc() + continue + + raise AssertionError("Did not match any variants") + elif 
issubclass(origin, BaseModel): + assert isinstance(value, type_) + assert assert_matches_model(type_, cast(Any, value), path=path) + elif inspect.isclass(origin) and origin.__name__ == "HttpxBinaryResponseContent": + assert value.__class__.__name__ == "HttpxBinaryResponseContent" + else: + assert None, f"Unhandled field type: {type_}" + + +def _assert_list_type(type_: type[object], value: object) -> None: + assert is_list(value) + + inner_type = get_args(type_)[0] + for entry in value: + assert_type(inner_type, entry) # type: ignore + + +@contextlib.contextmanager +def update_env(**new_env: str | Omit) -> Iterator[None]: + old = os.environ.copy() + + try: + for name, value in new_env.items(): + if isinstance(value, Omit): + os.environ.pop(name, None) + else: + os.environ[name] = value + + yield None + finally: + os.environ.clear() + os.environ.update(old) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index ec49579c..00000000 --- a/tox.ini +++ /dev/null @@ -1,10 +0,0 @@ -[tox] -envlist = py34,py35,py36 -skipsdist = True - -[testenv] -passenv = * -deps = -rrequirements/test.txt -commands = - coverage run --append -m unittest discover tests - coverage report \ No newline at end of file