diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 6dbfcfa8dc7..68ece92b131 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -15,40 +15,37 @@ jobs: # two jobs are defined make-wheel-win-osx and make-wheel-linux. # they do the the same steps, but linux wheels need to be build to target manylinux make-wheel-win-osx: - name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} + name: wheel-win-osx-${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - "windows-latest" - - "macos-latest" + - "macos-13" python-version: - - "2.7" - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" + - "3.11" + - "3.12" architecture: - x64 - x86 - include: - - python-version: "2.7" - extra-requires: "mock" - exclude: - - os: "macos-latest" + - os: "macos-13" architecture: x86 fail-fast: false steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -67,15 +64,12 @@ jobs: (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - name: Create wheel - # create the wheel using --no-use-pep517 since locally we have pyproject - # this flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies run: | python -m pip install --upgrade pip pip --version - pip install 'setuptools>=44' 'wheel>=0.34' pip list - pip wheel -w dist --no-use-pep517 -v --no-deps . + pip wheel -w dist -v --no-deps . - name: Install wheel # install the created wheel without using the pypi index @@ -105,7 +99,7 @@ jobs: - name: Set up Python for twine # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: "3.8" @@ -122,12 +116,13 @@ jobs: twine upload --skip-existing dist/* make-wheel-linux: - name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} + name: wheel-linux-${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" + - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. 
- cp27-cp27m @@ -137,6 +132,8 @@ jobs: - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 + - cp312-cp312 architecture: - x64 @@ -146,11 +143,33 @@ jobs: - python-version: "cp27-cp27mu" extra-requires: "mock" + exclude: + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" + python-version: cp27-cp27m + - os: "ubuntu-22.04" + python-version: cp27-cp27mu + - os: "ubuntu-22.04" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38 + - os: "ubuntu-20.04" + python-version: cp39-cp39 + - os: "ubuntu-20.04" + python-version: cp310-cp310 + - os: "ubuntu-20.04" + python-version: cp311-cp311 + - os: "ubuntu-20.04" + python-version: cp312-cp312 + fail-fast: false steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Get python version id: linux-py-version @@ -160,6 +179,7 @@ jobs: # this is from https://github.community/t5/GitHub-Actions/Using-the-output-of-run-inside-of-if-condition/td-p/33920 run: | version="`echo $py_tag | sed --regexp-extended 's/cp([0-9])([0-9]+)-.*/\1.\2/g'`" + version=$([[ $version = "3.11" ]] && echo 3.11.0-rc - 3.11 || echo $version ) echo $version echo "::set-output name=python-version::$version" @@ -177,60 +197,53 @@ jobs: (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - name: Create wheel for manylinux1 and manylinux2010 for py3 - if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' && matrix.python-version != 'cp312-cp312' }} # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2010_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2010_x86_64 # this action generates 3 wheels in dist/. linux, manylinux1 and manylinux2010 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Create wheel for manylinux2014 for py3 if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2014_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. 
Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Create wheel for manylinux py2 if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux1_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux1_x86_64 # this action generates 2 wheels in dist/. linux and manylinux1 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Set up Python - uses: actions/setup-python@v2 + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} + uses: actions/setup-python@v4 with: python-version: ${{ steps.linux-py-version.outputs.python-version }} architecture: ${{ matrix.architecture }} - name: Check created wheel + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} # check that the wheel is compatible with the current installation. # If it is then does: # - install the created wheel without using the pypi index @@ -249,6 +262,22 @@ jobs: echo Not compatible. Skipping install. fi + - name: Check created wheel 27 + if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} + # check that the wheel is compatible with the current installation. 
+ # - runs the tests + uses: docker://quay.io/pypa/manylinux1_x86_64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + python --version && + pip install \"greenlet<2\" \"importlib-metadata;python_version<'3.8'\" && + pip install -f dist --no-index sqlalchemy && + python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' && + pip install pytest pytest-xdist ${{ matrix.extra-requires }} && + pytest -n2 -q test --nomemory --notimingintensive" + - name: Upload wheels to release # upload the generated wheels to the github release uses: sqlalchemyorg/upload-release-assets@sa @@ -258,9 +287,9 @@ jobs: - name: Set up Python for twine # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.11" - name: Publish wheel # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify @@ -278,12 +307,13 @@ jobs: twine upload --skip-existing dist/*manylinux* make-wheel-linux-arm64: - name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} + name: wheel-linux-arm64-${{ matrix.python-version }}-arm64-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" + - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. - cp36-cp36m @@ -291,12 +321,31 @@ jobs: - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 + - cp312-cp312 + exclude: + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38 + - os: "ubuntu-20.04" + python-version: cp39-cp39 + - os: "ubuntu-20.04" + python-version: cp310-cp310 + - os: "ubuntu-20.04" + python-version: cp311-cp311 + - os: "ubuntu-20.04" + python-version: cp312-cp312 fail-fast: false steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Remove tag_build from setup.cfg # sqlalchemy has `tag_build` set to `dev` in setup.cfg. We need to remove it before creating the weel @@ -319,16 +368,13 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2014 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2014_aarch64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_aarch64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. Eg cp37-cp37mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Check created wheel # check that the wheel is compatible with the current installation. 
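The py27 check above amounts to installing the freshly built wheel inside the manylinux1 image and confirming that the C extensions were actually compiled in; a minimal local sketch of the same assertion, assuming a SQLAlchemy 1.4 wheel is installed in the current environment::

    # mirrors the check the workflow runs inside the manylinux container above
    from sqlalchemy.util import has_compiled_ext

    # True only if the cextension modules were built and import cleanly
    assert has_compiled_ext(), "wheel was built without the C extensions"
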
@@ -355,9 +401,9 @@ jobs: - name: Set up Python for twine # Setup python after creating the wheel, otherwise LD_LIBRARY_PATH gets set and it will break wheel generation # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.11" - name: Publish wheel # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 2a04a1f0485..7634e68d105 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -12,33 +12,36 @@ env: # global env to all steps TOX_WORKERS: -n2 +permissions: + contents: read + jobs: run-test-amd64: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + name: test-amd64-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - - "2.7" - - "3.10" + - "3.11" build-type: - "cext" - "nocext" architecture: - x64 + # abort all jobs as soon as one fails fail-fast: true # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -59,19 +62,19 @@ jobs: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - - "3.10" + - "3.11" fail-fast: false # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -92,19 +95,19 @@ jobs: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - - "3.10" + - "3.11" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -120,8 +123,8 @@ jobs: # Arm emulation is quite slow (~20min) so for now just run it when merging to main # run-test-arm64: - # name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest - # runs-on: ubuntu-latest + # name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-22.04 + # runs-on: ubuntu-22.04 # strategy: # matrix: # python-version: @@ -133,7 +136,7 @@ jobs: # steps: # - name: Checkout repo - # uses: actions/checkout@v2 + # uses: actions/checkout@v3 # - name: Set up emulation # run: | diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 5675ac6ec5d..0693a53e3e5 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -16,25 +16,32 @@ env: # global env to all steps TOX_WORKERS: -n2 +permissions: + contents: read + jobs: run-test: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + name: test-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-20.04" + - "ubuntu-22.04" - "windows-latest" - - "macos-latest" + - "macos-13" python-version: - - "2.7" - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" - - "pypy-3.7" + - "3.11" + - "3.12" + # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 + # which also seems to be in 3.9 + # - "pypy-3.9" build-type: - "cext" - "nocext" @@ -44,38 +51,52 @@ jobs: include: # autocommit tests fail on the ci for some reason - - python-version: "pypy-3.7" - pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" + # - python-version: "pypy-3.9" + # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" # add aiosqlite on linux - - os: "ubuntu-latest" + - os: "ubuntu-22.04" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: - # c-extensions fail to build on windows for python 2.7 - - os: "windows-latest" - python-version: "2.7" - build-type: "cext" # linux and osx do not have x86 python - - os: "ubuntu-latest" + - os: "ubuntu-22.04" architecture: x86 - - os: "macos-latest" + - os: "ubuntu-20.04" architecture: x86 - # pypy does not have cext - - python-version: "pypy-3.7" - build-type: "cext" - - os: "windows-latest" - python-version: "pypy-3.7" + - os: "macos-13" architecture: x86 + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" + python-version: "3.6" + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports + - os: "ubuntu-20.04" + python-version: "3.7" + - os: "ubuntu-20.04" + python-version: "3.8" + - os: "ubuntu-20.04" + python-version: "3.9" + - os: "ubuntu-20.04" + python-version: "3.10" + - os: "ubuntu-20.04" + python-version: "3.11" + - os: "ubuntu-20.04" + python-version: "3.12" + # pypy does not have cext + # - python-version: "pypy-3.9" + # build-type: "cext" + # - os: "windows-latest" + # python-version: "pypy-3.9" + # architecture: x86 fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -89,26 +110,79 @@ jobs: - name: Run tests run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + run-test-py27: + name: py27-${{ matrix.build-type }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - "ubuntu-22.04" + python-version: + - cp27-cp27m + - cp27-cp27mu + build-type: + - "cext" + - "nocext" + + fail-fast: false + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Run tests + uses: docker://quay.io/pypa/manylinux1_x86_64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + sed -i 's/greenlet/greenlet<2,/g' setup.cfg && + python --version && + python -m pip install --upgrade pip && + pip install --upgrade tox setuptools && + pip list && + tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + " + run-test-arm64: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest - runs-on: ubuntu-latest + name: arm64-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} strategy: matrix: + os: + - "ubuntu-22.04" python-version: - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 + - cp312-cp312 build-type: - "cext" - "nocext" + exclude: + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38m + - os: "ubuntu-20.04" + python-version: cp39-cp39m + - os: "ubuntu-20.04" + python-version: cp310-cp310m + - os: "ubuntu-20.04" + python-version: cp311-cp311m fail-fast: false steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up emulation run: | @@ -128,29 +202,30 @@ jobs: " run-mypy: - name: mypy-${{ matrix.python-version }} + name: mypy-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - - "3.6" + # ubuntu-22.04 does not have: py27, py36. Mypy no longer supports it - "3.7" - "3.8" - "3.9" - "3.10" - + - "3.11" + - "3.12" fail-fast: false # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -165,25 +240,24 @@ jobs: run: tox -e mypy ${{ matrix.pytest-args }} run-pep8: - name: pep8-${{ matrix.python-version }} + name: pep8-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: - # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - - "3.9" + - "3.11" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} diff --git a/.gitignore b/.gitignore index c566ded772b..329aa3577a8 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,9 @@ test/test_schema.db /.ipynb_checkpoints/ *.ipynb /querytest.db -/.mypy_cache /.pytest_cache /db_idents.txt +.DS_Store +.vs +# items that only belong in the 2.0 branch +/lib/sqlalchemy/cyextension diff --git a/.gitreview b/.gitreview index 01d8b1770f7..b51e3f2f858 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,9 @@ [gerrit] -host=gerrit.sqlalchemy.org +host=ssh.gerrit.sqlalchemy.org project=sqlalchemy/sqlalchemy -defaultbranch=main +defaultbranch=rel_1_4 + +# non-standard config, used by publishthing +httphost=gerrit.sqlalchemy.org + + diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ae35977b9d0..8da99d2d387 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,28 +5,36 @@ repos: rev: 21.5b1 hooks: - id: black + additional_dependencies: + - click < 8.1 - repo: https://github.com/sqlalchemyorg/zimports - rev: v0.4.0 + rev: v0.6.0 hooks: - id: zimports - args: - - --keep-unused-type-checking - repo: https://github.com/pycqa/flake8 - rev: 3.9.2 + rev: 6.1.0 hooks: - id: flake8 additional_dependencies: - flake8-import-order + - flake8-import-single==0.1.5 - flake8-builtins - - flake8-docstrings>=1.3.1 + - flake8-future-annotations>=0.0.5 + - flake8-docstrings>=1.6.0 + - flake8-unused-arguments - flake8-rst-docstrings - # flake8-rst-docstrings depdendency, leaving it here + # flake8-rst-docstrings dependency, leaving it here # in case it requires a version pin - pydocstyle - pygments - - +- repo: local + hooks: + - id: black-docs + name: Format docs code block with black + entry: python tools/format_docs_code.py --report-doctest -f + language: system + types: [rst] diff --git a/LICENSE b/LICENSE index 0d9fb6dc4b1..dfe1a4d815b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2005-2021 SQLAlchemy authors and contributors . +Copyright 2005-2025 SQLAlchemy authors and contributors . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/build/Makefile b/doc/build/Makefile index 09d8b29da15..e9684a20738 100644 --- a/doc/build/Makefile +++ b/doc/build/Makefile @@ -4,6 +4,7 @@ # You can set these variables from the command line. SPHINXOPTS = -T -j auto SPHINXBUILD = sphinx-build +AUTOBUILD = sphinx-autobuild --port 8080 --watch ../../lib PAPER = BUILDDIR = output @@ -14,11 +15,12 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext +.PHONY: help clean html autobuild dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" + @echo " autobuild autobuild and run a webserver" @echo " gettext to make PO message catalogs" @echo " dist-html same as html, but places files in /doc" @echo " dirhtml to make HTML files named index.html in directories" @@ -45,6 +47,9 @@ html: @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." +autobuild: + $(AUTOBUILD) $(ALLSPHINXOPTS) $(BUILDDIR)/html + gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo diff --git a/doc/build/changelog/changelog_02.rst b/doc/build/changelog/changelog_02.rst index 69805d60980..3d40a79a32a 100644 --- a/doc/build/changelog/changelog_02.rst +++ b/doc/build/changelog/changelog_02.rst @@ -1057,7 +1057,11 @@ :tickets: create_engine now takes only RFC-1738-style strings: - driver://user:password@host:port/database + ``driver://user:password@host:port/database`` + + **update** this format is generally but not exactly RFC-1738, + including that underscores, not dashes or periods, are accepted in the + "scheme" portion. .. change:: :tags: diff --git a/doc/build/changelog/changelog_04.rst b/doc/build/changelog/changelog_04.rst index 9261c1262bc..10e632c93cf 100644 --- a/doc/build/changelog/changelog_04.rst +++ b/doc/build/changelog/changelog_04.rst @@ -540,9 +540,7 @@ to work for subclasses, if they are present, for example:: - sess.query(Company).options( - eagerload_all( - )) + sess.query(Company).options(eagerload_all()) to load Company objects, their employees, and the 'machines' collection of employees who happen to be diff --git a/doc/build/changelog/changelog_06.rst b/doc/build/changelog/changelog_06.rst index cd3b32d95b7..739df36b230 100644 --- a/doc/build/changelog/changelog_06.rst +++ b/doc/build/changelog/changelog_06.rst @@ -2,6 +2,7 @@ 0.6 Changelog ============= + .. changelog:: :version: 0.6.9 :released: Sat May 05 2012 diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index 4b6b42ec731..3bf8f67f207 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -7,6 +7,7 @@ .. include:: changelog_07.rst :start-line: 5 + .. changelog:: :version: 0.8.7 :released: July 22, 2014 @@ -969,7 +970,7 @@ del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) - upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') + upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed") .. change:: :tags: bug, orm @@ -2078,8 +2079,7 @@ to the original, older use case for :meth:`_query.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: - session.query(User.name).\ - select_from(user_table.select().where(user_table.c.id > 5)) + session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: @@ -2280,11 +2280,11 @@ original. 
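As an aside to the ``create_engine`` entry above: the RFC-1738-style string can also be parsed up front with ``make_url`` to inspect its parts before building an engine. The DSN below is purely illustrative, and constructing the engine assumes the named driver (psycopg2 here) is installed::

    from sqlalchemy import create_engine
    from sqlalchemy.engine import make_url

    # hypothetical DSN; the "scheme" portion is dialect[+driver]
    url = make_url("postgresql+psycopg2://scott:tiger@localhost:5432/test")
    print(url.drivername, url.username, url.host, url.database)

    engine = create_engine(url)  # requires the psycopg2 driver to be importable
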
Allows symmetry when using :class:`_engine.Engine` and :class:`_engine.Connection` objects as context managers:: - with conn.connect() as c: # leaves the Connection open - c.execute("...") + with conn.connect() as c: # leaves the Connection open + c.execute("...") with engine.connect() as c: # closes the Connection - c.execute("...") + c.execute("...") .. change:: :tags: engine diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 7ee874e0261..d00e043326e 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -1708,15 +1708,15 @@ ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection, after construction:: - idx = Index('a', 'b') - idx.kwargs['mysql_someargument'] = True + idx = Index("a", "b") + idx.kwargs["mysql_someargument"] = True To suit the use case of allowing custom arguments at construction time, the :meth:`.DialectKWArgs.argument_for` method now allows this registration:: - Index.argument_for('mysql', 'someargument', False) + Index.argument_for("mysql", "someargument", False) - idx = Index('a', 'b', mysql_someargument=True) + idx = Index("a", "b", mysql_someargument=True) .. seealso:: @@ -1920,7 +1920,7 @@ .. change:: :tags: feature, sql - Added :paramref:`.MetaData.reflect.**dialect_kwargs` + Added :paramref:`.MetaData.reflect.dialect_kwargs` to support dialect-level reflection options for all :class:`_schema.Table` objects reflected. @@ -2647,11 +2647,11 @@ :tags: bug, engine :tickets: 2873 - The :func:`_sa.create_engine` routine and the related - :func:`.make_url` function no longer considers the ``+`` sign - to be a space within the password field. The parsing has been - adjusted to match RFC 1738 exactly, in that both ``username`` - and ``password`` expect only ``:``, ``@``, and ``/`` to be + The :func:`_sa.create_engine` routine and the related :func:`.make_url` + function no longer considers the ``+`` sign to be a space within the + password field. The parsing in this area has been adjusted to match + more closely to how RFC 1738 handles these tokens, in that both + ``username`` and ``password`` expect only ``:``, ``@``, and ``/`` to be encoded. .. seealso:: diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 4d3b84d3b40..addf624de72 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -811,7 +811,7 @@ .. seealso:: - :ref:`updates_order_parameters` + :ref:`tutorial_parameter_ordered_updates` .. change:: :tags: bug, orm diff --git a/doc/build/changelog/changelog_11.rst b/doc/build/changelog/changelog_11.rst index 1988b69b307..c84effc3905 100644 --- a/doc/build/changelog/changelog_11.rst +++ b/doc/build/changelog/changelog_11.rst @@ -20,7 +20,6 @@ :start-line: 5 - .. changelog:: :version: 1.1.18 :released: March 6, 2018 @@ -1076,7 +1075,7 @@ :tickets: 3842 Fixed bug where newly added warning for primary key on insert w/o - autoincrement setting (see :ref:`change_3216`) would fail to emit + autoincrement setting (see :ticket:`3216`) would fail to emit correctly when invoked upon a lower-case :func:`.table` construct. .. 
change:: 3852 diff --git a/doc/build/changelog/changelog_12.rst b/doc/build/changelog/changelog_12.rst index 6dc7d7f8879..b5d331e717b 100644 --- a/doc/build/changelog/changelog_12.rst +++ b/doc/build/changelog/changelog_12.rst @@ -453,7 +453,7 @@ :tickets: 4352 The column conflict resolution technique discussed at - :ref:`declarative_column_conflicts` is now functional for a :class:`_schema.Column` + :ref:`orm_inheritance_column_conflicts` is now functional for a :class:`_schema.Column` that is also a primary key column. Previously, a check for primary key columns declared on a single-inheritance subclass would occur before the column copy were allowed to pass. diff --git a/doc/build/changelog/changelog_13.rst b/doc/build/changelog/changelog_13.rst index 96002c19ee5..1e14314d089 100644 --- a/doc/build/changelog/changelog_13.rst +++ b/doc/build/changelog/changelog_13.rst @@ -950,8 +950,8 @@ :tags: usecase, postgresql :tickets: 5265 - Added support for columns or type :class:`.ARRAY` of :class:`.Enum`, - :class:`.JSON` or :class:`_postgresql.JSONB` in PostgreSQL. + Added support for columns or type :class:`_sqltypes.ARRAY` of :class:`.Enum`, + :class:`_postgresql.JSON` or :class:`_postgresql.JSONB` in PostgreSQL. Previously a workaround was required in these use cases. @@ -1002,7 +1002,7 @@ :tickets: 5266 Raise an explicit :class:`.exc.CompileError` when adding a table with a - column of type :class:`.ARRAY` of :class:`.Enum` configured with + column of type :class:`_sqltypes.ARRAY` of :class:`.Enum` configured with :paramref:`.Enum.native_enum` set to ``False`` when :paramref:`.Enum.create_constraint` is not set to ``False`` @@ -1966,13 +1966,13 @@ :class:`_types.JSON` - :meth:`.JSON.Comparator.as_string` + :meth:`_sqltypes.JSON.Comparator.as_string` - :meth:`.JSON.Comparator.as_boolean` + :meth:`_sqltypes.JSON.Comparator.as_boolean` - :meth:`.JSON.Comparator.as_float` + :meth:`_sqltypes.JSON.Comparator.as_float` - :meth:`.JSON.Comparator.as_integer` + :meth:`_sqltypes.JSON.Comparator.as_integer` .. change:: :tags: usecase, oracle @@ -2681,7 +2681,7 @@ Fixed bug where the :attr:`_orm.Mapper.all_orm_descriptors` accessor would return an entry for the :class:`_orm.Mapper` itself under the declarative - ``__mapper___`` key, when this is not a descriptor. The ``.is_attribute`` + ``__mapper__`` key, when this is not a descriptor. The ``.is_attribute`` flag that's present on all :class:`.InspectionAttr` objects is now consulted, which has also been modified to be ``True`` for an association proxy, as it was erroneously set to False for this object. @@ -3336,7 +3336,7 @@ :tags: change, orm :tickets: 4412 - Added a new function :func:`.close_all_sessions` which takes + Added a new function :func:`_orm.close_all_sessions` which takes over the task of the :meth:`.Session.close_all` method, which is now deprecated as this is confusing as a classmethod. Pull request courtesy Augustin Trancart. diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index a99a2d13f8c..0b4de1a55f3 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,9 +13,3092 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.55 + :include_notes_from: unreleased_14 + +.. changelog:: + :version: 1.4.54 + :released: September 5, 2024 + + .. 
change:: + :tags: bug, regression, orm + :tickets: 11728 + :versions: 2.0.33 + + Fixed regression from 1.3 where the column key used for a hybrid property + might be populated with that of the underlying column that it returns, for + a property that returns an ORM mapped column directly, rather than the key + used by the hybrid property itself. + + .. change:: + :tags: change, general + :tickets: 11818 + :versions: 2.0.33 1.4.54 + + The pin for ``setuptools<69.3`` in ``pyproject.toml`` has been removed. + This pin was to prevent a sudden change in setuptools to use :pep:`625` + from taking place, which would change the file name of SQLAlchemy's source + distribution on pypi to be an all lower case name, which is likely to cause + problems with various build environments that expected the previous naming + style. However, the presence of this pin is holding back environments that + otherwise want to use a newer setuptools, so we've decided to move forward + with this change, with the assumption that build environments will have + largely accommodated the setuptools change by now. + + This change was first released in version 2.0.33 however is being + backported to 1.4.54 to support ongoing releases. + + + .. change:: + :tags: bug, postgresql + :tickets: 11819 + :versions: 2.0.33, 1.4.54 + + Fixed critical issue in the asyncpg driver where a rollback or commit that + fails specifically for the ``MissingGreenlet`` condition or any other error + that is not raised by asyncpg itself would discard the asyncpg transaction + in any case, even though the transaction were still idle, leaving to a + server side condition with an idle transaction that then goes back into the + connection pool. The flags for "transaction closed" are now not reset for + errors that are raised outside of asyncpg itself. When asyncpg itself + raises an error for ``.commit()`` or ``.rollback()``, asyncpg does then + discard of this transaction. + + .. change:: + :tags: change, general + + The setuptools "test" command is removed from the 1.4 series as modern + versions of setuptools actively refuse to accommodate this extension being + present. This change was already part of the 2.0 series. To run the + test suite use the ``tox`` command. + +.. changelog:: + :version: 1.4.53 + :released: July 29, 2024 + + .. change:: + :tags: bug, general + :tickets: 11417 + :versions: 2.0.31 + + Set up full Python 3.13 support to the extent currently possible, repairing + issues within internal language helpers as well as the serializer extension + module. + + For version 1.4, this also modernizes the "extras" names in setup.cfg + to use dashes and not underscores for two-word names. Underscore names + are still present to accommodate potential compatibility issues. + + .. change:: + :tags: bug, sql + :tickets: 11471 + :versions: 2.0.31 + + Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method + of the :class:`.TextualSelect` construct would not set a correct cache key + which distinguished between different CTE expressions. + + .. change:: + :tags: bug, engine + :tickets: 11499 + + Adjustments to the C extensions, which are specific to the SQLAlchemy 1.x + series, to work under Python 3.13. Pull request courtesy Ben Beasley. + + .. change:: + :tags: bug, mssql + :tickets: 11514 + :versions: 2.0.32 + + Fixed issue where SQL Server drivers don't support bound parameters when + rendering the "frame specification" for a window function, e.g. "ROWS + BETWEEN", etc. + + + .. 
change:: + :tags: bug, sql + :tickets: 11544 + :versions: 2.0 + + Fixed caching issue where the + :paramref:`_sql.Select.with_for_update.key_share` element of + :meth:`_sql.Select.with_for_update` was not considered as part of the cache + key, leading to incorrect caching if different variations of this parameter + were used with an otherwise identical statement. + + .. change:: + :tags: bug, orm, regression + :tickets: 11562 + :versions: 2.0.32 + + Fixed regression going back to 1.4 where accessing a collection using the + "dynamic" strategy on a transient object and attempting to query would + raise an internal error rather than the expected :class:`.NoResultFound` + that occurred in 1.3. + + .. change:: + :tags: bug, reflection, sqlite + :tickets: 11582 + :versions: 2.0.32 + + Fixed reflection of computed column in SQLite to properly account + for complex expressions. + + .. change:: + :tags: usecase, engine + :versions: 2.0.31 + + Modified the internal representation used for adapting asyncio calls to + greenlets to allow for duck-typed compatibility with third party libraries + that implement SQLAlchemy's "greenlet-to-asyncio" pattern directly. + Running code within a greenlet that features the attribute + ``__sqlalchemy_greenlet_provider__ = True`` will allow calls to + :func:`sqlalchemy.util.await_only` directly. + + + .. change:: + :tags: bug, mypy + :versions: 2.0.32 + + The deprecated mypy plugin is no longer fully functional with the latest + series of mypy 1.11.0, as changes in the mypy interpreter are no longer + compatible with the approach used by the plugin. If code is dependent on + the mypy plugin with sqlalchemy2-stubs, it's recommended to pin mypy to be + below the 1.11.0 series. Seek upgrading to the 2.0 series of SQLAlchemy + and migrating to the modern type annotations. + + .. seealso:: + + :ref:`mypy_toplevel` + +.. changelog:: + :version: 1.4.52 + :released: March 4, 2024 + + .. change:: + :tags: bug, orm + :tickets: 10365, 11412 + + Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply + itself to a :meth:`_sql.Select.join` where the ON clause were given as a + plain SQL comparison, rather than as a relationship target or similar. + + This is a backport of the same issue fixed in version 2.0 for 2.0.22. + + **update** - this was found to also fix an issue where + single-inheritance criteria would not be correctly applied to a + subclass entity that only appeared in the ``select_from()`` list, + see :ticket:`11412` + +.. changelog:: + :version: 1.4.51 + :released: January 2, 2024 + + .. change:: + :tags: bug, mysql + :tickets: 10650 + :versions: 2.0.24 + + Fixed regression introduced by the fix in ticket :ticket:`10492` when using + pool pre-ping with PyMySQL version older than 1.0. + + .. change:: + :tags: bug, orm + :tickets: 10782 + :versions: 2.0.24, 1.4.51 + + Improved a fix first implemented for :ticket:`3208` released in version + 0.9.8, where the registry of classes used internally by declarative could + be subject to a race condition in the case where individual mapped classes + are being garbage collected at the same time while new mapped classes are + being constructed, as can happen in some test suite configurations or + dynamic class creation environments. In addition to the weakref check + already added, the list of items being iterated is also copied first to + avoid "list changed while iterating" errors. Pull request courtesy Yilei + Yang. + + + .. 
change:: + :tags: bug, asyncio + :tickets: 10813 + :versions: 1.4.51, 2.0.25 + + Fixed critical issue in asyncio version of the connection pool where + calling :meth:`_asyncio.AsyncEngine.dispose` would produce a new connection + pool that did not fully re-establish the use of asyncio-compatible mutexes, + leading to the use of a plain ``threading.Lock()`` which would then cause + deadlocks in an asyncio context when using concurrency features like + ``asyncio.gather()``. + +.. changelog:: + :version: 1.4.50 + :released: October 29, 2023 + + .. change:: + :tags: bug, sql + :tickets: 10142 + :versions: 2.0.23 + + Fixed issue where using the same bound parameter more than once with + ``literal_execute=True`` in some combinations with other literal rendering + parameters would cause the wrong values to render due to an iteration + issue. + + .. change:: + :tags: mysql, usecase + :versions: 2.0.20 + + Updated aiomysql dialect since the dialect appears to be maintained again. + Re-added to the ci testing using version 0.2.0. + + .. change:: + :tags: bug, orm + :tickets: 10223 + :versions: 2.0.20 + + Fixed fundamental issue which prevented some forms of ORM "annotations" + from taking place for subqueries which made use of :meth:`_sql.Select.join` + against a relationship target. These annotations are used whenever a + subquery is used in special situations such as within + :meth:`_orm.PropComparator.and_` and other ORM-specific scenarios. + + .. change:: + :tags: bug, sql + :tickets: 10213 + :versions: 2.0.20 + + Fixed issue where unpickling of a :class:`_schema.Column` or other + :class:`_sql.ColumnElement` would fail to restore the correct "comparator" + object, which is used to generate SQL expressions specific to the type + object. + + .. change:: + :tags: bug, mysql + :tickets: 10492 + :versions: 2.0.23 + + Repaired a new incompatibility in the MySQL "pre-ping" routine where the + ``False`` argument passed to ``connection.ping()``, which is intended to + disable an unwanted "automatic reconnect" feature, is being deprecated in + MySQL drivers and backends, and is producing warnings for some versions of + MySQL's native client drivers. It's removed for mysqlclient, whereas for + PyMySQL and drivers based on PyMySQL, the parameter will be deprecated and + removed at some point, so API introspection is used to future proof against + these various stages of removal. + + .. change:: + :tags: schema, bug + :tickets: 10207 + :versions: 2.0.21 + + Modified the rendering of the Oracle only :paramref:`.Identity.order` + parameter that's part of both :class:`.Sequence` and :class:`.Identity` to + only take place for the Oracle backend, and not other backends such as that + of PostgreSQL. A future release will rename the + :paramref:`.Identity.order`, :paramref:`.Sequence.order` and + :paramref:`.Identity.on_null` parameters to Oracle-specific names, + deprecating the old names, these parameters only apply to Oracle. + + .. change:: + :tags: bug, mssql, reflection + :tickets: 10504 + :versions: 2.0.23 + + Fixed issue where identity column reflection would fail + for a bigint column with a large identity start value + (more than 18 digits). + +.. changelog:: + :version: 1.4.49 + :released: July 5, 2023 + + .. change:: + :tags: bug, sql + :tickets: 10042 + :versions: 2.0.18 + + Fixed issue where the :meth:`_sql.ColumnOperators.regexp_match` + when using "flags" would not produce a "stable" cache key, that + is, the cache key would keep changing each time causing cache pollution. 
+ The same issue existed for :meth:`_sql.ColumnOperators.regexp_replace` + with both the flags and the actual replacement expression. + The flags are now represented as fixed modifier strings rendered as + safestrings rather than bound parameters, and the replacement + expression is established within the primary portion of the "binary" + element so that it generates an appropriate cache key. + + Note that as part of this change, the + :paramref:`_sql.ColumnOperators.regexp_match.flags` and + :paramref:`_sql.ColumnOperators.regexp_replace.flags` have been modified to + render as literal strings only, whereas previously they were rendered as + full SQL expressions, typically bound parameters. These parameters should + always be passed as plain Python strings and not as SQL expression + constructs; it's not expected that SQL expression constructs were used in + practice for this parameter, so this is a backwards-incompatible change. + + The change also modifies the internal structure of the expression + generated, for :meth:`_sql.ColumnOperators.regexp_replace` with or without + flags, and for :meth:`_sql.ColumnOperators.regexp_match` with flags. Third + party dialects which may have implemented regexp implementations of their + own (no such dialects could be located in a search, so impact is expected + to be low) would need to adjust the traversal of the structure to + accommodate. + + + .. change:: + :tags: bug, sql + :versions: 2.0.18 + + Fixed issue in mostly-internal :class:`.CacheKey` construct where the + ``__ne__()`` operator were not properly implemented, leading to nonsensical + results when comparing :class:`.CacheKey` instances to each other. + + + + + .. change:: + :tags: bug, extensions + :versions: 2.0.17 + + Fixed issue in mypy plugin for use with mypy 1.4. + + .. change:: + :tags: platform, usecase + + Compatibility improvements to work fully with Python 3.12 + +.. changelog:: + :version: 1.4.48 + :released: April 30, 2023 + + .. change:: + :tags: bug, orm + :tickets: 9728 + :versions: 2.0.12 + + Fixed critical caching issue where the combination of + :func:`_orm.aliased()` and :func:`_hybrid.hybrid_property` expression + compositions would cause a cache key mismatch, leading to cache keys that + held onto the actual :func:`_orm.aliased` object while also not matching + that of equivalent constructs, filling up the cache. + + .. change:: + :tags: bug, orm + :tickets: 9634 + :versions: 2.0.10 + + Fixed bug where various ORM-specific getters such as + :attr:`.ORMExecuteState.is_column_load`, + :attr:`.ORMExecuteState.is_relationship_load`, + :attr:`.ORMExecuteState.loader_strategy_path` etc. would throw an + ``AttributeError`` if the SQL statement itself were a "compound select" + such as a UNION. + + .. change:: + :tags: bug, orm + :tickets: 9590 + :versions: 2.0.9 + + Fixed endless loop which could occur when using "relationship to aliased + class" feature and also indicating a recursive eager loader such as + ``lazy="selectinload"`` in the loader, in combination with another eager + loader on the opposite side. The check for cycles has been fixed to include + aliased class relationships. + +.. changelog:: + :version: 1.4.47 + :released: March 18, 2023 + + .. 
change:: + :tags: bug, sql + :tickets: 9075 + :versions: 2.0.0rc3 + + Fixed bug / regression where using :func:`.bindparam()` with the same + name as a column in the :meth:`.Update.values` method of + :class:`.Update`, as well as the :meth:`_dml.Insert.values` method of + :class:`_dml.Insert` in 2.0 only, would in some cases silently fail to + honor the SQL expression in which the parameter were presented, + replacing the expression with a new parameter of the same name and + discarding any other elements of the SQL expression, such as SQL + functions, etc. The specific case would be statements that were + constructed against ORM entities rather than plain :class:`.Table` + instances, but would occur if the statement were invoked with a + :class:`.Session` or a :class:`_engine.Connection`. + + :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is + backported to 1.4. + + .. change:: + :tags: bug, oracle + :tickets: 5047 + + Added :class:`_oracle.ROWID` to reflected types as this type may be used in + a "CREATE TABLE" statement. + + .. change:: + :tags: bug, sql + :tickets: 7664 + + Fixed stringify for a the :class:`.CreateSchema` and :class:`.DropSchema` + DDL constructs, which would fail with an ``AttributeError`` when + stringified without a dialect. + + + .. change:: + :tags: usecase, mysql + :tickets: 9047 + :versions: 2.0.0 + + Added support to MySQL index reflection to correctly reflect the + ``mysql_length`` dictionary, which previously was being ignored. + + .. change:: + :tags: bug, postgresql + :tickets: 9048 + :versions: 2.0.0 + + Added support to the asyncpg dialect to return the ``cursor.rowcount`` + value for SELECT statements when available. While this is not a typical use + for ``cursor.rowcount``, the other PostgreSQL dialects generally provide + this value. Pull request courtesy Michael Gorven. + + .. change:: + :tags: bug, mssql + :tickets: 9133 + + Fixed bug where a schema name given with brackets, but no dots inside the + name, for parameters such as :paramref:`_schema.Table.schema` would not be + interpreted within the context of the SQL Server dialect's documented + behavior of interpreting explicit brackets as token delimiters, first added + in 1.2 for #2626, when referring to the schema name in reflection + operations. The original assumption for #2626's behavior was that the + special interpretation of brackets was only significant if dots were + present, however in practice, the brackets are not included as part of the + identifier name for all SQL rendering operations since these are not valid + characters within regular or delimited identifiers. Pull request courtesy + Shan. + + + .. change:: + :tags: bug, mypy + :versions: 2.0.0rc3 + + Adjustments made to the mypy plugin to accommodate for some potential + changes being made for issue #236 sqlalchemy2-stubs when using SQLAlchemy + 1.4. These changes are being kept in sync within SQLAlchemy 2.0. + The changes are also backwards compatible with older versions of + sqlalchemy2-stubs. + + + .. change:: + :tags: bug, mypy + :tickets: 9102 + :versions: 2.0.0rc3 + + Fixed crash in mypy plugin which could occur on both 1.4 and 2.0 versions + if a decorator for the :func:`_orm.registry.mapped` decorator were used + that was referenced in an expression with more than two components (e.g. + ``@Backend.mapper_registry.mapped``). This scenario is now ignored; when + using the plugin, the decorator expression needs to be two components (i.e. + ``@reg.mapped``). + + .. 
change:: + :tags: bug, sql + :tickets: 9506 + + Fixed critical SQL caching issue where use of the + :meth:`_sql.Operators.op` custom operator function would not produce an appropriate + cache key, leading to reduce the effectiveness of the SQL cache. + + +.. changelog:: + :version: 1.4.46 + :released: January 3, 2023 + + .. change:: + :tags: bug, engine + :tickets: 8974 + :versions: 2.0.0rc1 + + Fixed a long-standing race condition in the connection pool which could + occur under eventlet/gevent monkeypatching schemes in conjunction with the + use of eventlet/gevent ``Timeout`` conditions, where a connection pool + checkout that's interrupted due to the timeout would fail to clean up the + failed state, causing the underlying connection record and sometimes the + database connection itself to "leak", leaving the pool in an invalid state + with unreachable entries. This issue was first identified and fixed in + SQLAlchemy 1.2 for :ticket:`4225`, however the failure modes detected in + that fix failed to accommodate for ``BaseException``, rather than + ``Exception``, which prevented eventlet/gevent ``Timeout`` from being + caught. In addition, a block within initial pool connect has also been + identified and hardened with a ``BaseException`` -> "clean failed connect" + block to accommodate for the same condition in this location. + Big thanks to Github user @niklaus for their tenacious efforts in + identifying and describing this intricate issue. + + .. change:: + :tags: bug, postgresql + :tickets: 9023 + :versions: 2.0.0rc1 + + Fixed bug where the PostgreSQL + :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` parameter + would accept an :class:`.Index` object, however would not expand this index + out into its individual index expressions, instead rendering its name in an + ON CONFLICT ON CONSTRAINT clause, which is not accepted by PostgreSQL; the + "constraint name" form only accepts unique or exclude constraint names. The + parameter continues to accept the index but now expands it out into its + component expressions for the render. + + .. change:: + :tags: bug, general + :tickets: 8995 + :versions: 2.0.0rc1 + + Fixed regression where the base compat module was calling upon + ``platform.architecture()`` in order to detect some system properties, + which results in an over-broad system call against the system-level + ``file`` call that is unavailable under some circumstances, including + within some secure environment configurations. + + .. change:: + :tags: usecase, postgresql + :tickets: 8393 + :versions: 2.0.0b5 + + Added the PostgreSQL type ``MACADDR8``. + Pull request courtesy of Asim Farooq. + + .. change:: + :tags: bug, sqlite + :tickets: 8969 + :versions: 2.0.0b5 + + Fixed regression caused by new support for reflection of partial indexes on + SQLite added in 1.4.45 for :ticket:`8804`, where the ``index_list`` pragma + command in very old versions of SQLite (possibly prior to 3.8.9) does not + return the current expected number of columns, leading to exceptions raised + when reflecting tables and indexes. + + .. change:: + :tags: bug, tests + :versions: 2.0.0rc1 + + Fixed issue in tox.ini file where changes in the tox 4.0 series to the + format of "passenv" caused tox to not function correctly, in particular + raising an error as of tox 4.0.6. + + .. 
change:: + :tags: bug, tests + :tickets: 9002 + :versions: 2.0.0rc1 + + Added new exclusion rule for third party dialects called + ``unusual_column_name_characters``, which can be "closed" for third party + dialects that don't support column names with unusual characters such as + dots, slashes, or percent signs in them, even if the name is properly + quoted. + + + .. change:: + :tags: bug, sql + :tickets: 9009 + :versions: 2.0.0b5 + + Added parameter + :paramref:`.FunctionElement.column_valued.joins_implicitly`, which is + useful in preventing the "cartesian product" warning when making use of + table-valued or column-valued functions. This parameter was already + introduced for :meth:`.FunctionElement.table_valued` in :ticket:`7845`, + however it failed to be added for :meth:`.FunctionElement.column_valued` + as well. + + .. change:: + :tags: change, general + :tickets: 8983 + + A new deprecation "uber warning" is now emitted at runtime the + first time any SQLAlchemy 2.0 deprecation warning would normally be + emitted, but the ``SQLALCHEMY_WARN_20`` environment variable is not set. + The warning emits only once at most, before setting a boolean to prevent + it from emitting a second time. + + This deprecation warning intends to notify users who may not have set an + appropriate constraint in their requirements files to block against a + surprise SQLAlchemy 2.0 upgrade and also alert that the SQLAlchemy 2.0 + upgrade process is available, as the first full 2.0 release is expected + very soon. The deprecation warning can be silenced by setting the + environment variable ``SQLALCHEMY_SILENCE_UBER_WARNING`` to ``"1"``. + + .. seealso:: + + :ref:`migration_20_toplevel` + + .. change:: + :tags: bug, orm + :tickets: 9033 + :versions: 2.0.0rc1 + + Fixed issue in the internal SQL traversal for DML statements like + :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other + potential issues, a specific issue using lambda statements with the ORM + update/delete feature. + + .. change:: + :tags: bug, sql + :tickets: 8989 + :versions: 2.0.0b5 + + Fixed bug where SQL compilation would fail (assertion fail in 2.0, NoneType + error in 1.4) when using an expression whose type included + :meth:`_types.TypeEngine.bind_expression`, in the context of an "expanding" + (i.e. "IN") parameter in conjunction with the ``literal_binds`` compiler + parameter. + + .. change:: + :tags: bug, sql + :tickets: 9029 + :versions: 2.0.0rc1 + + Fixed issue in lambda SQL feature where the calculated type of a literal + value would not take into account the type coercion rules of the "compared + to type", leading to a lack of typing information for SQL expressions, such + as comparisons to :class:`_types.JSON` elements and similar. + +.. changelog:: + :version: 1.4.45 + :released: December 10, 2022 + + .. change:: + :tags: bug, orm + :tickets: 8862 + :versions: 2.0.0rc1 + + Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the + current loaded contents of relationship attributes that were indicated with + the :paramref:`_orm.relationship.viewonly` parameter, thus defeating + strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects + from caches and other similar techniques. 
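To make the merge-from-cache scenario above concrete, here is a minimal, self-contained sketch of the pattern; the mapped class and in-memory database are invented for illustration, and the viewonly-relationship details are omitted::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()


    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))


    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    # e.g. an object rehydrated from an external cache, detached from any Session
    cached_user = User(id=5, name="ed")

    with Session(engine) as session:
        merged = session.merge(cached_user)  # load=True is the default
        session.commit()
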
In a related change, fixed issue + where an object that contains a loaded relationship that was nonetheless + configured as ``lazy='raise'`` on the mapping would fail when passed to + :meth:`_orm.Session.merge`; checks for "raise" are now suspended within + the merge process assuming the :paramref:`_orm.Session.merge.load` + parameter remains at its default of ``True``. + + Overall, this is a behavioral adjustment to a change introduced in the 1.4 + series as of :ticket:`4994`, which took "merge" out of the set of cascades + applied by default to "viewonly" relationships. As "viewonly" relationships + aren't persisted under any circumstances, allowing their contents to + transfer during "merge" does not impact the persistence behavior of the + target object. This allows :meth:`_orm.Session.merge` to correctly suit one + of its use cases, that of adding objects to a :class:`.Session` that were + loaded elsewhere, often for the purposes of restoring from a cache. + + + .. change:: + :tags: bug, orm + :tickets: 8881 + :versions: 2.0.0rc1 + + Fixed issues in :func:`_orm.with_expression` where expressions that were + composed of columns that were referenced from the enclosing SELECT would + not render correct SQL in some contexts, in the case where the expression + had a label name that matched the attribute which used + :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had + no default expression. For the moment, if the :func:`_orm.query_expression` + does have a default expression, that label name is still used for that + default, and an additional label with the same name will continue to be + ignored. Overall, this case is pretty thorny so further adjustments might + be warranted. + + .. change:: + :tags: bug, sqlite + :tickets: 8866 + + Backported a fix for SQLite reflection of unique constraints in attached + schemas, released in 2.0 as a small part of :ticket:`4379`. Previously, + unique constraints in attached schemas would be ignored by SQLite + reflection. Pull request courtesy Michael Gorven. + + .. change:: + :tags: bug, asyncio + :tickets: 8952 + :versions: 2.0.0b5 + + Removed non-functional ``merge()`` method from + :class:`_asyncio.AsyncResult`. This method has never worked and was + included with :class:`_asyncio.AsyncResult` in error. + + .. change:: + :tags: bug, oracle + :tickets: 8708 + :versions: 2.0.0b4 + + Continued fixes for Oracle fix :ticket:`8708` released in 1.4.43 where + bound parameter names that start with underscores, which are disallowed by + Oracle, were still not being properly escaped in all circumstances. + + + .. change:: + :tags: bug, postgresql + :tickets: 8748 + :versions: 2.0.0rc1 + + Made an adjustment to how the PostgreSQL dialect considers column types + when it reflects columns from a table, to accommodate for alternative + backends which may return NULL from the PG ``format_type()`` function. + + .. change:: + :tags: usecase, sqlite + :tickets: 8903 + :versions: 2.0.0rc1 + + Added support for the SQLite backend to reflect the "DEFERRABLE" and + "INITIALLY" keywords which may be present on a foreign key construct. Pull + request courtesy Michael Gorven. + + .. change:: + :tags: usecase, sql + :tickets: 8800 + :versions: 2.0.0rc1 + + An informative re-raise is now thrown in the case where any "literal + bindparam" render operation fails, indicating the value itself and + the datatype in use, to assist in debugging when literal params + are being rendered in a statement. + + .. 
change:: + :tags: usecase, sqlite + :tickets: 8804 + :versions: 2.0.0rc1 + + Added support for reflection of expression-oriented WHERE criteria included + in indexes on the SQLite dialect, in a manner similar to that of the + PostgreSQL dialect. Pull request courtesy Tobias Pfeiffer. + + .. change:: + :tags: bug, sql + :tickets: 8827 + :versions: 2.0.0rc1 + + Fixed a series of issues regarding the position and sometimes the identity + of rendered bound parameters, such as those used for SQLite, asyncpg, + MySQL, Oracle and others. Some compiled forms would not maintain the order + of parameters correctly, such as the PostgreSQL ``regexp_replace()`` + function, the "nesting" feature of the :class:`.CTE` construct first + introduced in :ticket:`4123`, and selectable tables formed by using the + :meth:`.FunctionElement.column_valued` method with Oracle. + + + .. change:: + :tags: bug, oracle + :tickets: 8945 + :versions: 2.0.0b5 + + Fixed issue in Oracle compiler where the syntax for + :meth:`.FunctionElement.column_valued` was incorrect, rendering the name + ``COLUMN_VALUE`` without qualifying the source table correctly. + + .. change:: + :tags: bug, engine + :tickets: 8963 + :versions: 2.0.0rc1 + + Fixed issue where :meth:`_engine.Result.freeze` method would not work for + textual SQL using either :func:`_sql.text` or + :meth:`_engine.Connection.exec_driver_sql`. + + +.. changelog:: + :version: 1.4.44 + :released: November 12, 2022 + + .. change:: + :tags: bug, sql + :tickets: 8790 + + Fixed critical memory issue identified in cache key generation, where for + very large and complex ORM statements that make use of lots of ORM aliases + with subqueries, cache key generation could produce excessively large keys + that were orders of magnitude bigger than the statement itself. Much thanks + to Rollo Konig Brock for their very patient, long term help in finally + identifying this issue. + + .. change:: + :tags: bug, postgresql, mssql + :tickets: 8770 + + For the PostgreSQL and SQL Server dialects only, adjusted the compiler so + that when rendering column expressions in the RETURNING clause, the "non + anon" label that's used in SELECT statements is suggested for SQL + expression elements that generate a label; the primary example is a SQL + function that may be emitting as part of the column's type, where the label + name should match the column's name by default. This restores a not-well + defined behavior that had changed in version 1.4.21 due to :ticket:`6718`, + :ticket:`6710`. The Oracle dialect has a different RETURNING implementation + and was not affected by this issue. Version 2.0 features an across the + board change for its widely expanded support of RETURNING on other + backends. + + + .. change:: + :tags: bug, oracle + + Fixed issue in the Oracle dialect where an INSERT statement that used + ``insert(some_table).values(...).returning(some_table)`` against a full + :class:`.Table` object at once would fail to execute, raising an exception. + + .. change:: + :tags: bug, tests + :tickets: 8793 + + Fixed issue where the ``--disable-asyncio`` parameter to the test suite + would fail to not actually run greenlet tests and would also not prevent + the suite from using a "wrapping" greenlet for the whole suite. This + parameter now ensures that no greenlet or asyncio use will occur within the + entire run when set. + + .. 
change:: + :tags: bug, tests + + Adjusted the test suite which tests the Mypy plugin to accommodate for + changes in Mypy 0.990 regarding how it handles message output, which affect + how sys.path is interpreted when determining if notes and errors should be + printed for particular files. The change broke the test suite as the files + within the test directory itself no longer produced messaging when run + under the mypy API. + +.. changelog:: + :version: 1.4.43 + :released: November 4, 2022 + + .. change:: + :tags: bug, orm + :tickets: 8738 + + Fixed issue in joined eager loading where an assertion fail would occur + with a particular combination of outer/inner joined eager loads, when + eager loading across three mappers where the middle mapper was + an inherited subclass mapper. + + + .. change:: + :tags: bug, oracle + :tickets: 8708 + + Fixed issue where bound parameter names, including those automatically + derived from similarly-named database columns, which contained characters + that normally require quoting with Oracle would not be escaped when using + "expanding parameters" with the Oracle dialect, causing execution errors. + The usual "quoting" for bound parameters used by the Oracle dialect is not + used with the "expanding parameters" architecture, so escaping for a large + range of characters is used instead, now using a list of characters/escapes + that are specific to Oracle. + + + + .. change:: + :tags: bug, orm + :tickets: 8721 + + Fixed bug involving :class:`.Select` constructs, where combinations of + :meth:`.Select.select_from` with :meth:`.Select.join`, as well as when + using :meth:`.Select.join_from`, would cause the + :func:`_orm.with_loader_criteria` feature as well as the IN criteria needed + for single-table inheritance queries to not render, in cases where the + columns clause of the query did not explicitly include the left-hand side + entity of the JOIN. The correct entity is now transferred to the + :class:`.Join` object that's generated internally, so that the criteria + against the left side entity is correctly added. + + + .. change:: + :tags: bug, mssql + :tickets: 8714 + + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + temporary table with the SQL Server dialect would fail on some Azure + variants, due to an unnecessary information schema query that is not + supported on those server versions. Pull request courtesy Mike Barry. + + .. change:: + :tags: bug, orm + :tickets: 8711 + + An informative exception is now raised when the + :func:`_orm.with_loader_criteria` option is used as a loader option added + to a specific "loader path", such as when using it within + :meth:`.Load.options`. This use is not supported as + :func:`_orm.with_loader_criteria` is only intended to be used as a top + level loader option. Previously, an internal error would be generated. + + .. change:: + :tags: bug, oracle + :tickets: 8744 + + Fixed issue where the ``nls_session_parameters`` view queried on first + connect in order to get the default decimal point character may not be + available depending on Oracle connection modes, and would therefore raise + an error. The approach to detecting decimal char has been simplified to + test a decimal value directly, instead of reading system views, which + works on any backend / driver. + + + .. change:: + :tags: bug, orm + :tickets: 8753 + + Improved "dictionary mode" for :meth:`_orm.Session.get` so that synonym + names which refer to primary key attribute names may be indicated in the + named dictionary. 
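For example, given a hypothetical mapping where ``id_value`` is a :func:`_orm.synonym` for the primary key column ``id``, the synonym name may now be used in the dictionary form of :meth:`_orm.Session.get` (sketch only; ``Base`` and ``session`` are assumed to be a declarative base and an active :class:`_orm.Session`)::

    from sqlalchemy import Column, Integer
    from sqlalchemy.orm import synonym

    class User(Base):
        __tablename__ = "user_account"

        id = Column(Integer, primary_key=True)
        id_value = synonym("id")

    # the synonym name now resolves to the primary key attribute
    user = session.get(User, {"id_value": 5})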
+ + .. change:: + :tags: bug, engine, regression + :tickets: 8717 + + Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be + called in all cases when a :class:`_engine.Connection` were closed and was + in the process of returning its DBAPI connection to the connection pool. + + The scenario was when the :class:`_engine.Connection` had already emitted + ``.rollback()`` on its DBAPI connection within the process of returning + the connection to the pool, where it would then instruct the connection + pool to forego doing its own "reset" to save on the additional method + call. However, this prevented custom pool reset schemes from being + used within this hook, as such hooks by definition are doing more than + just calling ``.rollback()``, and need to be invoked under all + circumstances. This was a regression that appeared in version 1.4. + + For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an + alternate event hook to use for custom "reset" implementations. Version 2.0 + will feature an improved version of :meth:`.PoolEvents.reset` which is + called for additional scenarios such as termination of asyncio connections, + and is also passed contextual information about the reset, to allow for + "custom connection reset" schemes which can respond to different reset + scenarios in different ways. + + .. change:: + :tags: bug, orm + :tickets: 8704 + + Fixed issue where "selectin_polymorphic" loading for inheritance mappers + would not function correctly if the :paramref:`_orm.Mapper.polymorphic_on` + parameter referred to a SQL expression that was not directly mapped on the + class. + + .. change:: + :tags: bug, orm + :tickets: 8710 + + Fixed issue where the underlying DBAPI cursor would not be closed when + using the :class:`_orm.Query` object as an iterator, if a user-defined exception + case were raised within the iteration process, thereby causing the iterator + to be closed by the Python interpreter. When using + :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead + to the usual MySQL-related issues with server side cursors out of sync, + and without direct access to the :class:`.Result` object, end-user code + could not access the cursor in order to close it. + + To resolve, a catch for ``GeneratorExit`` is applied within the iterator + method, which will close the result object in those cases when the + iterator were interrupted, and by definition will be closed by the + Python interpreter. + + As part of this change as implemented for the 1.4 series, ensured that + ``.close()`` methods are available on all :class:`.Result` implementations + including :class:`.ScalarResult` and :class:`.MappingResult`. The 2.0 + version of this change also includes new context manager patterns for use + with :class:`.Result` classes. + + .. change:: + :tags: bug, engine + :tickets: 8710 + + Ensured all :class:`.Result` objects include a :meth:`.Result.close` method + as well as a :attr:`.Result.closed` attribute, including on + :class:`.ScalarResult` and :class:`.MappingResult`. + + .. change:: + :tags: bug, mssql, reflection + :tickets: 8700 + + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + view with the SQL Server dialect would erroneously return ``False``, due to + a regression in the 1.4 series which removed support for this on SQL + Server. The issue is not present in the 2.0 series which uses a different + reflection architecture.
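For example, a check along these lines returns ``True`` again when the name refers to a view (``engine`` and ``some_view`` are illustrative only)::

    from sqlalchemy import inspect

    insp = inspect(engine)
    insp.has_table("some_view")   # True once more when "some_view" is a view on SQL Server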
Test support is added to ensure ``has_table()`` + remains working per spec re: views. + + .. change:: + :tags: bug, sql + :tickets: 8724 + + Fixed issue which prevented the :func:`_sql.literal_column` construct from + working properly within the context of a :class:`.Select` construct as well + as other potential places where "anonymized labels" might be generated, if + the literal expression contained characters which could interfere with + format strings, such as open parenthesis, due to an implementation detail + of the "anonymous label" structure. + + +.. changelog:: + :version: 1.4.42 + :released: October 16, 2022 + + .. change:: + :tags: bug, asyncio + :tickets: 8516 + + Improved implementation of ``asyncio.shield()`` used in context managers as + added in :ticket:`8145`, such that the "close" operation is enclosed within + an ``asyncio.Task`` which is then strongly referenced as the operation + proceeds. This is per Python documentation indicating that the task is + otherwise not strongly referenced. + + .. change:: + :tags: bug, orm + :tickets: 8614 + + The :paramref:`_orm.Session.execute.bind_arguments` dictionary is no longer + mutated when passed to :meth:`_orm.Session.execute` and similar; instead, + it's copied to an internal dictionary for state changes. Among other + things, this fixes an issue where the "clause" passed to the + :meth:`_orm.Session.get_bind` method would be incorrectly referring to the + :class:`_sql.Select` construct used for the "fetch" synchronization + strategy, when the actual query being emitted was a :class:`_dml.Delete` or + :class:`_dml.Update`. This would interfere with recipes for "routing + sessions". + + .. change:: + :tags: bug, orm + :tickets: 7094 + + A warning is emitted in ORM configurations when an explicit + :func:`_orm.remote` annotation is applied to columns that are local to the + immediate mapped class, when the referenced class does not include any of + the same table columns. Ideally this would raise an error at some point as + it's not correct from a mapping point of view. + + .. change:: + :tags: bug, orm + :tickets: 7545 + + A warning is emitted when attempting to configure a mapped class within an + inheritance hierarchy where the mapper is not given any polymorphic + identity, however there is a polymorphic discriminator column assigned. + Such classes should be abstract if they never intend to load directly. + + + .. change:: + :tags: bug, mssql, regression + :tickets: 8525 + + Fixed yet another regression in SQL Server isolation level fetch (see + :ticket:`8231`, :ticket:`8475`), this time with "Microsoft Dynamics CRM + Database via Azure Active Directory", which apparently lacks the + ``system_views`` view entirely. Error catching has been extended such that under + no circumstances will this method ever fail, provided database connectivity + is present. + + .. change:: + :tags: orm, bug, regression + :tickets: 8569 + + Fixed regression for 1.4 in :func:`_orm.contains_eager` where the "wrap in + subquery" logic of :func:`_orm.joinedload` would be inadvertently triggered + for use of the :func:`_orm.contains_eager` function with similar statements + (e.g. those that use ``distinct()``, ``limit()`` or ``offset()``), which + would then lead to secondary issues with queries that used some + combinations of SQL label names and aliasing.
This "wrapping" is not + appropriate for :func:`_orm.contains_eager` which has always had the + contract that the user-defined SQL statement is unmodified with the + exception of adding the appropriate columns to be fetched. + + .. change:: + :tags: bug, orm, regression + :tickets: 8507 + + Fixed regression where using ORM update() with synchronize_session='fetch' + would fail due to the use of evaluators that are now used to determine the + in-Python value for expressions in the the SET clause when refreshing + objects; if the evaluators make use of math operators against non-numeric + values such as PostgreSQL JSONB, the non-evaluable condition would fail to + be detected correctly. The evaluator now limits the use of math mutation + operators to numeric types only, with the exception of "+" that continues + to work for strings as well. SQLAlchemy 2.0 may alter this further by + fetching the SET values completely rather than using evaluation. + + .. change:: + :tags: usecase, postgresql + :tickets: 8574 + + :class:`_postgresql.aggregate_order_by` now supports cache generation. + + .. change:: + :tags: bug, mysql + :tickets: 8588 + + Adjusted the regular expression used to match "CREATE VIEW" when + testing for views to work more flexibly, no longer requiring the + special keyword "ALGORITHM" in the middle, which was intended to be + optional but was not working correctly. The change allows view reflection + to work more completely on MySQL-compatible variants such as StarRocks. + Pull request courtesy John Bodley. + + .. change:: + :tags: bug, engine + :tickets: 8536 + + Fixed issue where mixing "*" with additional explicitly-named column + expressions within the columns clause of a :func:`_sql.select` construct + would cause result-column targeting to sometimes consider the label name or + other non-repeated names to be an ambiguous target. + +.. changelog:: + :version: 1.4.41 + :released: September 6, 2022 + + .. change:: + :tags: bug, sql + :tickets: 8441 + + Fixed issue where use of the :func:`_sql.table` construct, passing a string + for the :paramref:`_sql.table.schema` parameter, would fail to take the + "schema" string into account when producing a cache key, thus leading to + caching collisions if multiple, same-named :func:`_sql.table` constructs + with different schemas were used. + + + .. change:: + :tags: bug, events, orm + :tickets: 8467 + + Fixed event listening issue where event listeners added to a superclass + would be lost if a subclass were created which then had its own listeners + associated. The practical example is that of the :class:`.sessionmaker` + class created after events have been associated with the + :class:`_orm.Session` class. + + .. change:: + :tags: orm, bug + :tickets: 8401 + + Hardened the cache key strategy for the :func:`_orm.aliased` and + :func:`_orm.with_polymorphic` constructs. While no issue involving actual + statements being cached can easily be demonstrated (if at all), these two + constructs were not including enough of what makes them unique in their + cache keys for caching on the aliased construct alone to be accurate. + + .. change:: + :tags: bug, orm, regression + :tickets: 8456 + + Fixed regression appearing in the 1.4 series where a joined-inheritance + query placed as a subquery within an enclosing query for that same entity + would fail to render the JOIN correctly for the inner query. 
The issue + manifested in two different ways prior and subsequent to version 1.4.18 + (related issue :ticket:`6595`), in one case rendering JOIN twice, in the + other losing the JOIN entirely. To resolve, the conditions under which + "polymorphic loading" are applied have been scaled back to not be invoked + for simple joined inheritance queries. + + .. change:: + :tags: bug, orm + :tickets: 8446 + + Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection + links to the parent object would be lost if the object were merged with + :meth:`.Session.merge` while also passing :paramref:`.Session.merge.load` + as False. + + .. change:: + :tags: bug, orm + :tickets: 8399 + + Fixed issue involving :func:`_orm.with_loader_criteria` where a closure + variable used as bound parameter value within the lambda would not carry + forward correctly into additional relationship loaders such as + :func:`_orm.selectinload` and :func:`_orm.lazyload` after the statement + were cached, using the stale originally-cached value instead. + + + .. change:: + :tags: bug, mssql, regression + :tickets: 8475 + + Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 + where connection would fail if the user did not have permission to query + the ``dm_exec_sessions`` or ``dm_pdw_nodes_exec_sessions`` system views + when trying to determine the current transaction isolation level. + + .. change:: + :tags: bug, asyncio + :tickets: 8419 + + Integrated support for asyncpg's ``terminate()`` method call for cases + where the connection pool is recycling a possibly timed-out connection, + where a connection is being garbage collected that wasn't gracefully + closed, as well as when the connection has been invalidated. This allows + asyncpg to abandon the connection without waiting for a response that may + incur long timeouts. + +.. changelog:: + :version: 1.4.40 + :released: August 8, 2022 + + .. change:: + :tags: bug, orm + :tickets: 8357 + + Fixed issue where referencing a CTE multiple times in conjunction with a + polymorphic SELECT could result in multiple "clones" of the same CTE being + constructed, which would then trigger these two CTEs as duplicates. To + resolve, the two CTEs are deep-compared when this occurs to ensure that + they are equivalent, then are treated as equivalent. + + + .. change:: + :tags: bug, orm, declarative + :tickets: 8190 + + Fixed issue where a hierarchy of classes set up as an abstract or mixin + declarative classes could not declare standalone columns on a superclass + that would then be copied correctly to a :class:`_orm.declared_attr` + callable that wanted to make use of them on a descendant class. + + .. change:: + :tags: bug, types + :tickets: 7249 + + Fixed issue where :class:`.TypeDecorator` would not correctly proxy the + ``__getitem__()`` operator when decorating the :class:`_types.ARRAY` + datatype, without explicit workarounds. + + .. change:: + :tags: bug, asyncio + :tickets: 8145 + + Added ``asyncio.shield()`` to the connection and session release process + specifically within the ``__aexit__()`` context manager exit, when using + :class:`.AsyncConnection` or :class:`.AsyncSession` as a context manager + that releases the object when the context manager is complete. This appears + to help with task cancellation when using alternate concurrency libraries + such as ``anyio``, ``uvloop`` that otherwise don't provide an async context + for the connection pool to release the connection properly during task + cancellation. + + + + .. 
change:: + :tags: bug, postgresql + :tickets: 4392 + + Fixed issue in psycopg2 dialect where the "multiple hosts" feature + implemented for :ticket:`4392`, where multiple ``host:port`` pairs could be + passed in the query string as + ``?host=host1:port1&host=host2:port2&host=host3:port3`` was not implemented + correctly, as it did not propagate the "port" parameter appropriately. + Connections that didn't use a different "port" likely worked without issue, + and connections that had "port" for some of the entries may have + incorrectly passed on that hostname. The format is now corrected to pass + hosts/ports appropriately. + + As part of this change, maintained support for another multihost style that + worked unintentionally, which is comma-separated + ``?host=h1,h2,h3&port=p1,p2,p3``. This format is more consistent with + libpq's query-string format, whereas the previous format is inspired by a + different aspect of libpq's URI format but is not quite the same thing. + + If the two styles are mixed together, an error is raised as this is + ambiguous. + + .. change:: + :tags: bug, sql + :tickets: 8253 + + Adjusted the SQL compilation for string containment functions + ``.contains()``, ``.startswith()``, ``.endswith()`` to force the use of the + string concatenation operator, rather than relying upon the overload of the + addition operator, so that non-standard use of these operators with, for + example, bytestrings still produces string concatenation operators. + + + .. change:: + :tags: bug, orm + :tickets: 8235 + + A :func:`_sql.select` construct that is passed a sole '*' argument for + ``SELECT *``, either via string, :func:`_sql.text`, or + :func:`_sql.literal_column`, will be interpreted as a Core-level SQL + statement rather than as an ORM level statement. This is so that the ``*``, + when expanded to match any number of columns, will result in all columns + returned in the result. The ORM-level interpretation of + :func:`_sql.select` needs to know the names and types of all ORM columns up + front which can't be achieved when ``'*'`` is used. + + If ``'*'`` is used amongst other expressions simultaneously with an ORM + statement, an error is raised as this can't be interpreted correctly by the + ORM. + + .. change:: + :tags: bug, mssql + :tickets: 8210 + + Fixed issues that prevented the new usage patterns for using DML with ORM + objects presented at :ref:`orm_dml_returning_objects` from working + correctly with the SQL Server pyodbc dialect. + + + .. change:: + :tags: bug, mssql + :tickets: 8231 + + Fixed issue where the SQL Server dialect's query for the current isolation + level would fail on Azure Synapse Analytics, due to the way in which this + database handles transaction rollbacks after an error has occurred. The + initial query has been modified to no longer rely upon catching an error + when attempting to detect the appropriate system view. Additionally, to + better support this database's very specific "rollback" behavior, + implemented new parameter ``ignore_no_transaction_on_rollback`` indicating + that a rollback should ignore Azure Synapse error 'No corresponding + transaction found. (111214)', which is raised if no transaction is present + in conflict with the Python DBAPI. + + Initial patch and valuable debugging assistance courtesy of @ww2406. + + .. seealso:: + + :ref:`azure_synapse_ignore_no_transaction_on_rollback` + + .. change:: + :tags: bug, mypy + :tickets: 8196 + + Fixed a crash of the mypy plugin when using a lambda as a Column + default, as in the sketch below.
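The kind of declaration that previously triggered the crash looks roughly like the following; the model itself is illustrative and ``Base`` is assumed to be a declarative base::

    import datetime

    from sqlalchemy import Column, DateTime, Integer

    class Log(Base):
        __tablename__ = "log"

        id = Column(Integer, primary_key=True)
        # a lambda used as a column default previously crashed the plugin
        created_at = Column(DateTime, default=lambda: datetime.datetime.utcnow())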
Pull request courtesy of tchapi. + + + .. change:: + :tags: usecase, engine + + Implemented new :paramref:`_engine.Connection.execution_options.yield_per` + execution option for :class:`_engine.Connection` in Core, to mirror that of + the same :ref:`yield_per ` option available in + the ORM. The option sets both the + :paramref:`_engine.Connection.execution_options.stream_results` option at + the same time as invoking :meth:`_engine.Result.yield_per`, to provide the + most common streaming result configuration which also mirrors that of the + ORM use case in its usage pattern. + + .. seealso:: + + :ref:`engine_stream_results` - revised documentation + + + .. change:: + :tags: bug, engine + + Fixed bug in :class:`_engine.Result` where the usage of a buffered result + strategy would not be used if the dialect in use did not support an + explicit "server side cursor" setting, when using + :paramref:`_engine.Connection.execution_options.stream_results`. This is in + error as DBAPIs such as that of SQLite and Oracle already use a + non-buffered result fetching scheme, which still benefits from usage of + partial result fetching. The "buffered" strategy is now used in all + cases where :paramref:`_engine.Connection.execution_options.stream_results` + is set. + + + .. change:: + :tags: bug, engine + :tickets: 8199 + + Added :meth:`.FilterResult.yield_per` so that result implementations + such as :class:`.MappingResult`, :class:`.ScalarResult` and + :class:`.AsyncResult` have access to this method. + +.. changelog:: + :version: 1.4.39 + :released: June 24, 2022 + + .. change:: + :tags: bug, orm, regression + :tickets: 8133 + + Fixed regression caused by :ticket:`8133` where the pickle format for + mutable attributes was changed, without a fallback to recognize the old + format, causing in-place upgrades of SQLAlchemy to no longer be able to + read pickled data from previous versions. A check plus a fallback for the + old format is now in place. + +.. changelog:: + :version: 1.4.38 + :released: June 23, 2022 + + .. change:: + :tags: bug, orm, regression + :tickets: 8162 + + Fixed regression caused by :ticket:`8064` where a particular check for + column correspondence was made too liberal, resulting in incorrect + rendering for some ORM subqueries such as those using + :meth:`.PropComparator.has` or :meth:`.PropComparator.any` in conjunction + with joined-inheritance queries that also use legacy aliasing features. + + .. change:: + :tags: bug, engine + :tickets: 8115 + + Repaired a deprecation warning class decorator that was preventing key + objects such as :class:`_engine.Connection` from having a proper + ``__weakref__`` attribute, causing operations like Python standard library + ``inspect.getmembers()`` to fail. + + + .. change:: + :tags: bug, sql + :tickets: 8098 + + Fixed multiple observed race conditions related to :func:`.lambda_stmt`, + including an initial "dogpile" issue when a new Python code object is + initially analyzed among multiple simultaneous threads which created both a + performance issue as well as some internal corruption of state. + Additionally repaired observed race condition which could occur when + "cloning" an expression construct that is also in the process of being + compiled or otherwise accessed in a different thread due to memoized + attributes altering the ``__dict__`` while iterated, for Python versions + prior to 3.10; in particular the lambda SQL construct is sensitive to this + as it holds onto a single statement object persistently. 
The iteration has + been refined to use ``dict.copy()`` with or without an additional iteration + instead. + + .. change:: + :tags: bug, sql + :tickets: 8084 + + Enhanced the mechanism of :class:`.Cast` and other "wrapping" + column constructs to more fully preserve a wrapped :class:`.Label` + construct, including that the label name will be preserved in the + ``.c`` collection of a :class:`.Subquery`. The label was already + able to render in the SQL correctly on the outside of the construct + which it was wrapped inside. + + .. change:: + :tags: bug, orm, sql + :tickets: 8091 + + Fixed an issue where :meth:`_sql.GenerativeSelect.fetch` would not + be applied when executing a statement using the ORM. + + .. change:: + :tags: bug, orm + :tickets: 8109 + + Fixed issue where a :func:`_orm.with_loader_criteria` option could not be + pickled, as is necessary when it is carried along for propagation to lazy + loaders in conjunction with a caching scheme. Currently, the only form that + is supported as picklable is to pass the "where criteria" as a fixed + module-level callable function that produces a SQL expression. An ad-hoc + "lambda" can't be pickled, and a SQL expression object is usually not fully + picklable directly. + + + .. change:: + :tags: bug, schema + :tickets: 8100, 8101 + + Fixed bugs involving the :paramref:`.Table.include_columns` and the + :paramref:`.Table.resolve_fks` parameters on :class:`.Table`; these + little-used parameters were apparently not working for columns that refer + to foreign key constraints. + + In the first case, not-included columns that refer to foreign keys would + still attempt to create a :class:`.ForeignKey` object, producing errors + when attempting to resolve the columns for the foreign key constraint + within reflection; foreign key constraints that refer to skipped columns + are now omitted from the table reflection process in the same way as + occurs for :class:`.Index` and :class:`.UniqueConstraint` objects with the + same conditions. No warning is produced however, as we likely want to + remove the include_columns warnings for all constraints in 2.0. + + In the latter case, the production of table aliases or subqueries would + fail on an FK related table not found despite the presence of + ``resolve_fks=False``; the logic has been repaired so that if a related + table is not found, the :class:`.ForeignKey` object is still proxied to the + aliased table or subquery (these :class:`.ForeignKey` objects are normally + used in the production of join conditions), but it is sent with a flag that + it's not resolvable. The aliased table / subquery will then work normally, + with the exception that it cannot be used to generate a join condition + automatically, as the foreign key information is missing. This was already + the behavior for such foreign key constraints produced using non-reflection + methods, such as joining :class:`.Table` objects from different + :class:`.MetaData` collections. + + .. change:: + :tags: bug, sql + :tickets: 8113 + + Adjusted the fix made for :ticket:`8056` which adjusted the escaping of + bound parameter names with special characters such that the escaped names + were translated after the SQL compilation step, which broke a published + recipe on the FAQ illustrating how to merge parameter names into the string + output of a compiled SQL string. 
The change restores the escaped names that + come from ``compiled.params`` and adds a conditional parameter to + :meth:`.SQLCompiler.construct_params` named ``escape_names`` that defaults + to ``True``, restoring the old behavior by default. + + .. change:: + :tags: bug, schema, mssql + :tickets: 8111 + + Fixed issue where :class:`.Table` objects that made use of IDENTITY columns + with a :class:`.Numeric` datatype would produce errors when attempting to + reconcile the "autoincrement" column, preventing construction of the + :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter + as well as emitting errors when attempting to invoke an :class:`_dml.Insert` + construct. + + + .. change:: + :tags: bug, extensions + :tickets: 8133 + + Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM + mapped instance would not correctly restore state for mappings that + contained multiple :class:`.Mutable`-enabled attributes. + +.. changelog:: + :version: 1.4.37 + :released: May 31, 2022 + + .. change:: + :tags: bug, mssql + :tickets: 8062 + + Fix issue where a password with a leading "{" would result in login failure. + + .. change:: + :tags: bug, sql, postgresql, sqlite + :tickets: 8014 + + Fixed bug where the PostgreSQL + :meth:`_postgresql.Insert.on_conflict_do_update` method and the SQLite + :meth:`_sqlite.Insert.on_conflict_do_update` method would both fail to + correctly accommodate a column with a separate ".key" when specifying the + column using its key name in the dictionary passed to + :paramref:`_postgresql.Insert.on_conflict_do_update.set_`, as well as if + the :attr:`_postgresql.Insert.excluded` collection were used as the + dictionary directly. + + .. change:: + :tags: bug, sql + :tickets: 8073 + + An informative error is raised for the use case where + :meth:`_dml.Insert.from_select` is being passed a "compound select" object such + as a UNION, yet the INSERT statement needs to append additional columns to + support Python-side or explicit SQL defaults from the table metadata. In + this case a subquery of the compound object should be passed. + + .. change:: + :tags: bug, orm + :tickets: 8064 + + Fixed issue where using a :func:`_orm.column_property` construct containing + a subquery against an already-mapped column attribute would not correctly + apply ORM-compilation behaviors to the subquery, including that the "IN" + expression added for a single-table inherits expression would fail to be + included. + + .. change:: + :tags: bug, orm + :tickets: 8001 + + Fixed issue where ORM results would apply incorrect key names to the + returned :class:`.Row` objects in the case where the set of columns to be + selected were changed, such as when using + :meth:`.Select.with_only_columns`. + + .. change:: + :tags: bug, mysql + :tickets: 7966 + + Further adjustments to the MySQL PyODBC dialect to allow for complete + connectivity, which was previously still not working despite fixes in + :ticket:`7871`. + + .. change:: + :tags: bug, sql + :tickets: 7979 + + Fixed an issue where using :func:`.bindparam` with no explicit data or type + given could be coerced into the incorrect type when used in expressions + such as when using :meth:`_types.ARRAY.Comparator.any` and + :meth:`_types.ARRAY.Comparator.all`. + + + .. change:: + :tags: bug, oracle + :tickets: 8053 + + Fixed SQL compiler issue where the "bind processing" function for a bound + parameter would not be correctly applied to a bound value if the bound + parameter's name were "escaped". 
Concretely, this applies, among other + cases, to Oracle when a :class:`.Column` has a name that itself requires + quoting, such that the quoting-required name is then used for the bound + parameters generated within DML statements, and the datatype in use + requires bind processing, such as the :class:`.Enum` datatype. + + .. change:: + :tags: bug, mssql, reflection + :tickets: 8035 + + Explicitly specify the collation when reflecting table columns using + MSSQL to prevent "collation conflict" errors. + + .. change:: + :tags: bug, orm, oracle, postgresql + :tickets: 8056 + + Fixed bug, likely a regression from 1.3, where usage of column names that + require bound parameter escaping, more concretely when using Oracle with + column names that require quoting such as those that start with an + underscore, or in less common cases with some PostgreSQL drivers when using + column names that contain percent signs, would cause the ORM versioning + feature to not work correctly if the versioning column itself had such a + name, as the ORM assumes certain bound parameter naming conventions that + were being interfered with via the quotes. This issue is related to + :ticket:`8053` and essentially revises the approach towards fixing this, + revising the original issue :ticket:`5653` that created the initial + implementation for generalized bound-parameter name quoting. + + .. change:: + :tags: bug, mysql + :tickets: 8036 + + Added disconnect code for MySQL error 4031, introduced in MySQL >= 8.0.24, + indicating connection idle timeout exceeded. In particular this repairs an + issue where pre-ping could not reconnect on a timed-out connection. Pull + request courtesy valievkarim. + + .. change:: + :tags: bug, sql + :tickets: 8018 + + An informative error is raised if two individual :class:`.BindParameter` + objects share the same name, yet one is used within an "expanding" context + (typically an IN expression) and the other is not; mixing the same name in + these two different styles of usage is not supported and typically the + ``expanding=True`` parameter should be set on the parameters that are to + receive list values outside of IN expressions (where ``expanding`` is set + by default). + + .. change:: + :tags: bug, engine, tests + :tickets: 8019 + + Fixed issue where support for logging "stacklevel" implemented in + :ticket:`7612` required adjustment to work with recently released Python + 3.11.0b1, also repairs the unit tests which tested this feature. + + .. change:: + :tags: usecase, oracle + :tickets: 8066 + + Added two new error codes for Oracle disconnect handling to support early + testing of the new "python-oracledb" driver released by Oracle. + +.. changelog:: + :version: 1.4.36 + :released: April 26, 2022 + + .. change:: + :tags: bug, mysql, regression + :tickets: 7871 + + Fixed a regression in the untested MySQL PyODBC dialect caused by the fix + for :ticket:`7518` in version 1.4.32 where an argument was being propagated + incorrectly upon first connect, leading to a ``TypeError``. + + .. 
change:: + :tags: bug, orm, regression + :tickets: 7936 + + Fixed regression where the change made for :ticket:`7861`, released in + version 1.4.33, that brought the :class:`_sql.Insert` construct to be partially + recognized as an ORM-enabled statement did not properly transfer the + correct mapper / mapped table state to the :class:`.Session`, causing the + :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was + bound to engines and/or connections using the :paramref:`.Session.binds` + parameter. + + .. change:: + :tags: bug, engine + :tickets: 7875 + + Fixed a memory leak in the C extensions which could occur when calling upon + named members of :class:`.Row` when the member does not exist under Python + 3; in particular this could occur during NumPy transformations when it + attempts to call members such as ``.__array__``, but the issue was + surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This + issue does not apply to version 2.0 which has already transitioned to + Cython. Thanks much to Sebastian Berg for identifying the problem. + + + .. change:: + :tags: bug, postgresql + :tickets: 6515 + + Fixed bug in :class:`_sqltypes.ARRAY` datatype in combination with :class:`.Enum` on + PostgreSQL where using the ``.any()`` or ``.all()`` methods to render SQL + ANY() or ALL(), given members of the Python enumeration as arguments, would + produce a type adaptation failure on all drivers. + + .. change:: + :tags: bug, postgresql + :tickets: 7943 + + Implemented :attr:`_postgresql.UUID.python_type` attribute for the + PostgreSQL :class:`_postgresql.UUID` type object. The attribute will return + either ``str`` or ``uuid.UUID`` based on the + :paramref:`_postgresql.UUID.as_uuid` parameter setting. Previously, this + attribute was unimplemented. Pull request courtesy Alex Grönholm. + + .. change:: + :tags: bug, tests + :tickets: 7919 + + For third party dialects, repaired a missing requirement for the + ``SimpleUpdateDeleteTest`` suite test which was not checking for a working + "rowcount" function on the target dialect. + + + .. change:: + :tags: bug, postgresql + :tickets: 7930 + + Fixed an issue in the psycopg2 dialect when using the + :paramref:`_sa.create_engine.pool_pre_ping` parameter which would cause + user-configured ``AUTOCOMMIT`` isolation level to be inadvertently reset by + the "ping" handler. + + .. change:: + :tags: bug, asyncio + :tickets: 7937 + + Repaired handling of ``contextvar.ContextVar`` objects inside of async + adapted event handlers. Previously, values applied to a ``ContextVar`` + would not be propagated in the specific case of calling upon awaitables + inside of non-awaitable code. + + + .. change:: + :tags: bug, engine + :tickets: 7953 + + Added a warning regarding a bug which exists in the :meth:`_result.Result.columns` + method when passing 0 for the index in conjunction with a :class:`_result.Result` + that will return a single ORM entity, which indicates that the current + behavior of :meth:`_result.Result.columns` is broken in this case as the + :class:`_result.Result` object will yield scalar values and not :class:`.Row` + objects. The issue will be fixed in 2.0, which would be a + backwards-incompatible change for code that relies on the current broken + behavior. Code which wants to receive a collection of scalar values should + use the :meth:`_result.Result.scalars` method, which will return a new + :class:`.ScalarResult` object that yields non-row scalar objects. + + + .. 
change:: + :tags: bug, schema + :tickets: 7958 + + Fixed bug where :class:`.ForeignKeyConstraint` naming conventions using the + ``referred_column_0`` naming convention key would not work if the foreign + key constraint were set up as a :class:`.ForeignKey` object rather than an + explicit :class:`.ForeignKeyConstraint` object. As this change makes use of + a backport of some fixes from version 2.0, an additional little-known + feature that has likely been broken for many years is also fixed which is + that a :class:`.ForeignKey` object may refer to a referred table by name of + the table alone without using a column name, if the name of the referent + column is the same as that of the referred column. + + The ``referred_column_0`` naming convention key was previously not tested + with the :class:`.ForeignKey` object, only :class:`.ForeignKeyConstraint`, + and this bug reveals that the feature has never worked correctly unless + :class:`.ForeignKeyConstraint` is used for all FK constraints. This bug + traces back to the original introduction of the feature introduced for + :ticket:`3989`. + + .. change:: + :tags: bug, orm, declarative + :tickets: 7900 + + Modified the :class:`.DeclarativeMeta` metaclass to pass ``cls.__dict__`` + into the declarative scanning process to look for attributes, rather than + the separate dictionary passed to the type's ``__init__()`` method. This + allows user-defined base classes that add attributes within an + ``__init_subclass__()`` to work as expected, as ``__init_subclass__()`` can + only affect the ``cls.__dict__`` itself and not the other dictionary. This + is technically a regression from 1.3 where ``__dict__`` was being used. + + + + +.. changelog:: + :version: 1.4.35 + :released: April 6, 2022 + + .. change:: + :tags: bug, sql + :tickets: 7890 + + Fixed bug in newly implemented + :paramref:`.FunctionElement.table_valued.joins_implicitly` feature where + the parameter would not automatically propagate from the original + :class:`.TableValuedAlias` object to the secondary object produced when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. + + Additionally repaired these issues in :class:`.TableValuedAlias`: + + * repaired a potential memory issue which could occur when + repeatedly calling :meth:`.TableValuedAlias.render_derived` against + successive copies of the same object (for .alias(), we currently + have to still continue chaining from the previous element. not sure + if this can be improved but this is standard behavior for .alias() + elsewhere) + * repaired issue where the individual element types would be lost when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. + + .. change:: + :tags: bug, sql, regression + :tickets: 7903 + + Fixed regression caused by :ticket:`7823` which impacted the caching + system, such that bound parameters that had been "cloned" within ORM + operations, such as polymorphic loading, would in some cases not acquire + their correct execution-time value leading to incorrect bind values being + rendered. + +.. changelog:: + :version: 1.4.34 + :released: March 31, 2022 + + .. change:: + :tags: bug, orm, regression + :tickets: 7878 + + Fixed regression caused by :ticket:`7861` where invoking an + :class:`_sql.Insert` construct which contained ORM entities directly via + :meth:`_orm.Session.execute` would fail. + + .. 
change:: + :tags: bug, postgresql + :tickets: 7880 + + Scaled back a fix made for :ticket:`6581` where "executemany values" mode + for psycopg2 were disabled for all "ON CONFLICT" styles of INSERT, to + not apply to the "ON CONFLICT DO NOTHING" clause, which does not include + any parameters and is safe for "executemany values" mode. "ON CONFLICT + DO UPDATE" is still blocked from "executemany values" as there may + be additional parameters in the DO UPDATE clause that cannot be batched + (which is the original issue fixed by :ticket:`6581`). + +.. changelog:: + :version: 1.4.33 + :released: March 31, 2022 + + .. change:: + :tags: bug, engine + :tickets: 7853 + + Further clarified connection-level logging to indicate the BEGIN, ROLLBACK + and COMMIT log messages do not actually indicate a real transaction when + the AUTOCOMMIT isolation level is in use; messaging has been extended to + include the BEGIN message itself, and the messaging has also been fixed to + accommodate when the :class:`_engine.Engine` level + :paramref:`_sa.create_engine.isolation_level` parameter was used directly. + + .. change:: + :tags: bug, mssql, regression + :tickets: 7812 + + Fixed regression caused by :ticket:`7160` where FK reflection in + conjunction with a low compatibility level setting (compatibility level 80: + SQL Server 2000) causes an "Ambiguous column name" error. Patch courtesy + @Lin-Your. + + .. change:: + :tags: usecase, schema + :tickets: 7860 + + Added support so that the :paramref:`.Table.to_metadata.referred_schema_fn` + callable passed to :meth:`.Table.to_metadata` may return the value + :attr:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be + reset to None. The :attr:`.RETAIN_SCHEMA` symbol may also be returned from + this function to indicate "no change", which will behave the same as + ``None`` currently does which also indicates no change. + + + .. change:: + :tags: bug, sqlite, reflection + :tickets: 5463 + + Fixed bug where the name of CHECK constraints under SQLite would not be + reflected if the name were created using quotes, as is the case when the + name uses mixed case or special characters. + + + .. change:: + :tags: bug, orm, regression + :tickets: 7868 + + Fixed regression in "dynamic" loader strategy where the + :meth:`_orm.Query.filter_by` method would not be given an appropriate + entity to filter from, in the case where a "secondary" table were present + in the relationship being queried and the mapping were against something + complex such as a "with polymorphic". + + .. change:: + :tags: bug, orm + :tickets: 7801 + + Fixed bug where :func:`_orm.composite` attributes would not work in + conjunction with the :func:`_orm.selectin_polymorphic` loader strategy for + joined table inheritance. + + + .. change:: + :tags: bug, orm, performance + :tickets: 7823 + + Improvements in memory usage by the ORM, removing a significant set of + intermediary expression objects that are typically stored when a copy of an + expression object is created. These clones have been greatly reduced, + reducing the number of total expression objects stored in memory by + ORM mappings by about 30%. + + .. change:: + :tags: usecase, orm + :tickets: 7805 + + Added :paramref:`_orm.with_polymorphic.adapt_on_names` to the + :func:`_orm.with_polymorphic` function, which allows a polymorphic load + (typically with concrete mapping) to be stated against an alternative + selectable that will adapt to the original mapped selectable on column + names alone. + + .. 
change:: + :tags: usecase, sql + :tickets: 7845 + + Added new parameter + :paramref:`.FunctionElement.table_valued.joins_implicitly`, for the + :meth:`.FunctionElement.table_valued` construct. This parameter indicates + that the given table-valued function implicitly joins to the table it + refers towards, essentially disabling the "from linting" feature, i.e. the + "cartesian product" warning, from taking effect due to the presence of this + parameter. May be used for functions such as ``func.json_each()``. + + .. change:: + :tags: usecase, engine + :tickets: 7877, 7815 + + Added new parameter :paramref:`_engine.Engine.dispose.close`, defaulting to True. + When False, the engine disposal does not touch the connections in the old + pool at all, simply dropping the pool and replacing it. This use case is so + that when the original pool is transferred from a parent process, the + parent process may continue to use those connections. + + .. seealso:: + + :ref:`pooling_multiprocessing` - revised documentation + + .. change:: + :tags: bug, orm + :tickets: 7799 + + Fixed issue where the :func:`_orm.selectin_polymorphic` loader option would + not work with joined inheritance mappers that don't have a fixed + "polymorphic_on" column. Additionally added test support for a wider + variety of usage patterns with this construct. + + .. change:: + :tags: usecase, orm + :tickets: 7861 + + Added new attributes :attr:`.UpdateBase.returning_column_descriptions` and + :attr:`.UpdateBase.entity_description` to allow for inspection of ORM + attributes and entities that are installed as part of an :class:`_sql.Insert`, + :class:`.Update`, or :class:`.Delete` construct. The + :attr:`.Select.column_descriptions` accessor is also now implemented for + Core-only selectables. + + .. change:: + :tags: bug, sql + :tickets: 7876 + + The :paramref:`.bindparam.literal_execute` parameter now takes part + of the cache generation of a :func:`.bindparam`, since it changes + the sql string generated by the compiler. + Previously the correct bind values were used, but the ``literal_execute`` + would be ignored on subsequent executions of the same query. + + .. change:: + :tags: bug, orm + :tickets: 7862 + + Fixed bug in :func:`_orm.with_loader_criteria` function where loader + criteria would not be applied to a joined eager load that were invoked + within the scope of a refresh operation for the parent object. + + .. change:: + :tags: bug, orm + :tickets: 7842 + + Fixed issue where the :class:`_orm.Mapper` would reduce a user-defined + :paramref:`_orm.Mapper.primary_key` argument too aggressively, in the case + of mapping to a ``UNION`` where for some of the SELECT entries, two columns + are essentially equivalent, but in another, they are not, such as in a + recursive CTE. The logic here has been changed to accept a given + user-defined PK as given, where columns will be related to the mapped + selectable but no longer "reduced" as this heuristic can't accommodate for + all situations. + + .. change:: + :tags: bug, ext + :tickets: 7827 + + Improved the error message that's raised for the case where the + :func:`.association_proxy` construct attempts to access a target attribute + at the class level, and this access fails. The particular use case here is + when proxying to a hybrid attribute that does not include a working + class-level implementation. + + + .. 
change:: + :tags: bug, sql, regression + :tickets: 7798 + + Fixed regression caused by :ticket:`7760` where the new capabilities of + :class:`.TextualSelect` were not fully implemented within the compiler + properly, leading to issues with composed INSERT constructs such as "INSERT + FROM SELECT" and "INSERT...ON CONFLICT" when combined with CTE and textual + statements. + +.. changelog:: + :version: 1.4.32 + :released: March 6, 2022 + + .. change:: + :tags: bug, sql + :tickets: 7721 + + Fixed type-related error messages that would fail for values that were + tuples, due to string formatting syntax, including compile of unsupported + literal values and invalid boolean values. + + .. change:: + :tags: bug, sql, mysql + :tickets: 7720, 7789, 7598 + + Fixed issues in MySQL :class:`_mysql.SET` datatype as well as the generic + :class:`.Enum` datatype where the ``__repr__()`` method would not render + all optional parameters in the string output, impacting the use of these + types in Alembic autogenerate. Pull request for MySQL courtesy Yuki + Nishimine. + + + .. change:: + :tags: bug, sqlite + :tickets: 7736 + + Fixed issue where SQLite unique constraint reflection would fail to detect + a column-inline UNIQUE constraint where the column name had an underscore + in its name. + + .. change:: + :tags: usecase, sqlite + :tickets: 7736 + + Added support for reflecting SQLite inline unique constraints where + the column names are formatted with SQLite "escape quotes" ``[]`` + or `````, which are discarded by the database when producing the + column name. + + .. change:: + :tags: bug, oracle + :tickets: 7676 + + Fixed issue in Oracle dialect where using a column name that requires + quoting when written as a bound parameter, such as ``"_id"``, would not + correctly track a Python generated default value due to the bound-parameter + rewriting missing this value, causing an Oracle error to be raised. + + .. change:: + :tags: bug, tests + :tickets: 7599 + + Improvements to the test suite's integration with pytest such that the + "warnings" plugin, if manually enabled, will not interfere with the test + suite, such that third parties can enable the warnings plugin or make use + of the ``-W`` parameter and SQLAlchemy's test suite will continue to pass. + Additionally, modernized the detection of the "pytest-xdist" plugin so that + plugins can be globally disabled using PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 + without breaking the test suite if xdist were still installed. Warning + filters that promote deprecation warnings to errors are now localized to + SQLAlchemy-specific warnings, or within SQLAlchemy-specific sources for + general Python deprecation warnings, so that non-SQLAlchemy deprecation + warnings emitted from pytest plugins should also not impact the test suite. + + + .. change:: + :tags: bug, sql + + The :class:`_sqltypes.Enum` datatype now emits a warning if the + :paramref:`_sqltypes.Enum.length` argument is specified without also + specifying :paramref:`_sqltypes.Enum.native_enum` as False, as the + parameter is otherwise silently ignored in this case, despite the fact that + the :class:`_sqltypes.Enum` datatype will still render VARCHAR DDL on + backends that don't have a native ENUM datatype such as SQLite. This + behavior may change in a future release so that "length" is honored for all + non-native "enum" types regardless of the "native_enum" setting. + + + .. 
change::
+ :tags: bug, mysql, regression
+ :tickets: 7518
+
+ Fixed regression caused by :ticket:`7518` where changing the syntax "SHOW
+ VARIABLES" to "SELECT @@" broke compatibility with MySQL versions older
+ than 5.6, including early 5.0 releases. While these are very old MySQL
+ versions, a change in compatibility was not planned, so version-specific
+ logic has been restored to fall back to "SHOW VARIABLES" for MySQL server
+ versions < 5.6.
+
+ .. change::
+ :tags: bug, asyncio
+
+ Fixed issue where a descriptive error message was not raised for some
+ forms of event listening registered against an async engine, where the
+ listener should instead be associated with the underlying sync engine
+ instance.
+
+ .. change::
+ :tags: bug, mariadb, regression
+ :tickets: 7738
+
+ Fixed regression in the mariadbconnector dialect as of mariadb connector 1.0.10
+ where the DBAPI no longer pre-buffers cursor.lastrowid, leading to errors
+ when inserting objects with the ORM as well as causing non-availability of
+ the :attr:`_result.CursorResult.inserted_primary_key` attribute. The
+ dialect now fetches this value proactively for situations where it applies.
+
+ .. change::
+ :tags: usecase, postgresql
+ :tickets: 7600
+
+ Added compiler support for the PostgreSQL ``NOT VALID`` phrase when rendering
+ DDL for the :class:`.CheckConstraint`, :class:`.ForeignKeyConstraint`
+ and :class:`.ForeignKey` schema constructs. Pull request courtesy
+ Gilbert Gilb's.
+
+ .. seealso::
+
+ :ref:`postgresql_constraint_options`
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7594
+
+ Fixed regression where the ORM exception that is to be raised when an
+ INSERT silently fails to actually insert a row (such as from a trigger)
+ would not be reached, because a runtime exception was raised ahead of time
+ due to the missing primary key value, thus raising an uninformative
+ exception rather than the correct one. For 1.4 and above, a new
+ :class:`_ormexc.FlushError` is added for this case that's raised earlier
+ than the previous "null identity" exception was for 1.3, as a situation
+ where the number of rows actually INSERTed does not match what was expected
+ is a more critical situation in 1.4, as it prevents batching of multiple
+ objects from working correctly. This is separate from the case where a
+ newly fetched primary key value is NULL, which continues to raise the
+ existing "null identity" exception.
+
+ .. change::
+ :tags: bug, tests
+ :tickets: 7045
+
+ Made corrections to the default pytest configuration regarding how test
+ discovery is configured, to fix an issue where the test suite would not
+ configure warnings correctly and would also attempt to load example suites
+ as tests, in the specific case where the SQLAlchemy checkout was located in
+ an absolute path that had a super-directory named "test".
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7697
+
+ Fixed issue where using a fully qualified path for the class name in
+ :func:`_orm.relationship` that contained an incorrect name for a path
+ token other than the first token would fail to raise an informative
+ error, instead failing randomly at a later step.
+
+ .. change::
+ :tags: bug, oracle, regression
+ :tickets: 7748
+
+ Added support for parsing "DPI" error codes from cx_Oracle exception objects
+ such as ``DPI-1080`` and ``DPI-1010``, both of which now indicate a
+ disconnect scenario as of cx_Oracle 8.3.
+
+ ..
change:: + :tags: bug, sql + :tickets: 7760 + + Fixed issue where the :meth:`.HasCTE.add_cte` method as called upon a + :class:`.TextualSelect` instance was not being accommodated by the SQL + compiler. The fix additionally adds more "SELECT"-like compiler behavior to + :class:`.TextualSelect` including that DML CTEs such as UPDATE and INSERT + may be accommodated. + + .. change:: + :tags: bug, engine + :tickets: 7612 + + Adjusted the logging for key SQLAlchemy components including + :class:`_engine.Engine`, :class:`_engine.Connection` to establish an + appropriate stack level parameter, so that the Python logging tokens + ``funcName`` and ``lineno`` when used in custom logging formatters will + report the correct information, which can be useful when filtering log + output; supported on Python 3.8 and above. Pull request courtesy Markus + Gerstel. + + .. change:: + :tags: bug, asyncio + :tickets: 7667 + + Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed + to raise an informative exception if the + :paramref:`_engine.Connection.execution_options.stream_results` execution + option were used, which is incompatible with a sync-style + :class:`_result.Result` object when using an asyncio calling style, as the + operation to fetch more rows would need to be awaited. An exception is now + raised in this scenario in the same way one was already raised when the + :paramref:`_engine.Connection.execution_options.stream_results` option + would be used with the :meth:`_asyncio.AsyncConnection.execute` method. + + Additionally, for improved stability with state-sensitive database drivers + such as asyncmy, the cursor is now closed when this error condition is + raised; previously with the asyncmy dialect, the connection would go into + an invalid state with unconsumed server side results remaining. + + +.. changelog:: + :version: 1.4.31 + :released: January 20, 2022 + + .. change:: + :tags: bug, postgresql, regression + :tickets: 7590 + + Fixed regression where the change in :ticket:`7148` to repair ENUM handling + in PostgreSQL broke the use case of an empty ARRAY of ENUM, preventing rows + that contained an empty array from being handled correctly when fetching + results. + + .. change:: + :tags: bug, orm + :tickets: 7591 + + Fixed issue in :meth:`_orm.Session.bulk_save_objects` where the sorting + that takes place when the ``preserve_order`` parameter is set to False + would sort partially on ``Mapper`` objects, which is rejected in Python + 3.11. + + + .. change:: + :tags: bug, mysql, regression + :tickets: 7593 + + Fixed regression in asyncmy dialect caused by :ticket:`7567` where removal + of the PyMySQL dependency broke binary columns, due to the asyncmy dialect + not being properly included within CI tests. + + .. change:: + :tags: mssql + :tickets: 7243 + + Added support for ``FILESTREAM`` when using ``VARBINARY(max)`` + in MSSQL. + + .. seealso:: + + :paramref:`_mssql.VARBINARY.filestream` + +.. changelog:: + :version: 1.4.30 + :released: January 19, 2022 + + .. change:: + :tags: usecase, asyncio + :tickets: 7580 + + Added new method :meth:`.AdaptedConnection.run_async` to the DBAPI + connection interface used by asyncio drivers, which allows methods to be + called against the underlying "driver" connection directly within a + sync-style function where the ``await`` keyword can't be used, such as + within SQLAlchemy event handler functions. 
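+
+ For example, a minimal sketch of a pool on-connect handler that uses this
+ method (the asyncpg codec registration shown here, along with the ``encode``
+ and ``decode`` helpers, is hypothetical and for illustration only)::
+
+     from sqlalchemy import event
+     from sqlalchemy.ext.asyncio import create_async_engine
+
+     engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")
+
+     @event.listens_for(engine.sync_engine, "connect")
+     def register_custom_codec(dbapi_connection, connection_record):
+         # dbapi_connection is the AdaptedConnection; run_async() invokes an
+         # awaitable method on the underlying asyncpg driver connection
+         dbapi_connection.run_async(
+             lambda connection: connection.set_type_codec(
+                 "my_custom_type", encoder=encode, decoder=decode
+             )
+         )
+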
+ The method is analogous to the
+ :meth:`_asyncio.AsyncConnection.run_sync` method which translates
+ async-style calls to sync-style. The method is useful for things like
+ connection-pool on-connect handlers that need to invoke awaitable methods
+ on the driver connection when it's first created.
+
+ .. seealso::
+
+ :ref:`asyncio_events_run_async`
+
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7507
+
+ Fixed issue in the joined-inheritance load of additional attributes
+ functionality in deep multi-level inheritance where an intermediary table
+ that contained no columns would not be included in the tables joined,
+ instead linking those tables to their primary key identifiers. While this
+ works fine, it nonetheless in 1.4 began producing the cartesian product
+ compiler warning. The logic has been changed so that these intermediary
+ tables are included regardless. While this does include additional tables
+ in the query that are not technically necessary, this only occurs for the
+ highly unusual case of deep 3+ level inheritance with intermediary tables
+ that have no non-primary-key columns; the potential performance impact is
+ therefore expected to be negligible.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7579
+
+ Fixed issue where calling upon :meth:`_orm.registry.map_imperatively` more
+ than once for the same class would produce an unexpected error, rather than
+ an informative error that the target class is already mapped. This behavior
+ differed from that of the :func:`_orm.mapper` function, which does report an
+ informative message already.
+
+ .. change::
+ :tags: bug, sql, postgresql
+ :tickets: 7537
+
+ Added an additional rule to the system that determines ``TypeEngine``
+ implementations from Python literals to apply a second level of adjustment
+ to the type, so that a Python datetime with or without tzinfo can set the
+ ``timezone=True`` parameter on the returned :class:`.DateTime` object, as
+ well as :class:`.Time`. This helps with some round-trip scenarios on
+ type-sensitive PostgreSQL dialects such as asyncpg, psycopg3 (2.0 only).
+
+ .. change::
+ :tags: bug, postgresql, asyncpg
+ :tickets: 7537
+
+ Improved support for asyncpg handling of TIME WITH TIMEZONE, which
+ was not fully implemented.
+
+ .. change::
+ :tags: usecase, postgresql
+ :tickets: 7561
+
+ Added string rendering to the :class:`.postgresql.UUID` datatype, so that
+ stringifying a statement with "literal_binds" that uses this type will
+ render an appropriate string value for the PostgreSQL backend. Pull request
+ courtesy José Duarte.
+
+ .. change::
+ :tags: bug, orm, asyncio
+ :tickets: 7524
+
+ Added missing method :meth:`_asyncio.AsyncSession.invalidate` to the
+ :class:`_asyncio.AsyncSession` class.
+
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7557
+
+ Fixed regression which appeared in 1.4.23 that could cause loader options
+ to be mis-handled in some cases, in particular when using joined table
+ inheritance in combination with the ``polymorphic_load="selectin"`` option
+ as well as relationship lazy loading, leading to a ``TypeError``.
+
+
+ .. change::
+ :tags: bug, mypy
+ :tickets: 7321
+
+ Fixed Mypy crash when running in daemon mode caused by a
+ missing attribute on an internal mypy ``Var`` instance.
+
+ .. change::
+ :tags: change, mysql
+ :tickets: 7518
+
+ Replaced the ``SHOW VARIABLES LIKE`` statement with the equivalent
+ ``SELECT @@variable`` in MySQL and MariaDB dialect initialization.
+ This should avoid mutex contention caused by ``SHOW VARIABLES``,
+ improving initialization performance.
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7576
+
+ Fixed ORM regression where calling the :func:`_orm.aliased` function
+ against an existing :func:`_orm.aliased` construct would fail to produce
+ correct SQL if the existing construct was against a fixed table. The fix
+ allows the original :func:`_orm.aliased` construct to be disregarded if
+ it was only against a table that's now being replaced. It also allows for
+ correct behavior when constructing a :func:`_orm.aliased` without a
+ selectable argument against a :func:`_orm.aliased` that's against a
+ subquery, to create an alias of that subquery (i.e. to change its name).
+
+ The nesting behavior of :func:`_orm.aliased` remains in place for the case
+ where the outer :func:`_orm.aliased` object is against a subquery which in
+ turn refers to the inner :func:`_orm.aliased` object. This is a relatively
+ new 1.4 feature that helps to suit use cases that were previously served by
+ the deprecated ``Query.from_self()`` method.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7514
+
+ Fixed issue where the :meth:`_sql.Select.correlate_except` method, when passed
+ either the ``None`` value or no arguments, would not correlate any elements
+ when used in an ORM context (that is, passing ORM entities as FROM
+ clauses), rather than causing all FROM elements to be considered as
+ "correlated" in the same way as occurs when using Core-only constructs.
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7505
+
+ Fixed regression from 1.3 where the "subqueryload" loader strategy would
+ fail with a stack trace if used against a query that made use of
+ :meth:`_orm.Query.from_statement` or :meth:`_sql.Select.from_statement`. As
+ subqueryload requires modifying the original statement, it's not compatible
+ with the "from_statement" use case, especially for statements made against
+ the :func:`_sql.text` construct. The behavior is now equivalent to that of
+ 1.3 and earlier, which is that the loader strategy silently degrades to
+ not be used for such statements, typically falling back to using the
+ lazyload strategy.
+
+
+ .. change::
+ :tags: bug, reflection, postgresql, mssql
+ :tickets: 7382
+
+ Fixed reflection of covering indexes to report ``include_columns`` as part
+ of the ``dialect_options`` entry in the reflected index dictionary, thereby
+ enabling round trips from reflection->create to be complete. Included
+ columns continue to also be present under the ``include_columns`` key for
+ backwards compatibility.
+
+ .. change::
+ :tags: bug, mysql
+ :tickets: 7567
+
+ Removed unnecessary dependency on PyMySQL from the asyncmy dialect. Pull
+ request courtesy long2ice.
+
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 7418
+
+ Fixed handling of arrays of enum values which require escape characters.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 7032
+
+ Added an informative error message when a method object is passed to a SQL
+ construct. Previously, when such a callable was passed, as is a common
+ typographical error when dealing with method-chained SQL constructs, it
+ was interpreted as a "lambda SQL" target to be invoked at compilation time,
+ which would lead to silent failures. As this feature was not intended to be
+ used with methods, method objects are now rejected.
+
+.. changelog::
+ :version: 1.4.29
+ :released: December 22, 2021
+
+ ..
change::
+ :tags: usecase, asyncio
+ :tickets: 7301
+
+ Added the :func:`_asyncio.async_engine_config` function to create
+ an async engine from a configuration dict. This otherwise
+ behaves the same as :func:`_sa.engine_from_config`.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7489
+
+ Fixed issue in the new "loader criteria" method
+ :meth:`_orm.PropComparator.and_` where usage with a loader strategy like
+ :func:`_orm.selectinload` against a column that was a member of the ``.c.``
+ collection of a subquery object, where the subquery would be dynamically
+ added to the FROM clause of the statement, would be subject to stale
+ parameter values within the subquery in the SQL statement cache, as the
+ process used by the loader strategy to replace the parameters at execution
+ time would fail to accommodate the subquery when received in this form.
+
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7491
+
+ Fixed recursion overflow which could occur within ORM statement compilation
+ when using either the :func:`_orm.with_loader_criteria` feature or the
+ :meth:`_orm.PropComparator.and_` method within a loader strategy in
+ conjunction with a subquery which referred to the same entity being altered
+ by the criteria option, or loaded by the loader strategy. A check for
+ coming across the same loader criteria option in a recursive fashion has
+ been added to accommodate this scenario.
+
+
+ .. change::
+ :tags: bug, orm, mypy
+ :tickets: 7462, 7368
+
+ Fixed issue where the ``__class_getitem__()`` method of the declarative
+ base class generated by :func:`_orm.as_declarative` would lead to
+ inaccessible class attributes such as ``__table__``, for cases where a
+ ``Generic[T]`` style typing declaration was used in the class hierarchy.
+ This is a continuation of the basic addition of ``__class_getitem__()``
+ in :ticket:`7368`. Pull request courtesy Kai Mueller.
+
+ .. change::
+ :tags: bug, mypy
+ :tickets: 7496
+
+ Fixed mypy regression where the release of mypy 0.930 added additional
+ internal checks to the format of "named types", requiring that they be
+ fully qualified and locatable. This broke the mypy plugin for SQLAlchemy,
+ raising an assertion error, as there was use of symbols such as
+ ``__builtins__`` and other un-locatable or unqualified names that
+ previously had not raised any assertions.
+
+
+ .. change::
+ :tags: bug, engine
+ :tickets: 7432
+
+ Corrected the error message for the ``AttributeError`` that's raised when
+ attempting to write to an attribute on the :class:`_result.Row` class,
+ which is immutable. The previous message claimed the column didn't exist,
+ which is misleading.
+
+ .. change::
+ :tags: bug, mariadb
+ :tickets: 7457
+
+ Corrected the error classes inspected for the "is_disconnect" check for the
+ ``mariadbconnector`` dialect, which was failing for disconnects that
+ occurred due to common MySQL/MariaDB error codes such as 2006; the DBAPI
+ appears to currently use the ``mariadb.InterfaceError`` exception class for
+ disconnect errors such as error code 2006, which has been added to the list
+ of classes checked.
+
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7447
+
+ Fixed caching-related issue where the use of a loader option of the form
+ ``lazyload(aliased(A).bs).joinedload(B.cs)`` would fail to result in the
+ joinedload being invoked for runs subsequent to the query being cached, due
+ to a mismatch for the options / object path applied to the objects loaded
+ for a query with a lead entity that used ``aliased()``.
+
+
+ ..
change:: + :tags: bug, tests, regression + :tickets: 7450 + + Fixed a regression in the test suite where the test called + ``CompareAndCopyTest::test_all_present`` would fail on some platforms due + to additional testing artifacts being detected. Pull request courtesy Nils + Philippsen. + + + .. change:: + :tags: usecase, orm + :tickets: 7410 + + Added :paramref:`_orm.Session.get.execution_options` parameter which was + previously missing from the :meth:`_orm.Session.get` method. + + .. change:: + :tags: bug, engine, regression + :tickets: 7446 + + Fixed regression in the :func:`_engine.make_url` function used to parse URL + strings where the query string parsing would go into a recursion overflow + if a Python 2 ``u''`` string were used. + +.. changelog:: + :version: 1.4.28 + :released: December 9, 2021 + + .. change:: + :tags: bug, mypy + :tickets: 7321 + + Fixed Mypy crash which would occur when using Mypy plugin against code + which made use of :class:`_orm.declared_attr` methods for non-mapped names + like ``__mapper_args__``, ``__table_args__``, or other dunder names, as the + plugin would try to interpret these as mapped attributes which would then + be later mis-handled. As part of this change, the decorated function is + still converted by the plugin into a generic assignment statement (e.g. + ``__mapper_args__: Any``) so that the argument signature can continue to be + annotated in the same way one would for any other ``@classmethod`` without + Mypy complaining about the wrong argument type for a method that isn't + explicitly ``@classmethod``. + + + + .. change:: + :tags: bug, orm, ext + :tickets: 7425 + + Fixed issue where the internal cloning used by the + :meth:`_orm.PropComparator.any` method on a :func:`_orm.relationship` in + the case where the related class also makes use of ORM polymorphic loading, + would fail if a hybrid property on the related, polymorphic class were used + within the criteria for the ``any()`` operation. + + .. change:: + :tags: bug, platform + :tickets: 7311 + + Python 3.10 has deprecated "distutils" in favor of explicit use of + "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports + accordingly. However, since setuptools itself only recently added the + replacement symbols mentioned in pep-632 as of November of 2021 in version + 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy + 1.4 does not have a hard setuptools versioning requirement at this time. + SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout + which will indicate appropriate setuptools versioning up front. + + .. change:: + :tags: bug, sql, regression + :tickets: 7319 + + Extended the :attr:`.TypeDecorator.cache_ok` attribute and corresponding + warning message if this flag is not defined, a behavior first established + for :class:`.TypeDecorator` as part of :ticket:`6436`, to also take place + for :class:`.UserDefinedType`, by generalizing the flag and associated + caching logic to a new common base for these two types, + :class:`.ExternalType` to create :attr:`.UserDefinedType.cache_ok`. + + The change means any current :class:`.UserDefinedType` will now cause SQL + statement caching to no longer take place for statements which make use of + the datatype, along with a warning being emitted, unless the class defines + the :attr:`.UserDefinedType.cache_ok` flag as True. 
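+
+ For example, a minimal sketch of a custom type opting back in to caching
+ (``GeometryType`` here is a hypothetical example type, not part of
+ SQLAlchemy)::
+
+     from sqlalchemy.types import UserDefinedType
+
+     class GeometryType(UserDefinedType):
+         # the constructor arguments below form a safe, hashable cache key
+         cache_ok = True
+
+         def __init__(self, srid=4326):
+             self.srid = srid
+
+         def get_col_spec(self, **kw):
+             return "GEOMETRY(%d)" % self.srid
+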
If the datatype cannot + form a deterministic, hashable cache key derived from its arguments, + the attribute may be set to False which will continue to keep caching disabled but will suppress the + warning. In particular, custom datatypes currently used in packages such as + SQLAlchemy-utils will need to implement this flag. The issue was observed + as a result of a SQLAlchemy-utils datatype that is not currently cacheable. + + .. seealso:: + + :attr:`.ExternalType.cache_ok` + + .. change:: + :tags: deprecated, orm + :tickets: 4390 + + Deprecated an undocumented loader option syntax ``".*"``, which appears to + be no different than passing a single asterisk, and will emit a deprecation + warning if used. This syntax may have been intended for something but there + is currently no need for it. + + + .. change:: + :tags: bug, orm, mypy + :tickets: 7368 + + Fixed issue where the :func:`_orm.as_declarative` decorator and similar + functions used to generate the declarative base class would not copy the + ``__class_getitem__()`` method from a given superclass, which prevented the + use of pep-484 generics in conjunction with the ``Base`` class. Pull + request courtesy Kai Mueller. + + .. change:: + :tags: usecase, engine + :tickets: 7400 + + Added support for ``copy()`` and ``deepcopy()`` to the :class:`_url.URL` + class. Pull request courtesy Tom Ritchford. + + .. change:: + :tags: bug, orm, regression + :tickets: 7318 + + Fixed ORM regression where the new behavior of "eager loaders run on + unexpire" added in :ticket:`1763` would lead to loader option errors being + raised inappropriately for the case where a single :class:`_orm.Query` or + :class:`_sql.Select` were used to load multiple kinds of entities, along + with loader options that apply to just one of those kinds of entity like a + :func:`_orm.joinedload`, and later the objects would be refreshed from + expiration, where the loader options would attempt to be applied to the + mismatched object type and then raise an exception. The check for this + mismatch now bypasses raising an error for this case. + + .. change:: + :tags: bug, sql + :tickets: 7394 + + Custom SQL elements, third party dialects, custom or third party datatypes + will all generate consistent warnings when they do not clearly opt in or + out of SQL statement caching, which is achieved by setting the appropriate + attributes on each type of class. The warning links to documentation + sections which indicate the appropriate approach for each type of object in + order for caching to be enabled. + + .. change:: + :tags: bug, sql + :tickets: 7394 + + Fixed missing caching directives for a few lesser used classes in SQL Core + which would cause ``[no key]`` to be logged for elements which made use of + these. + + .. change:: + :tags: bug, postgresql + :tickets: 7394 + + Fixed missing caching directives for :class:`_postgresql.hstore` and + :class:`_postgresql.array` constructs which would cause ``[no key]`` + to be logged for these elements. + + .. change:: + :tags: bug, orm + :tickets: 7394 + + User defined ORM options, such as those illustrated in the dogpile.caching + example which subclass :class:`_orm.UserDefinedOption`, by definition are + handled on every statement execution and do not need to be considered as + part of the cache key for the statement. 
Caching of the base + :class:`.ExecutableOption` class has been modified so that it is no longer + a :class:`.HasCacheKey` subclass directly, so that the presence of user + defined option objects will not have the unwanted side effect of disabling + statement caching. Only ORM specific loader and criteria options, which are + all internal to SQLAlchemy, now participate within the caching system. + + .. change:: + :tags: bug, orm + :tickets: 7394 + + Fixed issue where mappings that made use of :func:`_orm.synonym` and + potentially other kinds of "proxy" attributes would not in all cases + successfully generate a cache key for their SQL statements, leading to + degraded performance for those statements. + + .. change:: + :tags: sql, usecase + :tickets: 7259 + + "Compound select" methods like :meth:`_sql.Select.union`, + :meth:`_sql.Select.intersect_all` etc. now accept ``*other`` as an argument + rather than ``other`` to allow for multiple additional SELECTs to be + compounded with the parent statement at once. In particular, the change as + applied to :meth:`_sql.CTE.union` and :meth:`_sql.CTE.union_all` now allow + for a so-called "non-linear CTE" to be created with the :class:`_sql.CTE` + construct, whereas previously there was no way to have more than two CTE + sub-elements in a UNION together while still correctly calling upon the CTE + in recursive fashion. Pull request courtesy Eric Masseran. + + .. change:: + :tags: bug, tests + + Implemented support for the test suite to run correctly under Pytest 7. + Previously, only Pytest 6.x was supported for Python 3, however the version + was not pinned on the upper bound in tox.ini. Pytest is not pinned in + tox.ini to be lower than version 8 so that SQLAlchemy versions released + with the current codebase will be able to be tested under tox without + changes to the environment. Much thanks to the Pytest developers for + their help with this issue. + + + .. change:: + :tags: orm, bug + :tickets: 7389 + + Fixed issue where a list mapped with :func:`_orm.relationship` would go + into an endless loop if in-place added to itself, i.e. the ``+=`` operator + were used, as well as if ``.extend()`` were given the same list. + + + .. change:: + :tags: usecase, sql + :tickets: 7386 + + Support multiple clause elements in the :meth:`_sql.Exists.where` method, + unifying the api with the one presented by a normal :func:`_sql.select` + construct. + + .. change:: + :tags: bug, orm + :tickets: 7388 + + Fixed issue where if an exception occurred when the :class:`_orm.Session` + were to close the connection within the :meth:`_orm.Session.commit` method, + when using a context manager for :meth:`_orm.Session.begin` , it would + attempt a rollback which would not be possible as the :class:`_orm.Session` + was in between where the transaction is committed and the connection is + then to be returned to the pool, raising the exception "this + sessiontransaction is in the committed state". This exception can occur + mostly in an asyncio context where CancelledError can be raised. + + .. changelog:: :version: 1.4.27 - :include_notes_from: unreleased_14 + :released: November 11, 2021 + + .. change:: + :tags: bug, engine + :tickets: 7291 + + Fixed issue in future :class:`_future.Connection` object where the + :meth:`_future.Connection.execute` method would not accept a non-dict + mapping object, such as SQLAlchemy's own :class:`.RowMapping` or other + ``abc.collections.Mapping`` object as a parameter dictionary. + + .. 
change:: + :tags: bug, mysql, mariadb + :tickets: 7167 + + Reorganized the list of reserved words into two separate lists, one for + MySQL and one for MariaDB, so that these diverging sets of words can be + managed more accurately; adjusted the MySQL/MariaDB dialect to switch among + these lists based on either explicitly configured or + server-version-detected "MySQL" or "MariaDB" backend. Added all current + reserved words through MySQL 8 and current MariaDB versions including + recently added keywords like "lead" . Pull request courtesy Kevin Kirsche. + + .. change:: + :tags: bug, orm + :tickets: 7224 + + Fixed bug in "relationship to aliased class" feature introduced at + :ref:`relationship_aliased_class` where it was not possible to create a + loader strategy option targeting an attribute on the target using the + :func:`_orm.aliased` construct directly in a second loader option, such as + ``selectinload(A.aliased_bs).joinedload(aliased_b.cs)``, without explicitly + qualifying using :meth:`_orm.PropComparator.of_type` on the preceding + element of the path. Additionally, targeting the non-aliased class directly + would be accepted (inappropriately), but would silently fail, such as + ``selectinload(A.aliased_bs).joinedload(B.cs)``; this now raises an error + referring to the typing mismatch. + + + .. change:: + :tags: bug, schema + :tickets: 7295 + + Fixed issue in :class:`.Table` where the + :paramref:`.Table.implicit_returning` parameter would not be + accommodated correctly when passed along with + :paramref:`.Table.extend_existing` to augment an existing + :class:`.Table`. + + .. change:: + :tags: bug, postgresql, asyncpg + :tickets: 7283 + + Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" + PostgreSQL type instead of "numeric" so that the value ``float(inf)`` can + be accommodated. Added test suite support for persistence of the "inf" + value. + + + .. change:: + :tags: bug, engine, regression + :tickets: 7274 + :versions: 2.0.0b1 + + Fixed regression where the :meth:`_engine.CursorResult.fetchmany` method + would fail to autoclose a server-side cursor (i.e. when ``stream_results`` + or ``yield_per`` is in use, either Core or ORM oriented results) when the + results were fully exhausted. + + .. change:: + :tags: bug, orm + :tickets: 7274 + :versions: 2.0.0b1 + + All :class:`_result.Result` objects will now consistently raise + :class:`_exc.ResourceClosedError` if they are used after a hard close, + which includes the "hard close" that occurs after calling "single row or + value" methods like :meth:`_result.Result.first` and + :meth:`_result.Result.scalar`. This was already the behavior of the most + common class of result objects returned for Core statement executions, i.e. + those based on :class:`_engine.CursorResult`, so this behavior is not new. + However, the change has been extended to properly accommodate for the ORM + "filtering" result objects returned when using 2.0 style ORM queries, + which would previously behave in "soft closed" style of returning empty + results, or wouldn't actually "soft close" at all and would continue + yielding from the underlying cursor. 
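+
+ For example, under the new behavior an ORM-filtered result now acts the
+ same way a Core :class:`_engine.CursorResult` already did (an illustrative
+ sketch; ``engine`` and ``User`` are assumed to be defined elsewhere)::
+
+     from sqlalchemy import select
+     from sqlalchemy.orm import Session
+
+     with Session(engine) as session:
+         result = session.execute(select(User))
+         first_row = result.first()  # "hard closes" the result
+
+         # further fetches now consistently raise ResourceClosedError
+         result.all()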
+
+ As part of this change, also added :meth:`_result.Result.close` to the base
+ :class:`_result.Result` class and implemented it for the filtered result
+ implementations that are used by the ORM, so that it is possible to call
+ the :meth:`_engine.CursorResult.close` method on the underlying
+ :class:`_engine.CursorResult` when the ``yield_per`` execution option
+ is in use, to close a server side cursor before remaining ORM results have
+ been fetched. This was already available for Core result sets; the
+ change makes it available for 2.0 style ORM results as well.
+
+
+ .. change::
+ :tags: bug, mysql
+ :tickets: 7281
+ :versions: 2.0.0b1
+
+ Fixed issue in MySQL :meth:`_mysql.Insert.on_duplicate_key_update` which
+ would render the wrong column name when an expression was used in a VALUES
+ expression. Pull request courtesy Cristian Sabaila.
+
+ .. change::
+ :tags: bug, sql, regression
+ :tickets: 7292
+
+ Fixed regression where the row objects returned for ORM queries, which are
+ now the normal :class:`_sql.Row` objects, would not be interpreted by the
+ :meth:`_sql.ColumnOperators.in_` operator as tuple values to be broken out
+ into individual bound parameters, and would instead pass them as single
+ values to the driver, leading to failures. The change to the "expanding IN"
+ system now accommodates the expression already being of type
+ :class:`.TupleType` and treats values accordingly if so. In the uncommon
+ case of using "tuple-in" with an untyped statement such as a textual
+ statement with no typing information, a tuple value is detected for values
+ that implement ``collections.abc.Sequence``, but that are not ``str`` or
+ ``bytes``, as always when testing for ``Sequence``.
+
+ .. change::
+ :tags: usecase, sql
+
+ Added :class:`.TupleType` to the top level ``sqlalchemy`` import namespace.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 7269
+
+ Fixed issue where the feature of using a string label for ordering or
+ grouping, described at :ref:`tutorial_order_by_label`, would fail to function
+ correctly if used on a :class:`.CTE` construct, when the CTE was embedded
+ inside of an enclosing :class:`_sql.Select` statement that itself was set
+ up as a scalar subquery.
+
+
+
+ .. change::
+ :tags: bug, orm, regression
+ :tickets: 7239
+
+ Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function
+ correctly on a :class:`_orm.Query` that was produced from
+ :meth:`_orm.Query.union`, :meth:`_orm.Query.from_self` or similar.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 7304
+
+ Fixed issue where deferred polymorphic loading of attributes from a
+ joined-table inheritance subclass would fail to populate the attribute
+ correctly if the :func:`_orm.load_only` option was used to originally
+ exclude that attribute, in the case where the load_only was descending
+ from a relationship loader option. The fix allows other valid options
+ such as ``defer(..., raiseload=True)`` etc. to still function as expected.
+
+ .. change::
+ :tags: postgresql, usecase, asyncpg
+ :tickets: 7284
+ :versions: 2.0.0b1
+
+ Added overridable methods ``PGDialect_asyncpg.setup_asyncpg_json_codec``
+ and ``PGDialect_asyncpg.setup_asyncpg_jsonb_codec``, which handle the
+ required task of registering JSON/JSONB codecs for these datatypes when
+ using asyncpg. The change is that these methods are broken out as individual,
+ overridable methods to support third party dialects that need to alter or
+ disable how these particular codecs are set up.
+
+
+
+ ..
change:: + :tags: bug, engine + :tickets: 7272 + :versions: 2.0.0b1 + + Fixed issue in future :class:`_future.Engine` where calling upon + :meth:`_future.Engine.begin` and entering the context manager would not + close the connection if the actual BEGIN operation failed for some reason, + such as an event handler raising an exception; this use case failed to be + tested for the future version of the engine. Note that the "future" context + managers which handle ``begin()`` blocks in Core and ORM don't actually run + the "BEGIN" operation until the context managers are actually entered. This + is different from the legacy version which runs the "BEGIN" operation up + front. + + .. change:: + :tags: mssql, bug + :tickets: 7300 + + Adjusted the compiler's generation of "post compile" symbols including + those used for "expanding IN" as well as for the "schema translate map" to + not be based directly on plain bracketed strings with underscores, as this + conflicts directly with SQL Server's quoting format of also using brackets, + which produces false matches when the compiler replaces "post compile" and + "schema translate" symbols. The issue created easy to reproduce examples + both with the :meth:`.Inspector.get_schema_names` method when used in + conjunction with the + :paramref:`_engine.Connection.execution_options.schema_translate_map` + feature, as well in the unlikely case that a symbol overlapping with the + internal name "POSTCOMPILE" would be used with a feature like "expanding + in". + + + .. change:: + :tags: postgresql, pg8000 + :tickets: 7167 + + Improve array handling when using PostgreSQL with the + pg8000 dialect. + + .. change:: + :tags: bug, orm, regression + :tickets: 7244 + + Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function + correctly when :meth:`_orm.Query.join` were joined to an entity which made + use of :meth:`_orm.PropComparator.of_type` to specify an aliased version of + the target entity. The issue also applies to future style ORM queries + constructed with :func:`_sql.select`. + + + .. change:: + :tags: bug, sql, regression + :tickets: 7287 + + Fixed regression where the :func:`_sql.text` construct would no longer be + accepted as a target case in the "whens" list within a :func:`_sql.case` + construct. The regression appears related to an attempt to guard against + some forms of literal values that were considered to be ambiguous when + passed here; however, there's no reason the target cases shouldn't be + interpreted as open-ended SQL expressions just like anywhere else, and a + literal string or tuple will be converted to a bound parameter as would be + the case elsewhere. .. changelog:: :version: 1.4.26 @@ -248,15 +3331,15 @@ This document details individual issue-level changes made throughout :tags: bug, orm :tickets: 7128 - Fixed bug where iterating a :class:`.Result` from a :class:`_orm.Session` + Fixed bug where iterating a :class:`_result.Result` from a :class:`_orm.Session` after that :class:`_orm.Session` were closed would partially attach objects to that session in an essentially invalid state. It now raises an exception with a link to new documentation if an **un-buffered** result is iterated from a :class:`_orm.Session` that was closed or otherwise had the - :meth:`_orm.Session.expunge_all` method called after that :class:`.Result` + :meth:`_orm.Session.expunge_all` method called after that :class:`_result.Result` was generated. 
The ``prebuffer_rows`` execution option, as is used automatically by the asyncio extension for client-side result sets, may be - used to produce a :class:`.Result` where the ORM objects are prebuffered, + used to produce a :class:`_result.Result` where the ORM objects are prebuffered, and in this case iterating the result will produce a series of detached objects. @@ -1231,7 +4314,7 @@ This document details individual issue-level changes made throughout ``@validates`` validator function or a ``@reconstructor`` reconstruction function, to check for "callable" more liberally such as to accommodate objects based on fundamental attributes like ``__func__`` and - ``__call___``, rather than testing for ``MethodType`` / ``FunctionType``, + ``__call__``, rather than testing for ``MethodType`` / ``FunctionType``, allowing things like cython functions to work properly. Pull request courtesy Miłosz Stypiński. @@ -2094,7 +5177,7 @@ This document details individual issue-level changes made throughout :tickets: 6361 Fixed issue where usage of an explicit :class:`.Sequence` would produce - inconsistent "inline" behavior for an :class:`.Insert` construct that + inconsistent "inline" behavior for an :class:`_sql.Insert` construct that includes multiple values phrases; the first seq would be inline but subsequent ones would be "pre-execute", leading to inconsistent sequence ordering. The sequence expressions are now fully inline. @@ -3541,7 +6624,7 @@ This document details individual issue-level changes made throughout :tags: bug, engine, sqlite :tickets: 5845 - Fixed bug in the 2.0 "future" version of :class:`.Engine` where emitting + Fixed bug in the 2.0 "future" version of :class:`_engine.Engine` where emitting SQL during the :meth:`.EngineEvents.begin` event hook would cause a re-entrant (recursive) condition due to autobegin, affecting among other things the recipe documented for SQLite to allow for savepoints and diff --git a/doc/build/changelog/migration_04.rst b/doc/build/changelog/migration_04.rst index b5031340794..93a2b654fbc 100644 --- a/doc/build/changelog/migration_04.rst +++ b/doc/build/changelog/migration_04.rst @@ -27,7 +27,7 @@ Secondly, anywhere you used to say ``engine=``, :: - myengine = create_engine('sqlite://') + myengine = create_engine("sqlite://") meta = MetaData(myengine) @@ -56,6 +56,7 @@ In 0.3, this code worked: from sqlalchemy import * + class UTCDateTime(types.TypeDecorator): pass @@ -66,6 +67,7 @@ In 0.4, one must do: from sqlalchemy import * from sqlalchemy import types + class UTCDateTime(types.TypeDecorator): pass @@ -119,7 +121,7 @@ when working with mapped classes: :: - session.query(User).filter(and_(User.name == 'fred', User.id > 17)) + session.query(User).filter(and_(User.name == "fred", User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs @@ -139,18 +141,18 @@ available, including what was previously only available in # return all users who contain a particular address with # the email_address like '%foo%' - filter(User.addresses.any(Address.email_address.like('%foo%'))) + filter(User.addresses.any(Address.email_address.like("%foo%"))) # same, email address equals 'foo@bar.com'. 
can fall back to keyword # args for simple comparisons - filter(User.addresses.any(email_address = 'foo@bar.com')) + filter(User.addresses.any(email_address="foo@bar.com")) # return all Addresses whose user attribute has the username 'ed' - filter(Address.user.has(name='ed')) + filter(Address.user.has(name="ed")) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) - filter(Address.user.has(User.id > 5, name='ed')) + filter(Address.user.has(User.id > 5, name="ed")) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. Note that property-based @@ -199,12 +201,20 @@ any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref - mapper(TreeNode, tree_nodes, properties={ - 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) - }) + mapper( + TreeNode, + tree_nodes, + properties={ + "children": relation( + TreeNode, backref=backref("parent", remote_side=tree_nodes.id) + ) + }, + ) # query for node with child containing "bar" two levels deep - session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') + session.query(TreeNode).join(["children", "children"], aliased=True).filter_by( + name="bar" + ) To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against @@ -215,15 +225,15 @@ the same line of aliases: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" - q = sess.query(Node).filter_by(name='n122') + q = sess.query(Node).filter_by(name="n122") # then join to parent with "n12" - q = q.join('parent', aliased=True).filter_by(name='n12') + q = q.join("parent", aliased=True).filter_by(name="n12") # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table - q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') + q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1") node = q.first() @@ -271,17 +281,24 @@ deep you want to go. Lets show the self-referential :: - nodes = Table('nodes', metadata, - Column('id', Integer, primary_key=True), - Column('parent_id', Integer, ForeignKey('nodes.id')), - Column('name', String(30))) + nodes = Table( + "nodes", + metadata, + Column("id", Integer, primary_key=True), + Column("parent_id", Integer, ForeignKey("nodes.id")), + Column("name", String(30)), + ) + class TreeNode(object): pass - mapper(TreeNode, nodes, properties={ - 'children':relation(TreeNode, lazy=False, join_depth=3) - }) + + mapper( + TreeNode, + nodes, + properties={"children": relation(TreeNode, lazy=False, join_depth=3)}, + ) So what happens when we say: @@ -324,10 +341,13 @@ new type, ``Point``. 
Stores an x/y coordinate: def __init__(self, x, y): self.x = x self.y = y + def __composite_values__(self): return self.x, self.y + def __eq__(self, other): return other.x == self.x and other.y == self.y + def __ne__(self, other): return not self.__eq__(other) @@ -341,13 +361,15 @@ Let's create a table of vertices storing two points per row: :: - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) + vertices = Table( + "vertices", + metadata, + Column("id", Integer, primary_key=True), + Column("x1", Integer), + Column("y1", Integer), + Column("x2", Integer), + Column("y2", Integer), + ) Then, map it ! We'll create a ``Vertex`` object which stores two ``Point`` objects: @@ -359,10 +381,15 @@ stores two ``Point`` objects: self.start = start self.end = end - mapper(Vertex, vertices, properties={ - 'start':composite(Point, vertices.c.x1, vertices.c.y1), - 'end':composite(Point, vertices.c.x2, vertices.c.y2) - }) + + mapper( + Vertex, + vertices, + properties={ + "start": composite(Point, vertices.c.x1, vertices.c.y1), + "end": composite(Point, vertices.c.x2, vertices.c.y2), + }, + ) Once you've set up your composite type, it's usable just like any other type: @@ -370,7 +397,7 @@ like any other type: :: - v = Vertex(Point(3, 4), Point(26,15)) + v = Vertex(Point(3, 4), Point(26, 15)) session.save(v) session.flush() @@ -388,7 +415,7 @@ work as primary keys too, and are usable in ``query.get()``: # a Document class which uses a composite Version # object as primary key - document = query.get(Version(1, 'a')) + document = query.get(Version(1, "a")) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -438,16 +465,12 @@ eager in one pass: :: - mapper(Foo, foo_table, properties={ - 'bar':relation(Bar) - }) - mapper(Bar, bar_table, properties={ - 'bat':relation(Bat) - }) + mapper(Foo, foo_table, properties={"bar": relation(Bar)}) + mapper(Bar, bar_table, properties={"bat": relation(Bat)}) mapper(Bat, bat_table) # eager load bar and bat - session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() + session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ @@ -471,7 +494,7 @@ many needs: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute - relation(Item, collection_class=attribute_mapped_collection('keyword')) + relation(Item, collection_class=attribute_mapped_collection("keyword")) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) @@ -493,12 +516,20 @@ columns or subqueries: :: - mapper(User, users, properties={ - 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), - 'numposts': column_property( - select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') - ) - }) + mapper( + User, + users, + properties={ + "fullname": column_property( + (users.c.firstname + users.c.lastname).label("fullname") + ), + "numposts": column_property( + select([func.count(1)], users.c.id == posts.c.user_id) + .correlate(users) + .label("posts") + ), + }, + ) a typical query looks like: @@ -534,7 +565,7 @@ your ``engine`` (or anywhere): from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker - engine = create_engine('myengine://') + engine = create_engine("myengine://") Session = 
sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely @@ -542,7 +573,6 @@ your ``engine`` (or anywhere): sess.save(someobject) sess.flush() - If you need to post-configure your Session, say with an engine, add it later with ``configure()``: @@ -562,7 +592,7 @@ with both ``sessionmaker`` as well as ``create_session()``: Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) @@ -573,7 +603,6 @@ with both ``sessionmaker`` as well as ``create_session()``: sess2 = Session() assert sess is sess2 - When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is @@ -586,11 +615,10 @@ old ``objectstore`` days.... # "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) - u = User(name='wendy') + u = User(name="wendy") Session.commit() - Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -624,13 +652,13 @@ Also, ``autoflush=True`` means the ``Session`` will Session = sessionmaker(bind=engine, autoflush=True, transactional=True) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) # wendy is flushed, comes right back from a query - wendy = sess.query(User).filter_by(name='wendy').one() + wendy = sess.query(User).filter_by(name="wendy").one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -649,7 +677,7 @@ background). # use the session - sess.commit() # commit transaction + sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. 
non-ORM) transaction is easy: @@ -745,7 +773,7 @@ Just like it says: :: - b = bindparam('foo', type_=String) + b = bindparam("foo", type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ @@ -847,8 +875,18 @@ Out Parameters for Oracle :: - result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) - assert result.out_parameters == {'y':10, 'z':75} + result = engine.execute( + text( + "begin foo(:x, :y, :z); end;", + bindparams=[ + bindparam("x", Numeric), + outparam("y", Numeric), + outparam("z", Numeric), + ], + ), + x=5, + ) + assert result.out_parameters == {"y": 10, "z": 75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- diff --git a/doc/build/changelog/migration_05.rst b/doc/build/changelog/migration_05.rst index 64b69e15230..3d7bb52df30 100644 --- a/doc/build/changelog/migration_05.rst +++ b/doc/build/changelog/migration_05.rst @@ -64,15 +64,21 @@ Object Relational Mapping :: - session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) + session.query(User.name, func.count(Address.id).label("numaddresses")).join( + Address + ).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: - for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): - print("name", row.name, "number", row.numaddresses) + for row in ( + session.query(User.name, func.count(Address.id).label("numaddresses")) + .join(Address) + .group_by(User.name) + ): + print("name", row.name, "number", row.numaddresses) ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to @@ -80,10 +86,15 @@ Object Relational Mapping :: - subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() - recipes = session.query(Recipe).filter(exists(). - where(Recipe.id==recipe_keywords.c.recipe_id). - where(recipe_keywords.c.keyword_id==subq.c.keyword_id) + subq = ( + session.query(Keyword.id.label("keyword_id")) + .filter(Keyword.name.in_(["beans", "carrots"])) + .subquery() + ) + recipes = session.query(Recipe).filter( + exists() + .where(Recipe.id == recipe_keywords.c.recipe_id) + .where(recipe_keywords.c.keyword_id == subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** @@ -223,17 +234,24 @@ Object Relational Mapping :: - mapper(User, users, properties={ - 'addresses':relation(Address, order_by=addresses.c.id) - }, order_by=users.c.id) + mapper( + User, + users, + properties={"addresses": relation(Address, order_by=addresses.c.id)}, + order_by=users.c.id, + ) To set ordering on a backref, use the ``backref()`` function: :: - 'keywords':relation(Keyword, secondary=item_keywords, - order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) + "keywords": relation( + Keyword, + secondary=item_keywords, + order_by=keywords.c.name, + backref=backref("items", order_by=items.c.id), + ) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using @@ -244,7 +262,7 @@ Object Relational Mapping class MyClass(MyDeclarativeBase): ... 
- 'addresses':relation("Address", order_by="Address.id") + "addresses": relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of @@ -402,14 +420,17 @@ Schema/Types convert_result_value methods """ + def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) + return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) + return convert def convert_result_value(self, value, dialect): @@ -461,10 +482,10 @@ Schema/Types dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way - '2008-06-27 12:00:00.125' + "2008-06-27 12:00:00.125" # new way - '2008-06-27 12:00:00.000125' + "2008-06-27 12:00:00.000125" So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the @@ -481,6 +502,7 @@ Schema/Types :: from sqlalchemy.databases.sqlite import DateTimeMixin + DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default @@ -522,7 +544,7 @@ data-driven, it takes ``[args]``. :: - query.join('orders', 'items') + query.join("orders", "items") query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a @@ -605,6 +627,7 @@ Removed :: from sqlalchemy.orm import aliased + address_alias = aliased(Address) print(session.query(User, address_alias).join((address_alias, User.addresses)).all()) diff --git a/doc/build/changelog/migration_06.rst b/doc/build/changelog/migration_06.rst index 0867fefe029..73c57bd9311 100644 --- a/doc/build/changelog/migration_06.rst +++ b/doc/build/changelog/migration_06.rst @@ -73,7 +73,7 @@ will use psycopg2: :: - create_engine('postgresql://scott:tiger@localhost/test') + create_engine("postgresql://scott:tiger@localhost/test") However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus @@ -81,7 +81,7 @@ sign "+": :: - create_engine('postgresql+pg8000://scott:tiger@localhost/test') + create_engine("postgresql+pg8000://scott:tiger@localhost/test") Important Dialect Links: @@ -138,8 +138,15 @@ set of PG types: :: - from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ - VARCHAR, MACADDR, DATE, BYTEA + from sqlalchemy.dialects.postgresql import ( + INTEGER, + BIGINT, + SMALLINT, + VARCHAR, + MACADDR, + DATE, + BYTEA, + ) Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it @@ -164,7 +171,7 @@ object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column - >>> column('foo') == 5 + >>> column("foo") == 5 This so that Python expressions produce SQL expressions when @@ -172,16 +179,15 @@ converted to strings: :: - >>> str(column('foo') == 5) + >>> str(column("foo") == 5) 'foo = :foo_1' But what happens if we say this? :: - >>> if column('foo') == 5: + >>> if column("foo") == 5: ... print("yes") - ... In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which @@ -191,11 +197,11 @@ as to that being compared. 
Meaning: :: - >>> bool(column('foo') == 5) + >>> bool(column("foo") == 5) False - >>> bool(column('foo') == column('foo')) + >>> bool(column("foo") == column("foo")) False - >>> c = column('foo') + >>> c = column("foo") >>> bool(c == c) True >>> @@ -252,7 +258,7 @@ sets: :: - connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) + connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the @@ -268,10 +274,12 @@ works: :: - connection.execute(table.insert(), - {'timestamp':today, 'data':'row1'}, - {'timestamp':today, 'data':'row2'}, - {'data':'row3'}) + connection.execute( + table.insert(), + {"timestamp": today, "data": "row1"}, + {"timestamp": today, "data": "row2"}, + {"data": "row3"}, + ) Because the third row does not specify the 'timestamp' column. Previous versions of SQLAlchemy would simply insert @@ -392,7 +400,7 @@ with tables or metadata objects: from sqlalchemy.schema import DDL - DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) + DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD @@ -402,7 +410,7 @@ CONSTRAINT, etc.: from sqlalchemy.schema import Constraint, AddConstraint - AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) + AddContraint(CheckConstraint("value > 5")).execute_at("after-create", mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy @@ -428,20 +436,22 @@ make your own: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles - class AlterColumn(DDLElement): + class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd + @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..." 
% ( element.column.table.name, element.column.name, - element.cmd + element.cmd, ) + engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements @@ -566,6 +576,7 @@ To use an inspector: :: from sqlalchemy.engine.reflection import Inspector + insp = Inspector.from_engine(my_engine) print(insp.get_schema_names()) @@ -578,10 +589,10 @@ such as that of PostgreSQL which provides a :: - my_engine = create_engine('postgresql://...') + my_engine = create_engine("postgresql://...") pg_insp = Inspector.from_engine(my_engine) - print(pg_insp.get_table_oid('my_table')) + print(pg_insp.get_table_oid("my_table")) RETURNING Support ================= @@ -600,10 +611,10 @@ columns will be returned as a regular result set: result = connection.execute( - table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) - ) + table.insert().values(data="some data").returning(table.c.id, table.c.timestamp) + ) row = result.first() - print("ID:", row['id'], "Timestamp:", row['timestamp']) + print("ID:", row["id"], "Timestamp:", row["timestamp"]) The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an @@ -740,7 +751,7 @@ that converts unicode back to utf-8, or whatever is desired: def process_result_value(self, value, dialect): if isinstance(value, unicode): - value = value.encode('utf-8') + value = value.encode("utf-8") return value Note that the ``assert_unicode`` flag is now deprecated. @@ -968,9 +979,11 @@ At mapper level: :: mapper(Child, child) - mapper(Parent, parent, properties={ - 'child':relationship(Child, lazy='joined', innerjoin=True) - }) + mapper( + Parent, + parent, + properties={"child": relationship(Child, lazy="joined", innerjoin=True)}, + ) At query time level: diff --git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index 12a3c23e6d0..4763b9134c4 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -244,7 +244,7 @@ with an explicit onclause is now: :: - query.join(SomeClass, SomeClass.id==ParentClass.some_id) + query.join(SomeClass, SomeClass.id == ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to @@ -336,10 +336,12 @@ to the creation of the index outside of the Table. 
That is: :: - Table('mytable', metadata, - Column('id',Integer, primary_key=True), - Column('name', String(50), nullable=False), - Index('idx_name', 'name') + Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Index("idx_name", "name"), ) The primary rationale here is for the benefit of declarative @@ -348,14 +350,16 @@ The primary rationale here is for the benefit of declarative :: class HasNameMixin(object): - name = Column('name', String(50), nullable=False) + name = Column("name", String(50), nullable=False) + @declared_attr def __table_args__(cls): - return (Index('name'), {}) + return (Index("name"), {}) + class User(HasNameMixin, Base): - __tablename__ = 'user' - id = Column('id', Integer, primary_key=True) + __tablename__ = "user" + id = Column("id", Integer, primary_key=True) `Indexes `_ @@ -373,8 +377,7 @@ The best introduction to window functions is on PostgreSQL's site, where window functions have been supported since version 8.4: -https://www.postgresql.org/docs/9.0/static/tutorial- -window.html +https://www.postgresql.org/docs/current/static/tutorial-window.html SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the ``over()`` method, @@ -386,17 +389,16 @@ tutorial: from sqlalchemy.sql import table, column, select, func - empsalary = table('empsalary', - column('depname'), - column('empno'), - column('salary')) + empsalary = table("empsalary", column("depname"), column("empno"), column("salary")) - s = select([ + s = select( + [ empsalary, - func.avg(empsalary.c.salary). - over(partition_by=empsalary.c.depname). - label('avg') - ]) + func.avg(empsalary.c.salary) + .over(partition_by=empsalary.c.depname) + .label("avg"), + ] + ) print(s) @@ -496,7 +498,7 @@ equivalent to: :: - query.from_self(func.count(literal_column('1'))).scalar() + query.from_self(func.count(literal_column("1"))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a @@ -535,6 +537,7 @@ be used: :: from sqlalchemy import func + session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: @@ -542,7 +545,8 @@ or for ``count(*)``: :: from sqlalchemy import func, literal_column - session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() + + session.query(func.count(literal_column("*"))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- @@ -691,8 +695,11 @@ function, can be mapped. from sqlalchemy import select, func from sqlalchemy.orm import mapper + class Subset(object): pass + + selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) @@ -774,10 +781,11 @@ mutations, the type object must be constructed with :: - Table('mytable', metadata, + Table( + "mytable", + metadata, # .... 
- - Column('pickled_data', PickleType(mutable=True)) + Column("pickled_data", PickleType(mutable=True)), ) The ``mutable=True`` flag is being phased out, in favor of @@ -1037,7 +1045,7 @@ key column ``id``, the following now produces an error: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column @@ -1048,10 +1056,8 @@ explicit: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) - mapper(FooBar, foobar, properties={ - 'id':[foo.c.id, bar.c.id] - }) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) + mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]}) :ticket:`1896` @@ -1232,14 +1238,14 @@ backend: :: - select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) + select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"]) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: - select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') + select([mytable]).prefix_with("HIGH_PRIORITY", "ALL") ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst index a4dc58549ff..4a07518539b 100644 --- a/doc/build/changelog/migration_08.rst +++ b/doc/build/changelog/migration_08.rst @@ -71,16 +71,17 @@ entities. The new system includes these features: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id_one = Column(Integer, ForeignKey('child.id')) - child_id_two = Column(Integer, ForeignKey('child.id')) + child_id_one = Column(Integer, ForeignKey("child.id")) + child_id_two = Column(Integer, ForeignKey("child.id")) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign @@ -90,11 +91,11 @@ entities. The new system includes these features: :: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -102,10 +103,9 @@ entities. The new system includes these features: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` @@ -144,18 +144,19 @@ entities. 
The new system includes these features: expected in most cases:: class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The new :func:`_orm.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. These annotations @@ -167,8 +168,9 @@ entities. The new system includes these features: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -176,11 +178,10 @@ entities. The new system includes these features: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. seealso:: @@ -226,12 +227,11 @@ certain contexts, such as :class:`.AliasedInsp` and A walkthrough of some key capabilities follows:: >>> class User(Base): - ... __tablename__ = 'user' + ... __tablename__ = "user" ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") - ... >>> # universal entry point is inspect() >>> b = inspect(User) @@ -285,7 +285,7 @@ A walkthrough of some key capabilities follows:: "user".id = address.user_id >>> # inspect works on instances - >>> u1 = User(id=3, name='x') + >>> u1 = User(id=3, name="x") >>> b = inspect(u1) >>> # it returns the InstanceState @@ -354,10 +354,11 @@ usable anywhere: :: from sqlalchemy.orm import with_polymorphic + palias = with_polymorphic(Person, [Engineer, Manager]) - session.query(Company).\ - join(palias, Company.employees).\ - filter(or_(Engineer.language=='java', Manager.hair=='pointy')) + session.query(Company).join(palias, Company.employees).filter( + or_(Engineer.language == "java", Manager.hair == "pointy") + ) .. 
seealso:: @@ -377,9 +378,11 @@ by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like @@ -389,26 +392,28 @@ and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) # pass subclasses to eager loads (implicitly applies with_polymorphic) - q = s.query(ParentThing).\ - options( - joinedload_all( - ParentThing.container, - DataContainer.jobs.of_type(SubJob) - )) + q = s.query(ParentThing).options( + joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob)) + ) # control self-referential aliasing with any()/has() Job_A = aliased(Job) - q = s.query(Job).join(DataContainer.jobs).\ - filter( - DataContainer.jobs.of_type(Job_A).\ - any(and_(Job_A.id < Job.id, Job_A.type=='fred') - ) - ) + q = ( + s.query(Job) + .join(DataContainer.jobs) + .filter( + DataContainer.jobs.of_type(Job_A).any( + and_(Job_A.id < Job.id, Job_A.type == "fred") + ) + ) + ) .. seealso:: @@ -429,13 +434,15 @@ with a declarative base class:: Base = declarative_base() + @event.listens_for("load", Base, propagate=True) def on_load(target, context): print("New instance loaded:", target) + # on_load() will be applied to SomeClass class SomeClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" # ... @@ -453,8 +460,9 @@ can be referred to via dotted name in expressions:: class Snack(Base): # ... - peanuts = relationship("nuts.Peanut", - primaryjoin="nuts.Peanut.snack_id == Snack.id") + peanuts = relationship( + "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id" + ) The resolution allows that any full or partial disambiguating package name can be used. 
If the @@ -484,17 +492,22 @@ in one step: class ReflectedOne(DeferredReflection, Base): __abstract__ = True + class ReflectedTwo(DeferredReflection, Base): __abstract__ = True + class MyClass(ReflectedOne): - __tablename__ = 'mytable' + __tablename__ = "mytable" + class MyOtherClass(ReflectedOne): - __tablename__ = 'myothertable' + __tablename__ = "myothertable" + class YetAnotherClass(ReflectedTwo): - __tablename__ = 'yetanothertable' + __tablename__ = "yetanothertable" + ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) @@ -535,10 +548,9 @@ Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - update({"data":"x"}) + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).update({"data": "x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the @@ -548,10 +560,9 @@ given ``Engineer`` as a joined subclass of ``Person``: :: - query(Engineer).\ - filter(Person.id==Engineer.id).\ - filter(Person.name=='dilbert').\ - update({"engineer_data":"java"}) + query(Engineer).filter(Person.id == Engineer.id).filter( + Person.name == "dilbert" + ).update({"engineer_data": "java"}) would produce: @@ -649,6 +660,7 @@ For example, to add logarithm support to :class:`.Numeric` types: from sqlalchemy.types import Numeric from sqlalchemy.sql import func + class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): @@ -659,16 +671,17 @@ The new type is usable like any other type: :: - data = Table('data', metadata, - Column('id', Integer, primary_key=True), - Column('x', CustomNumeric(10, 5)), - Column('y', CustomNumeric(10, 5)) - ) + data = Table( + "data", + metadata, + Column("id", Integer, primary_key=True), + Column("x", CustomNumeric(10, 5)), + Column("y", CustomNumeric(10, 5)), + ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print(conn.execute(stmt).fetchall()) - New features which have come from this immediately include support for PostgreSQL's HSTORE type, as well as new operations associated with PostgreSQL's ARRAY @@ -696,11 +709,13 @@ support this syntax, including PostgreSQL, SQLite, and MySQL. It is not the same thing as the usual ``executemany()`` style of INSERT which remains unchanged:: - users.insert().values([ - {"name": "some name"}, - {"name": "some other name"}, - {"name": "yet another name"}, - ]) + users.insert().values( + [ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ] + ) .. 
seealso:: @@ -721,6 +736,7 @@ functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData + class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) @@ -728,18 +744,15 @@ functionality, except on the database side:: def column_expression(self, col): return func.lower(col) + metadata = MetaData() - test_table = Table( - 'test_table', - metadata, - Column('data', LowerString) - ) + test_table = Table("test_table", metadata, Column("data", LowerString)) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: - >>> print(select([test_table]).where(test_table.c.data == 'HI')) + >>> print(select([test_table]).where(test_table.c.data == "HI")) SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) @@ -789,16 +802,17 @@ against a particular target selectable:: signatures = relationship("Signature", lazy=False) + class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( - select([func.count('*')]).\ - where(SnortEvent.signature == id). - correlate_except(SnortEvent) - ) + select([func.count("*")]) + .where(SnortEvent.signature == id) + .correlate_except(SnortEvent) + ) .. seealso:: @@ -818,19 +832,16 @@ and containment methods such as from sqlalchemy.dialects.postgresql import HSTORE - data = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('hstore_data', HSTORE) - ) - - engine.execute( - select([data.c.hstore_data['some_key']]) - ).scalar() + data = Table( + "data_table", + metadata, + Column("id", Integer, primary_key=True), + Column("hstore_data", HSTORE), + ) - engine.execute( - select([data.c.hstore_data.matrix()]) - ).scalar() + engine.execute(select([data.c.hstore_data["some_key"]])).scalar() + engine.execute(select([data.c.hstore_data.matrix()])).scalar() .. seealso:: @@ -861,30 +872,20 @@ results: The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: - result = conn.execute( - select([mytable.c.arraycol[2]]) - ) + result = conn.execute(select([mytable.c.arraycol[2]])) slice access in SELECT:: - result = conn.execute( - select([mytable.c.arraycol[2:4]]) - ) + result = conn.execute(select([mytable.c.arraycol[2:4]])) slice updates in UPDATE:: - conn.execute( - mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) - ) + conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql - >>> conn.scalar( - ... select([ - ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) - ... ]) - ... ) + >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])])) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: @@ -912,20 +913,24 @@ everything else. 
:: - Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) - Column('sometimestamp', sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" ) ) - Column('somedate', sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)", ) ) + Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True)) + Column( "sometimestamp", sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})", ), ) + Column( "somedate", sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)", ), ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. @@ -946,7 +951,7 @@ The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` is used:: - >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) + >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))]) >>> print(stmt) SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable @@ -1047,33 +1052,35 @@ The new behavior allows the following test case to work:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship( + User, backref=backref("user_keywords", cascade="all, delete-orphan") + ) - keyword = relationship("Keyword", - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + keyword = relationship( + "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") + ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) + from sqlalchemy import create_engine from sqlalchemy.orm import Session @@ -1103,7 +1110,6 @@ The new behavior allows the following test case to work:: session.commit() - :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added @@ -1129,9 +1135,9 @@ use cases should use the new "before_attach" event: @event.listens_for(Session, "before_attach") def before_attach(session, instance): - instance.some_necessary_attribute = session.query(Widget).\ - filter_by(instance.widget_name).\ - first() + instance.some_necessary_attribute = ( + session.query(Widget).filter_by(instance.widget_name).first() + ) :ticket:`2464` @@ -1146,11 +1152,13 @@ parent: :: - subq = session.query(Entity.value).\ -
filter(Entity.id==Parent.entity_id).\ - correlate(Parent).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = ( + session.query(Entity.value) + .filter(Entity.id == Parent.entity_id) + .correlate(Parent) + .as_scalar() + ) + session.query(Parent).filter(subq == "some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. @@ -1158,10 +1166,8 @@ The above statement in 0.8 will correlate automatically: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar() + session.query(Parent).filter(subq == "some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an @@ -1187,8 +1193,8 @@ objects relative to what's being selected:: from sqlalchemy.sql import table, column, select - t1 = table('t1', column('x')) - t2 = table('t2', column('y')) + t1 = table("t1", column("x")) + t2 = table("t2", column("y")) s = select([t1, t2]).correlate(t1) print(s) @@ -1263,8 +1269,8 @@ doing something like this: :: - scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') - select([sometable]).where(sometable.c.id==scalar_subq) + scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo") + select([sometable]).where(sometable.c.id == scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)". The MSSQL dialect @@ -1313,32 +1319,28 @@ key would be ignored, inconsistently versus when :: # before 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # would be accessible like this - s.c.col1 # would raise AttributeError + s.c.column_one # would be accessible like this + s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # would raise AttributeError - s.c.table1_col1 # would be accessible like this + s.c.table1_column_one # would raise AttributeError + s.c.table1_col1 # would be accessible like this In 0.8, :attr:`_schema.Column.key` is honored in both cases: :: # with 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # works - s.c.col1 # AttributeError + s.c.column_one # works + s.c.col1 # AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # works - s.c.table1_col1 # AttributeError + s.c.table1_column_one # works + s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form @@ -1408,8 +1410,8 @@ warning: :: - t1 = table('t1', column('x')) - t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" + t1 = table("t1", column("x")) + t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` @@ -1439,7 +1441,7 @@ always compared case-insensitively: :: >>> row = result.fetchone() - >>> row['foo'] == row['FOO'] == row['Foo'] + >>> row["foo"] == row["FOO"] == row["Foo"] True This was for the benefit of a few dialects which in the diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst 
index 70fa49e3439..2e45695abba 100644 --- a/doc/build/changelog/migration_09.rst +++ b/doc/build/changelog/migration_09.rst @@ -60,8 +60,7 @@ Using a :class:`_query.Query` in conjunction with a composite attribute now retu type maintained by that composite, rather than being broken out into individual columns. Using the mapping setup at :ref:`mapper_composite`:: - >>> session.query(Vertex.start, Vertex.end).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all() [(Point(x=3, y=4), Point(x=5, y=6))] This change is backwards-incompatible with code that expects the individual attribute @@ -69,8 +68,9 @@ to be expanded into individual columns. To get that behavior, use the ``.clause accessor:: - >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter( + ... Vertex.start == Point(3, 4) + ... ).all() [(3, 4, 5, 6)] .. seealso:: @@ -93,9 +93,11 @@ Consider the following example against the usual ``User`` mapping:: select_stmt = select([User]).where(User.id == 7).alias() - q = session.query(User).\ - join(select_stmt, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .join(select_stmt, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) The above statement predictably renders SQL like the following:: @@ -109,10 +111,12 @@ If we wanted to reverse the order of the left and right elements of the JOIN, the documentation would lead us to believe we could use :meth:`_query.Query.select_from` to do so:: - q = session.query(User).\ - select_from(select_stmt).\ - join(User, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .select_from(select_stmt) + .join(User, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from` would apply the ``select_stmt`` to **replace** the ``User`` entity, as it @@ -137,7 +141,7 @@ to selecting from a customized :func:`.aliased` construct:: select_stmt = select([User]).where(User.id == 7) user_from_stmt = aliased(User, select_stmt.alias()) - q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed') + q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed") So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces the SQL we expect:: @@ -180,17 +184,20 @@ The change is illustrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) a = relationship("A", backref=backref("bs", viewonly=True)) + e = create_engine("sqlite://") Base.metadata.create_all(e) @@ -229,16 +236,17 @@ the "association" row being present or not when the comparison is against Consider this mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(Integer, ForeignKey('b.id'), primary_key=True) + b_id = Column(Integer, ForeignKey("b.id"), primary_key=True) b = relationship("B") b_value = association_proxy("b", "value") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) value = Column(String) @@ -323,21 
+331,24 @@ proxied value. E.g.:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) bname = association_proxy("b", "name") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) name = Column(String) + a1 = A() # this is how m2o's always have worked @@ -370,17 +381,19 @@ This is a small change demonstrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) sess = Session(e) - a1 = A(data='a1') + a1 = A(data="a1") sess.add(a1) sess.commit() # a1 is now expired @@ -388,11 +401,23 @@ This is a small change demonstrated as follows:: assert inspect(a1).attrs.data.history == (None, None, None) # in 0.8, this would fail to load the unloaded state. - assert attributes.get_history(a1, 'data') == ((), ['a1',], ()) + assert attributes.get_history(a1, "data") == ( + (), + [ + "a1", + ], + (), + ) # load_history() is now equivalent to get_history() with # passive=PASSIVE_OFF ^ INIT_OK - assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ()) + assert inspect(a1).attrs.data.load_history() == ( + (), + [ + "a1", + ], + (), + ) :ticket:`2787` @@ -452,14 +477,10 @@ use the :meth:`.TypeEngine.with_variant` method:: from sqlalchemy.dialects.mysql import INTEGER d = Date().with_variant( - DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), - "sqlite" - ) + DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite" + ) - i = Integer().with_variant( - INTEGER(display_width=5), - "mysql" - ) + i = Integer().with_variant(INTEGER(display_width=5), "mysql") :meth:`.TypeEngine.with_variant` isn't new, it was added in SQLAlchemy 0.7.2. 
So code that is running on the 0.8 series can be corrected to use @@ -549,7 +570,7 @@ The precedence rules for COLLATE have been changed Previously, an expression like the following:: - print((column('x') == 'somevalue').collate("en_EN")) + print((column("x") == "somevalue").collate("en_EN")) would produce an expression like this:: @@ -567,7 +588,7 @@ The potentially backwards incompatible change arises if the :meth:`.ColumnOperators.collate` operator is being applied to the right-hand column, as follows:: - print(column('x') == literal('somevalue').collate("en_EN")) + print(column("x") == literal("somevalue").collate("en_EN")) In 0.8, this produces:: @@ -584,11 +605,11 @@ The :meth:`.ColumnOperators.collate` operator now works more appropriately withi generated:: >>> # 0.8 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) (x COLLATE en_EN) DESC >>> # 0.9 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) x COLLATE en_EN DESC :ticket:`2879` @@ -604,7 +625,7 @@ The :class:`_postgresql.ENUM` type will now apply escaping to single quote signs within the enumerated values:: >>> from sqlalchemy.dialects import postgresql - >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum") + >>> type = postgresql.ENUM("one", "two", "three's", name="myenum") >>> from sqlalchemy.dialects.postgresql import base >>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect())) CREATE TYPE myenum AS ENUM ('one','two','three''s') @@ -633,6 +654,7 @@ from all locations in which it had been established:: """listen for before_insert""" # ... + event.remove(MyClass, "before_insert", my_before_insert) In the example above, the ``propagate=True`` flag is set. This @@ -689,13 +711,9 @@ Setting an option on path that is based on a subclass requires that all links in the path be spelled out as class bound attributes, since the :meth:`.PropComparator.of_type` method needs to be called:: - session.query(Company).\ - options( - subqueryload_all( - Company.employees.of_type(Engineer), - Engineer.machines - ) - ) + session.query(Company).options( + subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines) + ) **New Way** @@ -726,7 +744,6 @@ but the intent is clearer:: query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords")) - The dotted style can still be taken advantage of, particularly in the case of skipping over several path elements:: @@ -791,7 +808,6 @@ others:: # undefer all Address columns query(User).options(defaultload(User.addresses).undefer("*")) - :ticket:`1418` @@ -826,7 +842,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.alias() stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) # or into a cte(): @@ -834,7 +851,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.cte("x") stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) :ticket:`2877` @@ -850,9 +868,9 @@ compatible construct can be passed to the new method :meth:`_expression.Insert.f where it will be used to render an ``INSERT .. 
SELECT`` construct:: >>> from sqlalchemy.sql import table, column - >>> t1 = table('t1', column('a'), column('b')) - >>> t2 = table('t2', column('x'), column('y')) - >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) + >>> t1 = table("t1", column("a"), column("b")) + >>> t2 = table("t2", column("x"), column("y")) + >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5))) INSERT INTO t1 (a, b) SELECT t2.x, t2.y FROM t2 WHERE t2.y = :y_1 @@ -861,7 +879,7 @@ The construct is smart enough to also accommodate ORM objects such as classes and :class:`_query.Query` objects:: s = Session() - q = s.query(User.id, User.name).filter_by(name='ed') + q = s.query(User.id, User.name).filter_by(name="ed") ins = insert(Address).from_select((Address.id, Address.email_address), q) rendering:: @@ -920,9 +938,10 @@ for ``.decimal_return_scale`` if it is not otherwise specified. If both from sqlalchemy.dialects.mysql import DOUBLE import decimal - data = Table('data', metadata, - Column('double_value', - mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)) + data = Table( + "data", + metadata, + Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)), ) conn.execute( @@ -938,7 +957,6 @@ for ``.decimal_return_scale`` if it is not otherwise specified. If both # much precision for DOUBLE assert result == decimal.Decimal("45.768392065789") - :ticket:`2867` @@ -1004,8 +1022,9 @@ from a backref:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") @@ -1015,21 +1034,22 @@ from a backref:: print("A.bs validator") return item + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) @validates("a", include_backrefs=False) def validate_a(self, key, item): print("B.a validator") return item + a1 = A() a1.bs.append(B()) # prints only "A.bs validator" - :ticket:`1535` @@ -1262,14 +1282,9 @@ without any subqueries generated:: employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True) - session.query(Company).join( - Company.employees.of_type(employee_alias) - ).filter( - or_( - Engineer.primary_language == 'python', - Manager.manager_name == 'dilbert' - ) - ) + session.query(Company).join(Company.employees.of_type(employee_alias)).filter( + or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert") + ) Generates (everywhere except SQLite):: @@ -1295,7 +1310,9 @@ on the right side. 
Normally, a joined eager load chain like the following:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) Would not produce an inner join; because of the LEFT OUTER JOIN from user->order, joined eager loading could not use an INNER join from order->items without changing @@ -1311,7 +1328,9 @@ the new "right-nested joins are OK" logic would kick in, and we'd get:: Since we missed the boat on that, to avoid further regressions we've added the above functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested") + ) This feature is new in 0.9.4. @@ -1406,16 +1425,18 @@ replacement operation, which in turn should cause the item to be removed from a previous collection:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey('parent.id')) + parent_id = Column(ForeignKey("parent.id")) + p1 = Parent() p2 = Parent() @@ -1520,7 +1541,7 @@ Starting with a table such as this:: from sqlalchemy import Table, Boolean, Integer, Column, MetaData - t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer)) + t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer)) A select construct will now render the boolean column as a binary expression on backends that don't feature ``true``/``false`` constant behavior:: @@ -1535,8 +1556,9 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi "short circuit" behavior, that is truncating a rendered expression, when a :func:`.true` or :func:`.false` constant is present:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=postgresql.dialect())) + >>> print( + ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect()) + ... ) SELECT t.x, t.y FROM t WHERE false :func:`.true` can be used as the base to build up an expression:: @@ -1549,8 +1571,7 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi The boolean constants :func:`.true` and :func:`.false` themselves render as ``0 = 1`` and ``1 = 1`` for a backend with no boolean constants:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=mysql.dialect())) + >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect())) SELECT t.x, t.y FROM t WHERE 0 = 1 Interpretation of ``None``, while not particularly valid SQL, is at least @@ -1581,7 +1602,7 @@ E.g. 
an example like:: from sqlalchemy.sql import table, column, select, func - t = table('t', column('c1'), column('c2')) + t = table("t", column("c1"), column("c2")) expr = (func.foo(t.c.c1) + t.c.c2).label("expr") stmt = select([expr]).order_by(expr) @@ -1620,16 +1641,16 @@ The ``__eq__()`` method now compares both sides as a tuple and also an ``__lt__()`` method has been added:: users.insert().execute( - dict(user_id=1, user_name='foo'), - dict(user_id=2, user_name='bar'), - dict(user_id=3, user_name='def'), - ) + dict(user_id=1, user_name="foo"), + dict(user_id=2, user_name="bar"), + dict(user_id=3, user_name="def"), + ) rows = users.select().order_by(users.c.user_name).execute().fetchall() - eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')]) + eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")]) - eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')]) + eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")]) :ticket:`2848` @@ -1667,7 +1688,7 @@ Above, ``bp`` remains unchanged, but the ``String`` type will be used when the statement is executed, which we can see by examining the ``binds`` dictionary:: >>> compiled = stmt.compile() - >>> compiled.binds['some_col'].type + >>> compiled.binds["some_col"].type String The feature allows custom types to take their expected effect within INSERT/UPDATE @@ -1727,10 +1748,10 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) >>> t2.c.t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() @@ -1738,16 +1759,23 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, - ... Column('t1a'), Column('t1b'), - ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b'])) + >>> t2 = Table( + ... "t2", + ... metadata, + ... Column("t1a"), + ... Column("t1b"), + ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]), + ... ) >>> t2.c.t1a.type NullType() >>> t2.c.t1b.type NullType() - >>> t1 = Table('t1', metadata, - ... Column('a', Integer, primary_key=True), - ... Column('b', Integer, primary_key=True)) + >>> t1 = Table( + ... "t1", + ... metadata, + ... Column("a", Integer, primary_key=True), + ... Column("b", Integer, primary_key=True), + ... 
) >>> t2.c.t1a.type Integer() >>> t2.c.t1b.type @@ -1758,13 +1786,13 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) - >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) + >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id"))) >>> t2.c.t1id.type NullType() >>> t3.c.t2t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() >>> t3.c.t2t1id.type diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 68fb0bd7773..0c5f9187dce 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -71,15 +71,16 @@ once, a query as a pre-compiled unit begins to be feasible:: bakery = baked.bakery() + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -109,10 +110,11 @@ call upon mixin-established columns and will receive a reference to the correct @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Above, ``SomeClass.foobar_prop`` will be invoked against ``SomeClass``, @@ -132,10 +134,11 @@ this:: @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Previously, ``SomeClass`` would be mapped with one particular copy of @@ -167,16 +170,19 @@ applied:: @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('myclass.id'), primary_key=True) + return Column(ForeignKey("myclass.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class MyClass(HasIdMixin, Base): - __tablename__ = 'myclass' + __tablename__ = "myclass" # ... + class MySubClass(MyClass): - "" + """ """ + # ... .. 
seealso:: @@ -189,13 +195,17 @@ on the abstract base:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import (declarative_base, declared_attr, - AbstractConcreteBase) + from sqlalchemy.ext.declarative import ( + declarative_base, + declared_attr, + AbstractConcreteBase, + ) Base = declarative_base() + class Something(Base): - __tablename__ = u'something' + __tablename__ = "something" id = Column(Integer, primary_key=True) @@ -212,9 +222,8 @@ on the abstract base:: class Concrete(Abstract): - __tablename__ = u'cca' - __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True} - + __tablename__ = "cca" + __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True} The above mapping will set up a table ``cca`` with both an ``id`` and a ``something_id`` column, and ``Concrete`` will also have a relationship @@ -240,17 +249,19 @@ of load that's improved the most:: Base = declarative_base() + class Foo(Base): __table__ = Table( - 'foo', Base.metadata, - Column('id', Integer, primary_key=True), - Column('a', Integer(), nullable=False), - Column('b', Integer(), nullable=False), - Column('c', Integer(), nullable=False), + "foo", + Base.metadata, + Column("id", Integer, primary_key=True), + Column("a", Integer(), nullable=False), + Column("b", Integer(), nullable=False), + Column("c", Integer(), nullable=False), ) - engine = create_engine( - 'mysql+mysqldb://scott:tiger@localhost/test', echo=True) + + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True) sess = Session(engine) @@ -385,32 +396,29 @@ of inheritance-oriented scenarios, including: * Binding to a Mixin or Abstract Class:: class MyClass(SomeMixin, Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" # ... - session = Session(binds={SomeMixin: some_engine}) + session = Session(binds={SomeMixin: some_engine}) * Binding to inherited concrete subclasses individually based on table:: class BaseClass(Base): - __tablename__ = 'base' + __tablename__ = "base" # ... + class ConcreteSubClass(BaseClass): - __tablename__ = 'concrete' + __tablename__ = "concrete" # ... - __mapper_args__ = {'concrete': True} - + __mapper_args__ = {"concrete": True} - session = Session(binds={ - base_table: some_engine, - concrete_table: some_other_engine - }) + session = Session(binds={base_table: some_engine, concrete_table: some_other_engine}) :ticket:`3035` @@ -446,10 +454,10 @@ These scenarios include: statement as well as for the SELECT used by the "fetch" strategy:: session.query(User).filter(User.id == 15).update( - {"name": "foob"}, synchronize_session='fetch') + {"name": "foob"}, synchronize_session="fetch" + ) - session.query(User).filter(User.id == 15).delete( - synchronize_session='fetch') + session.query(User).filter(User.id == 15).delete(synchronize_session="fetch") * Queries against individual columns:: @@ -488,7 +496,7 @@ at the attribute. Below this is illustrated using the return self.value + 5 - inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar' + inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar" It is also available as a constructor argument for all :class:`.SchemaItem` objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well @@ -510,20 +518,19 @@ as the "order by label" logic introduced in 0.9 (see :ref:`migration_1068`). 
Given a mapping like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) - A.b = column_property( - select([func.max(B.id)]).where(B.a_id == A.id).correlate(A) - ) + A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)) A simple scenario that included "A.b" twice would fail to render correctly:: @@ -550,12 +557,12 @@ There were also many scenarios where the "order by" logic would fail to order by label, for example if the mapping were "polymorphic":: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'} + __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"} The order_by would fail to use the label, as it would be anonymized due to the polymorphic loading:: @@ -592,7 +599,7 @@ any SQL expression, in addition to integer values, as arguments. The ORM this is used to allow a bound parameter to be passed, which can be substituted with a value later:: - sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset')) + sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset")) Dialects which don't support non-integer LIMIT or OFFSET expressions may continue to not support this behavior; third party dialects may also need modification @@ -702,12 +709,12 @@ CHECK Constraints now support the ``%(column_0_name)s`` token in naming conventi The ``%(column_0_name)s`` will derive from the first column found in the expression of a :class:`.CheckConstraint`:: - metadata = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata, - Column('value', Integer), + foo = Table( + "foo", + metadata, + Column("value", Integer), ) CheckConstraint(foo.c.value > 5) @@ -743,10 +750,7 @@ Since at least version 0.8, a :class:`.Constraint` has had the ability to m = MetaData() - t = Table('t', m, - Column('a', Integer), - Column('b', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer)) uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table @@ -762,12 +766,12 @@ the :class:`.Constraint` is also added:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) uq = UniqueConstraint(a, b) - t = Table('t', m, a, b) + t = Table("t", m, a, b) assert uq in t.constraints # constraint auto-attached @@ -781,12 +785,12 @@ tracking for the addition of names to a :class:`_schema.Table`:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") - t = Table('t', m, a, b) + t = Table("t", m, a, b) # constraint *not* auto-attached, as we do not have tracking # to locate when a name 'b' becomes available on the table @@ -806,18 +810,17 @@ the :class:`.Constraint` is constructed:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - t = Table('t', m, a, b) + t = Table("t", m, a, b) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") # constraint 
auto-attached normally as in older versions assert uq in t.constraints - :ticket:`3341` :ticket:`3411` @@ -838,12 +841,11 @@ expressions are rendered as constants into the SELECT statement:: m = MetaData() t = Table( - 't', m, - Column('x', Integer), - Column('y', Integer, default=func.somefunction())) + "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction()) + ) stmt = select([t.c.x]) - print(t.insert().from_select(['x'], stmt)) + print(t.insert().from_select(["x"], stmt)) Will render:: @@ -870,9 +872,10 @@ embedded in SQL to render correctly, such as:: metadata = MetaData() - tbl = Table("derp", metadata, - Column("arr", ARRAY(Text), - server_default=array(["foo", "bar", "baz"])), + tbl = Table( + "derp", + metadata, + Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])), ) print(CreateTable(tbl).compile(dialect=postgresql.dialect())) @@ -981,8 +984,9 @@ emitted for ten of the parameter sets, out of a total of 1000:: warnings.filterwarnings("once") for i in range(1000): - e.execute(select([cast( - ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)])) + e.execute( + select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)]) + ) The format of the warning here is:: @@ -1015,40 +1019,41 @@ onto the class. The string names are now resolved as attribute names in earnest:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) Above, the column ``user_name`` is mapped as ``name``. Previously, a call to :meth:`_query.Query.update` that was passed strings would have to have been called as follows:: - session.query(User).update({'user_name': 'moonbeam'}) + session.query(User).update({"user_name": "moonbeam"}) The given string is now resolved against the entity:: - session.query(User).update({'name': 'moonbeam'}) + session.query(User).update({"name": "moonbeam"}) It is typically preferable to use the attribute directly, to avoid any ambiguity:: - session.query(User).update({User.name: 'moonbeam'}) + session.query(User).update({User.name: "moonbeam"}) The change also indicates that synonyms and hybrid attributes can be referred to by string name as well:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) @hybrid_property def fullname(self): return self.name - session.query(User).update({'fullname': 'moonbeam'}) + + session.query(User).update({"fullname": "moonbeam"}) :ticket:`3228` @@ -1108,13 +1113,14 @@ it only became apparent as a result of :ticket:`3371`. Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) a = relationship("A") Given ``A``, with primary key of 7, but which we changed to be 10 @@ -1254,15 +1260,16 @@ attributes, a change in behavior can be seen here when assigning None. 
Given a mapping:: class A(Base): - __tablename__ = 'table_a' + __tablename__ = "table_a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'table_b' + __tablename__ = "table_b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('table_a.id')) + a_id = Column(ForeignKey("table_a.id")) a = relationship(A) In 1.0, the relationship-bound attribute takes precedence over the FK-bound @@ -1277,7 +1284,7 @@ only takes effect if a value is assigned; the None is not considered:: session.flush() b1 = B() - b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 + b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 b2 = B() b2.a = None # we expect a_id to be None; takes precedence only in 1.0 @@ -1339,7 +1346,7 @@ with yield-per (subquery loading could be in theory, however). When this error is raised, the :func:`.lazyload` option can be sent with an asterisk:: - q = sess.query(Object).options(lazyload('*')).yield_per(100) + q = sess.query(Object).options(lazyload("*")).yield_per(100) or use :meth:`_query.Query.enable_eagerloads`:: @@ -1348,8 +1355,11 @@ or use :meth:`_query.Query.enable_eagerloads`:: The :func:`.lazyload` option has the advantage that additional many-to-one joined loader options can still be used:: - q = sess.query(Object).options( - lazyload('*'), joinedload("some_manytoone")).yield_per(100) + q = ( + sess.query(Object) + .options(lazyload("*"), joinedload("some_manytoone")) + .yield_per(100) + ) .. _bug_3233: @@ -1370,15 +1380,17 @@ Starting with a mapping as:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) A query that joins to ``A.bs`` twice:: @@ -1392,9 +1404,9 @@ Will render:: The query deduplicates the redundant ``A.bs`` because it is attempting to support a case like the following:: - s.query(A).join(A.bs).\ - filter(B.foo == 'bar').\ - reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat') + s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter( + C.bar == "bat" + ) That is, the ``A.bs`` is part of a "path". 
As part of :ticket:`3367`, arriving at the same endpoint twice without it being part of a @@ -1437,31 +1449,33 @@ a mapping as follows:: Base = declarative_base() + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'} + __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"} class ASub1(A): - __mapper_args__ = {'polymorphic_identity': 'asub1'} + __mapper_args__ = {"polymorphic_identity": "asub1"} class ASub2(A): - __mapper_args__ = {'polymorphic_identity': 'asub2'} + __mapper_args__ = {"polymorphic_identity": "asub2"} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey("a.id")) - a = relationship("A", primaryjoin="B.a_id == A.id", backref='b') + a = relationship("A", primaryjoin="B.a_id == A.id", backref="b") + s = Session() @@ -1543,26 +1557,28 @@ Previously, the sample code looked like:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row, result): - return dict( - zip(labels, (proc(row, result) for proc in procs)) - ) + return dict(zip(labels, (proc(row, result) for proc in procs))) + return proc The unused ``result`` member is now removed:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. seealso:: @@ -1587,7 +1603,8 @@ join eager load will use a right-nested join. ``"nested"`` is now implied when using ``innerjoin=True``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) With the new default, this will render the FROM clause in the form:: @@ -1601,7 +1618,8 @@ optimization parameter to take effect in all cases. To get the older behavior, use ``innerjoin="unnested"``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested") + ) This will avoid right-nested joins and chain the joins together using all OUTER joins despite the innerjoin directive:: @@ -1626,15 +1644,16 @@ Subqueries no longer applied to uselist=False joined eager loads Given a joined eager load like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) + s = Session() print(s.query(A).options(joinedload(A.b)).limit(5)) @@ -1709,7 +1728,8 @@ Change to single-table-inheritance criteria when using from_self(), count() Given a single-table inheritance mapping, such as:: class Widget(Base): - __table__ = 'widget_table' + __table__ = "widget_table" + class FooWidget(Widget): pass @@ -1769,20 +1789,20 @@ the "single table criteria" when joining on a relationship. 
Given a mapping as:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) - related_id = Column(ForeignKey('related.id')) + related_id = Column(ForeignKey("related.id")) related = relationship("Related", backref="widget") - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} class Related(Base): - __tablename__ = 'related' + __tablename__ = "related" id = Column(Integer, primary_key=True) It's been the behavior for quite some time that a JOIN on the relationship @@ -1850,7 +1870,7 @@ behavior of passing string values that become parameterized:: # This is a normal Core expression with a string argument - # we aren't talking about this!! - stmt = select([sometable]).where(sometable.c.somecolumn == 'value') + stmt = select([sometable]).where(sometable.c.somecolumn == "value") The Core tutorial has long featured an example of the use of this technique, using a :func:`_expression.select` construct where virtually all components of it @@ -1893,24 +1913,28 @@ one wishes the warnings to be exceptions, the should be used:: import warnings - warnings.simplefilter("error") # all warnings raise an exception + + warnings.simplefilter("error") # all warnings raise an exception Given the above warnings, our statement works just fine, but to get rid of the warnings we would rewrite our statement as follows:: from sqlalchemy import select, text - stmt = select([ - text("a"), - text("b") - ]).where(text("a = b")).select_from(text("sometable")) + + stmt = ( + select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable")) + ) and as the warnings suggest, we can give our statement more specificity about the text if we use :func:`_expression.column` and :func:`.table`:: from sqlalchemy import select, text, column, table - stmt = select([column("a"), column("b")]).\ - where(text("a = b")).select_from(table("sometable")) + stmt = ( + select([column("a"), column("b")]) + .where(text("a = b")) + .select_from(table("sometable")) + ) Where note also that :func:`.table` and :func:`_expression.column` can now be imported from "sqlalchemy" without the "sql" part. @@ -1927,10 +1951,11 @@ of this change we have enhanced its functionality. When we have a :func:`_expression.select` or :class:`_query.Query` that refers to some column name or named label, we might want to GROUP BY and/or ORDER BY known columns or labels:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).group_by("name").order_by("id_count") + stmt = ( + select([user.c.name, func.count(user.c.id).label("id_count")]) + .group_by("name") + .order_by("id_count") + ) In the above statement we expect to see "ORDER BY id_count", as opposed to a re-statement of the function. 
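As an editorial aside (a sketch, not part of the original changelog, assuming the same ``user`` table as above): the most unambiguous spelling avoids string resolution entirely by keeping a reference to the label construct and passing it to ``group_by()`` / ``order_by()`` directly::

    from sqlalchemy import func, select

    # keep a handle on the label so it can be re-used in ORDER BY
    id_count = func.count(user.c.id).label("id_count")

    stmt = (
        select([user.c.name, id_count])
        .group_by(user.c.name)
        .order_by(id_count)  # renders "ORDER BY id_count" with no string lookup
    )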
The string argument given is actively @@ -1944,10 +1969,9 @@ the ``"name"`` expression has been resolved to ``users.name``!):: However, if we refer to a name that cannot be located, then we get the warning again, as below:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).order_by("some_label") + stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by( + "some_label" + ) The output does what we say, but again it warns us:: @@ -1995,16 +2019,21 @@ that of an "executemany" style of invocation:: counter = itertools.count(1) t = Table( - 'my_table', metadata, - Column('id', Integer, default=lambda: next(counter)), - Column('data', String) + "my_table", + metadata, + Column("id", Integer, default=lambda: next(counter)), + Column("data", String), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {"data": "d3"}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {"data": "d3"}, + ] + ) + ) The above example will invoke ``next(counter)`` for each row individually as would be expected:: @@ -2034,16 +2063,21 @@ value is required; if an omitted value only refers to a server-side default, an exception is raised:: t = Table( - 'my_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', String, server_default='some default') + "my_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String, server_default="some default"), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {}, + ] + ) + ) will raise:: @@ -2109,7 +2143,7 @@ data is needed. A :class:`_schema.Table` can be set up for reflection by passing :paramref:`_schema.Table.autoload_with` alone:: - my_table = Table('my_table', metadata, autoload_with=some_engine) + my_table = Table("my_table", metadata, autoload_with=some_engine) :ticket:`3027` @@ -2224,8 +2258,8 @@ An :class:`_postgresql.ENUM` that is created **without** being explicitly associated with a :class:`_schema.MetaData` object will be created *and* dropped corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`:: - table = Table('sometable', metadata, - Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) + table = Table( + "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum")) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE @@ -2242,11 +2276,9 @@ corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`, wi the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True`` flag:: - my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) + my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata) - table = Table('sometable', metadata, - Column('some_enum', my_enum) - ) + table = Table("sometable", metadata, Column("some_enum", my_enum)) # will fail: ENUM 'my_enum' does not exist table.create(engine) @@ -2256,10 +2288,9 @@ flag:: table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE - metadata.drop_all(engine) # will emit DROP TYPE - - metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + metadata.create_all(engine) # will emit CREATE TYPE :ticket:`3319` @@ -2334,13 +2365,14 @@ so that code like the following may proceed:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - 
Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) @@ -2357,21 +2389,23 @@ the temporary table:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) m2 = MetaData() user = Table( - "user_tmp", m2, + "user_tmp", + m2, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), + Column("name", VARCHAR(50)), ) # in 0.9, *will create* the new table, overwriting the old one. @@ -2548,11 +2582,13 @@ Code like the following will now function correctly and return floating points on MySQL:: >>> connection.execute( - ... select([ - ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), - ... matchtable.c.title.match('Dive Python').label('python'), - ... matchtable.c.title - ... ]).order_by(matchtable.c.id) + ... select( + ... [ + ... matchtable.c.title.match("Agile Ruby Programming").label("ruby"), + ... matchtable.c.title.match("Dive Python").label("python"), + ... matchtable.c.title, + ... ] + ... ).order_by(matchtable.c.id) ... ) [ (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), @@ -2614,7 +2650,9 @@ Connecting to SQL Server with PyODBC using a DSN-less connection, e.g. 
with an explicit hostname, now requires a driver name - SQLAlchemy will no longer attempt to guess a default:: - engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0") + engine = create_engine( + "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0" + ) SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on Windows, and SQLAlchemy cannot be tasked with guessing the best driver @@ -2642,13 +2680,16 @@ Improved support for CTEs in Oracle CTE support has been fixed up for Oracle, and there is also a new feature :meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives:: - included_parts = select([ - part.c.sub_part, part.c.part, part.c.quantity - ]).where(part.c.part == "p1").\ - cte(name="included_parts", recursive=True).\ - suffix_with( + included_parts = ( + select([part.c.sub_part, part.c.part, part.c.quantity]) + .where(part.c.part == "p1") + .cte(name="included_parts", recursive=True) + .suffix_with( "search depth first by part set ord1", - "cycle part set y_cycle to 1 default 0", dialect='oracle') + "cycle part set y_cycle to 1 default 0", + dialect="oracle", + ) + ) :ticket:`3220` diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index a2c88ae11d2..6b25bc41685 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -207,29 +207,35 @@ expression, and ``func.date()`` applied to a datetime expression; both examples will return duplicate rows due to the joined eager load unless explicit typing is applied:: - result = session.query( - func.substr(A.some_thing, 0, 4), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all() + ) - users = session.query( - func.date( - User.date_created, 'start of month' - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month").label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) The above examples, in order to retain deduping, should be specified as:: - result = session.query( - func.substr(A.some_thing, 0, 4, type_=String), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4, type_=String), A) + .options(joinedload(A.bs)) + .all() + ) - users = session.query( - func.date( - User.date_created, 'start of month', type_=DateTime - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month", type_=DateTime).label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) Additionally, the treatment of a so-called "unhashable" type is slightly different than its been in previous releases; internally we are using @@ -259,7 +265,6 @@ string value:: >>> some_user = User() >>> q = s.query(User).filter(User.name == some_user) - ... 
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value The exception is now immediate when the comparison is made between @@ -292,18 +297,18 @@ refer to specific elements of an "indexable" data type, such as an array or JSON field:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(Integer, primary_key=True) data = Column(JSON) - name = index_property('data', 'name') + name = index_property("data", "name") Above, the ``name`` attribute will read/write the field ``"name"`` from the JSON column ``data``, after initializing it to an empty dictionary:: - >>> person = Person(name='foobar') + >>> person = Person(name="foobar") >>> person.name foobar @@ -346,17 +351,18 @@ no longer inappropriately add the "single inheritance" criteria when the query is against a subquery expression such as an exists:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) data = Column(String) - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} - q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists() + + q = session.query(FooWidget).filter(FooWidget.data == "bar").exists() session.query(q).all() @@ -433,10 +439,12 @@ removed would be lost, and the flush would incorrectly raise an error:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) @@ -522,25 +530,23 @@ the :paramref:`.orm.mapper.passive_deletes` option:: class A(Base): __tablename__ = "a" - id = Column('id', Integer, primary_key=True) + id = Column("id", Integer, primary_key=True) type = Column(String) __mapper_args__ = { - 'polymorphic_on': type, - 'polymorphic_identity': 'a', - 'passive_deletes': True + "polymorphic_on": type, + "polymorphic_identity": "a", + "passive_deletes": True, } class B(A): - __tablename__ = 'b' - b_table_id = Column('b_table_id', Integer, primary_key=True) - bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE")) - data = Column('data', String) + __tablename__ = "b" + b_table_id = Column("b_table_id", Integer, primary_key=True) + bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE")) + data = Column("data", String) - __mapper_args__ = { - 'polymorphic_identity': 'b' - } + __mapper_args__ = {"polymorphic_identity": "b"} With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option is configured on the base mapper; it takes effect for all non-base mappers @@ -571,22 +577,24 @@ Same-named backrefs will not raise an error when applied to concrete inheritance The following mapping has always been possible without issue:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a_id", backref="a") + class A1(A): - __tablename__ = 'a1' + __tablename__ = "a1" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a1_id", backref="a1") - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = 
Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) Above, even though class ``A`` and class ``A1`` have a relationship named ``b``, no conflict warning or error occurs because class ``A1`` is @@ -596,22 +604,22 @@ However, if the relationships were configured the other way, an error would occur:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class A1(A): - __tablename__ = 'a1' + __tablename__ = "a1" id = Column(Integer, primary_key=True) - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) a = relationship("A", backref="b") a1 = relationship("A1", backref="b") @@ -634,22 +642,21 @@ on inherited mapper ''; this can cause dependency issues during flush". An example is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") class ASub(A): - __tablename__ = 'a_sub' - id = Column(Integer, ForeignKey('a.id'), primary_key=True) + __tablename__ = "a_sub" + id = Column(Integer, ForeignKey("a.id"), primary_key=True) bs = relationship("B") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - + a_id = Column(ForeignKey("a.id")) This warning dates back to the 0.4 series in 2007 and is based on a version of the unit of work code that has since been entirely rewritten. Currently, there @@ -672,7 +679,7 @@ A hybrid method or property will now reflect the ``__doc__`` value present in the original docstring:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) name = Column(String) @@ -710,9 +717,9 @@ also propagated from the hybrid descriptor itself, rather than from the underlyi expression. That is, accessing ``A.some_name.info`` now returns the same dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``:: - >>> A.some_name.info['foo'] = 'bar' + >>> A.some_name.info["foo"] = "bar" >>> from sqlalchemy import inspect - >>> inspect(A).all_orm_descriptors['some_name'].info + >>> inspect(A).all_orm_descriptors["some_name"].info {'foo': 'bar'} Note that this ``.info`` dictionary is **separate** from that of a mapped attribute @@ -739,11 +746,11 @@ consistent. Given:: - u1 = User(id=7, name='x') + u1 = User(id=7, name="x") u1.orders = [ - Order(description='o1', address=Address(id=1, email_address='a')), - Order(description='o2', address=Address(id=1, email_address='b')), - Order(description='o3', address=Address(id=1, email_address='c')) + Order(description="o1", address=Address(id=1, email_address="a")), + Order(description="o2", address=Address(id=1, email_address="b")), + Order(description="o3", address=Address(id=1, email_address="c")), ] sess = Session() @@ -925,32 +932,32 @@ row on a different "path" that doesn't include the attribute. 
This is a deep use case that's hard to reproduce, but the general idea is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - c_id = Column(ForeignKey('c.id')) + b_id = Column(ForeignKey("b.id")) + c_id = Column(ForeignKey("c.id")) b = relationship("B") c = relationship("C") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) + c_id = Column(ForeignKey("c.id")) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) d = relationship("D") class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -959,7 +966,9 @@ deep use case that's hard to reproduce, but the general idea is as follows:: q = s.query(A) q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d) - q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)) + q = q.options( + contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d) + ) q = q.join(c_alias_2, A.c) q = q.options(contains_eager(A.c, alias=c_alias_2)) @@ -1121,6 +1130,7 @@ for specific exceptions:: engine = create_engine("postgresql+psycopg2://") + @event.listens_for(engine, "handle_error") def cancel_disconnect(ctx): if isinstance(ctx.original_exception, KeyboardInterrupt): @@ -1149,25 +1159,22 @@ statement:: >>> from sqlalchemy import table, column, select, literal, exists >>> orders = table( - ... 'orders', - ... column('region'), - ... column('amount'), - ... column('product'), - ... column('quantity') + ... "orders", column("region"), column("amount"), column("product"), column("quantity") ... ) >>> >>> upsert = ( ... orders.update() - ... .where(orders.c.region == 'Region1') - ... .values(amount=1.0, product='Product1', quantity=1) - ... .returning(*(orders.c._all_columns)).cte('upsert')) + ... .where(orders.c.region == "Region1") + ... .values(amount=1.0, product="Product1", quantity=1) + ... .returning(*(orders.c._all_columns)) + ... .cte("upsert") + ... ) >>> >>> insert = orders.insert().from_select( ... orders.c.keys(), - ... select([ - ... literal('Region1'), literal(1.0), - ... literal('Product1'), literal(1) - ... ]).where(~exists(upsert.select())) + ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where( + ... ~exists(upsert.select()) + ... ), ... ) >>> >>> print(insert) # note formatting added for clarity @@ -1198,13 +1205,13 @@ RANGE and ROWS expressions for window functions:: >>> from sqlalchemy import func - >>> print(func.row_number().over(order_by='x', range_=(-5, 10))) + >>> print(func.row_number().over(order_by="x", range_=(-5, 10))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING) - >>> print(func.row_number().over(order_by='x', rows=(None, 0))) + >>> print(func.row_number().over(order_by="x", rows=(None, 0))) row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - >>> print(func.row_number().over(order_by='x', range_=(-2, None))) + >>> print(func.row_number().over(order_by="x", range_=(-2, None))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) :paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as @@ -1213,7 +1220,7 @@ RANGE and ROWS expressions for window functions:: .. 
seealso:: - :ref:`window_functions` + :ref:`tutorial_window_functions` :ticket:`3049` @@ -1230,10 +1237,13 @@ correlation of tables that are derived from the same FROM clause as the selectable, e.g. lateral correlation:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select([books.c.book_id]).\ - ... where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select([books.c.book_id]) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select([people]).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -1242,7 +1252,7 @@ selectable, e.g. lateral correlation:: .. seealso:: - :ref:`lateral_selects` + :ref:`tutorial_lateral_correlation` :class:`_expression.Lateral` @@ -1262,10 +1272,7 @@ construct similar to an alias:: from sqlalchemy import func - selectable = people.tablesample( - func.bernoulli(1), - name='alias', - seed=func.random()) + selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random()) stmt = select([selectable.c.people_id]) Assuming ``people`` with a column ``people_id``, the above @@ -1295,9 +1302,10 @@ What's changed is that this feature no longer turns on automatically for a *composite* primary key; previously, a table definition such as:: Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) Would have "autoincrement" semantics applied to the ``'x'`` column, only @@ -1306,9 +1314,10 @@ disable this, one would have to turn off ``autoincrement`` on all columns:: # old way Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=False) + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=False), ) With the new behavior, the composite primary key will not have autoincrement @@ -1316,9 +1325,10 @@ semantics unless a column is marked explicitly with ``autoincrement=True``:: # column 'y' will be SERIAL/AUTO_INCREMENT/ auto-generating Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), ) In order to anticipate some potential backwards-incompatible scenarios, @@ -1327,9 +1337,10 @@ for missing primary key values on composite primary key columns that don't have autoincrement set up; given a table such as:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) An INSERT emitted with no values for this table will produce this warning:: @@ -1349,9 +1360,10 @@ default or something less common such as a trigger, the presence of a value generator can be 
indicated using :class:`.FetchedValue`:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True, server_default=FetchedValue()), - Column('y', Integer, primary_key=True, server_default=FetchedValue()) + "b", + metadata, + Column("x", Integer, primary_key=True, server_default=FetchedValue()), + Column("y", Integer, primary_key=True, server_default=FetchedValue()), ) For the very unlikely case where a composite primary key is actually intended @@ -1359,9 +1371,10 @@ to store NULL in one or more of its columns (only supported on SQLite and MySQL) specify the column with ``nullable=True``:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, nullable=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, nullable=True), ) In a related change, the ``autoincrement`` flag may be set to True @@ -1384,19 +1397,19 @@ New operators :meth:`.ColumnOperators.is_distinct_from` and :meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT FROM and IS NOT DISTINCT FROM sql operation:: - >>> print(column('x').is_distinct_from(None)) + >>> print(column("x").is_distinct_from(None)) x IS DISTINCT FROM NULL Handling is provided for NULL, True and False:: - >>> print(column('x').isnot_distinct_from(False)) + >>> print(column("x").isnot_distinct_from(False)) x IS NOT DISTINCT FROM false For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered, which on SQLite works for NULL unlike other backends:: >>> from sqlalchemy.dialects import sqlite - >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect())) + >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect())) x IS NOT NULL .. _change_1957: @@ -1445,19 +1458,15 @@ and the column arguments passed to :meth:`_expression.TextClause.columns`:: from sqlalchemy import text - stmt = text("SELECT users.id, addresses.id, users.id, " - "users.name, addresses.email_address AS email " - "FROM users JOIN addresses ON users.id=addresses.user_id " - "WHERE users.id = 1").columns( - User.id, - Address.id, - Address.user_id, - User.name, - Address.email_address - ) - - query = session.query(User).from_statement(stmt).\ - options(contains_eager(User.addresses)) + + stmt = text( + "SELECT users.id, addresses.id, users.id, " + "users.name, addresses.email_address AS email " + "FROM users JOIN addresses ON users.id=addresses.user_id " + "WHERE users.id = 1" + ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address) + + query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses)) result = query.all() Above, the textual SQL contains the column "id" three times, which would @@ -1478,7 +1487,7 @@ this behavioral change for applications using it are at :ref:`behavior_change_35 .. seealso:: - :ref:`sqlexpression_text_columns` - in the Core tutorial + :ref:`tutorial_select_arbitrary_text` :ref:`behavior_change_3501` - backwards compatibility remarks @@ -1489,7 +1498,7 @@ Another aspect of this change is that the rules for matching columns have also b to rely upon "positional" matching more fully for compiled SQL constructs as well. 
Given a statement like the following:: - ua = users.alias('ua') + ua = users.alias("ua") stmt = select([users.c.user_id, ua.c.user_id]) The above statement will compile to:: @@ -1512,7 +1521,7 @@ fetch columns:: ua_id = row[ua.c.user_id] # this still raises, however - user_id = row['user_id'] + user_id = row["user_id"] Much less likely to get an "ambiguous column" error message ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1550,10 +1559,7 @@ string/integer/etc values:: three = 3 - t = Table( - 'data', MetaData(), - Column('value', Enum(MyEnum)) - ) + t = Table("data", MetaData(), Column("value", Enum(MyEnum))) e = create_engine("sqlite://") t.create(e) @@ -1600,8 +1606,9 @@ flag is used (1.1.0b2):: >>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine >>> t = Table( - ... 'data', MetaData(), - ... Column('value', Enum("one", "two", "three", validate_strings=True)) + ... "data", + ... MetaData(), + ... Column("value", Enum("one", "two", "three", validate_strings=True)), ... ) >>> e = create_engine("sqlite://") >>> t.create(e) @@ -1674,8 +1681,8 @@ within logging, exception reporting, as well as ``repr()`` of the row itself:: >>> from sqlalchemy import create_engine >>> import random - >>> e = create_engine("sqlite://", echo='debug') - >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000)) + >>> e = create_engine("sqlite://", echo="debug") + >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000)) >>> row = e.execute("select ?", [some_value]).first() ... (lines are wrapped for clarity) ... 2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ? @@ -1752,6 +1759,7 @@ replacing the ``None`` value:: json_value = Column(JSON(none_as_null=False), default="some default") + # would insert "some default" instead of "'null'", # now will insert "'null'" obj = MyObject(json_value=None) @@ -1769,6 +1777,7 @@ inconsistently vs. all other datatypes:: some_other_value = Column(String(50)) json_value = Column(JSON(none_as_null=False)) + # would result in NULL for some_other_value, # but json "'null'" for json_value. Now results in NULL for both # (the json_value is omitted from the INSERT) @@ -1786,9 +1795,7 @@ would be ignored in all cases:: # would insert SQL NULL and/or trigger defaults, # now inserts "'null'" - session.bulk_insert_mappings( - MyObject, - [{"json_value": None}]) + session.bulk_insert_mappings(MyObject, [{"json_value": None}]) The :class:`_types.JSON` type now implements the :attr:`.TypeEngine.should_evaluate_none` flag, @@ -1847,9 +1854,7 @@ is now in Core. The :class:`_types.ARRAY` type still **only works on PostgreSQL**, however it can be used directly, supporting special array use cases such as indexed access, as well as support for the ANY and ALL:: - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer, dimensions=2)) - ) + mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))) expr = mytable.c.data[5][6] @@ -1884,7 +1889,6 @@ such as:: subq = select([mytable.c.value]) select([mytable]).where(12 > any_(subq)) - :ticket:`3516` .. 
_change_3132: @@ -1897,12 +1901,14 @@ function for the ``array_agg()`` SQL function that returns an array, which is now available using :class:`_functions.array_agg`:: from sqlalchemy import func + stmt = select([func.array_agg(table.c.value)]) A PostgreSQL element for an aggregate ORDER BY is also added via :class:`_postgresql.aggregate_order_by`:: from sqlalchemy.dialects.postgresql import aggregate_order_by + expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc())) stmt = select([expr]) @@ -1914,8 +1920,8 @@ The PG dialect itself also provides an :func:`_postgresql.array_agg` wrapper to ensure the :class:`_postgresql.ARRAY` type:: from sqlalchemy.dialects.postgresql import array_agg - stmt = select([array_agg(table.c.value).contains('foo')]) + stmt = select([array_agg(table.c.value).contains("foo")]) Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, ``rank()``, ``dense_rank()`` and others that require an ordering via @@ -1923,12 +1929,13 @@ Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, :meth:`.FunctionElement.within_group` modifier:: from sqlalchemy import func - stmt = select([ - department.c.id, - func.percentile_cont(0.5).within_group( - department.c.salary.desc() - ) - ]) + + stmt = select( + [ + department.c.id, + func.percentile_cont(0.5).within_group(department.c.salary.desc()), + ] + ) The above statement would produce SQL similar to:: @@ -1956,7 +1963,7 @@ an :class:`_postgresql.ENUM` had to look like this:: # old way class MyEnum(TypeDecorator, SchemaType): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') + impl = postgresql.ENUM("one", "two", "three", name="myenum") def _set_table(self, table): self.impl._set_table(table) @@ -1966,8 +1973,7 @@ can be done like any other type:: # new way class MyEnum(TypeDecorator): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') - + impl = postgresql.ENUM("one", "two", "three", name="myenum") :ticket:`2919` @@ -1987,17 +1993,18 @@ translation works for DDL and SQL generation, as well as with the ORM. 
For example, if the ``User`` class were assigned the schema "per_user":: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - __table_args__ = {'schema': 'per_user'} + __table_args__ = {"schema": "per_user"} On each request, the :class:`.Session` can be set up to refer to a different schema each time:: session = Session() - session.connection(execution_options={ - "schema_translate_map": {"per_user": "account_one"}}) + session.connection( + execution_options={"schema_translate_map": {"per_user": "account_one"}} + ) # will query from the ``account_one.user`` table session.query(User).get(5) @@ -2072,21 +2079,21 @@ Then, a mapping where we are equating a string "id" column on one table to an integer "id" column on the other:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(StringAsInt, primary_key=True) pets = relationship( - 'Pets', + "Pets", primaryjoin=( - 'foreign(Pets.person_id)' - '==cast(type_coerce(Person.id, Integer), Integer)' - ) + "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)" + ), ) + class Pets(Base): - __tablename__ = 'pets' - id = Column('id', Integer, primary_key=True) - person_id = Column('person_id', Integer) + __tablename__ = "pets" + id = Column("id", Integer, primary_key=True) + person_id = Column("person_id", Integer) Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are using :func:`.type_coerce` to handle bound parameters passed via @@ -2166,8 +2173,7 @@ Column:: class MyObject(Base): # ... - json_value = Column( - JSON(none_as_null=False), nullable=False, default=JSON.NULL) + json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL) Or, ensure the value is present on the object:: @@ -2182,7 +2188,6 @@ passed to :paramref:`_schema.Column.default` or :paramref:`_schema.Column.server # default=None is the same as omitting it entirely, does not apply JSON NULL json_value = Column(JSON(none_as_null=False), nullable=False, default=None) - .. seealso:: :ref:`change_3514` @@ -2195,9 +2200,11 @@ Columns no longer added redundantly with DISTINCT + ORDER BY A query such as the following will now augment only those columns that are missing from the SELECT list, without duplicates:: - q = session.query(User.id, User.name.label('name')).\ - distinct().\ - order_by(User.id, User.name, User.fullname) + q = ( + session.query(User.id, User.name.label("name")) + .distinct() + .order_by(User.id, User.name, User.fullname) + ) Produces:: @@ -2237,7 +2244,7 @@ now raises an error, whereas previously it would silently pick only the last defined validator:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) @@ -2250,6 +2257,7 @@ last defined validator:: def _validate_data_two(self): assert "y" in data + configure_mappers() Will raise:: @@ -2321,7 +2329,7 @@ passed through the literal quoting system:: >>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable >>> from sqlalchemy.types import String - >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there")) + >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there")) >>> print(CreateTable(t)) CREATE TABLE t ( @@ -2473,7 +2481,7 @@ This includes: one less dimension. 
Given a column with type ``ARRAY(Integer, dimensions=3)``, we can now perform this expression:: - int_expr = col[5][6][7] # returns an Integer expression object + int_expr = col[5][6][7] # returns an Integer expression object Previously, the indexed access to ``col[5]`` would return an expression of type :class:`.Integer` where we could no longer perform indexed access @@ -2490,7 +2498,7 @@ This includes: the :class:`_postgresql.ARRAY` type, this means that it is now straightforward to produce JSON expressions with multiple levels of indexed access:: - json_expr = json_col['key1']['attr1'][5] + json_expr = json_col["key1"]["attr1"][5] * The "textual" type that is returned by indexed access of :class:`.HSTORE` as well as the "textual" type that is returned by indexed access of @@ -2520,12 +2528,11 @@ support CAST operations to each other without the "astext" aspect. This means that in most cases, an application that was doing this:: - expr = json_col['somekey'].cast(Integer) + expr = json_col["somekey"].cast(Integer) Will now need to change to this:: - expr = json_col['somekey'].astext.cast(Integer) - + expr = json_col["somekey"].astext.cast(Integer) .. _change_2729: @@ -2536,12 +2543,21 @@ A table definition like the following will now emit CREATE TYPE as expected:: enum = Enum( - 'manager', 'place_admin', 'carwash_admin', - 'parking_admin', 'service_admin', 'tire_admin', - 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles") + "manager", + "place_admin", + "carwash_admin", + "parking_admin", + "service_admin", + "tire_admin", + "mechanic", + "carwasher", + "tire_mechanic", + name="work_place_roles", + ) + class WorkPlacement(Base): - __tablename__ = 'work_placement' + __tablename__ = "work_placement" id = Column(Integer, primary_key=True) roles = Column(ARRAY(enum)) @@ -2580,10 +2596,11 @@ The new argument :paramref:`.PGInspector.get_view_names.include` allows specification of which sub-types of views should be returned:: from sqlalchemy import inspect + insp = inspect(engine) - plain_views = insp.get_view_names(include='plain') - all_views = insp.get_view_names(include=('plain', 'materialized')) + plain_views = insp.get_view_names(include="plain") + all_views = insp.get_view_names(include=("plain", "materialized")) :ticket:`3588` @@ -2606,9 +2623,6 @@ Support for PyGreSQL The `PyGreSQL `_ DBAPI is now supported. -.. seealso:: - - :ref:`dialect-postgresql-pygresql` The "postgres" module is removed -------------------------------- @@ -2671,9 +2685,7 @@ The MySQL dialect now accepts the value "AUTOCOMMIT" for the parameters:: connection = engine.connect() - connection = connection.execution_options( - isolation_level="AUTOCOMMIT" - ) + connection = connection.execution_options(isolation_level="AUTOCOMMIT") The isolation level makes use of the various "autocommit" attributes provided by most MySQL DBAPIs. 
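A brief illustrative sketch (not part of the original changelog; the PyMySQL URL is only an example): since the value is accepted both at :func:`.create_engine` time and via :meth:`.Connection.execution_options`, it can be applied engine-wide or per-connection::

    from sqlalchemy import create_engine

    # engine-wide: every pooled connection runs in DBAPI autocommit mode
    engine = create_engine(
        "mysql+pymysql://scott:tiger@localhost/test",
        isolation_level="AUTOCOMMIT",
    )

    # per-connection: scoped to this Connection only, as shown above
    autocommit_conn = engine.connect().execution_options(isolation_level="AUTOCOMMIT")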
@@ -2690,10 +2702,11 @@ on an InnoDB table featured AUTO_INCREMENT on one of its columns which was not the first column, e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) DDL such as the following would be generated:: @@ -2723,12 +2736,13 @@ use the :class:`.PrimaryKeyConstraint` construct explicitly (1.1.0b2) (along with a KEY for the autoincrement column as required by MySQL), e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - PrimaryKeyConstraint('x', 'y'), - UniqueConstraint('y'), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + PrimaryKeyConstraint("x", "y"), + UniqueConstraint("y"), + mysql_engine="InnoDB", ) Along with the change :ref:`change_3216`, composite primary keys with @@ -2738,14 +2752,13 @@ now defaults to the value ``"auto"`` and the ``autoincrement=False`` directives are no longer needed:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) - - Dialect Improvements and Changes - SQLite ========================================= @@ -2852,8 +2865,7 @@ parameters. The four standard levels are supported as well as ``SNAPSHOT``:: engine = create_engine( - "mssql+pyodbc://scott:tiger@ms_2008", - isolation_level="REPEATABLE READ" + "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ" ) .. seealso:: @@ -2872,12 +2884,11 @@ which includes a length, an "un-lengthed" type under SQL Server would copy the "length" parameter as the value ``"max"``:: >>> from sqlalchemy import create_engine, inspect - >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True) + >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True) >>> engine.execute("create table s (x varchar(max), y varbinary(max))") >>> insp = inspect(engine) >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) max max @@ -2887,8 +2898,7 @@ interprets as "max". The fix then is so that these lengths come out as None, so that the type objects work in non-SQL Server contexts:: >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... 
print(col["type"].__class__, col["type"].length) None None @@ -2921,10 +2931,11 @@ This aliasing attempts to turn schema-qualified tables into aliases; given a table such as:: account_table = Table( - 'account', metadata, - Column('id', Integer, primary_key=True), - Column('info', String(100)), - schema="customer_schema" + "account", + metadata, + Column("id", Integer, primary_key=True), + Column("info", String(100)), + schema="customer_schema", ) The legacy mode of behavior will attempt to turn a schema-qualified table diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index f0b88c49361..e0fb0e41408 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -80,9 +80,11 @@ that is cacheable as well as more efficient. Given a query as below:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(subqueryload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(subqueryload(User.addresses)) + ) The SQL produced would be the query against ``User`` followed by the subqueryload for ``User.addresses`` (note the parameters are also listed):: @@ -106,9 +108,11 @@ subqueryload for ``User.addresses`` (note the parameters are also listed):: With "selectin" loading, we instead get a SELECT that refers to the actual primary key values loaded in the parent query:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(selectinload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(selectinload(User.addresses)) + ) Produces:: @@ -225,8 +229,9 @@ if not specified, the attribute defaults to ``None``:: from sqlalchemy.orm import query_expression from sqlalchemy.orm import with_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -234,9 +239,9 @@ if not specified, the attribute defaults to ``None``:: # will be None normally... expr = query_expression() + # but let's give it x + y - a1 = session.query(A).options( - with_expression(A.expr, A.x + A.y)).first() + a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first() print(a1.expr) .. seealso:: @@ -259,10 +264,9 @@ Below, we emit a DELETE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - delete() + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).delete() .. seealso:: @@ -291,28 +295,26 @@ into multiple columns/expressions:: @hybrid.hybrid_property def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) @name.update_expression def name(cls, value): - f, l = value.split(' ', 1) + f, l = value.split(" ", 1) return [(cls.first_name, f), (cls.last_name, l)] Above, an UPDATE can be rendered using:: - session.query(Person).filter(Person.id == 5).update( - {Person.name: "Dr. No"}) + session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. 
No"}) Similar functionality is available for composites, where composite values will be broken out into their individual columns for bulk UPDATE:: session.query(Vertex).update({Edge.start: Point(3, 4)}) - .. seealso:: :ref:`hybrid_bulk_update` @@ -342,6 +344,7 @@ Python:: def name(self, value): self.first_name = value + class FirstNameLastName(FirstNameOnly): # ... @@ -349,15 +352,15 @@ Python:: @FirstNameOnly.name.getter def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.setter def name(self, value): - self.first_name, self.last_name = value.split(' ', maxsplit=1) + self.first_name, self.last_name = value.split(" ", maxsplit=1) @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) Above, the ``FirstNameOnly.name`` hybrid is referenced by the ``FirstNameLastName`` subclass in order to repurpose it specifically to the @@ -391,6 +394,7 @@ hybrid in-place, interfering with the definition on the superclass. def _set_name(self, value): self.first_name = value + class FirstNameOnly(Base): @hybrid_property def name(self): @@ -426,10 +430,12 @@ if this "append" event is the second part of a bulk replace:: from sqlalchemy.orm.attributes import OP_BULK_REPLACE + @event.listens_for(SomeObject.collection, "bulk_replace") def process_collection(target, values, initiator): values[:] = [_make_value(value) for value in values] + @event.listens_for(SomeObject.collection, "append", retval=True) def process_collection(target, value, initiator): # make sure bulk_replace didn't already do it @@ -438,7 +444,6 @@ if this "append" event is the second part of a bulk replace:: else: return value - :ticket:`3896` .. _change_3303: @@ -457,11 +462,13 @@ extension:: Base = declarative_base() + class MyDataClass(Base): - __tablename__ = 'my_data' + __tablename__ = "my_data" id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) + @event.listens_for(MyDataClass.data, "modified") def modified_json(instance): print("json value modified:", instance.data) @@ -511,7 +518,6 @@ becomes part of the next flush process:: model = session.query(MyModel).first() model.json_set &= {1, 3} - :ticket:`3853` .. _change_3769: @@ -527,7 +533,7 @@ is an association proxy that links to ``AtoB.bvalue``, which is itself an association proxy onto ``B``:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b_values = association_proxy("atob", "b_value") @@ -535,26 +541,26 @@ itself an association proxy onto ``B``:: class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) value = Column(String) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) value = Column(String) class AtoB(Base): - __tablename__ = 'atob' + __tablename__ = "atob" - a_id = Column(ForeignKey('a.id'), primary_key=True) - b_id = Column(ForeignKey('b.id'), primary_key=True) + a_id = Column(ForeignKey("a.id"), primary_key=True) + b_id = Column(ForeignKey("b.id"), primary_key=True) a = relationship("A", backref="atob") b = relationship("B", backref="atob") @@ -567,7 +573,7 @@ query across the two proxies ``A.b_values``, ``AtoB.b_value``: .. 
sourcecode:: pycon+sql - >>> s.query(A).filter(A.b_values.contains('hi')).all() + >>> s.query(A).filter(A.b_values.contains("hi")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -581,7 +587,7 @@ to query across the two proxies ``A.c_values``, ``AtoB.c_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.c_values.any(value='x')).all() + >>> s.query(A).filter(A.c_values.any(value="x")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -612,8 +618,8 @@ primary key value. The example now illustrates that a new ``identity_token`` field tracks this difference so that the two objects can co-exist in the same identity map:: - tokyo = WeatherLocation('Asia', 'Tokyo') - newyork = WeatherLocation('North America', 'New York') + tokyo = WeatherLocation("Asia", "Tokyo") + newyork = WeatherLocation("North America", "New York") tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) @@ -632,15 +638,14 @@ same identity map:: newyork_report = newyork.reports[0] tokyo_report = tokyo.reports[0] - assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america") - assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia") + assert inspect(newyork_report).identity_key == (Report, (1,), "north_america") + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") # the token representing the originating shard is also available directly assert inspect(newyork_report).identity_token == "north_america" assert inspect(tokyo_report).identity_token == "asia" - :ticket:`4137` New Features and Improvements - Core @@ -673,6 +678,7 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 from sqlalchemy import Boolean from sqlalchemy import TypeDecorator + class LiberalBoolean(TypeDecorator): impl = Boolean @@ -681,7 +687,6 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 value = bool(int(value)) return value - :ticket:`4102` .. _change_3919: @@ -844,7 +849,7 @@ other comparison operators has been flattened into one level. This will have the effect of more parenthesization being generated when comparison operators are combined together, such as:: - (column('q') == null()) != (column('y') == null()) + (column("q") == null()) != (column("y") == null()) Will now generate ``(q IS NULL) != (y IS NULL)`` rather than ``q IS NULL != y IS NULL``. @@ -862,9 +867,10 @@ and columns. These are specified via the :paramref:`_schema.Table.comment` and :paramref:`_schema.Column.comment` arguments:: Table( - 'my_table', metadata, - Column('q', Integer, comment="the Q value"), - comment="my Q table" + "my_table", + metadata, + Column("q", Integer, comment="the Q value"), + comment="my Q table", ) Above, DDL will be rendered appropriately upon table create to associate @@ -891,9 +897,11 @@ the 0.7 and 0.8 series. Given a statement as:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement on a PostgreSQL backend @@ -905,7 +913,7 @@ would render as:: .. seealso:: - :ref:`multi_table_deletes` + :ref:`tutorial_multi_table_deletes` :ticket:`959` @@ -930,7 +938,7 @@ can now be used to change the autoescape character, if desired. 
An expression such as:: - >>> column('x').startswith('total%score', autoescape=True) + >>> column("x").startswith("total%score", autoescape=True) Renders as:: @@ -940,7 +948,7 @@ Where the value of the parameter "x_1" is ``'total/%score'``. Similarly, an expression that has backslashes:: - >>> column('x').startswith('total/score', autoescape=True) + >>> column("x").startswith("total/score", autoescape=True) Will render the same way, with the value of the parameter "x_1" as ``'total//score'``. @@ -968,8 +976,8 @@ if the application is working with plain floats. float_value = connection.scalar( - select([literal(4.56)]) # the "BindParameter" will now be - # Float, not Numeric(asdecimal=True) + select([literal(4.56)]) # the "BindParameter" will now be + # Float, not Numeric(asdecimal=True) ) * Math operations between :class:`.Numeric`, :class:`.Float`, and @@ -978,11 +986,11 @@ if the application is working with plain floats. as well as if the type should be :class:`.Float`:: # asdecimal flag is maintained - expr = column('a', Integer) * column('b', Numeric(asdecimal=False)) + expr = column("a", Integer) * column("b", Numeric(asdecimal=False)) assert expr.type.asdecimal == False # Float subclass of Numeric is maintained - expr = column('a', Integer) * column('b', Float()) + expr = column("a", Integer) * column("b", Float()) assert isinstance(expr.type, Float) * The :class:`.Float` datatype will apply the ``float()`` processor to @@ -1009,9 +1017,7 @@ is added to the compiler to allow for the space. All three functions are named in the documentation now:: >>> from sqlalchemy import select, table, column, func, tuple_ - >>> t = table('t', - ... column('value'), column('x'), - ... column('y'), column('z'), column('q')) + >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q")) >>> stmt = select([func.sum(t.c.value)]).group_by( ... func.grouping_sets( ... tuple_(t.c.x, t.c.y), @@ -1046,16 +1052,17 @@ localized to the current VALUES clause being processed:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - mytable = Table('mytable', meta, - Column('counter', Integer), - Column('counter_plus_twelve', - Integer, default=mydefault, onupdate=mydefault) + + mytable = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) - stmt = mytable.insert().values( - [{"counter": 5}, {"counter": 18}, {"counter": 20}]) + stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}]) conn.execute(stmt) @@ -1077,7 +1084,8 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the sess = Session() - user = sess.query(User).filter_by(name='x').first() + user = sess.query(User).filter_by(name="x").first() + @event.listens_for(sess, "after_rollback") def after_rollback(session): @@ -1086,12 +1094,14 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the # to emit a lazy load. print("user name: %s" % user.name) + @event.listens_for(sess, "after_commit") def after_commit(session): # 'user.name' is present, assuming it was already # loaded. this is the existing behavior. 
print("user name: %s" % user.name) + if should_rollback: sess.rollback() else: @@ -1148,7 +1158,7 @@ In the case of assigning a collection to an attribute that would replace the previous collection, a side effect of this was that the collection being replaced would also be mutated, which is misleading and unnecessary:: - >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3') + >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3") >>> user.addresses = [a1, a2] >>> previous_collection = user.addresses @@ -1177,18 +1187,19 @@ existing collection. Given a mapping as:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - @validates('bs') + @validates("bs") def convert_dict_to_b(self, key, value): - return B(data=value['data']) + return B(data=value["data"]) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) data = Column(String) Above, we could use the validator as follows, to convert from an incoming @@ -1217,7 +1228,7 @@ are new. Supposing a simple validator such as:: class A(Base): # ... - @validates('bs') + @validates("bs") def validate_b(self, key, value): assert value.data is not None return value @@ -1255,16 +1266,16 @@ Use flag_dirty() to mark an object as "dirty" without any attribute changing An exception is now raised if the :func:`.attributes.flag_modified` function is used to mark an attribute as modified that isn't actually loaded:: - a1 = A(data='adf') + a1 = A(data="adf") s.add(a1) s.flush() # expire, similarly as though we said s.commit() - s.expire(a1, 'data') + s.expire(a1, "data") # will raise InvalidRequestError - attributes.flag_modified(a1, 'data') + attributes.flag_modified(a1, "data") This because the flush process will most likely fail in any case if the attribute remains un-present by the time flush occurs. 
To mark an object @@ -1287,6 +1298,7 @@ such as :meth:`.SessionEvents.before_flush`, use the new A very old and undocumented keyword argument ``scope`` has been removed:: from sqlalchemy.orm import scoped_session + Session = scoped_session(sessionmaker()) session = Session(scope=None) @@ -1312,18 +1324,21 @@ it is re-stated during the UPDATE so that the "onupdate" rule does not overwrite it:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk")) + favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk")) bs = relationship("B", primaryjoin="A.id == B.a_id") favorite_b = relationship( - "B", primaryjoin="A.favorite_b_id == B.id", post_update=True) + "B", primaryjoin="A.favorite_b_id == B.id", post_update=True + ) updated = Column(Integer, onupdate=my_onupdate_function) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id', name="a_fk")) + a_id = Column(ForeignKey("a.id", name="a_fk")) + a1 = A() b1 = B() @@ -1371,21 +1386,18 @@ now participates in the versioning feature, documented at Given a mapping:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) version_id = Column(Integer, default=0) - parent_id = Column(ForeignKey('node.id')) - favorite_node_id = Column(ForeignKey('node.id')) + parent_id = Column(ForeignKey("node.id")) + favorite_node_id = Column(ForeignKey("node.id")) nodes = relationship("Node", primaryjoin=remote(parent_id) == id) favorite_node = relationship( - "Node", primaryjoin=favorite_node_id == remote(id), - post_update=True + "Node", primaryjoin=favorite_node_id == remote(id), post_update=True ) - __mapper_args__ = { - 'version_id_col': version_id - } + __mapper_args__ = {"version_id_col": version_id} An UPDATE of a node that associates another node as "favorite" will now increment the version counter as well as match the current version:: @@ -1435,20 +1447,20 @@ Whereas in 1.1, an expression such as the following would produce a result with no return type (assume ``-%>`` is some special operator supported by the database):: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type NullType() Other types would use the default behavior of using the left-hand type as the return type:: - >>> column('x', types.String(50)).op('-%>')(None).type + >>> column("x", types.String(50)).op("-%>")(None).type String(length=50) These behaviors were mostly by accident, so the behavior has been made consistent with the second form, that is the default return type is the same as the left-hand expression:: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type DateTime() As most user-defined operators tend to be "comparison" operators, often @@ -1457,18 +1469,18 @@ one of the many special operators defined by PostgreSQL, the its documented behavior of allowing the return type to be :class:`.Boolean` in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`:: - >>> column('x', types.String(50)).op('-%>', is_comparison=True)(None).type + >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type + >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', 
types.JSON()).op('-%>', is_comparison=True)(None).type + >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type Boolean() To assist with boolean comparison operators, a new shorthand method :meth:`.Operators.bool_op` has been added. This method should be preferred for on-the-fly boolean operators:: - >>> print(column('x', types.Integer).bool_op('-%>')(5)) + >>> print(column("x", types.Integer).bool_op("-%>")(5)) x -%> :x_1 @@ -1485,7 +1497,7 @@ Previously, it was not possible to produce a :obj:`_expression.literal_column` construct that stated a single percent sign:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%%symbol The percent sign is now unaffected for dialects that are not set to @@ -1494,10 +1506,10 @@ dialects which do state one of these paramstyles will continue to escape as is appropriate:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%symbol >>> from sqlalchemy.dialects import mysql - >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect())) + >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect())) some%%symbol As part of this change, the doubling that has been present when using @@ -1517,8 +1529,9 @@ A bug in the :func:`_expression.collate` and :meth:`.ColumnOperators.collate` functions, used to supply ad-hoc column collations at the statement level, is fixed, where a case sensitive name would not be quoted:: - stmt = select([mytable.c.x, mytable.c.y]).\ - order_by(mytable.c.somecolumn.collate("fr_FR")) + stmt = select([mytable.c.x, mytable.c.y]).order_by( + mytable.c.somecolumn.collate("fr_FR") + ) now renders:: @@ -1544,7 +1557,7 @@ Support for Batch Mode / Fast Execution Helpers The psycopg2 ``cursor.executemany()`` method has been identified as performing poorly, particularly with INSERT statements. To alleviate this, psycopg2 -has added `Fast Execution Helpers `_ +has added `Fast Execution Helpers `_ which rework statements into fewer server round trips by sending multiple DML statements in batch. SQLAlchemy 1.2 now includes support for these helpers to be used transparently whenever the :class:`_engine.Engine` makes use @@ -1553,8 +1566,8 @@ sets. The feature is off by default and can be enabled using the ``use_batch_mode`` argument on :func:`_sa.create_engine`:: engine = create_engine( - "postgresql+psycopg2://scott:tiger@host/dbname", - use_batch_mode=True) + "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True + ) The feature is considered to be experimental for the moment but may become on by default in a future release. @@ -1577,10 +1590,7 @@ now allows these values to be specified:: from sqlalchemy.dialects.postgresql import INTERVAL - Table( - 'my_table', metadata, - Column("some_interval", INTERVAL(fields="DAY TO SECOND")) - ) + Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND"))) Additionally, all INTERVAL datatypes can now be reflected independently of the "fields" specifier present; the "fields" parameter in the datatype @@ -1610,12 +1620,10 @@ This :class:`_expression.Insert` subclass adds a new method from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table). 
\ - values(id='some_id', data='some data to insert') + insert_stmt = insert(my_table).values(id="some_id", data="some data to insert") on_conflict_stmt = insert_stmt.on_duplicate_key_update( - data=insert_stmt.inserted.data, - status='U' + data=insert_stmt.inserted.data, status="U" ) conn.execute(on_conflict_stmt) @@ -1748,9 +1756,15 @@ name, rather than the raw UPPERCASE format that Oracle uses:: Previously, the foreign keys result would look like:: - [{'referred_table': u'users', 'referred_columns': [u'id'], - 'referred_schema': None, 'name': 'USER_ID_FK', - 'constrained_columns': [u'user_id']}] + [ + { + "referred_table": "users", + "referred_columns": ["id"], + "referred_schema": None, + "name": "USER_ID_FK", + "constrained_columns": ["user_id"], + } + ] Where the above could create problems particularly with Alembic autogenerate. @@ -1774,20 +1788,17 @@ now be passed using brackets to manually specify where this split occurs, allowing database and/or owner names that themselves contain one or more dots:: - Table( - "some_table", metadata, - Column("q", String(50)), - schema="[MyDataBase.dbo]" - ) + Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]") The above table will consider the "owner" to be ``MyDataBase.dbo``, which will also be quoted upon render, and the "database" as None. To individually refer to database name and owner, use two pairs of brackets:: Table( - "some_table", metadata, + "some_table", + metadata, Column("q", String(50)), - schema="[MyDataBase.SomeDB].[MyDB.owner]" + schema="[MyDataBase.SomeDB].[MyDB.owner]", ) Additionally, the :class:`.quoted_name` construct is now honored when diff --git a/doc/build/changelog/migration_13.rst b/doc/build/changelog/migration_13.rst index d7a26084e36..a8197c6c62d 100644 --- a/doc/build/changelog/migration_13.rst +++ b/doc/build/changelog/migration_13.rst @@ -85,7 +85,7 @@ Relationship to AliasedClass replaces the need for non primary mappers ----------------------------------------------------------------------- The "non primary mapper" is a :func:`.mapper` created in the -:ref:`classical_mapping` style, which acts as an additional mapper against an +:ref:`orm_imperative_mapping` style, which acts as an additional mapper against an already mapped class against a different kind of selectable. The non primary mapper has its roots in the 0.1, 0.2 series of SQLAlchemy where it was anticipated that the :func:`.mapper` object was to be the primary query @@ -130,14 +130,17 @@ like:: j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) B_viacd = mapper( - B, j, non_primary=True, primary_key=[j.c.b_id], + B, + j, + non_primary=True, + primary_key=[j.c.b_id], properties={ "id": j.c.b_id, # so that 'id' looks the same as before - "c_id": j.c.c_id, # needed for disambiguation + "c_id": j.c.c_id, # needed for disambiguation "d_c_id": j.c.d_c_id, # needed for disambiguation "b_id": [j.c.b_id, j.c.d_b_id], "d_id": j.c.d_id, - } + }, ) A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id) @@ -185,14 +188,14 @@ of collections all in one query without using JOIN or subqueries at all. 
Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", lazy="selectin") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -349,7 +352,7 @@ where the ``del`` operation is roughly equivalent to setting the attribute to th some_object = session.query(SomeObject).get(5) - del some_object.some_attribute # from a SQL perspective, works like "= None" + del some_object.some_attribute # from a SQL perspective, works like "= None" :ticket:`4354` @@ -366,10 +369,9 @@ along with that object's full lifecycle in memory:: from sqlalchemy import inspect - u1 = User(id=7, name='ed') - - inspect(u1).info['user_info'] = '7|ed' + u1 = User(id=7, name="ed") + inspect(u1).info["user_info"] = "7|ed" :ticket:`4257` @@ -399,23 +401,22 @@ Association proxy has new cascade_scalar_deletes flag Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) @@ -490,7 +491,7 @@ to a class-specific :class:`.AssociationProxyInstance`, demonstrated as:: class User(Base): # ... - keywords = association_proxy('kws', 'keyword') + keywords = association_proxy("kws", "keyword") proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) @@ -522,6 +523,7 @@ and is **not** an object reference or another association proxy:: # column-based association proxy values = association_proxy("elements", "value") + class Element(Base): # ... @@ -530,7 +532,7 @@ and is **not** an object reference or another association proxy:: The ``User.values`` association proxy refers to the ``Element.value`` column. Standard column operations are now available, such as ``like``:: - >>> print(s.query(User).filter(User.values.like('%foo%'))) + >>> print(s.query(User).filter(User.values.like("%foo%"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -539,7 +541,7 @@ Standard column operations are now available, such as ``like``:: ``equals``:: - >>> print(s.query(User).filter(User.values == 'foo')) + >>> print(s.query(User).filter(User.values == "foo")) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -564,7 +566,7 @@ comparison operator; **this is a change in behavior** in that previously, the association proxy used ``.contains`` as a list containment operator only. With a column-oriented comparison, it now behaves like a "like":: - >>> print(s.query(User).filter(User.values.contains('foo'))) + >>> print(s.query(User).filter(User.values.contains("foo"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -579,7 +581,7 @@ When using an object-based association proxy with a collection, the behavior is as before, that of testing for collection membership, e.g. 
given a mapping:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) user_elements = relationship("UserElement") @@ -589,7 +591,7 @@ as before, that of testing for collection membership, e.g. given a mapping:: class UserElement(Base): - __tablename__ = 'user_element' + __tablename__ = "user_element" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -598,7 +600,7 @@ as before, that of testing for collection membership, e.g. given a mapping:: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" id = Column(Integer, primary_key=True) value = Column(String) @@ -633,21 +635,21 @@ any use cases arise where it causes side effects. As an example, given a mapping with association proxy:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - b_data = association_proxy('bs', 'data') + b_data = association_proxy("bs", "data") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) data = Column(String) - a1 = A(bs=[B(data='b1'), B(data='b2')]) + a1 = A(bs=[B(data="b1"), B(data="b2")]) b_data = a1.b_data @@ -671,7 +673,7 @@ Above, because the ``A`` object would be garbage collected before the The change is that the ``b_data`` collection is now maintaining a strong reference to the ``a1`` object, so that it remains present:: - assert b_data == ['b1', 'b2'] + assert b_data == ["b1", "b2"] This change introduces the side effect that if an application is passing around the collection as above, **the parent object won't be garbage collected** until @@ -699,7 +701,9 @@ new association objects where appropriate:: id = Column(Integer, primary_key=True) b_rel = relationship( - "B", collection_class=set, cascade="all, delete-orphan", + "B", + collection_class=set, + cascade="all, delete-orphan", ) b = association_proxy("b_rel", "value", creator=lambda x: B(value=x)) @@ -712,6 +716,7 @@ new association objects where appropriate:: a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False) value = Column(String) + # ... s = Session(e) @@ -728,7 +733,6 @@ new association objects where appropriate:: # against the deleted ones. assert len(s.new) == 1 - :ticket:`2642` .. _change_1103: @@ -749,14 +753,14 @@ having a duplicate temporarily present in the list is intrinsic to a Python "swap" operation. Given a standard one-to-many/many-to-one setup:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -780,7 +784,7 @@ during the flush. The same issue can be demonstrated using plain duplicates:: >>> del a1.bs[1] >>> a1.bs # collection is unaffected so far... [<__main__.B object at 0x7f047af5fb70>] - >>> b1.a # however b1.a is None + >>> b1.a # however b1.a is None >>> >>> session.add(a1) >>> session.commit() # so upon flush + expire.... @@ -955,21 +959,21 @@ been removed. 
Previously, this did not take place for one-to-many, or one-to-one relationships, in the following situation:: class User(Base): - __tablename__ = 'users' + __tablename__ = "users" id = Column(Integer, primary_key=True) - addresses = relationship( - "Address", - passive_deletes="all") + addresses = relationship("Address", passive_deletes="all") + class Address(Base): - __tablename__ = 'addresses' + __tablename__ = "addresses" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('users.id')) + user_id = Column(Integer, ForeignKey("users.id")) user = relationship("User") + u1 = session.query(User).first() address = u1.addresses[0] u1.addresses.remove(address) @@ -1006,16 +1010,17 @@ joined together either with no separator or with an underscore separator. Below we define a convention that will name :class:`.UniqueConstraint` constraints with a name that joins together the names of all columns:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) table = Table( - 'info', metadata_obj, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer), - UniqueConstraint('a', 'b', 'c') + "info", + metadata_obj, + Column("a", Integer), + Column("b", Integer), + Column("c", Integer), + UniqueConstraint("a", "b", "c"), ) The CREATE TABLE for the above table will render as:: @@ -1037,11 +1042,12 @@ PostgreSQL where identifiers cannot be longer than 63 characters, a long constraint name would normally be generated from the table definition below:: long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) The truncation logic will ensure a too-long name isn't generated for the @@ -1137,17 +1143,16 @@ modifier to produce a :class:`.BinaryExpression` that has a "left" and a "right" side:: class Venue(Base): - __tablename__ = 'venue' + __tablename__ = "venue" id = Column(Integer, primary_key=True) name = Column(String) descendants = relationship( "Venue", - primaryjoin=func.instr( - remote(foreign(name)), name + "/" - ).as_comparison(1, 2) == 1, + primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2) + == 1, viewonly=True, - order_by=name + order_by=name, ) Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship @@ -1162,8 +1167,12 @@ lazyload to produce SQL like:: and a joinedload, such as:: - v1 = s.query(Venue).filter_by(name="parent1").options( - joinedload(Venue.descendants)).one() + v1 = ( + s.query(Venue) + .filter_by(name="parent1") + .options(joinedload(Venue.descendants)) + .one() + ) to work as:: @@ -1195,12 +1204,12 @@ backend, such as "SELECT CAST(NULL AS INTEGER) WHERE 1!=1" for PostgreSQL, >>> from sqlalchemy import select, literal_column, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(literal_column('1').in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... 
select([literal_column("1")]).where( + ... literal_column("1").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... ) SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1) The feature also works for tuple-oriented IN statements, where the "empty IN" @@ -1211,12 +1220,12 @@ such as on PostgreSQL:: >>> from sqlalchemy import select, literal_column, tuple_, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... tuple_(50, "somestring").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... ) SELECT 1 WHERE (%(param_1)s, %(param_2)s) IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1) @@ -1239,6 +1248,7 @@ variant expression in order to locate these methods:: from sqlalchemy import TypeDecorator, LargeBinary, func + class CompressedLargeBinary(TypeDecorator): impl = LargeBinary @@ -1248,13 +1258,15 @@ variant expression in order to locate these methods:: def column_expression(self, col): return func.uncompress(col, type_=self) + MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite") The above expression will render a function within SQL when used on SQLite only:: from sqlalchemy import select, column from sqlalchemy.dialects import sqlite - print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) + + print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) will render:: @@ -1445,17 +1457,20 @@ queries used until now. Given a schema such as:: dv = Table( - 'data_values', metadata_obj, - Column('modulus', Integer, nullable=False), - Column('data', String(30)), - postgresql_partition_by='range(modulus)') + "data_values", + metadata_obj, + Column("modulus", Integer, nullable=False), + Column("data", String(30)), + postgresql_partition_by="range(modulus)", + ) sa.event.listen( dv, "after_create", sa.DDL( "CREATE TABLE data_values_4_10 PARTITION OF data_values " - "FOR VALUES FROM (4) TO (10)") + "FOR VALUES FROM (4) TO (10)" + ), ) The two table names ``'data_values'`` and ``'data_values_4_10'`` will come @@ -1492,9 +1507,7 @@ can now be explicitly ordered by passing a list of 2-tuples:: from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table).values( - id='some_existing_id', - data='inserted value') + insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value") on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( [ @@ -1542,10 +1555,11 @@ keyword added to objects like :class:`.UniqueConstraint` as well as several :class:`_schema.Column` -specific variants:: some_table = Table( - 'some_table', metadata_obj, - Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'), - Column('data', Integer), - UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE') + "some_table", + metadata_obj, + Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"), + Column("data", Integer), + UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"), ) The above table would render in a CREATE TABLE statement as:: @@ -1651,7 +1665,8 @@ Pass it via :func:`_sa.create_engine`:: engine = create_engine( "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server", - fast_executemany=True) + 
fast_executemany=True, + ) .. seealso:: @@ -1678,12 +1693,16 @@ new ``mssql_identity_start`` and ``mssql_identity_increment`` parameters on :class:`_schema.Column`:: test = Table( - 'test', metadata_obj, + "test", + metadata_obj, Column( - 'id', Integer, primary_key=True, mssql_identity_start=100, - mssql_identity_increment=10 + "id", + Integer, + primary_key=True, + mssql_identity_start=100, + mssql_identity_increment=10, ), - Column('name', String(20)) + Column("name", String(20)), ) In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used @@ -1693,9 +1712,10 @@ primary key column:: test = Table( - 'test', metadata_obj, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('number', Integer, autoincrement=True) + "test", + metadata_obj, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("number", Integer, autoincrement=True), ) .. seealso:: diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index cca8c7e0e76..023550d8612 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -69,10 +69,12 @@ be encouraged to move to :term:`2.0 style` execution which allows Core construct to be used freely against ORM entities:: with Session(engine, future=True) as sess: - - stmt = select(User).where( - User.name == 'sandy' - ).join(User.addresses).where(Address.email_address.like("%gmail%")) + stmt = ( + select(User) + .where(User.name == "sandy") + .join(User.addresses) + .where(Address.email_address.like("%gmail%")) + ) result = sess.execute(stmt) @@ -102,7 +104,8 @@ Things to note about the above example: * Statements that work with ORM entities and are expected to return ORM results are invoked using :meth:`.orm.Session.execute`. See - :ref:`session_querying_20` for a primer. + :ref:`session_querying_20` for a primer. See also the following note + at :ref:`change_session_execute_result`. * a :class:`_engine.Result` object is returned, rather than a plain list, which itself is a much more sophisticated version of the previous ``ResultProxy`` @@ -121,16 +124,19 @@ Similar adjustments have been made to "bulk updates and deletes" such that Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk operations. A bulk update like the following:: - session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch") + session.query(User).filter(User.name == "sandy").update( + {"password": "foobar"}, synchronize_session="fetch" + ) can now be achieved in :term:`2.0 style` (and indeed the above runs internally in this way) as follows:: with Session(engine, future=True) as sess: - stmt = update(User).where( - User.name == 'sandy' - ).values(password="foobar").execution_options( - synchronize_session="fetch" + stmt = ( + update(User) + .where(User.name == "sandy") + .values(password="foobar") + .execution_options(synchronize_session="fetch") ) sess.execute(stmt) @@ -147,6 +153,49 @@ for some examples). :ticket:`5159` + +.. _change_session_execute_result: + +ORM ``Session.execute()`` uses "future" style ``Result`` sets in all cases +-------------------------------------------------------------------------- + +As noted in :ref:`change_4710_core`, the :class:`_engine.Result` and +:class:`_engine.Row` objects now feature "named tuple" behavior, when used with +an :class:`_engine.Engine` that includes the +:paramref:`_sa.create_engine.future` parameter set to ``True``. 
These +"named tuple" rows in particular include a behavioral change which is that +Python containment expressions using ``in``, such as:: + + >>> engine = create_engine("...", future=True) + >>> conn = engine.connect() + >>> row = conn.execute.first() + >>> "name" in row + True + +The above containment test will +use **value containment**, not **key containment**; the ``row`` would need to +have a **value** of "name" to return ``True``. + +Under SQLAlchemy 1.4, when :paramref:`_sa.create_engine.future` parameter set +to ``False``, legacy-style ``LegacyRow`` objects are returned which feature the +partial-named-tuple behavior of prior SQLAlchemy versions, where containment +checks continue to use key containment; ``"name" in row`` would return +True if the row had a **column** named "name", rather than a value. + +When using :meth:`_orm.Session.execute`, full named-tuple style is enabled +**unconditionally**, meaning ``"name" in row`` will use **value containment** +as the test, and **not** key containment. This is to accommodate that +:meth:`_orm.Session.execute` now returns a :class:`_engine.Result` that also +accommodates for ORM results, where even legacy ORM result rows such as those +returned by :meth:`_orm.Query.all` use value containment. + +This is a behavioral change from SQLAlchemy 1.3 to 1.4. To continue receiving +key-containment collections, use the :meth:`_engine.Result.mappings` method to +receive a :class:`_engine.MappingResult` that returns rows as dictionaries:: + + for dict_row in session.execute(text("select id from table")).mappings(): + assert "id" in dict_row + .. _change_4639: Transparent SQL Compilation Caching added to All DQL, DML Statements in Core, ORM @@ -288,7 +337,7 @@ the :class:`_orm.registry` object, and fall into these categories: * Declarative Table * Imperative Table (Hybrid) * :ref:`orm_declarative_dataclasses` -* :ref:`Imperative (a.k.a. "classical" mapping) ` +* :ref:`Imperative (a.k.a. "classical" mapping) ` * Using :meth:`_orm.registry.map_imperatively` * :ref:`orm_imperative_dataclasses` @@ -676,7 +725,7 @@ that are in the columns clause of the SELECT statement. 
A common beginner mist is code such as the following:: stmt = select(users) - stmt = stmt.where(stmt.c.name == 'foo') + stmt = stmt.where(stmt.c.name == "foo") The above code appears intuitive and that it would generate "SELECT * FROM users WHERE name='foo'", however veteran SQLAlchemy users will @@ -688,8 +737,7 @@ the use case above, as in a case like the above it links directly to the columns present in the ``users.c`` collection:: stmt = select(users) - stmt = stmt.where(stmt.selected_columns.name == 'foo') - + stmt = stmt.where(stmt.selected_columns.name == "foo") :ticket:`4617` @@ -745,7 +793,9 @@ With the new implementation, :meth:`_sql.Select.join` and :meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by matching to the left entity:: - stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id) + stmt = select(user_table).join( + addresses_table, user_table.c.id == addresses_table.c.user_id + ) producing:: @@ -839,7 +889,7 @@ returns a new :class:`_engine.URL` object with changes applied:: To alter the contents of the :attr:`_engine.URL.query` dictionary, methods such as :meth:`_engine.URL.update_query_dict` may be used:: - >>> url.update_query_dict({"sslcert": '/path/to/crt'}) + >>> url.update_query_dict({"sslcert": "/path/to/crt"}) postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt To upgrade code that is mutating these fields directly, a **backwards and @@ -855,6 +905,7 @@ style:: some_url.drivername = some_drivername return some_url + def set_ssl_cert(some_url, ssl_cert): # check for 1.4 if hasattr(some_url, "update_query_dict"): @@ -869,7 +920,9 @@ to strings, using sequences of strings to represent multiple parameters. For example:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'}) @@ -901,25 +954,24 @@ method. A backwards compatible approach would look like:: from sqlalchemy.engine import CreateEnginePlugin + class MyPlugin(CreateEnginePlugin): def __init__(self, url, kwargs): # check for 1.4 style if hasattr(CreateEnginePlugin, "update_url"): - self.my_argument_one = url.query['my_argument_one'] - self.my_argument_two = url.query['my_argument_two'] + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] else: # legacy - self.my_argument_one = url.query.pop('my_argument_one') - self.my_argument_two = url.query.pop('my_argument_two') + self.my_argument_one = url.query.pop("my_argument_one") + self.my_argument_two = url.query.pop("my_argument_two") - self.my_argument_three = kwargs.pop('my_argument_three', None) + self.my_argument_three = kwargs.pop("my_argument_three", None) def update_url(self, url): # this method runs in 1.4 only and should be used to consume # plugin-specific arguments - return url.difference_update_query( - ["my_argument_one", "my_argument_two"] - ) + return url.difference_update_query(["my_argument_one", "my_argument_two"]) See the docstring at :class:`_engine.CreateEnginePlugin` for complete details on how this class is used. 
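For completeness, a minimal usage sketch of the plugin above, assuming it has been registered under the hypothetical entrypoint name ``myplugin`` in the ``sqlalchemy.plugins`` group; the plugin-specific URL query arguments and the extra keyword argument are consumed by the plugin before the dialect ever sees them::

    from sqlalchemy import create_engine

    # "my_argument_one" / "my_argument_two" are removed from the URL by the
    # plugin (update_url() on 1.4, url.query.pop() on 1.3);
    # "my_argument_three" is popped from the create_engine() keyword
    # arguments by the plugin's __init__()
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test"
        "?my_argument_one=foo&my_argument_two=bar",
        plugins=["myplugin"],
        my_argument_three="bat",
    )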
@@ -974,9 +1026,9 @@ track for the old calling style:: stmt = select(users_table).where( case( - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J'), - else_='E' + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + else_="E", ) ) @@ -1128,9 +1180,11 @@ not line up with these two tables will create an additional FROM entry:: address_alias = aliased(Address) - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo') + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + ) The above query selects from a JOIN of ``User`` and ``address_alias``, the latter of which is an alias of the ``Address`` entity. However, the @@ -1189,11 +1243,13 @@ JOIN clauses but also through the WHERE clause Above, we can add a WHERE clause to link the new ``Address`` entity with the previous ``address_alias`` entity and that will remove the warning:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id == address_alias.id) # resolve cartesian products, - # will no longer warn + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id == address_alias.id) + ) # resolve cartesian products, + # will no longer warn The cartesian product warning considers **any** kind of link between two FROM clauses to be a resolution, even if the end result set is still @@ -1201,11 +1257,13 @@ wasteful, as the linter is intended only to detect the common case of a FROM clause that is completely unexpected. If the FROM clause is referred to explicitly elsewhere and linked to the other FROMs, no warning is emitted:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id > address_alias.id) # will generate a lot of rows, - # but no warning + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id > address_alias.id) + ) # will generate a lot of rows, + # but no warning Full cartesian products are also allowed if they are explicitly stated; if we wanted for example the cartesian product of ``User`` and ``Address``, we can @@ -1256,7 +1314,6 @@ including methods such as: with engine.connect() as conn: row = conn.execute(table.select().where(table.c.id == 5)).one() - :meth:`_engine.Result.one_or_none` - same, but also returns None for no rows :meth:`_engine.Result.all` - returns all rows @@ -1278,12 +1335,12 @@ including methods such as: .. sourcecode:: with engine.connect() as conn: - # requests x, y, z - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + # requests x, y, z + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - # iterate rows as y, x - for y, x in result.columns("y", "x"): - print("Y: %s X: %s" % (y, x)) + # iterate rows as y, x + for y, x in result.columns("y", "x"): + print("Y: %s X: %s" % (y, x)) :meth:`_engine.Result.scalars` - returns lists of scalar objects, from the first column by default but can also be selected: @@ -1300,10 +1357,10 @@ dictionaries: .. 
sourcecode:: with engine.connect() as conn: - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - for map_ in result.mappings(): - print("Y: %(y)s X: %(x)s" % map_) + for map_ in result.mappings(): + print("Y: %(y)s X: %(x)s" % map_) When using Core, the object returned by :meth:`_engine.Connection.execute` is an instance of :class:`.CursorResult`, which continues to feature the same API @@ -1374,8 +1431,8 @@ can be summarized. Given a "named tuple" in pseudo code as:: The biggest cross-incompatible difference is the behavior of ``__contains__``:: - "id" in row # True for a mapping, False for a named tuple - "some name" in row # False for a mapping, True for a named tuple + "id" in row # True for a mapping, False for a named tuple + "some name" in row # False for a mapping, True for a named tuple In 1.4, when a :class:`.LegacyRow` is returned by a Core result set, the above ``"id" in row`` comparison will continue to succeed, however a deprecation @@ -1402,7 +1459,7 @@ when the row was first fetched. This means for example when retrieving a datetime value from SQLite, the data for the row as present in the :class:`.RowProxy` object would previously have looked like:: - row_proxy = (1, '2019-12-31 19:56:58.272106') + row_proxy = (1, "2019-12-31 19:56:58.272106") and then upon access via ``__getitem__``, the ``datetime.strptime()`` function would be used on the fly to convert the above string date into a ``datetime`` @@ -1463,6 +1520,8 @@ There are many reasons why the above assumptions do not hold: :ref:`change_4710_orm` + :ref:`change_session_execute_result` + :ticket:`4710` .. _change_4753: @@ -1478,8 +1537,8 @@ allows for greater cross-compatibility between the two, which is a key goal of the 2.0 transition:: >>> from sqlalchemy import column, select - >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4') - >>> stmt = select(c1, c2, c3.label('c2'), c2, c4) + >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4") + >>> stmt = select(c1, c2, c3.label("c2"), c2, c4) >>> print(stmt) SELECT c1, c2, c3 AS c2, c2, c4 @@ -1522,7 +1581,7 @@ does not imply deduplication of column objects, although it does imply deduplication of implicitly generated labels:: >>> from sqlalchemy import table - >>> user = table('user', column('id'), column('name')) + >>> user = table("user", column("id"), column("name")) >>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels() >>> print(stmt) SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1 @@ -1606,7 +1665,7 @@ prominently with CAST:: For CAST against expressions that don't have a name, the previous logic is used to generate the usual "anonymous" labels:: - >>> print(select(cast('hi there,' + foo.c.data, String))) + >>> print(select(cast("hi there," + foo.c.data, String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1 FROM foo @@ -1614,14 +1673,14 @@ A :func:`.cast` against a :class:`.Label`, despite having to omit the label expression as these don't render inside of a CAST, will nonetheless make use of the given name:: - >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String))) + >>> print(select(cast(("hi there," + foo.c.data).label("hello_data"), String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo And of course as was always the case, :class:`.Label` can be applied to the expression on the outside to apply an "AS " label directly:: - >>> 
print(select(cast(('hi there,' + foo.c.data), String).label('hello_data'))) + >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data"))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo @@ -1768,7 +1827,6 @@ flags to ``True``:: boolean = Column(Boolean(create_constraint=True)) enum = Column(Enum("a", "b", "c", create_constraint=True)) - :ticket:`5367` New Features - ORM @@ -1796,13 +1854,14 @@ To configure column-level raiseload on a mapping, the the attribute:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() It was originally considered that the existing :func:`.raiseload` option that @@ -1810,8 +1869,7 @@ works for :func:`_orm.relationship` attributes be expanded to also support colum attributes. However, this would break the "wildcard" behavior of :func:`.raiseload`, which is documented as allowing one to prevent all relationships from loading:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) Above, if we had expanded :func:`.raiseload` to accommodate for columns as well, the wildcard would also prevent columns from loading and thus be a @@ -2003,11 +2061,7 @@ as entity / column should work:: row._mapping[u1] # same as row[0] - row = ( - s.query(User.id, Address.email_address) - .join(User.addresses) - .first() - ) + row = s.query(User.id, Address.email_address).join(User.addresses).first() row._mapping[User.id] # same as row[0] row._mapping["id"] # same as row[0] @@ -2202,13 +2256,11 @@ use of the :paramref:`_orm.Session.future` flag to :term:`2.0-style` mode:: Session = sessionmaker(engine, future=True) with Session() as session: - u1 = User() - session.add(u1) - - a1 = Address() - a1.user = u1 # <--- will not add "a1" to the Session - + u1 = User() + session.add(u1) + a1 = Address() + a1.user = u1 # <--- will not add "a1" to the Session :ticket:`5150` @@ -2225,7 +2277,7 @@ selectin/subquery loaders will run an "immediateload" operation for a given relationship, when an expired object is unexpired or an object is refreshed:: >>> a1 = session.query(A).options(joinedload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() Above, the ``A`` object was loaded with a ``joinedload()`` option associated @@ -2251,7 +2303,7 @@ a refresh scenario, which resembles the query emitted by "lazyload", emitted as an additional query:: >>> a1 = session.query(A).options(selectinload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() >>> a1.data SELECT a.id AS a_id, a.data AS a_data @@ -2273,6 +2325,175 @@ to be more noticeable. :ticket:`1763` +.. _change_8879: + +Column loaders such as ``deferred()``, ``with_expression()`` only take effect when indicated on the outermost, full entity query +-------------------------------------------------------------------------------------------------------------------------------- + +.. note:: This change note was not present in earlier versions of this document, + however is relevant for all SQLAlchemy 1.4 versions. 
+ +A behavior that was never supported in 1.3 and previous versions +yet nonetheless would have a particular effect +was to repurpose column loader options such as :func:`_orm.defer` and +:func:`_orm.with_expression` in subqueries in order to control which +SQL expressions would be in the columns clause of each subquery. A typical +example would be to +construct UNION queries, such as:: + + q1 = session.query(User).options(with_expression(User.expr, literal("u1"))) + q2 = session.query(User).options(with_expression(User.expr, literal("u2"))) + + q1.union_all(q2).all() + +In version 1.3, the :func:`_orm.with_expression` option would take effect +for each element of the UNION, such as: + +.. sourcecode:: sql + + SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.user_account_id AS anon_1_user_account_id, + anon_1.user_account_name AS anon_1_user_account_name + FROM ( + SELECT ? AS anon_2, user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT ? AS anon_3, user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_1 + ('u1', 'u2') + +SQLAlchemy 1.4's notion of loader options has been made more strict, and as such +are applied to the **outermost part of the query only**, which is the +SELECT that is intended to populate the actual ORM entities to be returned; the +query above in 1.4 will produce: + +.. sourcecode:: sql + + SELECT ? AS anon_1, anon_2.user_account_id AS anon_2_user_account_id, + anon_2.user_account_name AS anon_2_user_account_name + FROM ( + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_2 + ('u1',) + +that is, the options for the :class:`_orm.Query` were taken from the first +element of the UNION, since all loader options are only to be at the topmost +level. The option from the second query was ignored. + +Rationale +^^^^^^^^^ + +This behavior now more closely matches that of other kinds of loader options +such as relationship loader options like :func:`_orm.joinedload` in all +SQLAlchemy versions, 1.3 and earlier included, which in a UNION situation were +already copied out to the top most level of the query, and only taken from the +first element of the UNION, discarding any options on other parts of the query. + +This implicit copying and selective ignoring of options, demonstrated above as +being fairly arbitrary, is a legacy behavior that's only part of +:class:`_orm.Query`, and is a particular example of where :class:`_orm.Query` +and its means of applying :meth:`_orm.Query.union_all` falls short, as it's +ambiguous how to turn a single SELECT into a UNION of itself and another query +and how loader options should be applied to that new statement. + +SQLAlchemy 1.4's behavior can be demonstrated as generally superior to that +of 1.3 for a more common case of using :func:`_orm.defer`. The following +query:: + + q1 = session.query(User).options(defer(User.name)) + q2 = session.query(User).options(defer(User.name)) + + q1.union_all(q2).all() + +In 1.3 would awkwardly add NULL to the inner queries and then SELECT it: + +.. 
sourcecode:: sql + + SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.user_account_id AS anon_1_user_account_id + FROM ( + SELECT NULL AS anon_2, user_account.id AS user_account_id + FROM user_account + UNION ALL + SELECT NULL AS anon_2, user_account.id AS user_account_id + FROM user_account + ) AS anon_1 + +If all queries didn't have the identical options set up, the above scenario +would raise an error due to not being able to form a proper UNION. + +Whereas in 1.4, the option is applied only at the top layer, omitting +the fetch for ``User.name``, and this complexity is avoided: + +.. sourcecode:: sql + + SELECT anon_1.user_account_id AS anon_1_user_account_id + FROM ( + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_1 + +Correct Approach +^^^^^^^^^^^^^^^^ + +Using :term:`2.0-style` querying, no warning is emitted at the moment, however +the nested :func:`_orm.with_expression` options are consistently ignored as +they don't apply to an entity being loaded, and are not implicitly copied +anywhere. The query below produces no output for the +:func:`_orm.with_expression` calls:: + + s1 = select(User).options(with_expression(User.expr, literal("u1"))) + s2 = select(User).options(with_expression(User.expr, literal("u2"))) + + stmt = union_all(s1, s2) + + session.scalars(select(User).from_statement(stmt)).all() + +producing the SQL: + +.. sourcecode:: sql + + SELECT user_account.id, user_account.name + FROM user_account + UNION ALL + SELECT user_account.id, user_account.name + FROM user_account + +To correctly apply :func:`_orm.with_expression` to the ``User`` entity, +it should be applied to the outermost level of the query, using an +ordinary SQL expression inside the columns clause of each SELECT:: + + s1 = select(User, literal("u1").label("some_literal")) + s2 = select(User, literal("u2").label("some_literal")) + + stmt = union_all(s1, s2) + + session.scalars( + select(User) + .from_statement(stmt) + .options(with_expression(User.expr, stmt.selected_columns.some_literal)) + ).all() + +Which will produce the expected SQL: + +.. sourcecode:: sql + + SELECT user_account.id, user_account.name, ? AS some_literal + FROM user_account + UNION ALL + SELECT user_account.id, user_account.name, ? AS some_literal + FROM user_account + +The ``User`` objects themselves will include this expression in their +contents underneath ``User.expr``. + + .. _change_4519: Accessing an uninitialized collection attribute on a transient object no longer mutates __dict__ @@ -2333,9 +2554,11 @@ eventually identified in :ticket:`4519` where this empty collection could be harmful, which is when the object is merged into a session:: >>> u1 = User(id=1) # create an empty User to merge with id=1 in the database - >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB + >>> merged1 = session.merge( + ... u1 + ... 
) # value of merged1.addresses is unchanged from that of the DB - >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database + >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database >>> u2.addresses [] >>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB @@ -2364,7 +2587,9 @@ however is not added to ``__dict__`` until it is actually mutated:: >>> u1 = User() >>> l1 = u1.addresses # new list is created, associated with the state >>> assert u1.addresses is l1 # you get the same list each time you access it - >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated + >>> assert ( + ... "addresses" not in u1.__dict__ + ... ) # but it won't go into __dict__ until it's mutated >>> from sqlalchemy import inspect >>> inspect(u1).attrs.addresses.history History(added=None, unchanged=None, deleted=None) @@ -2386,7 +2611,9 @@ the object contains certain values based on its ``__dict__``:: >>> u1.addresses [] # this will now fail, would pass before - >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []} + >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == { + ... "addresses": [] + ... } or to ensure that the collection won't require a lazy load to proceed, the (admittedly awkward) code below will now also fail:: @@ -2415,10 +2642,11 @@ SQLAlchemy has always had logic to detect when an object in the :class:`.Session to be inserted has the same primary key as an object that is already present:: class Product(Base): - __tablename__ = 'product' + __tablename__ = "product" id = Column(Integer, primary_key=True) + session = Session(engine) # add Product with primary key 1 @@ -2500,8 +2728,7 @@ disallowed:: # ... # this is now an error - addresses = relationship( - "Address", viewonly=True, cascade="all, delete-orphan") + addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan") The above will raise:: @@ -2542,10 +2769,7 @@ inheritance mapping:: s.commit() - print( - s.query(Manager).select_entity_from(s.query(Employee).subquery()).all() - ) - + print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()) The subquery selects both the ``Engineer`` and the ``Manager`` rows, and even though the outer query is against ``Manager``, we get a non ``Manager`` @@ -2818,8 +3042,9 @@ effect. When "optional" is used on a :class:`.Sequence` that is present in the integer primary key column of a table:: Table( - "some_table", metadata, - Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True) + "some_table", + metadata, + Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True), ) The above :class:`.Sequence` is only used for DDL and INSERT statements if the diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 79e198d09c4..8f7b45c3e40 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -1,8 +1,8 @@ .. _migration_20_toplevel: -============================= +=========================== Migrating to SQLAlchemy 2.0 -============================= +=========================== .. 
admonition:: About this document @@ -30,7 +30,7 @@ Migrating to SQLAlchemy 2.0 Overview -======== +-------- The SQLAlchemy 2.0 transition presents itself in the SQLAlchemy 1.4 release as a series of steps that allow an application of any size or complexity to be @@ -48,13 +48,16 @@ new ORM declarative system that unifies classical and declarative mapping, support for Python dataclasses, and asyncio support for Core and ORM. The steps to achieve 2.0 migration are in the following subsections; overall, -the general strategy is that once an application runs on 1.4 with all -warning flags turned on and does not emit any 2.0-deprecation warnings, it is -now cross-compatible with SQLAlchemy 2.0. +the general strategy is that once an application runs on 1.4 with all warning +flags turned on and does not emit any 2.0-deprecation warnings, it is now +**mostly** cross-compatible with SQLAlchemy 2.0. **Please note there may be +additional API and behavioral changes that may behave differently when running +against SQLAlchemy 2.0; always test code against an actual SQLAlchemy 2.0 +release as the final step in migrating**. First Prerequisite, step one - A Working 1.3 Application ---------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is getting an existing application onto 1.4, in the case of a typical non trivial application, is to ensure it runs on SQLAlchemy 1.3 with @@ -70,7 +73,7 @@ warnings; these are warnings emitted for the :class:`_exc.SADeprecationWarning` class. First Prerequisite, step two - A Working 1.4 Application --------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once the application is good to go on SQLAlchemy 1.3, the next step is to get it running on SQLAlchemy 1.4. In the vast majority of cases, applications @@ -117,26 +120,21 @@ as being in this realm are as follows: For the full overview of SQLAlchemy 1.4 changes, see the :doc:`/changelog/migration_14` document. -Migration to 2.0 Step One - Python 3 only (Python 3.6 minimum) --------------------------------------------------------------- +Migration to 2.0 Step One - Python 3 only (Python 3.7 minimum for 2.0 compatibility) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -SQLAlchemy 2.0 was first inspired by the fact that Python 2's EOL was in -2020. SQLAlchemy is taking a longer period of time than other major -projects to drop Python 2.7 support, since it is not too much in the way -of things for the moment. However, version 2.0 hopes to start embracing -:pep:`484` and other new features to a great degree, so it is likely -that release 1.4 will be the last Python 2 supporting version, even if -there is a SQLAlchemy 1.5 (which is also unlikely at the moment). - -In order to use SQLAlchemy 2.0, the application will need to be runnable on -at least **Python 3.6** as of this writing. SQLAlchemy 1.4 now supports -Python 3.6 or newer within the Python 3 series; throughout the 1.4 series, -the application can remain running on Python 2.7 or on at least Python 3.6. +SQLAlchemy 2.0 was first inspired by the fact that Python 2's EOL was in 2020. +SQLAlchemy is taking a longer period of time than other major projects to drop +Python 2.7 support. However, in order to use SQLAlchemy 2.0, the application +will need to be runnable on at least **Python 3.7**. 
SQLAlchemy 1.4 supports +Python 3.6 or newer within the Python 3 series; throughout the 1.4 series, the +application can remain running on Python 2.7 or on at least Python 3.6. Version +2.0 however starts at Python 3.7. .. _migration_20_deprecations_mode: Migration to 2.0 Step Two - Turn on RemovedIn20Warnings -------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy 1.4 features a conditional deprecation warning system inspired by the Python "-3" flag that would indicate legacy patterns in a running @@ -165,8 +163,8 @@ Given the example program below:: The above program uses several patterns that many users will already identify as "legacy", namely the use of the :meth:`_engine.Engine.execute` method -that's part of the :ref:`connectionless execution ` -system. When we run the above program against 1.4, it returns a single line:: +that's part of the "connectionless execution" API. When we run the above +program against 1.4, it returns a single line:: $ python test3.py [(1,)] @@ -234,14 +232,13 @@ as a bonus our program is much clearer:: print(result.fetchall()) - The goal of "2.0 deprecations mode" is that a program which runs with no :class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned on is then ready to run in SQLAlchemy 2.0. Migration to 2.0 Step Three - Resolve all RemovedIn20Warnings --------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code can be developed iteratively to resolve these warnings. Within the SQLAlchemy project itself, the approach taken is as follows: @@ -260,26 +257,25 @@ the SQLAlchemy project itself, the approach taken is as follows: import warnings from sqlalchemy import exc - + # for warnings not included in regex-based filter below, just log - warnings.filterwarnings( - "always", category=exc.RemovedIn20Warning - ) - + warnings.filterwarnings("always", category=exc.RemovedIn20Warning) + # for warnings related to execute() / scalar(), raise for msg in [ r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function", - r"The current statement is being autocommitted using implicit " - "autocommit,", + r"The current statement is being autocommitted using implicit " "autocommit,", r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept " "parameters as a single dictionary or a single sequence of " "dictionaries only.", r"The Connection.connect\(\) function/method is considered legacy", r".*DefaultGenerator.execute\(\)", ]: - warnings.filterwarnings( - "error", message=msg, category=exc.RemovedIn20Warning, - ) + warnings.filterwarnings( + "error", + message=msg, + category=exc.RemovedIn20Warning, + ) 3. As each sub-category of warnings are resolved in the application, new warnings that are caught by the "always" filter can be added to the list @@ -288,7 +284,7 @@ the SQLAlchemy project itself, the approach taken is as follows: 4. Once no more warnings are emitted, the filter can be removed. Migration to 2.0 Step Four - Use the ``future`` flag on Engine --------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`_engine.Engine` object features an updated transaction-level API in version 2.0. 
In 1.4, this new API is available @@ -325,10 +321,8 @@ The new engine is described at :class:`_future.Engine` which delivers a new conn.commit() # commit as you go - - -Migration to 2.0 Step Four - Use the ``future`` flag on Session ---------------------------------------------------------------- +Migration to 2.0 Step Five - Use the ``future`` flag on Session +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`_orm.Session` object also features an updated transaction/connection level API in version 2.0. This API is available in 1.4 using the @@ -360,6 +354,7 @@ in 1.4 which are now closely matched to the patterns used by the :class:`_orm.Session` may be used as a context manager:: from sqlalchemy.orm import Session + with Session(engine) as session: session.add(MyObject()) session.commit() @@ -386,15 +381,116 @@ and all ``exc.RemovedIn20Warning`` occurrences set to raise an error, The sections that follow will detail the specific changes to make for all major API modifications. +.. _migration_20_step_six: + +Migration to 2.0 Step Six - Add ``__allow_unmapped__`` to explicitly typed ORM models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +SQLAlchemy 2.0 has new support for runtime interpretation of :pep:`484` typing annotations +on ORM models. A requirement of these annotations is that they must make use +of the :class:`_orm.Mapped` generic container. Annotations which don't use +:class:`_orm.Mapped` which link to constructs such as :func:`_orm.relationship` +will raise errors in Python, as they suggest mis-configurations. + +SQLAlchemy applications that use the :ref:`Mypy plugin ` with +explicit annotations that don't use :class:`_orm.Mapped` in their annotations +are subject to these errors, as would occur in the example below:: + + Base = declarative_base() + + + class Foo(Base): + __tablename__ = "foo" + + id: int = Column(Integer, primary_key=True) + + # will raise + bars: list["Bar"] = relationship("Bar", back_populates="foo") + + + class Bar(Base): + __tablename__ = "bar" + + id: int = Column(Integer, primary_key=True) + foo_id = Column(ForeignKey("foo.id")) + + # will raise + foo: Foo = relationship(Foo, back_populates="bars", cascade="all") + +Above, the ``Foo.bars`` and ``Bar.foo`` :func:`_orm.relationship` declarations +will raise an error at class construction time because they don't use +:class:`_orm.Mapped` (by contrast, the annotations that use +:class:`_schema.Column` are ignored by 2.0, as these are able to be +recognized as a legacy configuration style). To allow all annotations that +don't use :class:`_orm.Mapped` to pass without error, +the ``__allow_unmapped__`` attribute may be used on the class or any +subclasses, which will cause the annotations in these cases to be +ignored completely by the new Declarative system. + +.. note:: The ``__allow_unmapped__`` directive applies **only** to the + *runtime* behavior of the ORM. It does not affect the behavior of + Mypy, and the above mapping as written still requires that the Mypy + plugin be installed. For fully 2.0 style ORM models that will type + correctly under Mypy *without* a plugin, see the section named + "Migrating an Existing Mapping" in the "What's New in SQLAlchemy 2.0?" + document of the SQLAlchemy 2.0 documentation; this is the SQLAlchemy + 1.4 documentation. 
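For fully 2.0 style declarations, the note above points to the 2.0 documentation; as a hedged orientation only, the sketch below shows how the same ``Foo``/``Bar`` mapping might look under SQLAlchemy 2.0's annotated declarative API (``DeclarativeBase``, ``mapped_column()``), which is not available in 1.4 itself::

    from typing import List

    from sqlalchemy import ForeignKey, Integer
    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship


    class Base(DeclarativeBase):
        pass


    class Foo(Base):
        __tablename__ = "foo"

        id: Mapped[int] = mapped_column(Integer, primary_key=True)

        # Mapped[] makes the relationship annotation acceptable to both the
        # ORM runtime and a type checker, without needing __allow_unmapped__
        bars: Mapped[List["Bar"]] = relationship(back_populates="foo")


    class Bar(Base):
        __tablename__ = "bar"

        id: Mapped[int] = mapped_column(Integer, primary_key=True)
        foo_id: Mapped[int] = mapped_column(ForeignKey("foo.id"))

        foo: Mapped["Foo"] = relationship(back_populates="bars")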
+ +The example below illustrates the application of ``__allow_unmapped__`` +to the Declarative ``Base`` class, where it will take effect for all classes +that descend from ``Base``:: + + # qualify the base with __allow_unmapped__. Can also be + # applied to classes directly if preferred + class Base: + __allow_unmapped__ = True + + + Base = declarative_base(cls=Base) + + # existing mapping proceeds, Declarative will ignore any annotations + # which don't include ``Mapped[]`` + class Foo(Base): + __tablename__ = "foo" + + id: int = Column(Integer, primary_key=True) + + bars: list["Bar"] = relationship("Bar", back_populates="foo") + + + class Bar(Base): + __tablename__ = "bar" + + id: int = Column(Integer, primary_key=True) + foo_id = Column(ForeignKey("foo.id")) + + foo: Foo = relationship(Foo, back_populates="bars", cascade="all") + +.. versionchanged:: 2.0.0beta3 - improved the ``__allow_unmapped__`` + attribute support to allow for 1.4-style explicit annotated relationships + that don't use :class:`_orm.Mapped` to remain usable. + + +.. _migration_20_step_seven: + +Migration to 2.0 Step Seven - Test against a SQLAlchemy 2.0 Release +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As mentioned previously, SQLAlchemy 2.0 has additional API and behavioral +changes that are intended to be backwards compatible, however may introduce +some incompatibilities nonetheless. Therefore after the overall porting +process is complete, the final step is to test against the most recent release +of SQLAlchemy 2.0 to correct for any remaining issues that might be present. + 2.0 Migration - Core Connection / Transaction -============================================= +--------------------------------------------- .. _migration_20_autocommit: Library-level (but not driver level) "Autocommit" removed from both Core and ORM --------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -405,7 +501,7 @@ the underlying DBAPI transaction, but in SQLAlchemy conn = engine.connect() # won't autocommit in 2.0 - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) Nor will this autocommit:: @@ -421,10 +517,7 @@ execution option, will be removed:: conn = engine.connect() # won't autocommit in 2.0 - conn.execute( - text("EXEC my_procedural_thing()").execution_options(autocommit=True) - ) - + conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True)) **Migration to 2.0** @@ -433,13 +526,13 @@ style` execution is to make use of the :meth:`_engine.Connection.begin` method, or the :meth:`_engine.Engine.begin` context manager:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.connect() as conn: with conn.begin(): - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.begin() as conn: conn.execute(text("EXEC my_procedural_thing()")) @@ -451,8 +544,8 @@ when a statement is first invoked in the absence of an explicit call to :meth:`_future.Connection.begin`:: with engine.connect() as conn: - 
conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) conn.commit() @@ -490,7 +583,7 @@ explicit as to how the transaction should be used. For the vast majority of Core use cases, it's the pattern that is already recommended:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) For "commit as you go, or rollback instead" usage, which resembles how the :class:`_orm.Session` is normally used today, the "future" version of @@ -508,7 +601,7 @@ a statement is first invoked:: engine = create_engine(..., future=True) with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) conn.commit() conn.execute(text("some other SQL")) @@ -546,7 +639,7 @@ is turned on. .. _migration_20_implicit_execution: "Implicit" and "Connectionless" execution, "bound metadata" removed --------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -558,11 +651,11 @@ execution patterns, is removed:: metadata_obj = MetaData(bind=engine) # no longer supported - metadata_obj.create_all() # requires Engine or Connection + metadata_obj.create_all() # requires Engine or Connection metadata_obj.reflect() # requires Engine or Connection - t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine + t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine result = engine.execute(t.select()) # no longer supported @@ -592,7 +685,7 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(engine) # reflect individual table - t = Table('t', metadata_obj, autoload_with=engine) + t = Table("t", metadata_obj, autoload_with=engine) # connection level: @@ -607,12 +700,11 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(connection) # reflect individual table - t = Table('t', metadata_obj, autoload_with=connection) + t = Table("t", metadata_obj, autoload_with=connection) # execute SQL statements result = conn.execute(t.select()) - **Discussion** @@ -736,9 +828,8 @@ in the case that the operation is a write operation:: with conn.begin(): result = conn.execute(stmt) - execute() method more strict, execution options are more prominent -------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -756,18 +847,16 @@ require modification:: # positional parameters no longer supported, only named # unless using exec_driver_sql() - result = connection.execute(table.insert(), ('x', 'y', 'z')) + result = connection.execute(table.insert(), ("x", "y", "z")) # **kwargs no longer accepted, pass a single dictionary result = connection.execute(table.insert(), x=10, y=5) # multiple *args no longer accepted, pass a list result = connection.execute( - table.insert(), - {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} + table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} ) - **Migration to 2.0** The new :meth:`_future.Connection.execute` method now accepts a subset of the @@ -778,6 +867,7 @@ method, so the following code is cross-compatible between 1.x and 2.0:: connection = engine.connect() from sqlalchemy import text + result = 
connection.execute(text("select * from table")) # pass a single dictionary for single statement execution @@ -785,12 +875,9 @@ method, so the following code is cross-compatible between 1.x and 2.0:: # pass a list of dictionaries for executemany result = connection.execute( - table.insert(), - [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] + table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] ) - - **Discussion** The use of ``*args`` and ``**kwargs`` has been removed both to remove the @@ -817,7 +904,7 @@ given. .. _migration_20_result_rows: Result rows act like named tuples ---------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -832,11 +919,10 @@ tuples when using "future" mode:: row = result.first() # suppose the row is (1, 2) - "x" in row # evaluates to False, in 1.x / future=False, this would be True + "x" in row # evaluates to False, in 1.x / future=False, this would be True 1 in row # evaluates to True, in 1.x / future=False, this would be False - **Migration to 2.0** Application code or test suites that are testing for a particular key @@ -881,10 +967,7 @@ or attribute:: stmt = select(User, Address).join(User.addresses) for row in session.execute(stmt).mappings(): - print("the user is: %s the address is: %s" % ( - row[User], - row[Address] - )) + print("the user is: %s the address is: %s" % (row[User], row[Address])) .. seealso:: @@ -892,12 +975,12 @@ or attribute:: 2.0 Migration - Core Usage -============================= +-------------------------- .. _migration_20_5284: select() no longer accepts varied constructor arguments, columns are passed positionally ------------------------------------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **synopsis** @@ -921,14 +1004,10 @@ now accepts its WHEN criteria positionally, rather than as a list:: # list emits a deprecation warning case_clause = case( - [ - (table.c.x == 5, "five"), - (table.c.x == 7, "seven") - ], - else_="neither five nor seven" + [(table.c.x == 5, "five"), (table.c.x == 7, "seven")], + else_="neither five nor seven", ) - **Migration to 2.0** Only the "generative" style of :func:`_sql.select` will be supported. The list @@ -951,9 +1030,7 @@ is cross-compatible with 1.4 and 2.0:: # case conditions passed positionally case_clause = case( - (table.c.x == 5, "five"), - (table.c.x == 7, "seven"), - else_="neither five nor seven" + (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven" ) **Discussion** @@ -973,7 +1050,7 @@ documented style in the Core tutorial. Examples of "structural" vs. "data" elements are as follows:: # table columns for CREATE TABLE - structural - table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer)) + table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer)) # columns in a SELECT statement - structural stmt = select(table.c.x, table.c.y) @@ -988,7 +1065,7 @@ Examples of "structural" vs. 
"data" elements are as follows:: :ref:`error_c9ae` insert/update/delete DML no longer accept keyword constructor arguments ------------------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1006,10 +1083,7 @@ constructor arguments to :func:`_sql.insert`, :func:`_sql.update` and stmt = table.delete(table.c.x > 15) # no longer supported - stmt = table.update( - table.c.x < 15, - preserve_parameter_order=True - ).values( + stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values( [(table.c.y, 20), (table.c.x, table.c.y + 10)] ) @@ -1028,10 +1102,12 @@ examples:: stmt = table.delete().where(table.c.x > 15) # use generative methods, ordered_values() replaces preserve_parameter_order - stmt = table.update().where( - table.c.x < 15, - ).ordered_values( - (table.c.y, 20), (table.c.x, table.c.y + 10) + stmt = ( + table.update() + .where( + table.c.x < 15, + ) + .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10)) ) **Discussion** @@ -1042,10 +1118,10 @@ manner as that of the :func:`_sql.select` construct. 2.0 Migration - ORM Configuration -============================================= +--------------------------------- Declarative becomes a first class API -------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1086,7 +1162,7 @@ at :ref:`change_5508`. The original "mapper()" function now a core element of Declarative, renamed ----------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1102,9 +1178,7 @@ Code that works with classical mappings should change imports and code from:: from sqlalchemy.orm import mapper - mapper(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}) To work from a central :class:`_orm.registry` object:: @@ -1112,9 +1186,9 @@ To work from a central :class:`_orm.registry` object:: mapper_reg = registry() - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper_reg.map_imperatively( + SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)} + ) The above :class:`_orm.registry` is also the source for declarative mappings, and classical mappings now have access to this registry including string-based @@ -1126,19 +1200,23 @@ configuration on :func:`_orm.relationship`:: Base = mapper_reg.generate_base() + class SomeRelatedClass(Base): - __tablename__ = 'related' + __tablename__ = "related" # ... - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship( - "SomeRelatedClass", - primaryjoin="SomeRelatedClass.related_id == SomeClass.id" - ) - }) - + mapper_reg.map_imperatively( + SomeClass, + some_table, + properties={ + "related": relationship( + "SomeRelatedClass", + primaryjoin="SomeRelatedClass.related_id == SomeClass.id", + ) + }, + ) **Discussion** @@ -1163,7 +1241,7 @@ declarative decorator and classical mapping forms. Declarative, classical mapping, dataclasses, attrs, etc. 
2.0 Migration - ORM Usage -============================================= +------------------------- The biggest visible change in SQLAlchemy 2.0 is the use of :meth:`_orm.Session.execute` in conjunction with :func:`_sql.select` to run ORM @@ -1177,6 +1255,7 @@ calling form with links to documentation for each technique presented. The individual migration notes are in the embedded sections following the table, and may include additional notes not summarized here. +.. format: off .. container:: sliding-table @@ -1204,23 +1283,31 @@ following the table, and may include additional notes not summarized here. - :: session.execute( - select(User) + select(User) ).scalars().all() + # or + + session.scalars( + select(User) + ).all() + - :ref:`migration_20_unify_select` + :meth:`_orm.Session.scalars` :meth:`_engine.Result.scalars` * - :: session.query(User).\ - filter_by(name='some user').one() + filter_by(name="some user").\ + one() - :: session.execute( - select(User). - filter_by(name="some user") + select(User). + filter_by(name="some user") ).scalar_one() - :ref:`migration_20_unify_select` @@ -1230,16 +1317,16 @@ following the table, and may include additional notes not summarized here. * - :: session.query(User).\ - filter_by(name='some user').first() - + filter_by(name="some user").\ + first() - :: - session.execute( + session.scalars( select(User). filter_by(name="some user"). limit(1) - ).scalars().first() + ).first() - :ref:`migration_20_unify_select` @@ -1248,16 +1335,16 @@ following the table, and may include additional notes not summarized here. * - :: session.query(User).options( - joinedload(User.addresses) + joinedload(User.addresses) ).all() - :: - session.execute( - select(User). - options( - joinedload(User.addresses) - ) + session.scalars( + select(User). + options( + joinedload(User.addresses) + ) ).unique().all() - :ref:`joinedload_not_uniqued` @@ -1265,16 +1352,20 @@ following the table, and may include additional notes not summarized here. * - :: session.query(User).\ - join(Address).\ - filter(Address.email == 'e@sa.us').\ - all() + join(Address).\ + filter( + Address.email == "e@sa.us" + ).\ + all() - :: session.execute( - select(User). - join(Address). - where(Address.email == 'e@sa.us') + select(User). + join(Address). + where( + Address.email == "e@sa.us" + ) ).scalars().all() - :ref:`migration_20_unify_select` @@ -1283,37 +1374,43 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).from_statement( + session.query(User).\ + from_statement( text("select * from users") - ).all() + ).\ + all() - :: - session.execute( - select(User). - from_statement( - text("select * from users") - ) - ).scalars().all() + session.scalars( + select(User). + from_statement( + text("select * from users") + ) + ).all() - :ref:`orm_queryguide_selecting_text` * - :: session.query(User).\ - join(User.addresses).\ - options( - contains_eager(User.addresses) - ).\ - populate_existing().all() + join(User.addresses).\ + options( + contains_eager(User.addresses) + ).\ + populate_existing().all() - :: session.execute( - select(User). - join(User.addresses). - options(contains_eager(User.addresses)). - execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .options( + contains_eager(User.addresses) + ) + .execution_options( + populate_existing=True + ) ).scalars().all() - @@ -1326,20 +1423,21 @@ following the table, and may include additional notes not summarized here. 
- :: session.query(User).\ - filter(User.name == 'foo').\ - update( - {"fullname": "Foo Bar"}, - synchronize_session="evaluate" - ) - + filter(User.name == "foo").\ + update( + {"fullname": "Foo Bar"}, + synchronize_session="evaluate" + ) - :: session.execute( - update(User). - where(User.name == 'foo'). - values(fullname="Foo Bar"). - execution_options(synchronize_session="evaluate") + update(User) + .where(User.name == "foo") + .values(fullname="Foo Bar") + .execution_options( + synchronize_session="evaluate" + ) ) - :ref:`orm_expression_update_delete` @@ -1351,15 +1449,22 @@ following the table, and may include additional notes not summarized here. - :: - session.scalar(select(func.count()).select_from(User)) - session.scalar(select(func.count(User.id))) + session.scalars( + select(func.count()). + select_from(User) + ).one() + session.scalars( + select(func.count(User.id)) + ).one() - :meth:`_orm.Session.scalar` +.. format: on + .. _migration_20_unify_select: ORM Query Unified with Core Select ----------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1518,7 +1623,7 @@ the majority of this ORM logic is also cached. .. _migration_20_get_to_session: ORM Query - get() method moves to Session ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1554,7 +1659,7 @@ with writing a SQL query. .. _migration_20_orm_query_join_strings: ORM Query - Joining / loading on relationships uses attributes, not strings ----------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1572,7 +1677,6 @@ will all be removed in 2.0:: # string use removed q = session.query(Address).filter(with_parent(u1, "addresses")) - **Migration to 2.0** Modern SQLAlchemy 1.x versions support the recommended technique which @@ -1609,7 +1713,7 @@ more potentially compatible with IDEs and pep-484 integrations. ORM Query - Chaining using lists of attributes, rather than individual calls, removed -------------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1619,7 +1723,6 @@ attributes in a list will be removed:: # chaining removed q = session.query(User).join("orders", "items", "keywords") - **Migration to 2.0** Use individual calls to :meth:`_orm.Query.join` for 1.x /2.0 cross compatible @@ -1648,7 +1751,7 @@ interface of methods such as :meth:`_sql.Select.join`. .. _migration_20_query_join_options: ORM Query - join(..., aliased=True), from_joinpoint removed ------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1668,11 +1771,13 @@ Use explicit aliases instead:: n1 = aliased(Node) n2 = aliased(Node) - q = select(Node).join(Node.children.of_type(n1)).\ - where(n1.name == "some sub child").\ - join(n1.children.of_type(n2)).\ - where(n2.name == "some sub child") - + q = ( + select(Node) + .join(Node.children.of_type(n1)) + .where(n1.name == "some sub child") + .join(n1.children.of_type(n2)) + .where(n2.name == "some sub child") + ) **Discussion** @@ -1701,7 +1806,7 @@ construct itself didn't exist early on. .. 
_migration_20_query_distinct: Using DISTINCT with additional columns, but only select the entity -------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1711,8 +1816,13 @@ as well as "address.email_address" but only return User objects:: # 1.xx code - result = session.query(User).join(User.addresses).\ - distinct().order_by(Address.email_address).all() + result = ( + session.query(User) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + .all() + ) In version 2.0, the "email_address" column will not be automatically added to the columns clause, and the above query will fail, since relational @@ -1727,8 +1837,12 @@ returning the main entity object, and not the extra column, use the # 1.4 / 2.0 code - stmt = select(User, Address.email_address).join(User.addresses).\ - distinct().order_by(Address.email_address) + stmt = ( + select(User, Address.email_address) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + ) result = session.execute(stmt).columns(User).all() @@ -1748,17 +1862,19 @@ without inconvenience. .. _migration_20_query_from_self: Selecting from the query itself as a subquery, e.g. "from_self()" -------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`:: # from_self is removed - q = session.query(User, Address.email_address).\ - join(User.addresses).\ - from_self(User).order_by(Address.email_address) - + q = ( + session.query(User, Address.email_address) + .join(User.addresses) + .from_self(User) + .order_by(Address.email_address) + ) **Migration to 2.0** @@ -1772,8 +1888,7 @@ since the final query wants to query in terms of both the ``User`` and from sqlalchemy.orm import aliased - subq = session.query(User, Address.email_address).\ - join(User.addresses).subquery() + subq = session.query(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1785,8 +1900,7 @@ The same form may be used in :term:`2.0 style`:: from sqlalchemy.orm import aliased - subq = select(User, Address.email_address).\ - join(User.addresses).subquery() + subq = select(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1796,7 +1910,6 @@ The same form may be used in :term:`2.0 style`:: result = session.execute(stmt) - **Discussion** The :meth:`_query.Query.from_self` method is a very complicated method that is rarely @@ -1829,8 +1942,7 @@ labeling:: # 1.4 / 2.0 code - subq = select(User, Address).\ - join(User.addresses).subquery() + subq = select(User, Address).join(User.addresses).subquery() ua = aliased(User, subq) aa = aliased(Address, subq) @@ -1854,7 +1966,7 @@ The above query will disambiguate the ``.id`` column of ``User`` and :ticket:`5221` Selecting entities from alternative selectables; Query.select_entity_from() ---------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1906,7 +2018,7 @@ of view as well as how the internals of the SQLAlchemy ORM must handle it. .. 
_joinedload_not_uniqued: ORM Rows not uniquified by default ----------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1920,9 +2032,7 @@ where the "joined eager loading" loader strategy is used with collections:: # In the new API, uniquing is available but not implicitly # enabled - result = session.execute( - select(User).options(joinedload(User.addresses)) - ) + result = session.execute(select(User).options(joinedload(User.addresses))) # this actually will raise an error to let the user know that # uniquing should be applied @@ -1966,9 +2076,68 @@ the :func:`_orm.selectinload` strategy presents a collection-oriented eager loader that is superior in most respects to :func:`_orm.joinedload` and should be preferred. +.. _migration_20_dynamic_loaders: + +Making use of "dynamic" relationship loads without using Query +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Synopsis** + +The ``lazy="dynamic"`` relationship loader strategy, discussed at +:ref:`dynamic_relationship`, makes use of the :class:`_query.Query` object +which is legacy in 2.0. + + +**Migration to 2.0** + +This pattern is still under adjustment for SQLAlchemy 2.0, and it is expected +that new APIs will be introduced. In the interim, there are two ways +to achieve 2.0 style querying that's in terms of a specific relationship: + +* Make use of the :attr:`_orm.Query.statement` attribute on an existing + ``lazy="dynamic"`` relationship. We can use methods like + :meth:`_orm.Session.scalars` with the dynamic loader straight away as + follows:: + + + class User(Base): + __tablename__ = "user" + + posts = relationship(Post, lazy="dynamic") + + + jack = session.get(User, 5) + + # filter Jack's blog posts + posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post")) + +* Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select` + construct directly:: + + from sqlalchemy.orm import with_parent + + jack = session.get(User, 5) + + posts = session.scalars( + select(Post) + .where(with_parent(jack, User.posts)) + .where(Post.headline == "this is a post") + ) + +**Discussion** + +The original idea was that the :func:`_orm.with_parent` function should be +sufficient, however continuing to make use of special attributes on the +relationship itself remains appealing, and there's no reason a 2.0 style +construct can't be made to work here as well. There will likely be a new +loader strategy name that sets up an API similar to the example above that +uses the ``.statement`` attribute, such as +``jack.posts.select().where(Post.headline == 'headline')``. + +.. _migration_20_session_autocommit: Autocommit mode removed from Session; autobegin support added -------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1987,7 +2156,6 @@ is, this pattern:: # commits, won't be supported sess.flush() - **Migration to 2.0** The main reason a :class:`_orm.Session` is used in "autocommit" mode @@ -2003,7 +2171,7 @@ be called:: sess = Session(engine) sess.begin() # begin explicitly; if not called, will autobegin - # when database access is needed + # when database access is needed sess.add(obj) @@ -2017,18 +2185,95 @@ explicit use of :meth:`_orm.Session.begin`, which is now solved by 1.4, as well as to allow the use of "subtransactions", which are also removed in 2.0. +.. 
_migration_20_session_subtransaction: + Session "subtransaction" behavior removed ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Synopsis** + +The "subtransaction" pattern that was often used with autocommit mode is +also deprecated in 1.4. This pattern allowed the use of the +:meth:`_orm.Session.begin` method when a transaction were already begun, +resulting in a construct called a "subtransaction", which was essentially +a block that would prevent the :meth:`_orm.Session.commit` method from actually +committing. + +**Migration to 2.0** + + +To provide backwards compatibility for applications that make use of this +pattern, the following context manager or a similar implementation based on +a decorator may be used:: + + + import contextlib + + + @contextlib.contextmanager + def transaction(session): + if not session.in_transaction(): + with session.begin(): + yield + else: + yield + +The above context manager may be used in the same way the +"subtransaction" flag works, such as in the following example:: + + + # method_a starts a transaction and calls method_b + def method_a(session): + with transaction(session): + method_b(session) + + + # method_b also starts a transaction, but when + # called from method_a participates in the ongoing + # transaction. + def method_b(session): + with transaction(session): + session.add(SomeObject("bat", "lala")) + + + Session = sessionmaker(engine) + + # create a Session and call method_a + with Session() as session: + method_a(session) + +To compare towards the preferred idiomatic pattern, the begin block should +be at the outermost level. This removes the need for individual functions +or methods to be concerned with the details of transaction demarcation:: + + def method_a(session): + method_b(session) + + + def method_b(session): + session.add(SomeObject("bat", "lala")) + + + Session = sessionmaker(engine) + + # create a Session and call method_a + with Session() as session: + with session.begin(): + method_a(session) + +**Discussion** + +This pattern has been shown to be confusing in real world applications, and it +is preferable for an application to ensure that the top-most level of database +operations are performed with a single begin/commit pair. -See the section :ref:`session_subtransactions` for background on this -change. 2.0 Migration - ORM Extension and Recipe Changes -================================================ +------------------------------------------------ Dogpile cache recipe and Horizontal Sharding uses new Session API ------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As the :class:`_orm.Query` object becomes legacy, these two recipes which previously relied upon subclassing of the :class:`_orm.Query` @@ -2039,7 +2284,7 @@ an example. Baked Query Extension Superseded by built-in caching ------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The baked query extension is superseded by the built in caching system and is no longer used by the ORM internals. @@ -2049,7 +2294,7 @@ See :ref:`sql_caching` for full background on the new caching system. Asyncio Support -===================== +--------------- SQLAlchemy 1.4 includes asyncio support for both Core and ORM. The new API exclusively makes use of the "future" patterns noted above. 
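Because the asyncio support is only mentioned in passing here, a short hedged sketch of the 2.0-style asyncio usage follows; the ``asyncpg`` connection URL is an assumption chosen purely for illustration::

    import asyncio

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine


    async def main():
        # an async-capable driver such as asyncpg is required; the URL
        # below is illustrative only
        engine = create_async_engine(
            "postgresql+asyncpg://scott:tiger@localhost/test"
        )

        # the async connection follows the "future" commit-as-you-go pattern
        async with engine.connect() as conn:
            result = await conn.execute(text("select 'hello world'"))
            print(result.all())

        await engine.dispose()


    asyncio.run(main())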
diff --git a/doc/build/changelog/unreleased_14/12668.rst b/doc/build/changelog/unreleased_14/12668.rst new file mode 100644 index 00000000000..40e2a1ff22a --- /dev/null +++ b/doc/build/changelog/unreleased_14/12668.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, tests + :tickets: 12668 + + Backported to SQLAlchemy 1.4 an improvement to the test suite with regards + to how asyncio related tests are run, now using the newer Python 3.11 + ``asyncio.Runner`` or a backported equivalent, rather than relying on the + previous implementation based on ``asyncio.get_event_loop()``. This allows + the SQLAlchemy 1.4 codebase to run on Python 3.14 which has removed this + method. Pull request courtesy Nils Philippsen. + diff --git a/doc/build/changelog/unreleased_14/6023.rst b/doc/build/changelog/unreleased_14/6023.rst deleted file mode 100644 index 88d9777ba51..00000000000 --- a/doc/build/changelog/unreleased_14/6023.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: postgresql, pg8000 - :tickets: 7167 - - Improve array handling when using PostgreSQL with the - pg8000 dialect. diff --git a/doc/build/changelog/unreleased_14/7167.rst b/doc/build/changelog/unreleased_14/7167.rst deleted file mode 100644 index aedc8086c01..00000000000 --- a/doc/build/changelog/unreleased_14/7167.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, mysql, mariadb - :tickets: 7167 - - Reorganized the list of reserved words into two separate lists, one for - MySQL and one for MariaDB, so that these diverging sets of words can be - managed more accurately; adjusted the MySQL/MariaDB dialect to switch among - these lists based on either explicitly configured or - server-version-detected "MySQL" or "MariaDB" backend. Added all current - reserved words through MySQL 8 and current MariaDB versions including - recently added keywords like "lead" . Pull request courtesy Kevin Kirsche. diff --git a/doc/build/changelog/unreleased_14/7224.rst b/doc/build/changelog/unreleased_14/7224.rst deleted file mode 100644 index 3f10a60883d..00000000000 --- a/doc/build/changelog/unreleased_14/7224.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7224 - - Fixed bug in "relationship to aliased class" feature introduced at - :ref:`relationship_aliased_class` where it was not possible to create a - loader strategy option targeting an attribute on the target using the - :func:`_orm.aliased` construct directly in a second loader option, such as - ``selectinload(A.aliased_bs).joinedload(aliased_b.cs)``, without explicitly - qualifying using :meth:`_orm.PropComparator.of_type` on the preceding - element of the path. Additionally, targeting the non-aliased class directly - would be accepted (inappropriately), but would silently fail, such as - ``selectinload(A.aliased_bs).joinedload(B.cs)``; this now raises an error - referring to the typing mismatch. - diff --git a/doc/build/changelog/unreleased_14/7239.rst b/doc/build/changelog/unreleased_14/7239.rst deleted file mode 100644 index 14ef19118fd..00000000000 --- a/doc/build/changelog/unreleased_14/7239.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7239 - - Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function - correctly on a :class:`_orm.Query` that was produced from - :meth:`_orm.Query.union`, :meth:`_orm.Query.from_self` or similar. 
diff --git a/doc/build/changelog/unreleased_14/7244.rst b/doc/build/changelog/unreleased_14/7244.rst deleted file mode 100644 index 92352c6001b..00000000000 --- a/doc/build/changelog/unreleased_14/7244.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7244 - - Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function - correctly when :meth:`_orm.Query.join` were joined to an entity which made - use of :meth:`_orm.PropComparator.of_type` to specify an aliased version of - the target entity. The issue also applies to future style ORM queries - constructed with :func:`_sql.select`. - diff --git a/doc/build/conf.py b/doc/build/conf.py index 169d695d0f5..cf367309a08 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -20,7 +20,9 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("../../lib")) sys.path.insert(0, os.path.abspath("../..")) # examples -sys.path.insert(0, os.path.abspath(".")) + +# was never needed, does not work as of python 3.12 due to conflicts +# sys.path.insert(0, os.path.abspath(".")) # -- General configuration -------------------------------------------------- @@ -36,14 +38,27 @@ "zzzeeksphinx", "changelog", "sphinx_paramlinks", + "sphinx_copybutton", ] -needs_extensions = {"zzzeeksphinx": "1.2.1"} +needs_extensions = {"zzzeeksphinx": "1.6.1"} # Add any paths that contain templates here, relative to this directory. # not sure why abspath() is needed here, some users # have reported this. templates_path = [os.path.abspath("templates")] +# https://sphinx-copybutton.readthedocs.io/en/latest/use.html#strip-and-configure-input-prompts-for-code-cells +copybutton_prompt_text = ( + r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " +) +copybutton_prompt_is_regexp = True + +# workaround +# https://sphinx-copybutton-exclude-issue.readthedocs.io/en/v0.5.1-go/ +# https://github.com/executablebooks/sphinx-copybutton/issues/185 +# while we're at it, add our SQL css classes to also not be copied +copybutton_exclude = ".linenos .show_sql .show_sql_print .popup_sql" + nitpicky = False # The suffix of source filenames. @@ -67,10 +82,12 @@ "asyncio", "postgresql", "mysql", + "mariadb", "sqlite", "mssql", "oracle", "firebird", + "tests", ] # tags to sort on inside of sections changelog_inner_tag_sort = [ @@ -97,7 +114,7 @@ changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s" -exclude_patterns = ["build", "**/unreleased*/*", "*_include.rst"] +exclude_patterns = ["build", "**/unreleased*/*", "**/*_include.rst", ".venv"] # zzzeeksphinx makes these conversions when it is rendering the # docstrings classes, methods, and functions within the scope of @@ -129,11 +146,6 @@ "sqlalchemy.orm.util": "sqlalchemy.orm", } -autodocmods_convert_modname_w_class = { - ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine", - ("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base", -} - # on the referencing side, a newer zzzeeksphinx extension # applies shorthand symbols to references so that we can have short # names that are still using absolute references. @@ -145,6 +157,7 @@ "_row": "sqlalchemy.engine", "_schema": "sqlalchemy.schema", "_types": "sqlalchemy.types", + "_sqltypes": "sqlalchemy.types", "_asyncio": "sqlalchemy.ext.asyncio", "_expression": "sqlalchemy.sql.expression", "_sql": "sqlalchemy.sql.expression", @@ -194,7 +207,7 @@ # General information about the project. 
project = u"SQLAlchemy" -copyright = u"2007-2021, the SQLAlchemy authors and contributors" # noqa +copyright = u"2007-2025, the SQLAlchemy authors and contributors" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -203,9 +216,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.26" +release = "1.4.54" -release_date = "October 19, 2021" +release_date = "September 5, 2024" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" diff --git a/doc/build/copyright.rst b/doc/build/copyright.rst index b38d3ae2963..54535474c42 100644 --- a/doc/build/copyright.rst +++ b/doc/build/copyright.rst @@ -6,7 +6,7 @@ Appendix: Copyright This is the MIT license: ``_ -Copyright (c) 2005-2021 Michael Bayer and contributors. +Copyright (c) 2005-2025 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 28d332203b6..f08b592d28c 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -21,7 +21,7 @@ Basic Usage Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via the :func:`_sa.create_engine` call:: - engine = create_engine('mysql://scott:tiger@localhost/test') + engine = create_engine("mysql://scott:tiger@localhost/test") The typical usage of :func:`_sa.create_engine` is once per particular database URL, held globally for the lifetime of a single application process. A single @@ -48,7 +48,7 @@ a textual statement to the database looks like:: with engine.connect() as connection: result = connection.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection` object, and by using it in a Python context manager (e.g. the ``with:`` @@ -74,9 +74,6 @@ pooling mechanism issues a ``rollback()`` call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next use. -.. deprecated:: 2.0 The :class:`_engine.CursorResult` object is replaced in SQLAlchemy - 2.0 with a newly refined object known as :class:`_future.Result`. - Our example above illustrated the execution of a textual SQL string, which should be invoked by using the :func:`_expression.text` construct to indicate that we'd like to use textual SQL. 
The :meth:`_engine.Connection.execute` method can of @@ -149,13 +146,15 @@ issue a transaction on a :class:`_engine.Connection`, but only the outermost with connection.begin(): # open a transaction method_b(connection) + # method_b also starts a transaction def method_b(connection): - with connection.begin(): # open a transaction - this runs in the - # context of method_a's transaction + with connection.begin(): # open a transaction - this runs in the + # context of method_a's transaction connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -190,12 +189,14 @@ adapt the example from the previous section to this practice looks like:: def method_a(connection): method_b(connection) + # method_b uses the connection and assumes the transaction # is external def method_b(connection): connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection inside of a transaction and call method_a with engine.begin() as conn: method_a(conn) @@ -230,6 +231,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(connection): if not connection.in_transaction(): @@ -245,6 +247,7 @@ The above contextmanager would be used as:: with transaction(connection): # open a transaction method_b(connection) + # method_b either starts a transaction, or uses the one already # present def method_b(connection): @@ -252,6 +255,7 @@ The above contextmanager would be used as:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -263,6 +267,7 @@ present:: import contextlib + def connectivity(engine): connection = None @@ -288,6 +293,7 @@ Using the above would look like:: with connectivity(): method_b(connectivity) + # method_b also wants to use a connection from the context, so it # also calls "with:", but also it actually uses the connection. def method_b(connectivity): @@ -295,6 +301,7 @@ Using the above would look like:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # create a new connection/transaction context object and call # method_a method_a(connectivity(engine)) @@ -441,9 +448,7 @@ parameter to :func:`_sa.create_engine`:: eng = create_engine( "postgresql://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + execution_options={"isolation_level": "REPEATABLE READ"}, ) With the above setting, the DBAPI connection will be set to use a @@ -464,7 +469,6 @@ separated off from the main engine:: autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT") - Above, the :meth:`_engine.Engine.execution_options` method creates a shallow copy of the original :class:`_engine.Engine`. Both ``eng`` and ``autocommit_engine`` share the same dialect and connection pool. However, the @@ -636,20 +640,33 @@ To sum up: Using Server Side Cursors (a.k.a. stream results) ================================================== -A limited number of dialects have explicit support for the concept of "server -side cursors" vs. "buffered cursors". 
While a server side cursor implies a -variety of different capabilities, within SQLAlchemy's engine and dialect -implementation, it refers only to whether or not a particular set of results is -fully buffered in memory before they are fetched from the cursor, using a -method such as ``cursor.fetchall()``. SQLAlchemy has no direct support -for cursor behaviors such as scrolling; to make use of these features for -a particular DBAPI, use the cursor directly as documented at -:ref:`dbapi_connections`. - -Some DBAPIs, such as the cx_Oracle DBAPI, exclusively use server side cursors -internally. All result sets are essentially unbuffered across the total span -of a result set, utilizing only a smaller buffer that is of a fixed size such -as 100 rows at a time. +Some backends feature explicit support for the concept of "server +side cursors" versus "client side cursors". A client side cursor here +means that the database driver fully fetches all rows from a result set +into memory before returning from a statement execution. Drivers such as +those of PostgreSQL and MySQL/MariaDB generally use client side cursors +by default. A server side cursor, by contrast, indicates that result rows +remain pending within the database server's state as result rows are consumed +by the client. The drivers for Oracle generally use a "server side" model, +for example, and the SQLite dialect, while not using a real "client / server" +architecture, still uses an unbuffered result fetching approach that will +leave result rows outside of process memory before they are consumed. + +.. topic:: What we really mean is "buffered" vs. "unbuffered" results + + Server side cursors also imply a wider set of features with relational + databases, such as the ability to "scroll" a cursor forwards and backwards. + SQLAlchemy does not include any explicit support for these behaviors; within + SQLAlchemy itself, the general term "server side cursors" should be considered + to mean "unbuffered results" and "client side cursors" means "result rows + are buffered into memory before the first row is returned". To work with + a richer "server side cursor" featureset specific to a certain DBAPI driver, + see the section :ref:`dbapi_connections_cursor`. + +From this basic architecture it follows that a "server side cursor" is more +memory efficient when fetching very large result sets, while at the same time +may introduce more complexity in the client/server communication process +and be less efficient for small result sets (typically less than 10000 rows). For those dialects that have conditional support for buffered or unbuffered results, there are usually caveats to the use of the "unbuffered", or server @@ -668,75 +685,123 @@ unbuffered cursors are not generally useful except in the uncommon case of an application fetching a very large number of rows in chunks, where the processing of these rows can be complete before more rows are fetched. -To make use of a server side cursor for a particular execution, the -:paramref:`_engine.Connection.execution_options.stream_results` option -is used, which may be called on the :class:`_engine.Connection` object, -on the statement object, or in the ORM-level contexts mentioned below. 
- -When using this option for a statement, it's usually appropriate to use -a method like :meth:`_engine.Result.partitions` to work on small sections -of the result set at a time, while also fetching enough rows for each -pull so that the operation is efficient:: +For database drivers that provide client and server side cursor options, +the :paramref:`_engine.Connection.execution_options.stream_results` +and :paramref:`_engine.Connection.execution_options.yield_per` execution +options provide access to "server side cursors" on a per-:class:`_engine.Connection` +or per-statement basis. Similar options exist when using an ORM +:class:`_orm.Session` as well. - with engine.connect() as conn: - result = conn.execution_options(stream_results=True).execute(text("select * from table")) - - for partition in result.partitions(100): - _process_rows(partition) +Streaming with a fixed buffer via yield_per +-------------------------------------------- +As individual row-fetch operations with fully unbuffered server side cursors +are typically more expensive than fetching batches of rows at once, The +:paramref:`_engine.Connection.execution_options.yield_per` execution option +configures a :class:`_engine.Connection` or statement to make use of +server-side cursors as are available, while at the same time configuring a +fixed-size buffer of rows that will retrieve rows from the server in batches as +they are consumed. This parameter may be to a positive integer value using the +:meth:`_engine.Connection.execution_options` method on +:class:`_engine.Connection` or on a statement using the +:meth:`.Executable.execution_options` method. + +.. versionadded:: 1.4.40 :paramref:`_engine.Connection.execution_options.yield_per` as a + Core-only option is new as of SQLAlchemy 1.4.40; for prior 1.4 versions, + use :paramref:`_engine.Connection.execution_options.stream_results` + directly in combination with :meth:`_engine.Result.yield_per`. + +Using this option is equivalent to manually setting the +:paramref:`_engine.Connection.execution_options.stream_results` option, +described in the next section, and then invoking the +:meth:`_engine.Result.yield_per` method on the :class:`_engine.Result` +object with the given integer value. In both cases, the effect this +combination has includes: + +* server side cursors mode is selected for the given backend, if available + and not already the default behavior for that backend +* as result rows are fetched, they will be buffered in batches, where the + size of each batch up until the last batch will be equal to the integer + argument passed to the + :paramref:`_engine.Connection.execution_options.yield_per` option or the + :meth:`_engine.Result.yield_per` method; the last batch is then sized against + the remaining rows fewer than this size +* The default partition size used by the :meth:`_engine.Result.partitions` + method, if used, will be made equal to this integer size as well. 
+ +These three behaviors are illustrated in the example below:: -If the :class:`_engine.Result` is iterated directly, rows are fetched internally + with engine.connect() as conn: + result = conn.execution_options(yield_per=100).execute(text("select * from table")) + + for partition in result.partitions(): + # partition is an iterable that will be at most 100 items + for row in partition: + print(f"{row}") + +The above example illustrates the combination of ``yield_per=100`` along +with using the :meth:`_engine.Result.partitions` method to run processing +on rows in batches that match the size fetched from the server. The +use of :meth:`_engine.Result.partitions` is optional, and if the +:class:`_engine.Result` is iterated directly, a new batch of rows will be +buffered for each 100 rows fetched. Calling a method such as +:meth:`_engine.Result.all` should **not** be used, as this will fully +fetch all remaining rows at once and defeat the purpose of using ``yield_per``. + +The :paramref:`_engine.Connection.execution_options.yield_per` option +is portable to the ORM as well, used by a :class:`_orm.Session` to fetch +ORM objects, where it also limits the amount of ORM objects generated at once. +See the section :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` +for further background on using +:paramref:`_engine.Connection.execution_options.yield_per` with the ORM. + +.. versionadded:: 1.4.40 Added + :paramref:`_engine.Connection.execution_options.yield_per` + as a Core level execution option to conveniently set streaming results, + buffer size, and partition size all at once in a manner that is transferrable + to that of the ORM's similar use case. + +.. _engine_stream_results_sr: + +Streaming with a dynamically growing buffer using stream_results +----------------------------------------------------------------- + +To enable server side cursors without a specific partition size, the +:paramref:`_engine.Connection.execution_options.stream_results` option may be +used, which like :paramref:`_engine.Connection.execution_options.yield_per` may +be called on the :class:`_engine.Connection` object or the statement object. + +When a :class:`_engine.Result` object delivered using the +:paramref:`_engine.Connection.execution_options.stream_results` option +is iterated directly, rows are fetched internally using a default buffering scheme that buffers first a small set of rows, then a larger and larger buffer on each fetch up to a pre-configured limit -of 1000 rows. This can be affected using the ``max_row_buffer`` execution -option:: +of 1000 rows. The maximum size of this buffer can be affected using the +:paramref:`_engine.Connection.execution_options.max_row_buffer` execution option:: with engine.connect() as conn: conn = conn.execution_options(stream_results=True, max_row_buffer=100) result = conn.execute(text("select * from table")) for row in result: - _process_row(row) - -The size of the buffer may also be set to a fixed size using the -:meth:`_engine.Result.yield_per` method. Calling this method with a number -of rows will cause all result-fetching methods to work from -buffers of the given size, only fetching new rows when the buffer is empty:: - - with engine.connect() as conn: - result = conn.execution_options(stream_results=True).execute(text("select * from table")) - - for row in result.yield_per(100): - _process_row(row) + print(f"{row}") -The ``stream_results`` option is also available with the ORM. 
When using the -ORM, either the :meth:`_engine.Result.yield_per` or :meth:`_engine.Result.partitions` -methods should be used to set the number of ORM rows to be buffered each time -while yielding:: +While the :paramref:`_engine.Connection.execution_options.stream_results` +option may be combined with use of the :meth:`_engine.Result.partitions` +method, a specific partition size should be passed to +:meth:`_engine.Result.partitions` so that the entire result is not fetched. +It is usually more straightforward to use the +:paramref:`_engine.Connection.execution_options.yield_per` option when setting +up to use the :meth:`_engine.Result.partitions` method. - with orm.Session(engine) as session: - result = session.execute( - select(User).order_by(User_id).execution_options(stream_results=True), - ) - for partition in result.partitions(100): - _process_rows(partition) - - -.. note:: ORM result sets currently must make use of :meth:`_engine.Result.yield_per` - or :meth:`_engine.Result.partitions` in order to achieve streaming ORM results. - If either of these methods are not used to set the number of rows to - fetch before yielding, the entire result is fetched before rows are yielded. - This may change in a future release so that the automatic buffer size used - by :class:`_engine.Connection` takes place for ORM results as well. +.. seealso:: -When using a :term:`1.x style` ORM query with :class:`_orm.Query`, yield_per is -available via :meth:`_orm.Query.yield_per` - this also sets the ``stream_results`` -execution option:: + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` - for row in session.query(User).yield_per(100): - # process row + :meth:`_engine.Result.partitions` + :meth:`_engine.Result.yield_per` .. _dbengine_implicit: @@ -756,7 +821,7 @@ which is not a :class:`_engine.Connection`. This was illustrated using the result = engine.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of @@ -770,9 +835,11 @@ Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer metadata_obj = MetaData() - users_table = Table('users', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + users_table = Table( + "users", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) Explicit execution delivers the SQL text or constructed SQL expression to the @@ -886,9 +953,10 @@ to render under different schema names without any changes. 
Given a table:: user_table = Table( - 'user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) The "schema" of this :class:`_schema.Table` as defined by the @@ -898,7 +966,8 @@ that all :class:`_schema.Table` objects with a schema of ``None`` would instead render the schema as ``user_schema_one``:: connection = engine.connect().execution_options( - schema_translate_map={None: "user_schema_one"}) + schema_translate_map={None: "user_schema_one"} + ) result = connection.execute(user_table.select()) @@ -912,10 +981,11 @@ map can specify any number of target->destination schemas:: connection = engine.connect().execution_options( schema_translate_map={ - None: "user_schema_one", # no schema name -> "user_schema_one" - "special": "special_schema", # schema="special" becomes "special_schema" - "public": None # Table objects with schema="public" will render with no schema - }) + None: "user_schema_one", # no schema name -> "user_schema_one" + "special": "special_schema", # schema="special" becomes "special_schema" + "public": None, # Table objects with schema="public" will render with no schema + } + ) The :paramref:`.Connection.execution_options.schema_translate_map` parameter affects all DDL and SQL constructs generated from the SQL expression language, @@ -940,12 +1010,24 @@ as the schema name is passed to these methods explicitly. to the :class:`_orm.Session`. The :class:`_orm.Session` uses a new :class:`_engine.Connection` for each transaction:: - schema_engine = engine.execution_options(schema_translate_map = { ... } ) + schema_engine = engine.execution_options(schema_translate_map={...}) session = Session(schema_engine) ... + .. warning:: + + When using the ORM :class:`_orm.Session` without extensions, the schema + translate feature is only supported as + **a single schema translate map per Session**. It will **not work** if + different schema translate maps are given on a per-statement basis, as + the ORM :class:`_orm.Session` does not take current schema translate + values into account for individual objects. + + To use a single :class:`_orm.Session` with multiple ``schema_translate_map`` + configurations, the :ref:`horizontal_sharding_toplevel` extension may + be used. See the example at :ref:`examples_sharding`. .. versionadded:: 1.1 @@ -1026,6 +1108,8 @@ what the cache is doing, engine logging will include details about the cache's behavior, described in the next section. +.. _sql_caching_logging: + Estimating Cache Performance Using Logging ------------------------------------------ @@ -1072,9 +1156,7 @@ As an example, we will examine the logging produced by the following program:: s = Session(e) - s.add_all( - [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])] - ) + s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]) s.commit() for a_rec in s.query(A): @@ -1293,31 +1375,39 @@ The cache can also be disabled with this argument by sending a value of Caching for Third Party Dialects --------------------------------- -The caching feature requires that the dialect's compiler produces a SQL -construct that is generically reusable given a particular cache key. This means +The caching feature requires that the dialect's compiler produces SQL +strings that are safe to reuse for many statement invocations, given +a particular cache key that is keyed to that SQL string. 
This means that any literal values in a statement, such as the LIMIT/OFFSET values for a SELECT, can not be hardcoded in the dialect's compilation scheme, as the compiled string will not be re-usable. SQLAlchemy supports rendered bound parameters using the :meth:`_sql.BindParameter.render_literal_execute` method which can be applied to the existing ``Select._limit_clause`` and -``Select._offset_clause`` attributes by a custom compiler. - -As there are many third party dialects, many of which may be generating -literal values from SQL statements without the benefit of the newer "literal execute" -feature, SQLAlchemy as of version 1.4.5 has added a flag to dialects known as -:attr:`_engine.Dialect.supports_statement_cache`. This flag is tested to be present -directly on a dialect class, and not any superclasses, so that even a third -party dialect that subclasses an existing cacheable SQLAlchemy dialect such -as ``sqlalchemy.dialects.postgresql.PGDialect`` must still specify this flag, +``Select._offset_clause`` attributes by a custom compiler, which +are illustrated later in this section. + +As there are many third party dialects, many of which may be generating literal +values from SQL statements without the benefit of the newer "literal execute" +feature, SQLAlchemy as of version 1.4.5 has added an attribute to dialects +known as :attr:`_engine.Dialect.supports_statement_cache`. This attribute is +checked at runtime for its presence directly on a particular dialect's class, +even if it's already present on a superclass, so that even a third party +dialect that subclasses an existing cacheable SQLAlchemy dialect such as +``sqlalchemy.dialects.postgresql.PGDialect`` must still explicitly include this +attribute for caching to be enabled. The attribute should **only** be enabled once the dialect has been altered as needed and tested for reusability of compiled SQL statements with differing parameters. -For all third party dialects that don't support this flag, the logging for -such a dialect will indicate ``dialect does not support caching``. Dialect -authors can apply the flag as follows:: +For all third party dialects that don't support this attribute, the logging for +such a dialect will indicate ``dialect does not support caching``. + +When a dialect has been tested against caching, and in particular the SQL +compiler has been updated to not render any literal LIMIT / OFFSET within +a SQL string directly, dialect authors can apply the attribute as follows:: from sqlalchemy.engine.default import DefaultDialect + class MyDialect(DefaultDialect): supports_statement_cache = True @@ -1328,6 +1418,96 @@ The flag needs to be applied to all subclasses of the dialect as well:: .. versionadded:: 1.4.5 + Added the :attr:`.Dialect.supports_statement_cache` attribute. + +The typical case for dialect modification follows. 
+ +Example: Rendering LIMIT / OFFSET with post compile parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As an example, suppose a dialect overrides the :meth:`.SQLCompiler.limit_clause` +method, which produces the "LIMIT / OFFSET" clause for a SQL statement, +like this:: + + # pre 1.4 style code + def limit_clause(self, select, **kw): + text = "" + if select._limit is not None: + text += " \n LIMIT %d" % (select._limit,) + if select._offset is not None: + text += " \n OFFSET %d" % (select._offset,) + return text + +The above routine renders the :attr:`.Select._limit` and +:attr:`.Select._offset` integer values as literal integers embedded in the SQL +statement. This is a common requirement for databases that do not support using +a bound parameter within the LIMIT/OFFSET clauses of a SELECT statement. +However, rendering the integer value within the initial compilation stage is +directly **incompatible** with caching as the limit and offset integer values +of a :class:`.Select` object are not part of the cache key, so that many +:class:`.Select` statements with different limit/offset values would not render +with the correct value. + +The correction for the above code is to move the literal integer into +SQLAlchemy's :ref:`post-compile ` facility, which will render the +literal integer outside of the initial compilation stage, but instead at +execution time before the statement is sent to the DBAPI. This is accessed +within the compilation stage using the :meth:`_sql.BindParameter.render_literal_execute` +method, in conjunction with using the :attr:`.Select._limit_clause` and +:attr:`.Select._offset_clause` attributes, which represent the LIMIT/OFFSET +as a complete SQL expression, as follows:: + + # 1.4 cache-compatible code + def limit_clause(self, select, **kw): + text = "" + + limit_clause = select._limit_clause + offset_clause = select._offset_clause + + if select._simple_int_clause(limit_clause): + text += " \n LIMIT %s" % ( + self.process(limit_clause.render_literal_execute(), **kw) + ) + elif limit_clause is not None: + # assuming the DB doesn't support SQL expressions for LIMIT. + # Otherwise render here normally + raise exc.CompileError( + "dialect 'mydialect' can only render simple integers for LIMIT" + ) + if select._simple_int_clause(offset_clause): + text += " \n OFFSET %s" % ( + self.process(offset_clause.render_literal_execute(), **kw) + ) + elif offset_clause is not None: + # assuming the DB doesn't support SQL expressions for OFFSET. + # Otherwise render here normally + raise exc.CompileError( + "dialect 'mydialect' can only render simple integers for OFFSET" + ) + + return text + +The approach above will generate a compiled SELECT statement that looks like:: + + SELECT x FROM y + LIMIT __[POSTCOMPILE_param_1] + OFFSET __[POSTCOMPILE_param_2] + +Where above, the ``__[POSTCOMPILE_param_1]`` and ``__[POSTCOMPILE_param_2]`` +indicators will be populated with their corresponding integer values at +statement execution time, after the SQL string has been retrieved from the +cache. + +After changes like the above have been made as appropriate, the +:attr:`.Dialect.supports_statement_cache` flag should be set to ``True``. +It is strongly recommended that third party dialects make use of the +`dialect third party test suite `_ +which will assert that operations like +SELECTs with LIMIT/OFFSET are correctly rendered and cached. + +.. seealso:: + + :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section .. 
_engine_lambda_caching: @@ -1373,6 +1553,7 @@ approach:: from sqlalchemy import lambda_stmt + def run_my_statement(connection, parameter): stmt = lambda_stmt(lambda: select(table)) stmt += lambda s: s.where(table.c.col == parameter) @@ -1380,6 +1561,7 @@ approach:: return connection.execute(stmt) + with engine.connect() as conn: result = run_my_statement(some_connection, "some parameter") @@ -1402,7 +1584,7 @@ Quick Guidelines for Lambdas Above all, the emphasis within the lambda SQL system is ensuring that there is never a mismatch between the cache key generated for a lambda and the -SQL string it will produce. The :class:`_sql.LamdaElement` and related +SQL string it will produce. The :class:`_sql.LambdaElement` and related objects will run and analyze the given lambda in order to calculate how it should be cached on each run, trying to detect any potential problems. Basic guidelines include: @@ -1415,9 +1597,10 @@ Basic guidelines include: def upd(id_, newname): stmt = lambda_stmt(lambda: users.update()) stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + stmt += lambda s: s.where(users.c.id == id_) return stmt + with engine.begin() as conn: conn.execute(upd(7, "foo")) @@ -1448,12 +1631,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... stmt = lambda_stmt(lambda: select(func.max(x, y))) ... return stmt - ... >>> engine = create_engine("sqlite://", echo=True) >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) ... print(conn.scalar(my_stmt(12, 8))) - ... {opensql}SELECT max(?, ?) AS max_1 [generated in 0.00057s] (5, 10){stop} 10 @@ -1485,9 +1666,9 @@ Basic guidelines include: def my_stmt(parameter, thing=False): stmt = lambda_stmt(lambda: select(table)) if thing: - stmt += s.where(table.c.x > parameter) + stmt += lambda s: s.where(table.c.x > parameter) else: - stmt += s.where(table.c.y == parameter) + stmt += lambda s: s.where(table.c.y == parameter) return stmt There are a variety of failures which can occur if the lambda does not @@ -1504,15 +1685,14 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... ... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y()))) ... return stmt - ... >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) - ... Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x() @@ -1528,6 +1708,7 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... @@ -1549,14 +1730,11 @@ Basic guidelines include: ... def __init__(self, x, y): ... self.x = x ... self.y = y - ... >>> def my_stmt(foo): ... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y))) ... return stmt - ... >>> with engine.connect() as conn: - ... print(conn.scalar(my_stmt(Foo(5, 10)))) - ... + ... print(conn.scalar(my_stmt(Foo(5, 10)))) Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of @@ -1593,8 +1771,7 @@ Basic guidelines include: >>> def my_stmt(foo): ... stmt = lambda_stmt( - ... lambda: select(func.max(foo.x, foo.y)), - ... track_closure_variables=False + ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False ... ) ... return stmt @@ -1610,13 +1787,9 @@ Basic guidelines include: >>> def my_stmt(self, foo): ... stmt = lambda_stmt( - ... lambda: select(*self.column_expressions), - ... track_closure_variables=False - ... ) - ... 
stmt = stmt.add_criteria( - ... lambda: self.where_criteria, - ... track_on=[self] + ... lambda: select(*self.column_expressions), track_closure_variables=False ... ) + ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self]) ... return stmt Using ``track_on`` means the given objects will be stored long term in the @@ -1639,7 +1812,7 @@ SQL expression construct by producing a structure that represents all the state within the construct:: >>> from sqlalchemy import select, column - >>> stmt = select(column('q')) + >>> stmt = select(column("q")) >>> cache_key = stmt._generate_cache_key() >>> print(cache_key) # somewhat paraphrased CacheKey(key=( @@ -1796,7 +1969,10 @@ Valid use cases for calling :meth:`_engine.Engine.dispose` include: :class:`_engine.Engine` object is copied to the child process, :meth:`_engine.Engine.dispose` should be called so that the engine creates brand new database connections local to that fork. Database connections - generally do **not** travel across process boundaries. + generally do **not** travel across process boundaries. Use the + :paramref:`.Engine.dispose.close` parameter set to False in this case. + See the section :ref:`pooling_multiprocessing` for more background on this + use case. * Within test suites or multitenancy scenarios where many ad-hoc, short-lived :class:`_engine.Engine` objects may be created and disposed. @@ -1821,6 +1997,12 @@ use of new connections, and means that when a connection is checked in, it is entirely closed out and is not held in memory. See :ref:`pool_switching` for guidelines on how to disable pooling. +.. seealso:: + + :ref:`pooling_toplevel` + + :ref:`pooling_multiprocessing` + .. _dbapi_connections: Working with Driver SQL and Raw DBAPI Connections @@ -1846,9 +2028,10 @@ method may be used:: with engine.connect() as conn: conn.exec_driver_sql("SET param='bar'") - .. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method. +.. _dbapi_connections_cursor: + Working with the DBAPI cursor directly -------------------------------------- @@ -1921,7 +2104,7 @@ may potentially be used with your DBAPI. An example of this pattern is:: connection = engine.raw_connection() try: cursor_obj = connection.cursor() - cursor_obj.callproc("my_procedure", ['x', 'y', 'z']) + cursor_obj.callproc("my_procedure", ["x", "y", "z"]) results = list(cursor_obj.fetchall()) cursor_obj.close() connection.commit() @@ -1967,8 +2150,6 @@ Multiple result set support is available from a raw DBAPI cursor using the finally: connection.close() - - Registering New Dialects ======================== @@ -1984,7 +2165,7 @@ to create a new dialect "foodialect://", the steps are as follows: via ``foodialect.dialect``. 3. The entry point can be established in setup.py as follows:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ @@ -1994,7 +2175,7 @@ an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ @@ -2008,6 +2189,7 @@ SQLAlchemy also allows a dialect to be registered within the current process, by the need for separate installation. 
Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry + registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the @@ -2045,7 +2227,7 @@ Connection / Engine API :inherited-members: -Result Set API +Result Set API ================= .. autoclass:: BaseCursorResult @@ -2054,6 +2236,9 @@ Result Set API .. autoclass:: ChunkedIteratorResult :members: +.. autoclass:: FilterResult + :members: + .. autoclass:: FrozenResult :members: @@ -2095,4 +2280,3 @@ Result Set API .. autoclass:: RowMapping :members: - diff --git a/doc/build/core/constraints.rst b/doc/build/core/constraints.rst index 038c3134dd1..aa322238f58 100644 --- a/doc/build/core/constraints.rst +++ b/doc/build/core/constraints.rst @@ -33,11 +33,13 @@ column. The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: - user_preference = Table('user_preference', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_preference = Table( + "user_preference", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", String(100)), ) Above, we define a new table ``user_preference`` for which each row must @@ -64,21 +66,27 @@ known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: - invoice = Table('invoice', metadata_obj, - Column('invoice_id', Integer, primary_key=True), - Column('ref_num', Integer, primary_key=True), - Column('description', String(60), nullable=False) + invoice = Table( + "invoice", + metadata_obj, + Column("invoice_id", Integer, primary_key=True), + Column("ref_num", Integer, primary_key=True), + Column("description", String(60), nullable=False), ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: - invoice_item = Table('invoice_item', metadata_obj, - Column('item_id', Integer, primary_key=True), - Column('item_name', String(60), nullable=False), - Column('invoice_id', Integer, nullable=False), - Column('ref_num', Integer, nullable=False), - ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) + invoice_item = Table( + "invoice_item", + metadata_obj, + Column("item_id", Integer, primary_key=True), + Column("item_name", String(60), nullable=False), + Column("invoice_id", Integer, nullable=False), + Column("ref_num", Integer, nullable=False), + ForeignKeyConstraint( + ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"] + ), ) It's important to note that the @@ -126,22 +134,20 @@ statements, on all backends other than SQLite which does not support most forms of ALTER. 
Given a schema like:: node = Table( - 'node', metadata_obj, - Column('node_id', Integer, primary_key=True), - Column( - 'primary_element', Integer, - ForeignKey('element.element_id') - ) + "node", + metadata_obj, + Column("node_id", Integer, primary_key=True), + Column("primary_element", Integer, ForeignKey("element.element_id")), ) element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - name='fk_element_parent_node_id' - ) + ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id" + ), ) When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the @@ -151,7 +157,7 @@ constraints are created separately: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -179,7 +185,7 @@ those constraints that are named: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.drop_all(conn, checkfirst=False) + ... metadata_obj.drop_all(conn, checkfirst=False) {opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id DROP TABLE node DROP TABLE element @@ -205,13 +211,16 @@ to manually resolve dependency cycles. We can add this flag only to the ``'element'`` table as follows:: element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - use_alter=True, name='fk_element_parent_node_id' - ) + ["parent_node_id"], + ["node.node_id"], + use_alter=True, + name="fk_element_parent_node_id", + ), ) in our CREATE DDL we will see the ALTER statement only for this constraint, @@ -220,7 +229,7 @@ and not the other one: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -282,22 +291,29 @@ generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. 
The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase:: - child = Table('child', metadata_obj, - Column('id', Integer, - ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"), - primary_key=True - ) - ) - - composite = Table('composite', metadata_obj, - Column('id', Integer, primary_key=True), - Column('rev_id', Integer), - Column('note_id', Integer), + child = Table( + "child", + metadata_obj, + Column( + "id", + Integer, + ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"), + primary_key=True, + ), + ) + + composite = Table( + "composite", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("rev_id", Integer), + Column("note_id", Integer), ForeignKeyConstraint( - ['rev_id', 'note_id'], - ['revisions.id', 'revisions.note_id'], - onupdate="CASCADE", ondelete="SET NULL" - ) + ["rev_id", "note_id"], + ["revisions.id", "revisions.note_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ), ) Note that these clauses require ``InnoDB`` tables when used with MySQL. @@ -327,17 +343,16 @@ unique constraints and/or those with multiple columns are created via the from sqlalchemy import UniqueConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column anonymous unique constraint - Column('col1', Integer, unique=True), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, unique=True), + Column("col2", Integer), + Column("col3", Integer), # explicit/composite unique constraint. 'name' is optional. - UniqueConstraint('col2', 'col3', name='uix_1') - ) + UniqueConstraint("col2", "col3", name="uix_1"), + ) CHECK Constraint ---------------- @@ -357,17 +372,16 @@ MySQL. from sqlalchemy import CheckConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column CHECK constraint - Column('col1', Integer, CheckConstraint('col1>5')), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, CheckConstraint("col1>5")), + Column("col2", Integer), + Column("col3", Integer), # table level CHECK constraint. 'name' is optional. - CheckConstraint('col2 > col3 + 5', name='check1') - ) + CheckConstraint("col2 > col3 + 5", name="check1"), + ) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -388,12 +402,14 @@ option of being configured directly:: from sqlalchemy import PrimaryKeyConstraint - my_table = Table('mytable', metadata_obj, - Column('id', Integer), - Column('version_id', Integer), - Column('data', String(50)), - PrimaryKeyConstraint('id', 'version_id', name='mytable_pk') - ) + my_table = Table( + "mytable", + metadata_obj, + Column("id", Integer), + Column("version_id", Integer), + Column("data", String(50)), + PrimaryKeyConstraint("id", "version_id", name="mytable_pk"), + ) .. seealso:: @@ -468,11 +484,11 @@ one exception case where an existing name can be further embellished). 
An example naming convention that suits basic cases is as follows:: convention = { - "ix": 'ix_%(column_0_label)s', - "uq": "uq_%(table_name)s_%(column_0_name)s", - "ck": "ck_%(table_name)s_%(constraint_name)s", - "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", - "pk": "pk_%(table_name)s" + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", } metadata_obj = MetaData(naming_convention=convention) @@ -482,10 +498,12 @@ the target :class:`_schema.MetaData` collection. For example, we can observe the name produced when we create an unnamed :class:`.UniqueConstraint`:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False), - ... UniqueConstraint('name') + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False), + ... UniqueConstraint("name"), ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -493,10 +511,12 @@ For example, we can observe the name produced when we create an unnamed This same feature takes effect even if we just use the :paramref:`_schema.Column.unique` flag:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False, unique=True) - ... ) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False, unique=True), + ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -543,16 +563,17 @@ deterministically truncated using a 4-character suffix based on the md5 hash of the long name. 
For example, the naming convention below will generate very long names given the column names in use:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) On the PostgreSQL dialect, names longer than 63 characters will be truncated @@ -580,20 +601,22 @@ that as follows:: import uuid + def fk_guid(constraint, table): - str_tokens = [ - table.name, - ] + [ - element.parent.name for element in constraint.elements - ] + [ - element.target_fullname for element in constraint.elements - ] - guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii')) + str_tokens = ( + [ + table.name, + ] + + [element.parent.name for element in constraint.elements] + + [element.target_fullname for element in constraint.elements] + ) + guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode("ascii")) return str(guid) + convention = { "fk_guid": fk_guid, - "ix": 'ix_%(column_0_label)s', + "ix": "ix_%(column_0_label)s", "fk": "fk_%(fk_guid)s", } @@ -602,18 +625,21 @@ name as follows:: >>> metadata_obj = MetaData(naming_convention=convention) - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('version', Integer, primary_key=True), - ... Column('data', String(30)) - ... ) - >>> address_table = Table('address', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', Integer), - ... Column('user_version_id', Integer) - ... ) - >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'], - ... ['user.id', 'user.version']) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("version", Integer, primary_key=True), + ... Column("data", String(30)), + ... ) + >>> address_table = Table( + ... "address", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", Integer), + ... Column("user_version_id", Integer), + ... 
) + >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"]) >>> address_table.append_constraint(fk) >>> fk.name fk_0cd51ab5-8d70-56e8-a83c-86661737766d @@ -646,9 +672,11 @@ A typical convention is ``"ck_%(table_name)s_%(constraint_name)s"``:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint('value > 5', name='value_gt_5') + Table( + "foo", + metadata_obj, + Column("value", Integer), + CheckConstraint("value > 5", name="value_gt_5"), ) The above table will produce the name ``ck_foo_value_gt_5``:: @@ -663,13 +691,9 @@ token; we can make use of this by ensuring we use a :class:`_schema.Column` or :func:`_expression.column` element within the constraint's expression, either by declaring the constraint separate from the table:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer) - ) + foo = Table("foo", metadata_obj, Column("value", Integer)) CheckConstraint(foo.c.value > 5) @@ -677,13 +701,10 @@ or by using a :func:`_expression.column` inline:: from sqlalchemy import column - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint(column('value') > 5) + foo = Table( + "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5) ) Both will produce the name ``ck_foo_value``:: @@ -712,9 +733,7 @@ and :class:`.Enum` which generate a CHECK constraint accompanying the type. The name for the constraint here is most directly set up by sending the "name" parameter, e.g. :paramref:`.Boolean.name`:: - Table('foo', metadata_obj, - Column('flag', Boolean(name='ck_foo_flag')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag"))) The naming convention feature may be combined with these types as well, normally by using a convention which includes ``%(constraint_name)s`` @@ -724,9 +743,7 @@ and then applying a name to the type:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('flag', Boolean(name='flag_bool')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool"))) The above table will produce the constraint name ``ck_foo_flag_bool``:: @@ -748,13 +765,9 @@ The CHECK constraint may also make use of the ``column_0_name`` token, which works nicely with :class:`.SchemaType` since these constraints have only one column:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - Table('foo', metadata_obj, - Column('flag', Boolean()) - ) + Table("foo", metadata_obj, Column("flag", Boolean())) The above schema will produce:: @@ -822,25 +835,24 @@ INDEX" is issued right after the create statements for the table: .. 
sourcecode:: python+sql metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, + mytable = Table( + "mytable", + metadata_obj, # an indexed column, with index "ix_mytable_col1" - Column('col1', Integer, index=True), - + Column("col1", Integer, index=True), # a uniquely indexed column with index "ix_mytable_col2" - Column('col2', Integer, index=True, unique=True), - - Column('col3', Integer), - Column('col4', Integer), - - Column('col5', Integer), - Column('col6', Integer), - ) + Column("col2", Integer, index=True, unique=True), + Column("col3", Integer), + Column("col4", Integer), + Column("col5", Integer), + Column("col6", Integer), + ) # place an index on col3, col4 - Index('idx_col34', mytable.c.col3, mytable.c.col4) + Index("idx_col34", mytable.c.col3, mytable.c.col4) # place a unique index on col5, col6 - Index('myindex', mytable.c.col5, mytable.c.col6, unique=True) + Index("myindex", mytable.c.col5, mytable.c.col6, unique=True) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -863,26 +875,24 @@ objects directly. :class:`.Index` also supports identify columns:: metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - Column('col1', Integer), - - Column('col2', Integer), - - Column('col3', Integer), - Column('col4', Integer), - + mytable = Table( + "mytable", + metadata_obj, + Column("col1", Integer), + Column("col2", Integer), + Column("col3", Integer), + Column("col4", Integer), # place an index on col1, col2 - Index('idx_col12', 'col1', 'col2'), - + Index("idx_col12", "col1", "col2"), # place a unique index on col3, col4 - Index('idx_col34', 'col3', 'col4', unique=True) + Index("idx_col34", "col3", "col4", unique=True), ) The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: .. sourcecode:: python+sql - i = Index('someindex', mytable.c.col5) + i = Index("someindex", mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} @@ -897,14 +907,14 @@ value, the :meth:`_expression.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index - Index('someindex', mytable.c.somecol.desc()) + Index("someindex", mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as PostgreSQL, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index - Index('someindex', func.lower(mytable.c.somecol)) + Index("someindex", func.lower(mytable.c.somecol)) Index API --------- diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 6ec31ce089e..0db63fad942 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -24,6 +24,7 @@ can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY + @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" @@ -67,6 +68,7 @@ to and/or from the database is required. .. autoclass:: TypeDecorator :members: + .. autoattribute:: cache_ok TypeDecorator Recipes --------------------- @@ -92,6 +94,7 @@ which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode + class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" @@ -100,7 +103,7 @@ which coerces as needed:: def process_bind_param(self, value, dialect): if isinstance(value, str): - value = value.decode('utf-8') + value = value.decode("utf-8") return value Rounding Numerics @@ -112,6 +115,7 @@ many decimal places. 
Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal + class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" @@ -119,12 +123,11 @@ many decimal places. Here's a recipe that rounds them down:: def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) - self.quantize_int = - self.impl.scale + self.quantize_int = -self.impl.scale self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): - if isinstance(value, Decimal) and \ - value.as_tuple()[2] < self.quantize_int: + if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value @@ -146,6 +149,7 @@ denormalize:: import datetime + class TZDateTime(TypeDecorator): impl = DateTime cache_ok = True @@ -154,9 +158,7 @@ denormalize:: if value is not None: if not value.tzinfo: raise TypeError("tzinfo is required") - value = value.astimezone(datetime.timezone.utc).replace( - tzinfo=None - ) + value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None) return value def process_result_value(self, value, dialect): @@ -164,7 +166,6 @@ denormalize:: value = value.replace(tzinfo=datetime.timezone.utc) return value - .. _custom_guid_type: Backend-agnostic GUID Type @@ -179,6 +180,7 @@ binary in CHAR(16) if desired:: from sqlalchemy.dialects.postgresql import UUID import uuid + class GUID(TypeDecorator): """Platform-independent GUID type. @@ -186,11 +188,12 @@ binary in CHAR(16) if desired:: CHAR(32), storing as stringified hex values. """ + impl = CHAR cache_ok = True def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': + if dialect.name == "postgresql": return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) @@ -198,7 +201,7 @@ binary in CHAR(16) if desired:: def process_bind_param(self, value, dialect): if value is None: return value - elif dialect.name == 'postgresql': + elif dialect.name == "postgresql": return str(value) else: if not isinstance(value, uuid.UUID): @@ -268,12 +271,12 @@ dictionary-oriented JSON structure, we can apply this as:: json_type = MutableDict.as_mutable(JSONEncodedDict) + class MyClass(Base): # ... json_data = Column(json_type) - .. seealso:: :ref:`mutable_toplevel` @@ -294,8 +297,7 @@ get at this with a type like ``JSONEncodedDict``, we need to from sqlalchemy import type_coerce, String - stmt = select(my_table).where( - type_coerce(my_table.c.json_data, String).like('%foo%')) + stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%")) :class:`.TypeDecorator` provides a built-in system for working up type translations like these based on operators. 
If we wanted to frequently use the @@ -306,6 +308,7 @@ method:: from sqlalchemy.sql import operators from sqlalchemy import String + class JSONEncodedDict(TypeDecorator): impl = VARCHAR @@ -366,6 +369,7 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType + class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" @@ -379,13 +383,18 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: We can apply the ``Geometry`` type into :class:`_schema.Table` metadata and use it in a :func:`_expression.select` construct:: - geometry = Table('geometry', metadata, - Column('geom_id', Integer, primary_key=True), - Column('geom_data', Geometry) - ) + geometry = Table( + "geometry", + metadata, + Column("geom_id", Integer, primary_key=True), + Column("geom_data", Geometry), + ) - print(select(geometry).where( - geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)')) + print( + select(geometry).where( + geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)" + ) + ) The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through @@ -402,7 +411,7 @@ with the labeling of the wrapped expression. Such as, if we rendered a :func:`_expression.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: - print(select(geometry.c.geom_data.label('my_data'))) + print(select(geometry.c.geom_data.label("my_data"))) Output:: @@ -414,11 +423,21 @@ Another example is we decorate PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values transparently:: - from sqlalchemy import create_engine, String, select, func, \ - MetaData, Table, Column, type_coerce, TypeDecorator + from sqlalchemy import ( + create_engine, + String, + select, + func, + MetaData, + Table, + Column, + type_coerce, + TypeDecorator, + ) from sqlalchemy.dialects.postgresql import BYTEA + class PGPString(TypeDecorator): impl = BYTEA @@ -439,24 +458,24 @@ transparently:: def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) + metadata_obj = MetaData() - message = Table('message', metadata_obj, - Column('username', String(50)), - Column('message', - PGPString("this is my passphrase")), - ) + message = Table( + "message", + metadata_obj, + Column("username", String(50)), + Column("message", PGPString("this is my passphrase")), + ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata_obj.create_all(conn) - conn.execute(message.insert(), username="some user", - message="this is my message") + conn.execute(message.insert(), username="some user", message="this is my message") - print(conn.scalar( - select(message.c.message).\ - where(message.c.username == "some user") - )) + print( + conn.scalar(select(message.c.message).where(message.c.username == "some user")) + ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: @@ -491,16 +510,39 @@ explicit methods on column expressions, such as :meth:`.ColumnOperators.in_` (``table.c.value.in_(['x', 'y'])``) and :meth:`.ColumnOperators.like` (``table.c.value.like('%ed%')``). -The Core expression constructs in all cases consult the type of the expression in order to determine -the behavior of existing operators, as well as to locate additional operators that aren't part of -the built-in set. 
The :class:`.TypeEngine` base class defines a root "comparison" implementation -:class:`.TypeEngine.Comparator`, and many specific types provide their own sub-implementations of this -class. User-defined :class:`.TypeEngine.Comparator` implementations can be built directly into a -simple subclass of a particular type in order to override or define new operations. Below, -we create a :class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` operator:: +When the need arises for a SQL operator that isn't directly supported by the +already supplied methods above, the most expedient way to produce this operator is +to use the :meth:`_sql.Operators.op` method on any SQL expression object; this method +is given a string representing the SQL operator to render, and the return value +is a Python callable that accepts any arbitrary right-hand side expression:: + + >>> from sqlalchemy import column + >>> expr = column("x").op(">>")(column("y")) + >>> print(expr) + x >> y + +When making use of custom SQL types, there is also a means of implementing +custom operators as above that are automatically present upon any column +expression that makes use of that column type, without the need to directly +call :meth:`_sql.Operators.op` each time the operator is to be used. + +To achieve this, a SQL +expression construct consults the :class:`_types.TypeEngine` object associated +with the construct in order to determine the behavior of the built-in +operators as well as to look for new methods that may have been invoked. +:class:`.TypeEngine` defines a +"comparison" object implemented by the :class:`.TypeEngine.Comparator` class to provide the base +behavior for SQL operators, and many specific types provide their own +sub-implementations of this class. User-defined :class:`.TypeEngine.Comparator` +implementations can be built directly into a simple subclass of a particular +type in order to override or define new operations. Below, we create a +:class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` +operator, which in turn uses :meth:`_sql.Operators.op` to produce the custom +SQL itself:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -519,30 +561,27 @@ Usage:: The implementation for :meth:`.ColumnOperators.__add__` is consulted by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with -itself as the ``expr`` attribute. The mechanics of the expression -system are such that operations continue recursively until an -expression object produces a new SQL expression construct. Above, we -could just as well have said ``self.expr.op("goofy")(other)`` instead -of ``self.op("goofy")(other)``. +itself as the ``expr`` attribute. 
This attribute may be used when the +implementation needs to refer to the originating :class:`_sql.ColumnElement` +object directly:: + + from sqlalchemy import Integer -When using :meth:`.Operators.op` for comparison operations that return a -boolean result, the :paramref:`.Operators.op.is_comparison` flag should be -set to ``True``:: class MyInt(Integer): class comparator_factory(Integer.Comparator): - def is_frobnozzled(self, other): - return self.op("--is_frobnozzled->", is_comparison=True)(other) + def __add__(self, other): + return func.special_addition(self.expr, other) New methods added to a :class:`.TypeEngine.Comparator` are exposed on an -owning SQL expression -using a ``__getattr__`` scheme, which exposes methods added to -:class:`.TypeEngine.Comparator` onto the owning :class:`_expression.ColumnElement`. -For example, to add a ``log()`` function +owning SQL expression object using a dynamic lookup scheme, which exposes methods added to +:class:`.TypeEngine.Comparator` onto the owning :class:`_expression.ColumnElement` +expression construct. For example, to add a ``log()`` function to integers:: from sqlalchemy import Integer, func + class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): @@ -553,6 +592,15 @@ Using the above type:: >>> print(sometable.c.data.log(5)) log(:log_1, :log_2) +When using :meth:`.Operators.op` for comparison operations that return a +boolean result, the :paramref:`.Operators.op.is_comparison` flag should be +set to ``True``:: + + class MyInt(Integer): + class comparator_factory(Integer.Comparator): + def is_frobnozzled(self, other): + return self.op("--is_frobnozzled->", is_comparison=True)(other) + Unary operations are also possible. For example, to add an implementation of the PostgreSQL factorial operator, we combine the :class:`.UnaryExpression` construct @@ -562,17 +610,18 @@ along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators + class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): - return UnaryExpression(self.expr, - modifier=operators.custom_op("!"), - type_=MyInteger) + return UnaryExpression( + self.expr, modifier=operators.custom_op("!"), type_=MyInteger + ) Using the above type:: >>> from sqlalchemy.sql import column - >>> print(column('x', MyInteger).factorial()) + >>> print(column("x", MyInteger).factorial()) x ! .. seealso:: @@ -594,6 +643,7 @@ is needed, use :class:`.TypeDecorator` instead. .. autoclass:: UserDefinedType :members: + .. autoattribute:: cache_ok .. _custom_and_decorated_types_reflection: @@ -623,8 +673,10 @@ datatype. For example:: >>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer >>> metadata = MetaData() - >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType)) - >>> engine = create_engine("sqlite://", echo='debug') + >>> my_table = Table( + ... "my_table", metadata, Column("id", Integer), Column("data", PickleType) + ... 
) + >>> engine = create_engine("sqlite://", echo="debug") >>> my_table.create(engine) INFO sqlalchemy.engine.base.Engine CREATE TABLE my_table ( @@ -675,7 +727,9 @@ use reflection in combination with explicit :class:`_schema.Column` objects for columns for which we want to use a custom or decorated datatype:: >>> metadata_three = MetaData() - >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine) + >>> my_reflected_table = Table( + ... "my_table", metadata_three, Column("data", PickleType), autoload_with=engine + ... ) The ``my_reflected_table`` object above is reflected, and will load the definition of the "id" column from the SQLite database. But for the "data" @@ -698,6 +752,7 @@ for example we knew that we wanted all :class:`.BLOB` datatypes to in fact be from sqlalchemy import PickleType from sqlalchemy import Table + @event.listens_for(Table, "column_reflect") def _setup_pickletype(inspector, table, column_info): if isinstance(column_info["type"], BLOB): @@ -713,4 +768,4 @@ In practice, the above event-based approach would likely have additional rules in order to affect only those columns where the datatype is important, such as a lookup table of table names and possibly column names, or other heuristics in order to accurately determine which columns should be established with an -in Python datatype. \ No newline at end of file +in Python datatype. diff --git a/doc/build/core/ddl.rst b/doc/build/core/ddl.rst index 9c2fed198db..95665f26b92 100644 --- a/doc/build/core/ddl.rst +++ b/doc/build/core/ddl.rst @@ -32,9 +32,11 @@ other DDL elements except it accepts a string which is the text to be emitted: event.listen( metadata, "after_create", - DDL("ALTER TABLE users ADD CONSTRAINT " + DDL( + "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " - " CHECK (length(user_name) >= 8)") + " CHECK (length(user_name) >= 8)" + ), ) A more comprehensive method of creating libraries of DDL constructs is to use @@ -54,9 +56,10 @@ method. 
For example, if we wanted to create a trigger but only on the PostgreSQL backend, we could invoke this as:: mytable = Table( - 'mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), ) func = DDL( @@ -73,30 +76,18 @@ the PostgreSQL backend, we could invoke this as:: "FOR EACH ROW EXECUTE PROCEDURE my_func();" ) - event.listen( - mytable, - 'after_create', - func.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", func.execute_if(dialect="postgresql")) - event.listen( - mytable, - 'after_create', - trigger.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql")) The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple of string dialect names:: event.listen( - mytable, - "after_create", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql")) ) event.listen( - mytable, - "before_drop", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql")) ) The :meth:`.DDLElement.execute_if` method can also work against a callable @@ -108,27 +99,29 @@ first looking within the PostgreSQL catalogs to see if it exists: def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( users, "after_create", DDL( "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length CHECK (length(user_name) >= 8)" - ).execute_if(callable_=should_create) + ).execute_if(callable_=should_create), ) event.listen( users, "before_drop", - DDL( - "ALTER TABLE users DROP CONSTRAINT cst_user_name_length" - ).execute_if(callable_=should_drop) + DDL("ALTER TABLE users DROP CONSTRAINT cst_user_name_length").execute_if( + callable_=should_drop + ), ) {sql}users.create(engine) @@ -198,22 +191,20 @@ constraints, using these as we did in our previous example of def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( - users, - "after_create", - AddConstraint(constraint).execute_if(callable_=should_create) + users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( - users, - "before_drop", - DropConstraint(constraint).execute_if(callable_=should_drop) + users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index e2e71ea00fd..ca78e3aa046 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -59,9 +59,7 @@ Scalar Defaults The simplest kind of default is a scalar value used as the default value of a column:: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, default=12) - ) + Table("mytable", 
metadata_obj, Column("somecolumn", Integer, default=12)) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. @@ -70,10 +68,7 @@ A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, onupdate=25) - ) - + Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25)) Python-Executed Functions ------------------------- @@ -86,13 +81,18 @@ incrementing counter to a primary key column:: # a function which counts upwards i = 0 + + def mydefault(): global i i += 1 return i - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True, default=mydefault), + + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the @@ -109,11 +109,12 @@ the :paramref:`_schema.Column.onupdate` attribute:: import datetime - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() - Column('last_updated', DateTime, onupdate=datetime.datetime.now), + Column("last_updated", DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, @@ -139,11 +140,14 @@ updated on the row. To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - t = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) + + t = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) The above default generation function is applied so that it will execute for @@ -175,6 +179,8 @@ and returned alone. by offering the service of organizing multiple VALUES clauses into individual parameter dictionaries. +.. 
_defaults_client_invoked_sql: + Client-Invoked SQL Expressions ------------------------------ @@ -182,18 +188,21 @@ The :paramref:`_schema.Column.default` and :paramref:`_schema.Column.onupdate` k also be passed SQL expressions, which are in most cases rendered inline within the INSERT or UPDATE statement:: - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'create_date' to default to now() - Column('create_date', DateTime, default=func.now()), - + Column("create_date", DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table - Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')), - + Column( + "key", + String(20), + default=select(keyvalues.c.key).where(keyvalues.c.type == "type1"), + ), # define 'last_modified' to use the current_timestamp SQL function on update - Column('last_modified', DateTime, onupdate=func.utc_timestamp()) - ) + Column("last_modified", DateTime, onupdate=func.utc_timestamp()), + ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` @@ -255,10 +264,12 @@ placed in the CREATE TABLE statement during a :meth:`_schema.Table.create` opera .. sourcecode:: python+sql - t = Table('test', metadata_obj, - Column('abc', String(20), server_default='abc'), - Column('created_at', DateTime, server_default=func.sysdate()), - Column('index_value', Integer, server_default=text("0")) + t = Table( + "test", + metadata_obj, + Column("abc", String(20), server_default="abc"), + Column("created_at", DateTime, server_default=func.sysdate()), + Column("index_value", Integer, server_default=text("0")), ) A create call for the above table will produce:: @@ -294,10 +305,12 @@ may be called out using :class:`.FetchedValue` as a marker:: from sqlalchemy.schema import FetchedValue - t = Table('test', metadata_obj, - Column('id', Integer, primary_key=True), - Column('abc', TIMESTAMP, server_default=FetchedValue()), - Column('def', String(20), server_onupdate=FetchedValue()) + t = Table( + "test", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("abc", TIMESTAMP, server_default=FetchedValue()), + Column("def", String(20), server_onupdate=FetchedValue()), ) The :class:`.FetchedValue` indicator does not affect the rendered DDL for the @@ -333,58 +346,134 @@ Defining Sequences SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have -explicit support for sequences, which currently includes PostgreSQL, Oracle, -MariaDB 10.3 or greater, and Firebird. The :class:`~sqlalchemy.schema.Sequence` -object is otherwise ignored. +explicit support for sequences, which among SQLAlchemy's included dialects +includes PostgreSQL, Oracle, MS SQL Server, and MariaDB. The +:class:`~sqlalchemy.schema.Sequence` object is otherwise ignored. + +.. tip:: + + In newer database engines, the :class:`.Identity` construct should likely + be preferred vs. :class:`.Sequence` for generation of integer primary key + values. See the section :ref:`identity_ddl` for background on this + construct. 
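For readers who want to see the suggested alternative inline, the following is a minimal sketch (assuming SQLAlchemy 1.4 or later; the table and column names are only illustrative) of an integer primary key driven by :class:`.Identity` rather than :class:`.Sequence`::

    from sqlalchemy import Column, Identity, Integer, MetaData, String, Table

    # assumes an existing MetaData collection named metadata_obj
    metadata_obj = MetaData()

    # Identity renders "GENERATED ... AS IDENTITY" on backends that support it,
    # so no separately named sequence object is required
    data = Table(
        "data",
        metadata_obj,
        Column("id", Integer, Identity(start=1), primary_key=True),
        Column("value", String(50)),
    )
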
The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", start=1), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), + ) + +Where above, the table ``cartitems`` is associated with a sequence named +``cart_id_seq``. Emitting :meth:`.MetaData.create_all` for the above +table will include: + +.. sourcecode:: sql + + CREATE SEQUENCE cart_id_seq START WITH 1 + + CREATE TABLE cartitems ( + cart_id INTEGER NOT NULL, + description VARCHAR(40), + createdate TIMESTAMP WITHOUT TIME ZONE, + PRIMARY KEY (cart_id) ) -Where above, the table "cartitems" is associated with a sequence named -"cart_id_seq". When INSERT statements take place for "cartitems", and no value -is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to -generate a value. Typically, the sequence function is embedded in the -INSERT statement, which is combined with RETURNING so that the newly generated -value can be returned to the Python code:: +.. tip:: + + When using tables with explicit schema names (detailed at + :ref:`schema_table_schema_name`), the configured schema of the :class:`.Table` + is **not** automatically shared by an embedded :class:`.Sequence`, instead, + specify :paramref:`.Sequence.schema`:: + + Sequence("cart_id_seq", start=1, schema="some_schema") + + The :class:`.Sequence` may also be made to automatically make use of the + :paramref:`.MetaData.schema` setting on the :class:`.MetaData` in use; + see :ref:`sequence_metadata` for background. + +When :class:`_dml .Insert` DML constructs are invoked against the ``cartitems`` +table, without an explicit value passed for the ``cart_id`` column, the +``cart_id_seq`` sequence will be used to generate a value on participating +backends. Typically, the sequence function is embedded in the INSERT statement, +which is combined with RETURNING so that the newly generated value can be +returned to the Python process: + +.. sourcecode:: sql INSERT INTO cartitems (cart_id, description, createdate) VALUES (next_val(cart_id_seq), 'some description', '2015-10-15 12:00:15') RETURNING cart_id +When using :meth:`_engine.Connection.execute` to invoke an :class:`_dml.Insert` +construct, newly generated primary key identifiers, including but not limited +to those generated using :class:`.Sequence`, are available from the +:class:`.CursorResult` construct using the +:attr:`.CursorResult.inserted_primary_key` attribute. + When the :class:`~sqlalchemy.schema.Sequence` is associated with a :class:`_schema.Column` as its **Python-side** default generator, the :class:`.Sequence` will also be subject to "CREATE SEQUENCE" and "DROP -SEQUENCE" DDL when similar DDL is emitted for the owning :class:`_schema.Table`. -This is a limited scope convenience feature that does not accommodate for -inheritance of other aspects of the :class:`_schema.MetaData`, such as the default -schema. 
Therefore, it is best practice that for a :class:`.Sequence` which -is local to a certain :class:`_schema.Column` / :class:`_schema.Table`, that it be -explicitly associated with the :class:`_schema.MetaData` using the -:paramref:`.Sequence.metadata` parameter. See the section -:ref:`sequence_metadata` for more background on this. +SEQUENCE" DDL when similar DDL is emitted for the owning :class:`_schema.Table`, +such as when using :meth:`.MetaData.create_all` to generate DDL for a series +of tables. + +The :class:`.Sequence` may also be associated with a +:class:`.MetaData` construct directly. This allows the :class:`.Sequence` +to be used in more than one :class:`.Table` at a time and also allows the +:paramref:`.MetaData.schema` parameter to be inherited. See the section +:ref:`sequence_metadata` for background. Associating a Sequence on a SERIAL column ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PostgreSQL's SERIAL datatype is an auto-incrementing type that implies the implicit creation of a PostgreSQL sequence when CREATE TABLE is emitted. -If a :class:`_schema.Column` specifies an explicit :class:`.Sequence` object -which also specifies a ``True`` value for the :paramref:`.Sequence.optional` -boolean flag, the :class:`.Sequence` will not take effect under PostgreSQL, -and the SERIAL datatype will proceed normally. Instead, the :class:`.Sequence` -will only take effect when used against other sequence-supporting -databases, currently Oracle and Firebird. +The :class:`.Sequence` construct, when indicated for a :class:`_schema.Column`, +may indicate that it should not be used in this specific case by specifying +a value of ``True`` for the :paramref:`.Sequence.optional` parameter. +This allows the given :class:`.Sequence` to be used for backends that have no +alternative primary key generation system but to ignore it for backends +such as PostgreSQL which will automatically generate a sequence for a particular +column:: + + table = Table( + "cartitems", + metadata_obj, + Column( + "cart_id", + Integer, + # use an explicit Sequence where available, but not on + # PostgreSQL where SERIAL will be used + Sequence("cart_id_seq", start=1, optional=True), + primary_key=True, + ), + Column("description", String(40)), + Column("createdate", DateTime()), + ) + +In the above example, ``CREATE TABLE`` for PostgreSQL will make use of the +``SERIAL`` datatype for the ``cart_id`` column, and the ``cart_id_seq`` +sequence will be ignored. However on Oracle, the ``cart_id_seq`` sequence +will be created explicitly. + +.. tip:: + + This particular interaction of SERIAL and SEQUENCE is fairly legacy, and + as in other cases, using :class:`.Identity` instead will simplify the + operation to simply use ``IDENTITY`` on all supported backends. 
+ Executing a Sequence Standalone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -395,7 +484,7 @@ object, it can be invoked with its "next value" instruction by passing it directly to a SQL execution method:: with my_engine.connect() as conn: - seq = Sequence('some_sequence') + seq = Sequence("some_sequence") nextid = conn.execute(seq) In order to embed the "next value" function of a :class:`.Sequence` @@ -403,7 +492,7 @@ inside of a SQL statement like a SELECT or INSERT, use the :meth:`.Sequence.next method, which will render at statement compilation time a SQL function that is appropriate for the target backend:: - >>> my_seq = Sequence('some_sequence') + >>> my_seq = Sequence("some_sequence") >>> stmt = select(my_seq.next_value()) >>> print(stmt.compile(dialect=postgresql.dialect())) SELECT nextval('some_sequence') AS next_value_1 @@ -413,38 +502,26 @@ appropriate for the target backend:: Associating a Sequence with the MetaData ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For many years, the SQLAlchemy documentation referred to the -example of associating a :class:`.Sequence` with a table as follows:: +For a :class:`.Sequence` that is to be associated with arbitrary +:class:`.Table` objects, the :class:`.Sequence` may be associated with +a particular :class:`_schema.MetaData`, using the +:paramref:`.Sequence.metadata` parameter:: - table = Table("cartitems", metadata_obj, - Column("cart_id", Integer, Sequence('cart_id_seq'), - primary_key=True), - Column("description", String(40)), - Column("createdate", DateTime()) - ) + seq = Sequence("my_general_seq", metadata=metadata_obj, start=1) -While the above is a prominent idiomatic pattern, it is recommended that -the :class:`.Sequence` in most cases be explicitly associated with the -:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter:: +Such a sequence can then be associated with columns in the usual way:: - table = Table("cartitems", metadata_obj, - Column( - "cart_id", - Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + table = Table( + "cartitems", + metadata_obj, + seq, Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) -The :class:`.Sequence` object is a first class -schema construct that can exist independently of any table in a database, and -can also be shared among tables. Therefore SQLAlchemy does not implicitly -modify the :class:`.Sequence` when it is associated with a :class:`_schema.Column` -object as either the Python-side or server-side default generator. While the -CREATE SEQUENCE / DROP SEQUENCE DDL is emitted for a :class:`.Sequence` -defined as a Python side generator at the same time the table itself is subject -to CREATE or DROP, this is a convenience feature that does not imply that the -:class:`.Sequence` is fully associated with the :class:`_schema.MetaData` object. +In the above example, the :class:`.Sequence` object is treated as an +independent schema construct that can exist on its own or be shared among +tables. Explicitly associating the :class:`.Sequence` with :class:`_schema.MetaData` allows for the following behaviors: @@ -453,9 +530,6 @@ allows for the following behaviors: parameter specified to the target :class:`_schema.MetaData`, which affects the production of CREATE / DROP DDL, if any. -* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods - automatically use the engine bound to the :class:`_schema.MetaData` - object, if any. 
* The :meth:`_schema.MetaData.create_all` and :meth:`_schema.MetaData.drop_all` methods will emit CREATE / DROP for this :class:`.Sequence`, @@ -463,11 +537,6 @@ allows for the following behaviors: :class:`_schema.Table` / :class:`_schema.Column` that's a member of this :class:`_schema.MetaData`. -Since the vast majority of cases that deal with :class:`.Sequence` expect -that :class:`.Sequence` to be fully "owned" by the associated :class:`_schema.Table` -and that options like default schema are propagated, setting the -:paramref:`.Sequence.metadata` parameter should be considered a best practice. - Associating a Sequence as the Server Side Default ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -478,8 +547,8 @@ The preceding sections illustrate how to associate a :class:`.Sequence` with a :class:`_schema.Column` as the **Python side default generator**:: Column( - "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj), - primary_key=True) + "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True + ) In the above case, the :class:`.Sequence` will automatically be subject to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table` @@ -495,24 +564,30 @@ we illustrate the same :class:`.Sequence` being associated with the :class:`_schema.Column` both as the Python-side default generator as well as the server-side default generator:: - cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj) - table = Table("cartitems", metadata_obj, + cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj) + table = Table( + "cartitems", + metadata_obj, Column( - "cart_id", Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True), + "cart_id", + Integer, + cart_id_seq, + server_default=cart_id_seq.next_value(), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) or with the ORM:: class CartItem(Base): - __tablename__ = 'cartitems' + __tablename__ = "cartitems" - cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata) + cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata) cart_id = Column( - Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True) + Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True + ) description = Column(String(40)) createdate = Column(DateTime) @@ -627,8 +702,6 @@ eagerly fetched. * SQLite as of version 3.31 -* Firebird - When :class:`.Computed` is used with an unsupported backend, if the target dialect does not support it, a :class:`.CompileError` is raised when attempting to render the construct. 
Otherwise, if the dialect supports it but the @@ -656,15 +729,15 @@ shares most of its option to control the database behaviour with Example:: - from sqlalchemy import Table, Column, MetaData, Integer, Computed + from sqlalchemy import Table, Column, MetaData, Integer, Identity, String metadata_obj = MetaData() data = Table( "data", metadata_obj, - Column('id', Integer, Identity(start=42, cycle=True), primary_key=True), - Column('data', String) + Column("id", Integer, Identity(start=42, cycle=True), primary_key=True), + Column("data", String), ) The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index cb114ef7f9e..42c21d12430 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -22,16 +22,20 @@ Creating an engine is just a matter of issuing a single call, :func:`_sa.create_engine()`:: from sqlalchemy import create_engine - engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') + + engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase") The above engine creates a :class:`.Dialect` object tailored towards -PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI -connection at ``localhost:5432`` when a connection request is first received. -Note that the :class:`_engine.Engine` and its underlying :class:`_pool.Pool` do **not** -establish the first actual DBAPI connection until the :meth:`_engine.Engine.connect` -method is called, or an operation which is dependent on this method such as -:meth:`_engine.Engine.execute` is invoked. In this way, :class:`_engine.Engine` and -:class:`_pool.Pool` can be said to have a *lazy initialization* behavior. +PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a +DBAPI connection at ``localhost:5432`` when a connection request is first +received. Note that the :class:`_engine.Engine` and its underlying +:class:`_pool.Pool` do **not** establish the first actual DBAPI connection +until the :meth:`_engine.Engine.connect` or :meth:`_engine.Engine.begin` +methods are called. Either of these methods may also be invoked by other +SQLAlchemy :class:`_engine.Engine` dependent objects such as the ORM +:class:`_orm.Session` object when they first require database connectivity. +In this way, :class:`_engine.Engine` and :class:`_pool.Pool` can be said to +have a *lazy initialization* behavior. The :class:`_engine.Engine`, once created, can either be used directly to interact with the database, or can be passed to a :class:`.Session` object to work with the ORM. This section @@ -52,15 +56,19 @@ See the section :ref:`dialect_toplevel` for information on the various backends .. _database_urls: -Database Urls +Database URLs ============= -The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` object based -on a URL. These URLs follow `RFC-1738 -`_, and usually can include username, password, -hostname, database name as well as optional keyword arguments for additional configuration. -In some cases a file path is accepted, and in others a "data source name" replaces -the "host" and "database" portions. The typical form of a database URL is:: +The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` +object based on a URL. The format of the URL generally follows `RFC-1738 +`_, with some exceptions, including that +underscores, not dashes or periods, are accepted within the "scheme" portion. 
+URLs typically include username, password, hostname, database name fields, as +well as optional keyword arguments for additional configuration. In some cases +a file path is accepted, and in others a "data source name" replaces the "host" +and "database" portions. The typical form of a database URL is: + +.. sourcecode:: none dialect+driver://username:password@host:port/database @@ -71,87 +79,151 @@ the database using all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend. -As the URL is like any other URL, **special characters such as those that may -be used in the password need to be URL encoded to be parsed correctly.**. Below -is an example of a URL that includes the password ``"kx%jj5/g"``, where the -percent sign and slash characters are represented as ``%25`` and ``%2F``, -respectively:: +Escaping Special Characters such as @ signs in Passwords +---------------------------------------------------------- + +When constructing a fully formed URL string to pass to +:func:`_sa.create_engine`, **special characters such as those that may +be used in the user and password need to be URL encoded to be parsed correctly.**. +**This includes the @ sign**. - postgresql+pg8000://dbuser:kx%25jj5%2Fg@pghost10/appdb +Below is an example of a URL that includes the password ``"kx@jj5/g"``, where the +"at" sign and slash characters are represented as ``%40`` and ``%2F``, +respectively: + +.. sourcecode:: none + + postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb The encoding for the above password can be generated using `urllib.parse `_:: >>> import urllib.parse - >>> urllib.parse.quote_plus("kx%jj5/g") - 'kx%25jj5%2Fg' + >>> urllib.parse.quote_plus("kx@jj5/g") + 'kx%40jj5%2Fg' + +The URL may then be passed as a string to :func:`_sa.create_engine`:: + + from sqlalchemy import create_engine + + engine = create_engine("postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb") + +As an alternative to escaping special characters in order to create a complete +URL string, the object passed to :func:`_sa.create_engine` may instead be an +instance of the :class:`.URL` object, which bypasses the parsing +phase and can accommodate for unescaped strings directly. See the next +section for an example. + +.. versionchanged:: 1.4 + + Support for ``@`` signs in hostnames and database names has been + fixed. As a side effect of this fix, ``@`` signs in passwords must be + escaped. + +Creating URLs Programmatically +------------------------------- + +The value passed to :func:`_sa.create_engine` may be an instance of +:class:`.URL`, instead of a plain string, which bypasses the need for string +parsing to be used, and therefore does not need an escaped URL string to be +provided. + +The :class:`.URL` object is created using the :meth:`_engine.URL.create()` +constructor method, passing all fields individually. Special characters +such as those within passwords may be passed without any modification:: + + from sqlalchemy.engine import URL + + url_object = URL.create( + "postgresql+pg8000", + username="dbuser", + password="kx@jj5/g", # plain (unescaped) text + host="pghost10", + database="appdb", + ) + +The constructed :class:`.URL` object may then be passed directly to +:func:`_sa.create_engine` in place of a string argument:: + + from sqlalchemy import create_engine + + engine = create_engine(url_object) + +.. 
seealso:: + + :class:`.URL` + + :meth:`.URL.create` + +Backend-Specific URLs +---------------------- Examples for common connection styles follow below. For a full index of detailed information on all included dialects as well as links to third-party dialects, see :ref:`dialect_toplevel`. PostgreSQL ----------- +^^^^^^^^^^ -The PostgreSQL dialect uses psycopg2 as the default DBAPI. pg8000 is -also available as a pure-Python substitute:: +The PostgreSQL dialect uses psycopg2 as the default DBAPI. Other +PostgreSQL DBAPIs include pg8000 and asyncpg:: # default - engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql://scott:tiger@localhost/mydatabase") # psycopg2 - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase") # pg8000 - engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase") More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`. MySQL ------ +^^^^^^^^^^ -The MySQL dialect uses mysql-python as the default DBAPI. There are many -MySQL DBAPIs available, including MySQL-connector-python and OurSQL:: +The MySQL dialect uses mysqlclient as the default DBAPI. There are other +MySQL DBAPIs available, including PyMySQL:: # default - engine = create_engine('mysql://scott:tiger@localhost/foo') + engine = create_engine("mysql://scott:tiger@localhost/foo") # mysqlclient (a maintained fork of MySQL-Python) - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo") # PyMySQL - engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo') + engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo") More notes on connecting to MySQL at :ref:`mysql_toplevel`. Oracle ------- +^^^^^^^^^^ The Oracle dialect uses cx_oracle as the default DBAPI:: - engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') + engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname") - engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') + engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname") More notes on connecting to Oracle at :ref:`oracle_toplevel`. Microsoft SQL Server --------------------- +^^^^^^^^^^^^^^^^^^^^ The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: # pyodbc - engine = create_engine('mssql+pyodbc://scott:tiger@mydsn') + engine = create_engine("mssql+pyodbc://scott:tiger@mydsn") # pymssql - engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname') + engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname") More notes on connecting to SQL Server at :ref:`mssql_toplevel`. SQLite ------- +^^^^^^^ SQLite connects to file-based databases, using the Python built-in module ``sqlite3`` by default. 
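One way to confirm which dialect and DBAPI driver a given URL resolved to is to check the attributes of the resulting :class:`_engine.Engine`; a small sketch using the default SQLite setup described above::

    >>> from sqlalchemy import create_engine
    >>> engine = create_engine("sqlite://")
    >>> engine.name, engine.driver
    ('sqlite', 'pysqlite')
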
@@ -162,27 +234,27 @@ For a relative file path, this requires three slashes:: # sqlite:/// # where is relative: - engine = create_engine('sqlite:///foo.db') + engine = create_engine("sqlite:///foo.db") And for an absolute file path, the three slashes are followed by the absolute path:: # Unix/Mac - 4 initial slashes in total - engine = create_engine('sqlite:////absolute/path/to/foo.db') + engine = create_engine("sqlite:////absolute/path/to/foo.db") # Windows - engine = create_engine('sqlite:///C:\\path\\to\\foo.db') + engine = create_engine("sqlite:///C:\\path\\to\\foo.db") # Windows alternative using raw string - engine = create_engine(r'sqlite:///C:\path\to\foo.db') + engine = create_engine(r"sqlite:///C:\path\to\foo.db") To use a SQLite ``:memory:`` database, specify an empty URL:: - engine = create_engine('sqlite://') + engine = create_engine("sqlite://") More notes on connecting to SQLite at :ref:`sqlite_toplevel`. Others ------- +^^^^^^ See :ref:`dialect_toplevel`, the top-level page for all additional dialect documentation. @@ -243,7 +315,9 @@ Engine Creation API for keys and either strings or tuples of strings for values, e.g.:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'}) @@ -315,9 +389,7 @@ often specified in the query string of the URL directly. A common example of this is DBAPIs that accept an argument ``encoding`` for character encodings, such as most MySQL DBAPIs:: - engine = create_engine( - "mysql+pymysql://user:pass@host/test?charset=utf8mb4" - ) + engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4") The advantage of using the query string is that additional DBAPI options may be specified in configuration files in a manner that's portable to the DBAPI @@ -336,7 +408,9 @@ supported at this level. method directly as follows:: >>> from sqlalchemy import create_engine - >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4") + >>> engine = create_engine( + ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4" + ... 
) >>> args, kwargs = engine.dialect.create_connect_args(engine.url) >>> args, kwargs ([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2}) @@ -361,14 +435,14 @@ underlying implementation the connection:: engine = create_engine( "postgresql://user:pass@hostname/dbname", - connect_args={"connection_factory": MyConnectionFactory} + connect_args={"connection_factory": MyConnectionFactory}, ) Another example is the pyodbc "timeout" parameter:: engine = create_engine( - "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", - connect_args={"timeout": 30} + "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", + connect_args={"timeout": 30}, ) The above example also illustrates that both URL "query string" parameters as @@ -389,9 +463,10 @@ collections can then be modified in place to alter how they are used:: engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): - cparams['connection_factory'] = MyConnectionFactory + cparams["connection_factory"] = MyConnectionFactory .. _engines_dynamic_tokens: @@ -408,9 +483,10 @@ parameter, this could be implemented as:: engine = create_engine("postgresql://user@hostname/dbname") + @event.listens_for(engine, "do_connect") def provide_token(dialect, conn_rec, cargs, cparams): - cparams['token'] = get_authentication_token() + cparams["token"] = get_authentication_token() .. seealso:: @@ -429,9 +505,8 @@ SQLAlchemy:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): @@ -439,7 +514,6 @@ SQLAlchemy:: cursor_obj.execute("SET some session variables") cursor_obj.close() - Fully Replacing the DBAPI ``connect()`` function ------------------------------------------------ @@ -449,9 +523,8 @@ and returning it:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): @@ -511,7 +584,7 @@ For example, to log SQL queries using Python logging instead of the import logging logging.basicConfig() - logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) + logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an @@ -539,10 +612,9 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: >>> from sqlalchemy import create_engine, text - >>> e = create_engine("sqlite://", echo=True, echo_pool='debug') + >>> e = create_engine("sqlite://", echo=True, echo_pool="debug") >>> with e.connect() as conn: - ... print(conn.scalar(text("select 'hi'"))) - ... + ... 
print(conn.scalar(text("select 'hi'"))) 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection checked out from pool 2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi' @@ -554,6 +626,7 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: Use of these flags is roughly equivalent to:: import logging + logging.basicConfig() logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG) @@ -568,19 +641,22 @@ getting duplicate log lines. Setting the Logging Name ------------------------- -The logger name of instance such as an :class:`~sqlalchemy.engine.Engine` or -:class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier -string. To set this to a specific name, use the +The logger name for :class:`~sqlalchemy.engine.Engine` or +:class:`~sqlalchemy.pool.Pool` is set to be the module-qualified class name of the +object. This name can be further qualified with an additional name +using the :paramref:`_sa.create_engine.logging_name` and -:paramref:`_sa.create_engine.pool_logging_name` with -:func:`sqlalchemy.create_engine`:: +:paramref:`_sa.create_engine.pool_logging_name` parameters with +:func:`sqlalchemy.create_engine`; the name will be appended to existing +class-qualified logging name. This use is recommended for applications that +make use of multiple global :class:`.Engine` instances simultaenously, so +that they may be distinguished in logging:: >>> from sqlalchemy import create_engine >>> from sqlalchemy import text - >>> e = create_engine("sqlite://", echo=True, logging_name='myengine') + >>> e = create_engine("sqlite://", echo=True, logging_name="myengine") >>> with e.connect() as conn: ... conn.execute(text("select 'hi'")) - ... 2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi' 2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine () @@ -649,7 +725,6 @@ these parameters from being logged for privacy purposes, enable the >>> e = create_engine("sqlite://", echo=True, hide_parameters=True) >>> with e.connect() as conn: ... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"}) - ... 2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ? 
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True] diff --git a/doc/build/core/event.rst b/doc/build/core/event.rst index af4e33ba9a5..fbdc72183e5 100644 --- a/doc/build/core/event.rst +++ b/doc/build/core/event.rst @@ -25,16 +25,19 @@ and that a user-defined listener function should receive two positional argument from sqlalchemy.event import listen from sqlalchemy.pool import Pool + def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) - listen(Pool, 'connect', my_on_connect) + + listen(Pool, "connect", my_on_connect) To listen with the :func:`.listens_for` decorator looks like:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect") def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) @@ -54,9 +57,10 @@ that accepts ``**keyword`` arguments, by passing ``named=True`` to either from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(**kw): - print("New DBAPI connection:", kw['dbapi_connection']) + print("New DBAPI connection:", kw["dbapi_connection"]) When using named argument passing, the names listed in the function argument specification will be used as keys in the dictionary. @@ -68,10 +72,11 @@ as long as the names match up:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(dbapi_connection, **kw): print("New DBAPI connection:", dbapi_connection) - print("Connection record:", kw['connection_record']) + print("Connection record:", kw["connection_record"]) Above, the presence of ``**kw`` tells :func:`.listens_for` that arguments should be passed to the function by name, rather than positionally. @@ -95,25 +100,26 @@ and objects:: from sqlalchemy.engine import Engine import psycopg2 + def connect(): - return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") + my_pool = QueuePool(connect) - my_engine = create_engine('postgresql://ed@localhost/test') + my_engine = create_engine("postgresql://ed@localhost/test") # associate listener with all instances of Pool - listen(Pool, 'connect', my_on_connect) + listen(Pool, "connect", my_on_connect) # associate listener with all instances of Pool # via the Engine class - listen(Engine, 'connect', my_on_connect) + listen(Engine, "connect", my_on_connect) # associate listener with my_pool - listen(my_pool, 'connect', my_on_connect) + listen(my_pool, "connect", my_on_connect) # associate listener with my_engine.pool - listen(my_engine, 'connect', my_on_connect) - + listen(my_engine, "connect", my_on_connect) .. 
_event_modifiers: @@ -130,11 +136,12 @@ this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" - return re.sub(r'\D', '', value) + return re.sub(r"\D", "", value) + # setup listener on UserContact.phone attribute, instructing # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) + listen(UserContact.phone, "set", validate_phone, retval=True) Event Reference --------------- diff --git a/doc/build/core/expression_api.rst b/doc/build/core/expression_api.rst index 7d455d20010..6df14f372cb 100644 --- a/doc/build/core/expression_api.rst +++ b/doc/build/core/expression_api.rst @@ -19,4 +19,5 @@ see :ref:`sqlexpression_toplevel`. functions compiler serializer + foundation visitors diff --git a/doc/build/core/foundation.rst b/doc/build/core/foundation.rst new file mode 100644 index 00000000000..3a017dd5dfe --- /dev/null +++ b/doc/build/core/foundation.rst @@ -0,0 +1,32 @@ +.. _core_foundation_toplevel: + +================================================= +SQL Expression Language Foundational Constructs +================================================= + +Base classes and mixins that are used to compose SQL Expression Language +elements. + +.. currentmodule:: sqlalchemy.sql.expression + +.. autoclass:: CacheKey + :members: + +.. autoclass:: ClauseElement + :members: + :inherited-members: + + +.. autoclass:: sqlalchemy.sql.base.DialectKWArgs + :members: + + +.. autoclass:: sqlalchemy.sql.traversals.HasCacheKey + :members: + +.. autoclass:: LambdaElement + :members: + +.. autoclass:: StatementLambdaElement + :members: + diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index 8a3c5221fd1..6fcee6edaa2 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -7,24 +7,137 @@ SQL and Generic Functions .. currentmodule:: sqlalchemy.sql.functions -SQL functions which are known to SQLAlchemy with regards to database-specific -rendering, return types and argument behavior. Generic functions are invoked -like all SQL functions, using the :attr:`func` attribute:: +SQL functions are invoked by using the :data:`_sql.func` namespace. +See the tutorial at :ref:`tutorial_functions` for background on how to +use the :data:`_sql.func` object to render SQL functions in statements. - select(func.count()).select_from(sometable) +.. seealso:: + + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` + +Function API +------------ + +The base API for SQL functions, which provides for the :data:`_sql.func` +namespace as well as classes that may be used for extensibility. + +.. autoclass:: AnsiFunction + :exclude-members: inherit_cache, __new__ + +.. autoclass:: Function + +.. autoclass:: FunctionElement + :members: + :exclude-members: inherit_cache, __new__ + +.. autoclass:: GenericFunction + :exclude-members: inherit_cache, __new__ + +.. autofunction:: register_function -Note that any name not known to :attr:`func` generates the function name as is -- there is no restriction on what SQL functions can be called, known or + +Selected "Known" Functions +-------------------------- + +These are :class:`.GenericFunction` implementations for a selected set of +common SQL functions that set up the expected return type for each function +automatically. 
They are invoked in the same way as any other member of the +:data:`_sql.func` namespace:: + + select(func.count("*")).select_from(some_table) + +Note that any name not known to :data:`_sql.func` generates the function name +as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined. The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use. -.. seealso:: +.. autoclass:: array_agg + :no-members: - :ref:`tutorial_functions` - in the :ref:`unified_tutorial` +.. autoclass:: char_length + :no-members: -.. automodule:: sqlalchemy.sql.functions - :members: - :exclude-members: func +.. autoclass:: coalesce + :no-members: + +.. autoclass:: concat + :no-members: + +.. autoclass:: count + :no-members: + +.. autoclass:: cube + :no-members: + +.. autoclass:: cume_dist + :no-members: + +.. autoclass:: current_date + :no-members: + +.. autoclass:: current_time + :no-members: + +.. autoclass:: current_timestamp + :no-members: + +.. autoclass:: current_user + :no-members: + +.. autoclass:: dense_rank + :no-members: + +.. autoclass:: grouping_sets + :no-members: + +.. autoclass:: localtime + :no-members: + +.. autoclass:: localtimestamp + :no-members: + +.. autoclass:: max + :no-members: + +.. autoclass:: min + :no-members: + +.. autoclass:: mode + :no-members: + +.. autoclass:: next_value + :no-members: + +.. autoclass:: now + :no-members: + +.. autoclass:: percent_rank + :no-members: + +.. autoclass:: percentile_cont + :no-members: + +.. autoclass:: percentile_disc + :no-members: + +.. autoclass:: random + :no-members: + +.. autoclass:: rank + :no-members: + +.. autoclass:: rollup + :no-members: + +.. autoclass:: session_user + :no-members: + +.. autoclass:: sum + :no-members: +.. autoclass:: sysdate + :no-members: +.. autoclass:: user + :no-members: diff --git a/doc/build/core/future.rst index 204e401350d..6323e732a3d 100644 --- a/doc/build/core/future.rst +++ b/doc/build/core/future.rst @@ -15,6 +15,7 @@ by passing the :paramref:`_sa.create_engine.future` flag to :func:`_sa.create_engine`:: from sqlalchemy import create_engine + engine = create_engine("postgresql://user:pass@host/dbname", future=True) Similarly, with the ORM, to enable "future" behavior in the ORM :class:`.Session`, diff --git a/doc/build/core/inspection.rst index eab1288422c..7816cd3fd8c 100644 --- a/doc/build/core/inspection.rst +++ b/doc/build/core/inspection.rst @@ -25,8 +25,18 @@ Below is a listing of many of the most common inspection targets. to per attribute state via the :class:`.AttributeState` interface as well as the per-flush "history" of any attribute via the :class:`.History` object. + + .. seealso:: + + :ref:`orm_mapper_inspection_instancestate` + * ``type`` (i.e. a class) - a class given will be checked by the ORM for a mapping - if so, a :class:`_orm.Mapper` for that class is returned. + + .. seealso:: + + :ref:`orm_mapper_inspection_mapper` + * mapped attribute - passing a mapped attribute to :func:`_sa.inspect`, such as ``inspect(MyClass.some_attribute)``, returns a :class:`.QueryableAttribute` object, which is the :term:`descriptor` associated with a mapped class. @@ -36,3 +46,4 @@ Below is a listing of many of the most common inspection targets. attribute. * :class:`.AliasedClass` - returns an :class:`.AliasedInsp` object. 
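To make the inspection targets listed above concrete, here is a small self-contained sketch; the ``User`` mapping is hypothetical and exists only for this example::

    from sqlalchemy import Column, Integer, String, inspect
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()


    class User(Base):
        __tablename__ = "user_account"

        id = Column(Integer, primary_key=True)
        name = Column(String(30))


    # a mapped class yields a Mapper
    print(inspect(User).columns.keys())  # ['id', 'name']

    # an instance yields an InstanceState
    print(inspect(User(name="spongebob")).transient)  # True - not yet in a Session

    # a mapped attribute yields a QueryableAttribute
    print(inspect(User.name))  # User.name
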
+ diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 86a8f6de345..154472af5dc 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -13,10 +13,15 @@ Describing Databases with MetaData This section discusses the fundamental :class:`_schema.Table`, :class:`_schema.Column` and :class:`_schema.MetaData` objects. +.. seealso:: + + :ref:`tutorial_working_with_metadata` - tutorial introduction to + SQLAlchemy's database metadata concept in the :ref:`unified_tutorial` + A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: - from sqlalchemy import * + from sqlalchemy import MetaData metadata_obj = MetaData() @@ -29,11 +34,15 @@ primary arguments are the table name, then the The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column:: - user = Table('user', metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60)), - Column('nickname', String(50), nullable=False) + from sqlalchemy import Table, Column, Integer, String + + user = Table( + "user", + metadata_obj, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60)), + Column("nickname", String(50), nullable=False), ) Above, a table called ``user`` is described, which contains four columns. The @@ -60,7 +69,7 @@ dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata_obj.sorted_tables: - ... print(t.name) + ... print(t.name) user user_preference invoice @@ -73,10 +82,12 @@ module-level variables in an application. Once a accessors which allow inspection of its properties. Given the following :class:`~sqlalchemy.schema.Table` definition:: - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - @@ -91,7 +102,7 @@ table include:: employees.c.employee_id # via string - employees.c['employee_id'] + employees.c["employee_id"] # iterate through all columns for c in employees.c: @@ -160,41 +171,45 @@ The usual way to issue CREATE is to use that first check for the existence of each individual table, and if not found will issue the CREATE statements: - .. 
sourcecode:: python+sql - - engine = create_engine('sqlite:///:memory:') - - metadata_obj = MetaData() - - user = Table('user', metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60), key='email'), - Column('nickname', String(50), nullable=False) - ) - - user_prefs = Table('user_prefs', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) - ) - - {sql}metadata_obj.create_all(engine) - PRAGMA table_info(user){} - CREATE TABLE user( - user_id INTEGER NOT NULL PRIMARY KEY, - user_name VARCHAR(16) NOT NULL, - email_address VARCHAR(60), - nickname VARCHAR(50) NOT NULL - ) - PRAGMA table_info(user_prefs){} - CREATE TABLE user_prefs( - pref_id INTEGER NOT NULL PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES user(user_id), - pref_name VARCHAR(40) NOT NULL, - pref_value VARCHAR(100) - ) +.. sourcecode:: python+sql + + engine = create_engine("sqlite:///:memory:") + + metadata_obj = MetaData() + + user = Table( + "user", + metadata_obj, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60), key="email"), + Column("nickname", String(50), nullable=False), + ) + + user_prefs = Table( + "user_prefs", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", String(100)), + ) + + {sql}metadata_obj.create_all(engine) + PRAGMA table_info(user){} + CREATE TABLE user( + user_id INTEGER NOT NULL PRIMARY KEY, + user_name VARCHAR(16) NOT NULL, + email_address VARCHAR(60), + nickname VARCHAR(50) NOT NULL + ) + PRAGMA table_info(user_prefs){} + CREATE TABLE user_prefs( + pref_id INTEGER NOT NULL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES user(user_id), + pref_name VARCHAR(40) NOT NULL, + pref_value VARCHAR(100) + ) :func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this @@ -213,14 +228,16 @@ default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False, key='name'), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False, key="name"), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) {sql}employees.create(engine) CREATE TABLE employees( @@ -284,11 +301,11 @@ remote servers (Oracle DBLINK with synonyms). What all of the above approaches have (mostly) in common is that there's a way of referring to this alternate set of tables using a string name. SQLAlchemy -refers to this name as the **schema name**. 
Within SQLAlchemy, this is nothing more than -a string name which is associated with a :class:`_schema.Table` object, and -is then rendered into SQL statements in a manner appropriate to the target -database such that the table is referred towards in its remote "schema", whatever -mechanism that is on the target database. +refers to this name as the **schema name**. Within SQLAlchemy, this is nothing +more than a string name which is associated with a :class:`_schema.Table` +object, and is then rendered into SQL statements in a manner appropriate to the +target database such that the table is referred towards in its remote "schema", +whatever mechanism that is on the target database. The "schema" name may be associated directly with a :class:`_schema.Table` using the :paramref:`_schema.Table.schema` argument; when using the ORM @@ -298,11 +315,27 @@ the parameter is passed using the ``__table_args__`` parameter dictionary. The "schema" name may also be associated with the :class:`_schema.MetaData` object where it will take effect automatically for all :class:`_schema.Table` objects associated with that :class:`_schema.MetaData` that don't otherwise -specify their own name. Finally, SQLAlchemy also supports a "dynamic" schema name +specify their own name. Finally, SQLAlchemy also supports a "dynamic" schema name system that is often used for multi-tenant applications such that a single set of :class:`_schema.Table` metadata may refer to a dynamically configured set of schema names on a per-connection or per-statement basis. +.. topic:: What's "schema" ? + + SQLAlchemy's support for database "schema" was designed with first party + support for PostgreSQL-style schemas. In this style, there is first a + "database" that typically has a single "owner". Within this database there + can be any number of "schemas" which then contain the actual table objects. + + A table within a specific schema is referred towards explicitly using the + syntax ".". Constrast this to an architecture such + as that of MySQL, where there are only "databases", however SQL statements + can refer to multiple databases at once, using the same syntax except it + is ".". On Oracle, this syntax refers to yet another + concept, the "owner" of a table. Regardless of which kind of database is + in use, SQLAlchemy uses the phrase "schema" to refer to the qualifying + identifier within the general syntax of ".". + .. seealso:: :ref:`orm_declarative_table_schema_name` - schema name specification when using the ORM @@ -315,11 +348,11 @@ using a Core :class:`_schema.Table` object as follows:: metadata_obj = MetaData() financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema="remote_banks", ) SQL that is rendered using this :class:`_schema.Table`, such as the SELECT @@ -336,7 +369,7 @@ using the combination of the schema and table name. 
We can view this in the :attr:`_schema.MetaData.tables` collection by searching for the key ``'remote_banks.financial_info'``:: - >>> metadata_obj.tables['remote_banks.financial_info'] + >>> metadata_obj.tables["remote_banks.financial_info"] Table('financial_info', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('value', String(length=100), table=, nullable=False), @@ -349,9 +382,9 @@ objects, even if the referring table is also in that same schema:: customer = Table( "customer", metadata_obj, - Column('id', Integer, primary_key=True), - Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")), + schema="remote_banks", ) The :paramref:`_schema.Table.schema` argument may also be used with certain @@ -361,13 +394,15 @@ important on a database such as Microsoft SQL Server where there are often dotted "database/owner" tokens. The tokens may be placed directly in the name at once, such as:: - schema="dbo.scott" + schema = "dbo.scott" .. seealso:: :ref:`multipart_schema_names` - describes use of dotted schema names with the SQL Server dialect. + :ref:`metadata_reflection_schemas` + .. _schema_metadata_schema_name: @@ -382,10 +417,10 @@ construct:: metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), ) Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object @@ -395,7 +430,7 @@ act as though the parameter were set to the value ``"remote_banks"``. This includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData` using the schema-qualified name, that is:: - metadata_obj.tables['remote_banks.financial_info'] + metadata_obj.tables["remote_banks.financial_info"] When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint` objects to refer to this table, either the schema-qualified name or the @@ -405,20 +440,20 @@ table:: # either will work: refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("financial_info.id")), ) # or refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('remote_banks.financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("remote_banks.financial_info.id")), ) When using a :class:`_schema.MetaData` object that sets @@ -431,18 +466,18 @@ to specify that it should not be schema qualified may use the special symbol metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema=BLANK_SCHEMA # will not use "remote_banks" + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema=BLANK_SCHEMA, # will not use "remote_banks" ) - .. seealso:: :paramref:`_schema.MetaData.schema` + .. 
_schema_dynamic_naming_convention: Applying Dynamic Schema Naming Conventions @@ -454,11 +489,11 @@ basis, so that for example in multi-tenant situations, each transaction or statement may be targeted at a specific set of schema names that change. The section :ref:`schema_translating` describes how this feature is used. - .. seealso:: :ref:`schema_translating` + .. _schema_set_default_connections: Setting a Default Schema for New Connections @@ -484,6 +519,7 @@ Oracle CURRENT_SCHEMA variable to an alternate name:: engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name") + @event.listens_for(engine, "connect", insert=True) def set_current_schema(dbapi_connection, connection_record): cursor_obj = dbapi_connection.cursor() @@ -506,6 +542,17 @@ for specific information regarding how default schemas are configured. :ref:`postgresql_alternate_search_path` - in the :ref:`postgresql_toplevel` dialect documentation. + + + +Schemas and Reflection +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The schema feature of SQLAlchemy interacts with the table reflection +feature introduced at :ref:`metadata_reflection_toplevel`. See the section +:ref:`metadata_reflection_schemas` for additional details on how this works. + + Backend-Specific Options ------------------------ @@ -514,11 +561,13 @@ example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: - addresses = Table('engine_email_addresses', metadata_obj, - Column('address_id', Integer, primary_key=True), - Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), - Column('email_address', String(20)), - mysql_engine='InnoDB' + addresses = Table( + "engine_email_addresses", + metadata_obj, + Column("address_id", Integer, primary_key=True), + Column("remote_user_id", Integer, ForeignKey(users.c.user_id)), + Column("email_address", String(20)), + mysql_engine="InnoDB", ) Other backends may support table-level options as well - these would be @@ -528,6 +577,7 @@ Column, Table, MetaData API --------------------------- .. attribute:: sqlalchemy.schema.BLANK_SCHEMA + :noindex: Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence` should have 'None' for its schema, even if the parent @@ -543,6 +593,15 @@ Column, Table, MetaData API .. versionadded:: 1.0.14 +.. attribute:: sqlalchemy.schema.RETAIN_SCHEMA + :noindex: + + Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` + or in some cases a :class:`_schema.ForeignKey` object, in situations + where the object is being copied for a :meth:`.Table.to_metadata` + operation, should retain the schema name that it already has. + + .. autoclass:: Column :members: diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index 8d962560d58..d3da3c60821 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -11,17 +11,17 @@ Operator Reference >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... 
Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -30,7 +30,7 @@ Operator Reference >>> Base = declarative_base() >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -39,14 +39,14 @@ Operator Reference ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... @@ -55,22 +55,34 @@ Operator Reference >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... ) >>> session.commit() BEGIN ... >>> conn.begin() @@ -108,49 +120,49 @@ strings, dates, and many others: * :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator):: - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 .. * :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator):: - >>> print(column('x') != 5) + >>> print(column("x") != 5) x != :x_1 .. * :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator):: - >>> print(column('x') > 5) + >>> print(column("x") > 5) x > :x_1 .. * :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator):: - >>> print(column('x') < 5) + >>> print(column("x") < 5) x < :x_1 .. * :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator):: - >>> print(column('x') >= 5) + >>> print(column("x") >= 5) x >= :x_1 .. 
* :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator):: - >>> print(column('x') <= 5) + >>> print(column("x") <= 5) x <= :x_1 .. * :meth:`_sql.ColumnOperators.between`:: - >>> print(column('x').between(5, 10)) + >>> print(column("x").between(5, 10)) x BETWEEN :x_1 AND :x_2 .. @@ -171,10 +183,10 @@ IN is available most typically by passing a list of values to the :meth:`_sql.ColumnOperators.in_` method:: - >>> print(column('x').in_([1, 2, 3])) - x IN ([POSTCOMPILE_x_1]) + >>> print(column("x").in_([1, 2, 3])) + x IN (__[POSTCOMPILE_x_1]) -The special bound form ``POSTCOMPILE`` is rendered into individual parameters +The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters at execution time, illustrated below: .. sourcecode:: pycon+sql @@ -211,13 +223,13 @@ NOT IN "NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator:: - >>> print(column('x').not_in([1, 2, 3])) - (x NOT IN ([POSTCOMPILE_x_1])) + >>> print(column("x").not_in([1, 2, 3])) + (x NOT IN (__[POSTCOMPILE_x_1])) This is typically more easily available by negating with the ``~`` operator:: - >>> print(~column('x').in_([1, 2, 3])) - (x NOT IN ([POSTCOMPILE_x_1])) + >>> print(~column("x").in_([1, 2, 3])) + (x NOT IN (__[POSTCOMPILE_x_1])) Tuple IN Expressions ~~~~~~~~~~~~~~~~~~~~ @@ -229,10 +241,10 @@ building block for tuple comparisons. The :meth:`_sql.Tuple.in_` operator then receives a list of tuples:: >>> from sqlalchemy import tuple_ - >>> tup = tuple_(column('x', Integer), column('y', Integer)) + >>> tup = tuple_(column("x", Integer), column("y", Integer)) >>> expr = tup.in_([(1, 2), (3, 4)]) >>> print(expr) - (x, y) IN ([POSTCOMPILE_param_1]) + (x, y) IN (__[POSTCOMPILE_param_1]) To illustrate the parameters rendered: @@ -256,14 +268,14 @@ operators work with subqueries. The form provides that a :class:`_sql.Select` construct is passed in directly, without any explicit conversion to a named subquery:: - >>> print(column('x').in_(select(user_table.c.id))) + >>> print(column("x").in_(select(user_table.c.id))) x IN (SELECT user_account.id FROM user_account) Tuples work as expected:: >>> print( - ... tuple_(column('x'), column('y')).in_( + ... tuple_(column("x"), column("y")).in_( ... select(user_table.c.id, address_table.c.id).join(address_table) ... ) ... ) @@ -283,14 +295,14 @@ databases support: as " IS NULL". The ``NULL`` constant is most easily acquired using regular Python ``None``:: - >>> print(column('x').is_(None)) + >>> print(column("x").is_(None)) x IS NULL SQL NULL is also explicitly available, if needed, using the :func:`_sql.null` construct:: >>> from sqlalchemy import null - >>> print(column('x').is_(null())) + >>> print(column("x").is_(null())) x IS NULL The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when @@ -300,7 +312,7 @@ databases support: explicitly, paricularly when used with a dynamic value:: >>> a = None - >>> print(column('x') == a) + >>> print(column("x") == a) x IS NULL Note that the Python ``is`` operator is **not overloaded**. 
Even though @@ -311,26 +323,26 @@ databases support: Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT":: - >>> print(column('x').is_not(None)) + >>> print(column("x").is_not(None)) x IS NOT NULL Is similarly equivalent to ``!= None``:: - >>> print(column('x') != None) + >>> print(column("x") != None) x IS NOT NULL * :meth:`_sql.ColumnOperators.is_distinct_from`: Produces SQL IS DISTINCT FROM:: - >>> print(column('x').is_distinct_from('some value')) + >>> print(column("x").is_distinct_from("some value")) x IS DISTINCT FROM :x_1 * :meth:`_sql.ColumnOperators.isnot_distinct_from`: Produces SQL IS NOT DISTINCT FROM:: - >>> print(column('x').isnot_distinct_from('some value')) + >>> print(column("x").isnot_distinct_from("some value")) x IS NOT DISTINCT FROM :x_1 String Comparisons @@ -338,7 +350,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.like`:: - >>> print(column('x').like('word')) + >>> print(column("x").like("word")) x LIKE :x_1 .. @@ -348,14 +360,14 @@ String Comparisons Case insensitive LIKE makes use of the SQL ``lower()`` function on a generic backend. On the PostgreSQL backend it will use ``ILIKE``:: - >>> print(column('x').ilike('word')) + >>> print(column("x").ilike("word")) lower(x) LIKE lower(:x_1) .. * :meth:`_sql.ColumnOperators.notlike`:: - >>> print(column('x').notlike('word')) + >>> print(column("x").notlike("word")) x NOT LIKE :x_1 .. @@ -363,7 +375,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.notilike`:: - >>> print(column('x').notilike('word')) + >>> print(column("x").notilike("word")) lower(x) NOT LIKE lower(:x_1) .. @@ -378,21 +390,21 @@ backends or sometimes a function like ``concat()``: * :meth:`_sql.ColumnOperators.startswith`:: The string containment operators - >>> print(column('x').startswith('word')) + >>> print(column("x").startswith("word")) x LIKE :x_1 || '%' .. * :meth:`_sql.ColumnOperators.endswith`:: - >>> print(column('x').endswith('word')) + >>> print(column("x").endswith("word")) x LIKE '%' || :x_1 .. * :meth:`_sql.ColumnOperators.contains`:: - >>> print(column('x').contains('word')) + >>> print(column("x").contains("word")) x LIKE '%' || :x_1 || '%' .. @@ -408,7 +420,7 @@ behaviors and results on different databases: This is a dialect-specific operator that makes use of the MATCH feature of the underlying database, if available:: - >>> print(column('x').match('word')) + >>> print(column("x").match("word")) x MATCH :x_1 .. @@ -419,13 +431,13 @@ behaviors and results on different databases: for example the PostgreSQL dialect:: >>> from sqlalchemy.dialects import postgresql - >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect())) x ~ %(x_1)s Or MySQL:: >>> from sqlalchemy.dialects import mysql - >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect())) x REGEXP %s .. 
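When trying the operators above interactively, it can also help to render the
bound values inline rather than as placeholders; a minimal sketch using the
``literal_binds`` compile option (the column name here is illustrative)::

    >>> from sqlalchemy import Integer, column
    >>> expr = column("x", Integer) == 5
    >>> print(expr.compile(compile_kwargs={"literal_binds": True}))
    x = 5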
@@ -440,20 +452,20 @@ String Alteration String concatenation:: - >>> print(column('x').concat("some string")) + >>> print(column("x").concat("some string")) x || :x_1 This operator is available via :meth:`_sql.ColumnOperators.__add__`, that is, the Python ``+`` operator, when working with a column expression that derives from :class:`_types.String`:: - >>> print(column('x', String) + "some string") + >>> print(column("x", String) + "some string") x || :x_1 The operator will produce the appropriate database-specific construct, such as on MySQL it's historically been the ``concat()`` SQL function:: - >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect())) + >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect())) concat(x, %s) .. @@ -463,7 +475,7 @@ String Alteration Complementary to :meth:`_sql.ColumnOperators.regexp` this produces REGEXP REPLACE equivalent for the backends which support it:: - >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect())) REGEXP_REPLACE(x, %(x_1)s, %(x_2)s) .. @@ -473,7 +485,11 @@ String Alteration Produces the COLLATE SQL operator which provides for specific collations at expression time:: - >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect())) + >>> print( + ... (column("x").collate("latin1_german2_ci") == "Müller").compile( + ... dialect=mysql.dialect() + ... ) + ... ) (x COLLATE latin1_german2_ci) = %s @@ -481,7 +497,11 @@ String Alteration >>> from sqlalchemy import literal - >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect())) + >>> print( + ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile( + ... dialect=mysql.dialect() + ... ) + ... ) (%s COLLATE latin1_german2_ci) = x .. @@ -491,10 +511,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator):: - >>> print(column('x') + 5) + >>> print(column("x") + 5) x + :x_1 - >>> print(5 + column('x')) + >>> print(5 + column("x")) :x_1 + x .. @@ -507,10 +527,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator):: - >>> print(column('x') - 5) + >>> print(column("x") - 5) x - :x_1 - >>> print(5 - column('x')) + >>> print(5 - column("x")) :x_1 - x .. @@ -518,19 +538,19 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator):: - >>> print(column('x') * 5) + >>> print(column("x") * 5) x * :x_1 - >>> print(5 * column('x')) + >>> print(5 * column("x")) :x_1 * x .. * :meth:`_sql.ColumnOperators.__div__`, :meth:`_sql.ColumnOperators.__rdiv__` (Python "``/``" operator):: - >>> print(column('x') / 5) + >>> print(column("x") / 5) x / :x_1 - >>> print(5 / column('x')) + >>> print(5 / column("x")) :x_1 / x .. @@ -538,9 +558,9 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator):: - >>> print(column('x') % 5) + >>> print(column("x") % 5) x % :x_1 - >>> print(5 % column('x')) + >>> print(5 % column("x")) :x_1 % x .. @@ -553,10 +573,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Update.where` and :meth:`_sql.Delete.where`:: >>> print( - ... select(address_table.c.email_address). - ... 
where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) - ... ) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -564,12 +584,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id - ... ) - ... ) + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id + ... ) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -579,11 +597,10 @@ The "AND" conjunction, as well as its partner "OR", are both available directly >>> from sqlalchemy import and_, or_ >>> print( - ... select(address_table.c.email_address). - ... where( + ... select(address_table.c.email_address).where( ... and_( - ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'), - ... address_table.c.user_id == user_table.c.id + ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"), + ... address_table.c.user_id == user_table.c.id, ... ) ... ) ... ) @@ -596,13 +613,13 @@ A negation is available using the :func:`_sql.not_` function. This will typically invert the operator in a boolean expression:: >>> from sqlalchemy import not_ - >>> print(not_(column('x') == 5)) + >>> print(not_(column("x") == 5)) x != :x_1 It also may apply a keyword such as ``NOT`` when appropriate:: >>> from sqlalchemy import Boolean - >>> print(not_(column('x', Boolean))) + >>> print(not_(column("x", Boolean))) NOT x @@ -622,7 +639,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``&`` operator is overloaded to behave the same as :func:`_sql.and_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) & (column('y') == 10)) + >>> print((column("x") == 5) & (column("y") == 10)) x = :x_1 AND y = :y_1 .. @@ -633,7 +650,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``|`` operator is overloaded to behave the same as :func:`_sql.or_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) | (column('y') == 10)) + >>> print((column("x") == 5) | (column("y") == 10)) x = :x_1 OR y = :y_1 .. @@ -645,24 +662,16 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, as :func:`_sql.not_`, either inverting the existing operator, or applying the ``NOT`` keyword to the expression as a whole:: - >>> print(~(column('x') == 5)) + >>> print(~(column("x") == 5)) x != :x_1 >>> from sqlalchemy import Boolean - >>> print(~column('x', Boolean)) + >>> print(~column("x", Boolean)) NOT x .. - - -Operator Customization -^^^^^^^^^^^^^^^^^^^^^^ - -TODO - - .. 
Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 878a9ccab6f..35c312302ce 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -35,8 +35,7 @@ directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. For example:: - engine = create_engine('postgresql://me@localhost/mydb', - pool_size=20, max_overflow=0) + engine = create_engine("postgresql://me@localhost/mydb", pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide @@ -68,14 +67,16 @@ of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool - engine = create_engine('sqlite:///file.db', poolclass=QueuePool) + + engine = create_engine("sqlite:///file.db", poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool + engine = create_engine( - 'postgresql+psycopg2://scott:tiger@localhost/test', - poolclass=NullPool) + "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool + ) Using a Custom Connection Function ---------------------------------- @@ -95,10 +96,12 @@ by any additional options:: import sqlalchemy.pool as pool import psycopg2 + def getconn(): - c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") return c + mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the @@ -130,43 +133,116 @@ however and in particular is not supported with asyncio DBAPI drivers. Reset On Return --------------- -The pool also includes the a "reset on return" feature which will call the -``rollback()`` method of the DBAPI connection when the connection is returned -to the pool. This is so that any existing -transaction on the connection is removed, not only ensuring that no existing -state remains on next usage, but also so that table and row locks are released -as well as that any isolated data snapshots are removed. This ``rollback()`` -occurs in most cases even when using an :class:`_engine.Engine` object, -except in the case when the :class:`_engine.Connection` can guarantee -that a ``rollback()`` has been called immediately before the connection -is returned to the pool. - -For most DBAPIs, the call to ``rollback()`` is very inexpensive and if the +The pool includes "reset on return" behavior which will call the ``rollback()`` +method of the DBAPI connection when the connection is returned to the pool. +This is so that any existing transactional state is removed from the +connection, which includes not just uncommitted data but table and row locks as +well. For most DBAPIs, the call to ``rollback()`` is inexpensive, and if the DBAPI has already completed a transaction, the method should be a no-op. -However, for DBAPIs that incur performance issues with ``rollback()`` even if -there's no state on the connection, this behavior can be disabled using the -``reset_on_return`` option of :class:`_pool.Pool`. The behavior is safe -to disable under the following conditions: - -* If the database does not support transactions at all, such as using - MySQL with the MyISAM engine, or the DBAPI is used in autocommit - mode only, the behavior can be disabled. 
-* If the pool itself doesn't maintain a connection after it's checked in, - such as when using :class:`.NullPool`, the behavior can be disabled. -* Otherwise, it must be ensured that: - - * the application ensures that all :class:`_engine.Connection` - objects are explicitly closed out using a context manager (i.e. ``with`` - block) or a ``try/finally`` style block - * connections are never allowed to be garbage collected before being explicitly - closed. - * the DBAPI connection itself, e.g. ``connection.connection``, is not used - directly, or the application ensures that ``.rollback()`` is called - on this connection before releasing it back to the connection pool. - -The "reset on return" step may be logged using the ``logging.DEBUG`` + +Disabling Reset on Return for non-transactional connections +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For very specific cases where this ``rollback()`` is not useful, such as when +using a connection that is configured for +:ref:`autocommit ` or when using a database +that has no ACID capabilities such as the MyISAM engine of MySQL, the +reset-on-return behavior can be disabled, which is typically done for +performance reasons. This can be affected by using the +:paramref:`_pool.Pool.reset_on_return` parameter of :class:`_pool.Pool`, which +is also available from :func:`_sa.create_engine` as +:paramref:`_sa.create_engine.pool_reset_on_return`, passing a value of ``None``. +This is illustrated in the example below, in conjunction with the +:paramref:`_sa.create_engine.isolation_level` parameter setting of +``AUTOCOMMIT``:: + + non_acid_engine = create_engine( + "mysql://scott:tiger@host/db", + pool_reset_on_return=None, + isolation_level="AUTOCOMMIT", + ) + +The above engine won't actually perform ROLLBACK when connections are returned +to the pool; since AUTOCOMMIT is enabled, the driver will also not perform +any BEGIN operation. + +Custom Reset-on-Return Schemes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +"reset on return" consisting of a single ``rollback()`` may not be sufficient +for some use cases; in particular, applications which make use of temporary +tables may wish for these tables to be automatically removed on connection +checkin. Some (but notably not all) backends include features that can "reset" +such tables within the scope of a database connection, which may be a desirable +behavior for connection pool reset. Other server resources such as prepared +statement handles and server-side statement caches may persist beyond the +checkin process, which may or may not be desirable, depending on specifics. +Again, some (but again not all) backends may provide for a means of resetting +this state. The two SQLAlchemy included dialects which are known to have +such reset schemes include Microsoft SQL Server, where an undocumented but +widely known stored procedure called ``sp_reset_connection`` is often used, +and PostgreSQL, which has a well-documented series of commands including +``DISCARD`` ``RESET``, ``DEALLOCATE``, and ``UNLISTEN``. + +.. note: next paragraph + example should match mssql/base.py example + +The following example illustrates how to replace reset on return with the +Microsoft SQL Server ``sp_reset_connection`` stored procedure, using the +:meth:`.PoolEvents.reset` event hook (**requires SQLAlchemy 1.4.43 or greater**). +The :paramref:`_sa.create_engine.pool_reset_on_return` parameter is set to +``None`` so that the custom scheme can replace the default behavior completely. 
+The custom hook implementation calls ``.rollback()`` in any case, as it's +usually important that the DBAPI's own tracking of commit/rollback will remain +consistent with the state of the transaction:: + + from sqlalchemy import create_engine + from sqlalchemy import event + + mssql_engine = create_engine( + "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server", + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(mssql_engine, "reset") + def _reset_mssql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("{call sys.sp_reset_connection}") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + * :ref:`mssql_reset_on_return` - in the :ref:`mssql_toplevel` documentation + * :ref:`postgresql_reset_on_return` in the :ref:`postgresql_toplevel` documentation + +Logging reset-on-return events +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Logging for pool events including reset on return can be set +``logging.DEBUG`` log level along with the ``sqlalchemy.pool`` logger, or by setting -``echo_pool='debug'`` with :func:`_sa.create_engine`. +:paramref:`_sa.create_engine.echo_pool` to ``"debug"`` when using +:func:`_sa.create_engine`:: + + >>> from sqlalchemy import create_engine + >>> engine = create_engine("postgresql://scott:tiger@localhost/test", echo_pool="debug") + +The above pool will show verbose logging including reset on return:: + + >>> c1 = engine.connect() + DEBUG sqlalchemy.pool.impl.QueuePool Created new connection + DEBUG sqlalchemy.pool.impl.QueuePool Connection checked out from pool + >>> c1.close() + DEBUG sqlalchemy.pool.impl.QueuePool Connection being returned to pool + DEBUG sqlalchemy.pool.impl.QueuePool Connection rollback-on-return + Pool Events ----------- @@ -263,6 +339,7 @@ behaviors are needed:: some_engine = create_engine(...) + @event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: @@ -327,6 +404,7 @@ that they are replaced with new ones upon next checkout. This flow is illustrated by the code example below:: from sqlalchemy import create_engine, exc + e = create_engine(...) c = e.connect() @@ -334,7 +412,7 @@ illustrated by the code example below:: # suppose the database has been restarted. c.execute(text("SELECT * FROM table")) c.close() - except exc.DBAPIError, e: + except exc.DBAPIError as e: # an exception is raised, Connection is invalidated. if e.connection_invalidated: print("Connection was invalidated!") @@ -365,6 +443,7 @@ such as MySQL that automatically close connections that have been stale after a period of time:: from sqlalchemy import create_engine + e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, @@ -433,8 +512,7 @@ close these connections out. 
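As a brief sketch combining the options discussed above (the connection URL is
illustrative), time-based recycling and the pessimistic "pre ping" test can be
configured together on the same :class:`_engine.Engine`, so that stale
connections are both aged out and verified at checkout::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mysql://scott:tiger@localhost/test",
        pool_recycle=3600,  # replace connections open for more than one hour
        pool_pre_ping=True,  # test each connection with a lightweight ping at checkout
    )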
The difference between FIFO and LIFO is basically whether or not its desirable for the pool to keep a full set of connections ready to go even during idle periods:: - engine = create_engine( - "postgreql://", pool_use_lifo=True, pool_pre_ping=True) + engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True) Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag so that connections which are closed from the server side are gracefully @@ -476,34 +554,72 @@ are three general approaches to this: more than once:: from sqlalchemy.pool import NullPool + engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) +2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`, + passing the :paramref:`.Engine.dispose.close` parameter with a value of + ``False``, within the initialize phase of the child process. This is + so that the new process will not touch any of the parent process' connections + and will instead start with new connections. + **This is the recommended approach**:: -2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine` as - soon one is within the new process. In Python multiprocessing, constructs - such as ``multiprocessing.Pool`` include "initializer" hooks which are a - place that this can be performed; otherwise at the top of where - ``os.fork()`` or where the ``Process`` object begins the child fork, a - single call to :meth:`_engine.Engine.dispose` will ensure any remaining - connections are flushed. **This is the recommended approach**:: + from multiprocessing import Pool - engine = create_engine("mysql://user:pass@host/dbname") + engine = create_engine("mysql+mysqldb://user:pass@host/dbname") - def run_in_process(): - # process starts. ensure engine.dispose() is called just once - # at the beginning - engine.dispose() - with engine.connect() as conn: - conn.execute(text("...")) + def run_in_process(some_data_record): + with engine.connect() as conn: + conn.execute(text("...")) + - p = Process(target=run_in_process) - p.start() + def initializer(): + """ensure the parent proc's database connections are not touched + in the new connection pool""" + engine.dispose(close=False) -3. An event handler can be applied to the connection pool that tests for - connections being shared across process boundaries, and invalidates them. - This approach, **when combined with an explicit call to dispose() as - mentioned above**, should cover all cases:: + + with Pool(10, initializer=initializer) as p: + p.map(run_in_process, data) + + .. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` + parameter to allow the replacement of a connection pool in a child + process without interfering with the connections used by the parent + process. + + To achieve the same "dispose without close" behavior prior to version + 1.4.33 (all SQLAlchemy versions), instead of calling + :meth:`.Engine.dispose`, replace the :class:`.Pool` directly using + :meth:`.Pool.recreate`:: + + engine.pool = engine.pool.recreate() + + The above code is equivalent to ``engine.dispose(close=False)`` with the + exception that the :meth:`.ConnectionEvents.engine_disposed` end-user + event hook is not invoked; assuming end-user code is not making use of + this hook, this workaround has no other negative effects. + +3. Call :meth:`.Engine.dispose` **directly before** the child process is + created. 
This will also cause the child process to start with a new + connection pool, while ensuring the parent connections are not transferred + to the child process:: + + engine = create_engine("mysql://user:pass@host/dbname") + + + def run_in_process(): + with engine.connect() as conn: + conn.execute(text("...")) + + + # before process starts, ensure engine.dispose() is called + engine.dispose() + p = Process(target=run_in_process) + p.start() + +4. An event handler can be applied to the connection pool that tests for + connections being shared across process boundaries, and invalidates them:: from sqlalchemy import event from sqlalchemy import exc @@ -511,19 +627,20 @@ are three general approaches to this: engine = create_engine("...") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): - connection_record.info['pid'] = os.getpid() + connection_record.info["pid"] = os.getpid() + @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() - if connection_record.info['pid'] != pid: + if connection_record.info["pid"] != pid: connection_record.dbapi_connection = connection_proxy.dbapi_connection = None raise exc.DisconnectionError( - "Connection record belongs to pid %s, " - "attempting to check out in pid %s" % - (connection_record.info['pid'], pid) + "Connection record belongs to pid %s, " + "attempting to check out in pid %s" % (connection_record.info["pid"], pid) ) Above, we use an approach similar to that described in @@ -531,50 +648,37 @@ are three general approaches to this: originated in a different parent process as an "invalid" connection, coercing the pool to recycle the connection record to make a new connection. - When using the above recipe, **ensure the dispose approach from #2 is also - used**, as if the connection pool is exhausted in the parent process - when the fork occurs, an empty pool will be copied into - the child process which will then hang because it has no connections. - The above strategies will accommodate the case of an :class:`_engine.Engine` -being shared among processes. However, for the case of a transaction-active -:class:`.Session` or :class:`_engine.Connection` being shared, there's no automatic -fix for this; an application needs to ensure a new child process only -initiate new :class:`_engine.Connection` objects and transactions, as well as ORM -:class:`.Session` objects. For a :class:`.Session` object, technically -this is only needed if the session is currently transaction-bound, however -the scope of a single :class:`.Session` is in any case intended to be -kept within a single call stack in any case (e.g. not a global object, not -shared between processes or threads). - +being shared among processes. The above steps alone are not sufficient for the +case of sharing a specific :class:`_engine.Connection` over a process boundary; +prefer to keep the scope of a particular :class:`_engine.Connection` local to a +single process (and thread). It's additionally not supported to share any kind +of ongoing transactional state directly across a process boundary, such as an +ORM :class:`_orm.Session` object that's begun a transaction and references +active :class:`_orm.Connection` instances; again prefer to create new +:class:`_orm.Session` objects in new processes. API Documentation - Available Pool Implementations -------------------------------------------------- .. autoclass:: sqlalchemy.pool.Pool - - .. automethod:: __init__ - .. automethod:: connect - .. 
automethod:: dispose - .. automethod:: recreate + :members: .. autoclass:: sqlalchemy.pool.QueuePool - - .. automethod:: __init__ - .. automethod:: connect + :members: .. autoclass:: SingletonThreadPool - - .. automethod:: __init__ + :members: .. autoclass:: AssertionPool - + :members: .. autoclass:: NullPool - + :members: .. autoclass:: StaticPool + :members: .. autoclass:: _ConnectionFairy :members: diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index 0660823eb02..8c31b7ff000 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -13,7 +13,7 @@ existing within the database. This process is called *reflection*. In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload_with`` argument:: - >>> messages = Table('messages', meta, autoload_with=engine) + >>> messages = Table("messages", metadata_obj, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] @@ -30,8 +30,8 @@ Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: - >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload_with=engine) - >>> 'shopping_carts' in meta.tables: + >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine) + >>> 'shopping_carts' in metadata_obj.tables: True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" @@ -43,7 +43,7 @@ you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: - shopping_carts = Table('shopping_carts', meta) + shopping_carts = Table("shopping_carts", metadata_obj) Of course, it's a good idea to use ``autoload_with=engine`` with the above table regardless. This is so that the table's attributes will be loaded if they have @@ -61,11 +61,16 @@ Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: - >>> mytable = Table('mytable', meta, - ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key - ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode - ... # additional Column objects which require no change are reflected normally - ... autoload_with=some_engine) + >>> mytable = Table( + ... "mytable", + ... metadata_obj, + ... Column( + ... "id", Integer, primary_key=True + ... ), # override reflected 'id' to have primary key + ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode + ... # additional Column objects which require no change are reflected normally + ... autoload_with=some_engine, + ... ) .. seealso:: @@ -92,10 +97,12 @@ extrapolate these constraints. 
Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: - my_view = Table("some_view", metadata, - Column("view_id", Integer, primary_key=True), - Column("related_thing", Integer, ForeignKey("othertable.thing_id")), - autoload_with=engine + my_view = Table( + "some_view", + metadata, + Column("view_id", Integer, primary_key=True), + Column("related_thing", Integer, ForeignKey("othertable.thing_id")), + autoload_with=engine, ) Reflecting All Tables at Once @@ -109,8 +116,8 @@ object's dictionary of tables:: metadata_obj = MetaData() metadata_obj.reflect(bind=someengine) - users_table = metadata_obj.tables['users'] - addresses_table = metadata_obj.tables['addresses'] + users_table = metadata_obj.tables["users"] + addresses_table = metadata_obj.tables["addresses"] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: @@ -119,6 +126,223 @@ object's dictionary of tables:: for table in reversed(metadata_obj.sorted_tables): someengine.execute(table.delete()) +.. _metadata_reflection_schemas: + +Reflecting Tables from Other Schemas +------------------------------------ + +The section :ref:`schema_table_schema_name` introduces the concept of table +schemas, which are namespaces within a database that contain tables and other +objects, and which can be specified explicitly. The "schema" for a +:class:`_schema.Table` object, as well as for other objects like views, indexes and +sequences, can be set up using the :paramref:`_schema.Table.schema` parameter, +and also as the default schema for a :class:`_schema.MetaData` object using the +:paramref:`_schema.MetaData.schema` parameter. + +The use of this schema parameter directly affects where the table reflection +feature will look when it is asked to reflect objects. 
For example, given
+a :class:`_schema.MetaData` object configured with a default schema name
+"project" via its :paramref:`_schema.MetaData.schema` parameter::
+
+    >>> metadata_obj = MetaData(schema="project")
+
+The :meth:`.MetaData.reflect` will then utilize that configured ``.schema``
+for reflection::
+
+    >>> # uses `schema` configured in metadata_obj
+    >>> metadata_obj.reflect(someengine)
+
+The end result is that :class:`_schema.Table` objects from the "project"
+schema will be reflected, and they will be populated as schema-qualified
+with that name::
+
+    >>> metadata_obj.tables["project.messages"]
+    Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
+
+Similarly, an individual :class:`_schema.Table` object that includes the
+:paramref:`_schema.Table.schema` parameter will also be reflected from that
+database schema, overriding any default schema that may have been configured on the
+owning :class:`_schema.MetaData` collection::
+
+    >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+    >>> messages
+    Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
+
+Finally, the :meth:`_schema.MetaData.reflect` method itself also allows a
+:paramref:`_schema.MetaData.reflect.schema` parameter to be passed, so we
+could also load tables from the "project" schema for a default configured
+:class:`_schema.MetaData` object::
+
+    >>> metadata_obj = MetaData()
+    >>> metadata_obj.reflect(someengine, schema="project")
+
+We can call :meth:`_schema.MetaData.reflect` any number of times with different
+:paramref:`_schema.MetaData.schema` arguments (or none at all) to continue
+populating the :class:`_schema.MetaData` object with more objects::
+
+    >>> # add tables from the "customer" schema
+    >>> metadata_obj.reflect(someengine, schema="customer")
+    >>> # add tables from the default schema
+    >>> metadata_obj.reflect(someengine)
+
+.. _reflection_schema_qualified_interaction:
+
+Interaction of Schema-qualified Reflection with the Default Schema
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. admonition:: Section Best Practices Summarized
+
+    In this section, we discuss SQLAlchemy's reflection behavior regarding
+    tables that are visible in the "default schema" of a database session,
+    and how these interact with SQLAlchemy directives that include the schema
+    explicitly. As a best practice, ensure the "default" schema for a database
+    is just a single name, and not a list of names; for tables that are
+    part of this "default" schema and can be named without schema qualification
+    in DDL and SQL, leave corresponding :paramref:`_schema.Table.schema` and
+    similar schema parameters set to their default of ``None``.
+
+As described at :ref:`schema_metadata_schema_name`, databases that have
+the concept of schemas usually also include the concept of a "default" schema.
+The reason for this is naturally that when one refers to table objects without
+a schema as is common, a schema-capable database will still consider that
+table to be in a "schema" somewhere. Some databases such as PostgreSQL
+take this concept further into the notion of a
+`schema search path
+<https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+where *multiple* schema names can be considered in a particular database
+session to be "implicit"; referring to a table name that is in any of those
+schemas will not require that the schema name be present (while at the same time
+it's also perfectly fine if the schema name *is* present).
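To see what the default schema and search path actually are for a particular
connection, a minimal sketch along the following lines may be used (it assumes
a PostgreSQL database; the URL is illustrative)::

    from sqlalchemy import create_engine, inspect, text

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    with engine.connect() as conn:
        # the schema that unqualified table names resolve to for this session
        print(inspect(conn).default_schema_name)

        # the PostgreSQL search path, which may name several schemas
        print(conn.execute(text("SHOW search_path")).scalar())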
+ +Since most relational databases therefore have the concept of a particular +table object which can be referred towards both in a schema-qualified way, as +well as an "implicit" way where no schema is present, this presents a +complexity for SQLAlchemy's reflection +feature. Reflecting a table in +a schema-qualified manner will always populate its :attr:`_schema.Table.schema` +attribute and additionally affect how this :class:`_schema.Table` is organized +into the :attr:`_schema.MetaData.tables` collection, that is, in a schema +qualified manner. Conversely, reflecting the **same** table in a non-schema +qualified manner will organize it into the :attr:`_schema.MetaData.tables` +collection **without** being schema qualified. The end result is that there +would be two separate :class:`_schema.Table` objects in the single +:class:`_schema.MetaData` collection representing the same table in the +actual database. + +To illustrate the ramifications of this issue, consider tables from the +"project" schema in the previous example, and suppose also that the "project" +schema is the default schema of our database connection, or if using a database +such as PostgreSQL suppose the "project" schema is set up in the PostgreSQL +``search_path``. This would mean that the database accepts the following +two SQL statements as equivalent:: + + -- schema qualified + SELECT message_id FROM project.messages + + -- non-schema qualified + SELECT message_id FROM messages + +This is not a problem as the table can be found in both ways. However +in SQLAlchemy, it's the **identity** of the :class:`_schema.Table` object +that determines its semantic role within a SQL statement. Based on the current +decisions within SQLAlchemy, this means that if we reflect the same "messages" table in +both a schema-qualified as well as a non-schema qualified manner, we get +**two** :class:`_schema.Table` objects that will **not** be treated as +semantically equivalent:: + + >>> # reflect in non-schema qualified fashion + >>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine) + >>> # reflect in schema qualified fashion + >>> messages_table_2 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) + >>> # two different objects + >>> messages_table_1 is messages_table_2 + False + >>> # stored in two different ways + >>> metadata.tables["messages"] is messages_table_1 + True + >>> metadata.tables["project.messages"] is messages_table_2 + True + +The above issue becomes more complicated when the tables being reflected contain +foreign key references to other tables. Suppose "messages" has a "project_id" +column which refers to rows in another schema-local table "projects", meaning +there is a :class:`_schema.ForeignKeyConstraint` object that is part of the +definition of the "messages" table. + +We can find ourselves in a situation where one :class:`_schema.MetaData` +collection may contain as many as four :class:`_schema.Table` objects +representing these two database tables, where one or two of the additional +tables were generated by the reflection process; this is because when +the reflection process encounters a foreign key constraint on a table +being reflected, it branches out to reflect that referenced table as well. 
+The decision that SQLAlchemy makes when assigning a schema to this referenced
+table is that it will **omit a default schema** from the reflected
+:class:`_schema.ForeignKeyConstraint` object if the owning
+:class:`_schema.Table` also omits its schema name and these two objects
+are in the same schema, but will **include** it
+if it was not omitted.
+
+The common scenario is when the reflection of a table in a schema qualified
+fashion then loads a related table that will also be performed in a schema
+qualified fashion::
+
+    >>> # reflect "messages" in a schema qualified fashion
+    >>> messages_table_1 = Table(
+    ...     "messages", metadata_obj, schema="project", autoload_with=someengine
+    ... )
+
+The above ``messages_table_1`` will refer to ``projects`` also in a schema
+qualified fashion. This "projects" table will be reflected automatically by
+the fact that "messages" refers to it::
+
+    >>> messages_table_1.c.project_id
+    Column('project_id', INTEGER(), ForeignKey('project.projects.project_id'), table=<messages>)
+
+If some other part of the code reflects "projects" in a non-schema qualified
+fashion, there are now two projects tables that are not the same::
+
+    >>> # reflect "projects" in a non-schema qualified fashion
+    >>> projects_table_1 = Table("projects", metadata_obj, autoload_with=someengine)
+
+    >>> # messages does not refer to projects_table_1 above
+    >>> messages_table_1.c.project_id.references(projects_table_1.c.project_id)
+    False
+
+    >>> # it refers to this one
+    >>> projects_table_2 = metadata_obj.tables["project.projects"]
+    >>> messages_table_1.c.project_id.references(projects_table_2.c.project_id)
+    True
+
+    >>> # they're different, as one is non-schema qualified and the other one is
+    >>> projects_table_1 is projects_table_2
+    False
+
+The above confusion can cause problems within applications that use table
+reflection to load up application-level :class:`_schema.Table` objects, as
+well as within migration scenarios, in particular such as when using Alembic
+Migrations to detect new tables and foreign key constraints.
+
+The above behavior can be remedied by sticking to one simple practice:
+
+* Don't include the :paramref:`_schema.Table.schema` parameter for any
+  :class:`_schema.Table` that expects to be located in the **default** schema
+  of the database.
+
+For PostgreSQL and other databases that support a "search" path for schemas,
+add the following additional practice:
+
+* Keep the "search path" narrowed down to **one schema only, which is the
+  default schema**.
+
+
+.. seealso::
+
+    :ref:`postgresql_schema_reflection` - additional details of this behavior
+    as regards the PostgreSQL database.
+
+
 .. _metadata_reflection_inspector:

 Fine Grained Reflection with Inspector
@@ -130,7 +354,8 @@ database is also available. This is known as the "Inspector"::

     from sqlalchemy import create_engine
     from sqlalchemy import inspect
-    engine = create_engine('...')
+
+    engine = create_engine("...")
     insp = inspect(engine)
     print(insp.get_table_names())
diff --git a/doc/build/core/sqlelement.rst b/doc/build/core/sqlelement.rst
index 8e65993624d..499f26571a8 100644
--- a/doc/build/core/sqlelement.rst
+++ b/doc/build/core/sqlelement.rst
@@ -120,20 +120,12 @@ The classes here are generated using the constructors listed at

 .. autoclass:: BindParameter
    :members:

-.. autoclass:: CacheKey
-   :members:
-
 .. autoclass:: Case
    :members:

 .. autoclass:: Cast
    :members:

-.. autoclass:: ClauseElement
-   :members:
-   :inherited-members:
-
-
 ..
autoclass:: ClauseList :members: @@ -155,8 +147,6 @@ The classes here are generated using the constructors listed at :special-members: :inherited-members: -.. autoclass:: sqlalchemy.sql.base.DialectKWArgs - :members: .. autoclass:: Extract :members: @@ -170,9 +160,6 @@ The classes here are generated using the constructors listed at .. autoclass:: Label :members: -.. autoclass:: LambdaElement - :members: - .. autoclass:: Null :members: @@ -183,9 +170,6 @@ The classes here are generated using the constructors listed at .. autoclass:: Over :members: -.. autoclass:: StatementLambdaElement - :members: - .. autoclass:: TextClause :members: diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index 7a91e39a3f7..9ec74fead7f 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -97,7 +97,7 @@ anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. sourcecode:: pycon+sql >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -154,17 +154,21 @@ addresses" for each row in the "users" table: >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata_obj = MetaData() - >>> users = Table('users', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('fullname', String), + >>> users = Table( + ... "users", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("fullname", String), ... ) - >>> addresses = Table('addresses', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('users.id')), - ... Column('email_address', String, nullable=False) - ... ) + >>> addresses = Table( + ... "addresses", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("users.id")), + ... Column("email_address", String, nullable=False), + ... ) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in @@ -206,7 +210,7 @@ each table first before creating, so it's safe to call multiple times: issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: - Column('name', String(50)) + Column("name", String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by @@ -217,15 +221,18 @@ each table first before creating, so it's safe to call multiple times: without being instructed. 
For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column('id', Integer, Sequence('user_id_seq'), primary_key=True) + + Column("id", Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: - users = Table('users', metadata_obj, - Column('id', Integer, Sequence('user_id_seq'), primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(50)) + users = Table( + "users", + metadata_obj, + Column("id", Integer, Sequence("user_id_seq"), primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(50)), ) We include this more verbose :class:`_schema.Table` construct separately @@ -255,7 +262,7 @@ Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: - >>> ins = users.insert().values(name='jack', fullname='Jack Jones') + >>> ins = users.insert().values(name="jack", fullname="Jack Jones") >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' @@ -351,7 +358,7 @@ and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() - >>> conn.execute(ins, {"id": 2, "name":"wendy", "fullname": "Wendy Williams"}) + >>> conn.execute(ins, {"id": 2, "name": "wendy", "fullname": "Wendy Williams"}) {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) [...] (2, 'wendy', 'Wendy Williams') COMMIT @@ -370,12 +377,15 @@ inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql - >>> conn.execute(addresses.insert(), [ - ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, - ... {'user_id': 1, 'email_address' : 'jack@msn.com'}, - ... {'user_id': 2, 'email_address' : 'www@www.org'}, - ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, - ... ]) + >>> conn.execute( + ... addresses.insert(), + ... [ + ... {"user_id": 1, "email_address": "jack@yahoo.com"}, + ... {"user_id": 1, "email_address": "jack@msn.com"}, + ... {"user_id": 2, "email_address": "www@www.org"}, + ... {"user_id": 2, "email_address": "wendy@aol.com"}, + ... ], + ... ) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) [...] ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT @@ -484,7 +494,7 @@ programmatically generated, or contains non-ascii characters, the [...] () {stop}>>> row = result.fetchone() - >>> print("name:", row._mapping['name'], "; fullname:", row._mapping['fullname']) + >>> print("name:", row._mapping["name"], "; fullname:", row._mapping["fullname"]) name: jack ; fullname: Jack Jones .. deprecated:: 1.4 @@ -522,7 +532,12 @@ collection: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): - ... print("name:", row._mapping[users.c.name], "; fullname:", row._mapping[users.c.fullname]) + ... print( + ... "name:", + ... row._mapping[users.c.name], + ... "; fullname:", + ... row._mapping[users.c.fullname], + ... ) SELECT users.id, users.name, users.fullname FROM users [...] () @@ -681,7 +696,7 @@ equals, not equals, etc.: users.name IS NULL >>> # reverse works too - >>> print('fred' > users.c.name) + >>> print("fred" > users.c.name) users.name < :name_1 If we add two integer columns together, we get an addition expression: @@ -707,8 +722,9 @@ not all of them. MySQL users, fear not: .. 
sourcecode:: pycon+sql - >>> print((users.c.name + users.c.fullname). - ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP + >>> print( + ... (users.c.name + users.c.fullname).compile(bind=create_engine("mysql://")) + ... ) # doctest: +SKIP concat(users.name, users.fullname) The above illustrates the SQL that's generated for an @@ -720,12 +736,12 @@ always use the :meth:`.Operators.op` method; this generates whatever operator yo .. sourcecode:: pycon+sql - >>> print(users.c.name.op('tiddlywinks')('foo')) + >>> print(users.c.name.op("tiddlywinks")("foo")) users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: - somecolumn.op('&')(0xff) + somecolumn.op("&")(0xFF) is a bitwise AND of the value in ``somecolumn``. @@ -735,15 +751,14 @@ column. For this case, be sure to make the type explicit, if not what's normally expected, using :func:`.type_coerce`:: from sqlalchemy import type_coerce - expr = type_coerce(somecolumn.op('-%>')('foo'), MySpecialType()) - stmt = select(expr) + expr = type_coerce(somecolumn.op("-%>")("foo"), MySpecialType()) + stmt = select(expr) For boolean operators, use the :meth:`.Operators.bool_op` method, which will ensure that the return type of the expression is handled as boolean:: - somecolumn.bool_op('-->')('some value') - + somecolumn.bool_op("-->")("some value") Commonly Used Operators ------------------------- @@ -760,11 +775,11 @@ objects is at :class:`.ColumnOperators`. * :meth:`equals <.ColumnOperators.__eq__>`:: - statement.where(users.c.name == 'ed') + statement.where(users.c.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - statement.where(users.c.name != 'ed') + statement.where(users.c.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -785,27 +800,29 @@ objects is at :class:`.ColumnOperators`. * :meth:`IN <.ColumnOperators.in_>`:: - statement.where(users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(users.c.name.in_(["ed", "wendy", "jack"])) # works with Select objects too: - statement.where.filter(users.c.name.in_( - select(users.c.name).where(users.c.name.like('%ed%')) - )) + statement.where.filter( + users.c.name.in_(select(users.c.name).where(users.c.name.like("%ed%"))) + ) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + statement.where( - tuple_(users.c.name, users.c.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(users.c.name, users.c.nickname).in_( + [("ed", "edsnickname"), ("wendy", "windy")] + ) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - statement.where(~users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(~users.c.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: - statement.where(users.c. == None) + statement.where(users.c.name == None) # alternatively, if pep8/linters are a concern statement.where(users.c.name.is_(None)) @@ -878,16 +895,17 @@ a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ - >>> print(and_( - ... users.c.name.like('j%'), + >>> print( + ... and_( + ... users.c.name.like("j%"), ... users.c.id == addresses.c.user_id, ... or_( - ... addresses.c.email_address == 'wendy@aol.com', - ... addresses.c.email_address == 'jack@yahoo.com' + ... addresses.c.email_address == "wendy@aol.com", + ... addresses.c.email_address == "jack@yahoo.com", ... ), - ... not_(users.c.id > 5) - ... ) - ... ) + ... not_(users.c.id > 5), + ... ) + ... 
) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) @@ -899,12 +917,14 @@ parenthesis: .. sourcecode:: pycon+sql - >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & - ... ( - ... (addresses.c.email_address == 'wendy@aol.com') | \ - ... (addresses.c.email_address == 'jack@yahoo.com') - ... ) \ - ... & ~(users.c.id>5) + >>> print( + ... users.c.name.like("j%") + ... & (users.c.id == addresses.c.user_id) + ... & ( + ... (addresses.c.email_address == "wendy@aol.com") + ... | (addresses.c.email_address == "jack@yahoo.com") + ... ) + ... & ~(users.c.id > 5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 @@ -923,19 +943,16 @@ not have a name: .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where( - ... and_( - ... users.c.id == addresses.c.user_id, - ... users.c.name.between('m', 'z'), - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) - ... ) + >>> s = select((users.c.fullname + ", " + addresses.c.email_address).label("title")).where( + ... and_( + ... users.c.id == addresses.c.user_id, + ... users.c.name.between("m", "z"), + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ), + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -954,17 +971,17 @@ A shortcut to using :func:`.and_` is to chain together multiple .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name.between('m', 'z')).\ - ... where( - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) + >>> s = ( + ... select((users.c.fullname + ", " + addresses.c.email_address).label("title")) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name.between("m", "z")) + ... .where( + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ) + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -995,12 +1012,13 @@ unchanged. Below, we create a :func:`_expression.text` object and execute it: >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " - ... "FROM users, addresses " - ... "WHERE users.id = addresses.user_id " - ... "AND users.name BETWEEN :x AND :y " - ... "AND (addresses.email_address LIKE :e1 " - ... "OR addresses.email_address LIKE :e2)") - >>> conn.execute(s, {"x":"m", "y":"z", "e1":"%@aol.com", "e2":"%@msn.com"}).fetchall() + ... "FROM users, addresses " + ... "WHERE users.id = addresses.user_id " + ... "AND users.name BETWEEN :x AND :y " + ... "AND (addresses.email_address LIKE :e1 " + ... "OR addresses.email_address LIKE :e2)" + ... ) + >>> conn.execute(s, {"x": "m", "y": "z", "e1": "%@aol.com", "e2": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? 
AND ? AND @@ -1060,8 +1078,7 @@ When we call the :meth:`_expression.TextClause.columns` method, we get back a j = stmt.join(addresses, stmt.c.id == addresses.c.user_id) - new_stmt = select(stmt.c.id, addresses.c.id).\ - select_from(j).where(stmt.c.name == 'x') + new_stmt = select(stmt.c.id, addresses.c.id).select_from(j).where(stmt.c.name == "x") The positional form of :meth:`_expression.TextClause.columns` is particularly useful when relating textual SQL to existing Core or ORM models, because we can use @@ -1070,16 +1087,18 @@ result column names in the textual SQL: .. sourcecode:: pycon+sql - >>> stmt = text("SELECT users.id, addresses.id, users.id, " + >>> stmt = text( + ... "SELECT users.id, addresses.id, users.id, " ... "users.name, addresses.email_address AS email " ... "FROM users JOIN addresses ON users.id=addresses.user_id " - ... "WHERE users.id = 1").columns( - ... users.c.id, - ... addresses.c.id, - ... addresses.c.user_id, - ... users.c.name, - ... addresses.c.email_address - ... ) + ... "WHERE users.id = 1" + ... ).columns( + ... users.c.id, + ... addresses.c.id, + ... addresses.c.user_id, + ... users.c.name, + ... addresses.c.email_address, + ... ) >>> result = conn.execute(stmt) {opensql}SELECT users.id, addresses.id, users.id, users.name, addresses.email_address AS email @@ -1143,18 +1162,20 @@ need to refer to any pre-established :class:`_schema.Table` metadata: .. sourcecode:: pycon+sql - >>> s = select( - ... text("users.fullname || ', ' || addresses.email_address AS title") - ... ).\ - ... where( - ... and_( - ... text("users.id = addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x " - ... "OR addresses.email_address LIKE :y)") - ... ) - ... ).select_from(text('users, addresses')) + >>> s = ( + ... select(text("users.fullname || ', ' || addresses.email_address AS title")) + ... .where( + ... and_( + ... text("users.id = addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x " + ... "OR addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(text("users, addresses")) + ... ) >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses @@ -1197,22 +1218,27 @@ be quoted: >>> from sqlalchemy import select, and_, text, String >>> from sqlalchemy.sql import table, literal_column - >>> s = select( - ... literal_column("users.fullname", String) + - ... ', ' + - ... literal_column("addresses.email_address").label("title") - ... ).\ - ... where( - ... and_( - ... literal_column("users.id") == literal_column("addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x OR " - ... "addresses.email_address LIKE :y)") - ... ) - ... ).select_from(table('users')).select_from(table('addresses')) - - >>> conn.execute(s, {"x":"%@aol.com", "y":"%@msn.com"}).fetchall() + >>> s = ( + ... select( + ... literal_column("users.fullname", String) + ... + ", " + ... + literal_column("addresses.email_address").label("title") + ... ) + ... .where( + ... and_( + ... literal_column("users.id") == literal_column("addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x OR " + ... "addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(table("users")) + ... .select_from(table("addresses")) + ... 
) + + >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS anon_1 FROM users, addresses WHERE users.id = addresses.user_id @@ -1239,10 +1265,11 @@ are rendered fully: .. sourcecode:: pycon+sql >>> from sqlalchemy import func - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", "num_addresses") + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", "num_addresses") + ... ) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1256,10 +1283,11 @@ name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... ) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1278,9 +1306,9 @@ by a column name that appears more than once: .. sourcecode:: pycon+sql >>> u1a, u1b = users.alias(), users.alias() - >>> stmt = select(u1a, u1b).\ - ... where(u1a.c.name > u1b.c.name).\ - ... order_by(u1a.c.name) # using "name" here would be ambiguous + >>> stmt = ( + ... select(u1a, u1b).where(u1a.c.name > u1b.c.name).order_by(u1a.c.name) + ... ) # using "name" here would be ambiguous {sql}>>> conn.execute(stmt).fetchall() SELECT users_1.id, users_1.name, users_1.fullname, users_2.id AS id_1, @@ -1325,13 +1353,14 @@ once for each address. We create two :class:`_expression.Alias` constructs aga >>> a1 = addresses.alias() >>> a2 = addresses.alias() - >>> s = select(users).\ - ... where(and_( - ... users.c.id == a1.c.user_id, - ... users.c.id == a2.c.user_id, - ... a1.c.email_address == 'jack@msn.com', - ... a2.c.email_address == 'jack@yahoo.com' - ... )) + >>> s = select(users).where( + ... and_( + ... users.c.id == a1.c.user_id, + ... users.c.id == a2.c.user_id, + ... a1.c.email_address == "jack@msn.com", + ... a2.c.email_address == "jack@yahoo.com", + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 @@ -1355,7 +1384,7 @@ itself, we don't need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`_expression.FromClause.alias` method:: - >>> a1 = addresses.alias('a1') + >>> a1 = addresses.alias("a1") SELECT-oriented constructs which extend from :class:`_expression.SelectBase` may be turned into aliased subqueries using the :meth:`_expression.SelectBase.subquery` method, which @@ -1417,10 +1446,7 @@ username: .. sourcecode:: pycon+sql - >>> print(users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%') - ... ) - ... ) + >>> print(users.join(addresses, addresses.c.email_address.like(users.c.name + "%"))) users JOIN addresses ON addresses.email_address LIKE users.name || :name_1 When we create a :func:`_expression.select` construct, SQLAlchemy looks around at the @@ -1431,9 +1457,8 @@ here we make use of the :meth:`_expression.Select.select_from` method: .. 
sourcecode:: pycon+sql >>> s = select(users.c.fullname).select_from( - ... users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%')) - ... ) + ... users.join(addresses, addresses.c.email_address.like(users.c.name + "%")) + ... ) {sql}>>> conn.execute(s).fetchall() SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE users.name || ? @@ -1486,8 +1511,12 @@ typically acquires using the :meth:`_expression.Select.cte` method on a .. sourcecode:: pycon+sql - >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == 'wendy').cte() - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == "wendy").cte() + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... ) >>> conn.execute(stmt).fetchall() {opensql}WITH anon_1 AS (SELECT users.id AS id, users.name AS name @@ -1523,8 +1552,14 @@ this form looks like: >>> users_cte = select(users.c.id, users.c.name).cte(recursive=True) >>> users_recursive = users_cte.alias() - >>> users_cte = users_cte.union(select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id)) - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = users_cte.union( + ... select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id) + ... ) + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... ) >>> conn.execute(stmt).fetchall() {opensql}WITH RECURSIVE anon_1(id, name) AS (SELECT users.id AS id, users.name AS name @@ -1562,7 +1597,7 @@ at execution time, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam - >>> s = users.select().where(users.c.name == bindparam('username')) + >>> s = users.select().where(users.c.name == bindparam("username")) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1577,7 +1612,9 @@ off to the database: .. sourcecode:: pycon+sql - >>> s = users.select().where(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) + >>> s = users.select().where( + ... users.c.name.like(bindparam("username", type_=String) + text("'%'")) + ... ) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1591,17 +1628,19 @@ single named value is needed in the execute parameters: .. sourcecode:: pycon+sql - >>> s = select(users, addresses).\ - ... where( - ... or_( - ... users.c.name.like( - ... bindparam('name', type_=String) + text("'%'")), - ... addresses.c.email_address.like( - ... bindparam('name', type_=String) + text("'@%'")) - ... ) - ... ).\ - ... select_from(users.outerjoin(addresses)).\ - ... order_by(addresses.c.id) + >>> s = ( + ... select(users, addresses) + ... .where( + ... or_( + ... users.c.name.like(bindparam("name", type_=String) + text("'%'")), + ... addresses.c.email_address.like( + ... bindparam("name", type_=String) + text("'@%'") + ... ), + ... ) + ... ) + ... .select_from(users.outerjoin(addresses)) + ... .order_by(addresses.c.id) + ... 
) {sql}>>> conn.execute(s, {"name": "jack"}).fetchall() SELECT users.id, users.name, users.fullname, addresses.id AS id_1, addresses.user_id, addresses.email_address @@ -1629,7 +1668,7 @@ generates functions using attribute access: >>> print(func.now()) now() - >>> print(func.concat('x', 'y')) + >>> print(func.concat("x", "y")) concat(:concat_1, :concat_2) By "generates", we mean that **any** SQL function is created based on the word @@ -1657,7 +1696,6 @@ as date and numeric coercions, the type may need to be specified explicitly:: stmt = select(func.date(some_table.c.date_string, type_=Date)) - Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a @@ -1670,11 +1708,8 @@ not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( - ... select( - ... func.max(addresses.c.email_address, type_=String). - ... label('maxemail') - ... ) - ... ).scalar() + ... select(func.max(addresses.c.email_address, type_=String).label("maxemail")) + ... ).scalar() {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses [...] () @@ -1690,13 +1725,9 @@ well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column - >>> calculate = select(column('q'), column('z'), column('r')).\ - ... select_from( - ... func.calculate( - ... bindparam('x'), - ... bindparam('y') - ... ) - ... ) + >>> calculate = select(column("q"), column("z"), column("r")).select_from( + ... func.calculate(bindparam("x"), bindparam("y")) + ... ) >>> calc = calculate.alias() >>> print(select(users).where(users.c.id > calc.c.z)) SELECT users.id, users.name, users.fullname @@ -1712,10 +1743,9 @@ of our selectable: .. sourcecode:: pycon+sql - >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) - >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) - >>> s = select(users).\ - ... where(users.c.id.between(calc1.c.z, calc2.c.z)) + >>> calc1 = calculate.alias("c1").unique_params(x=17, y=45) + >>> calc2 = calculate.alias("c2").unique_params(x=5, y=12) + >>> s = select(users).where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print(s) SELECT users.id, users.name, users.fullname FROM users, @@ -1723,7 +1753,7 @@ of our selectable: (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z - >>> s.compile().params # doctest: +SKIP + >>> s.compile().params # doctest: +SKIP {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} .. seealso:: @@ -1739,10 +1769,7 @@ Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`.FunctionElement.over` method:: - >>> s = select( - ... users.c.id, - ... func.row_number().over(order_by=users.c.name) - ... ) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name)) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users @@ -1751,12 +1778,7 @@ OVER clause, using the :meth:`.FunctionElement.over` method:: either the :paramref:`.expression.over.rows` or :paramref:`.expression.over.range` parameters:: - >>> s = select( - ... users.c.id, - ... func.row_number().over( - ... order_by=users.c.name, - ... rows=(-2, None)) - ... 
) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name, rows=(-2, None))) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name ROWS BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) AS anon_1 @@ -1830,11 +1852,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... ) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 @@ -1856,10 +1874,8 @@ module level functions :func:`_expression.union` and >>> from sqlalchemy.sql import union >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() @@ -1882,10 +1898,8 @@ Also available, though not supported on all databases, are >>> from sqlalchemy.sql import except_ >>> u = except_( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@%.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) + ... addresses.select().where(addresses.c.email_address.like("%@%.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... ) {sql}>>> conn.execute(u).fetchall() @@ -1910,13 +1924,13 @@ want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( - ... union( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) - ... ).subquery().select(), # apply subquery here - ... addresses.select().where(addresses.c.email_address.like('%@msn.com')) + ... union( + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), + ... ) + ... .subquery() + ... .select(), # apply subquery here + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... ) {sql}>>> conn.execute(u).fetchall() SELECT anon_1.id, anon_1.user_id, anon_1.email_address @@ -1966,10 +1980,8 @@ selected from the first SELECT; the SQLAlchemy compiler will ensure these will be rendered without table names:: >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ) >>> u = u.order_by(u.selected_columns.email_address) >>> print(u) @@ -1997,9 +2009,11 @@ or :meth:`_expression.SelectBase.label` method: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .scalar_subquery() + ... 
) The above construct is now a :class:`_expression.ScalarSelect` object, which is an adapter around the original :class:`.~expression.Select` @@ -2022,9 +2036,11 @@ it using :meth:`_expression.SelectBase.label` instead: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... label("address_count") + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .label("address_count") + ... ) >>> conn.execute(select(users.c.name, subq)).fetchall() {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses @@ -2052,11 +2068,12 @@ still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.user_id).\ - ... where(addresses.c.user_id == users.c.id).\ - ... where(addresses.c.email_address == 'jack@yahoo.com') - >>> enclosing_stmt = select(users.c.name).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(addresses.c.user_id) + ... .where(addresses.c.user_id == users.c.id) + ... .where(addresses.c.email_address == "jack@yahoo.com") + ... ) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2075,14 +2092,17 @@ may be correlated: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name == 'jack').\ - ... correlate(addresses) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate(addresses) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... ) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2097,11 +2117,8 @@ as the argument: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.name == 'wendy').\ - ... correlate(None) - >>> enclosing_stmt = select(users.c.name).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = select(users.c.id).where(users.c.name == "wendy").correlate(None) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2117,14 +2134,17 @@ by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name == 'jack').\ - ... correlate_except(users) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate_except(users) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... 
) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2165,10 +2185,13 @@ to the left side of the JOIN. SQLAlchemy Core supports a statement like the above using the :meth:`_expression.Select.lateral` method as follows:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select(books.c.book_id).\ - ... where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select(books.c.book_id) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select(people).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -2237,9 +2260,11 @@ This is provided via the :meth:`_expression.SelectBase.group_by` method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name) + >>> stmt = ( + ... select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2257,10 +2282,12 @@ method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name).\ - ... having(func.length(users.c.name) > 4) + >>> stmt = ( + ... select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... .having(func.length(users.c.name) > 4) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2276,10 +2303,11 @@ is the DISTINCT modifier. A simple DISTINCT clause can be added using the .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name).\ - ... where(addresses.c.email_address. - ... contains(users.c.name)).\ - ... distinct() + >>> stmt = ( + ... select(users.c.name) + ... .where(addresses.c.email_address.contains(users.c.name)) + ... .distinct() + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT DISTINCT users.name FROM users, addresses @@ -2298,9 +2326,12 @@ into the current backend's methodology: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... limit(1).offset(1) + >>> stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .limit(1) + ... .offset(1) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2326,8 +2357,7 @@ as a value: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... values(fullname="Fullname: " + users.c.name) + >>> stmt = users.update().values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) {opensql}UPDATE users SET fullname=(? || users.name) [...] ('Fullname: ',) @@ -2351,13 +2381,15 @@ as in the example below: .. 
sourcecode:: pycon+sql - >>> stmt = users.insert().\ - ... values(name=bindparam('_name') + " .. name") - >>> conn.execute(stmt, [ - ... {'id':4, '_name':'name1'}, - ... {'id':5, '_name':'name2'}, - ... {'id':6, '_name':'name3'}, - ... ]) + >>> stmt = users.insert().values(name=bindparam("_name") + " .. name") + >>> conn.execute( + ... stmt, + ... [ + ... {"id": 4, "_name": "name1"}, + ... {"id": 5, "_name": "name2"}, + ... {"id": 6, "_name": "name3"}, + ... ], + ... ) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) [...] ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT @@ -2369,9 +2401,7 @@ that can be specified: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == 'jack').\ - ... values(name='ed') + >>> stmt = users.update().where(users.c.name == "jack").values(name="ed") >>> conn.execute(stmt) {opensql}UPDATE users SET name=? WHERE users.name = ? @@ -2386,14 +2416,19 @@ used to achieve this: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == bindparam('oldname')).\ - ... values(name=bindparam('newname')) - >>> conn.execute(stmt, [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ]) + >>> stmt = ( + ... users.update() + ... .where(users.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) + ... ) + >>> conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}UPDATE users SET name=? WHERE users.name = ? [...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT @@ -2410,9 +2445,9 @@ subquery using :meth:`_expression.Select.scalar_subquery`: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.email_address).\ - ... where(addresses.c.user_id == users.c.id).\ - ... limit(1) + >>> stmt = ( + ... select(addresses.c.email_address).where(addresses.c.user_id == users.c.id).limit(1) + ... 
) >>> conn.execute(users.update().values(fullname=stmt.scalar_subquery())) {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses @@ -2435,10 +2470,12 @@ multiple tables can be embedded into a single UPDATE statement separated by a co The SQLAlchemy :func:`_expression.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.update().\ - values(name='ed wood').\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values(name="ed wood") + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement would render as:: @@ -2450,13 +2487,12 @@ The resulting SQL from the above statement would render as:: When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`_expression.Update.values`:: - stmt = users.update().\ - values({ - users.c.name:'ed wood', - addresses.c.email_address:'ed.wood@foo.com' - }).\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values({users.c.name: "ed wood", addresses.c.email_address: "ed.wood@foo.com"}) + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) The tables are referenced explicitly in the SET clause:: @@ -2506,8 +2542,9 @@ To suit this specific use case, the we supply a **series of 2-tuples** as the argument to the method:: - stmt = some_table.update().\ - ordered_values((some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)) + stmt = some_table.update().ordered_values( + (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) + ) The series of 2-tuples is essentially the same structure as a Python dictionary, except that it explicitly suggests a specific ordering. Using the @@ -2539,7 +2576,7 @@ Finally, a delete. This is accomplished easily enough using the COMMIT {stop} - >>> conn.execute(users.delete().where(users.c.name > 'm')) + >>> conn.execute(users.delete().where(users.c.name > "m")) {opensql}DELETE FROM users WHERE users.name > ? [...] ('m',) COMMIT @@ -2559,9 +2596,11 @@ and MySQL, this is the "DELETE USING" syntax, and for SQL Server, it's a :func:`_expression.delete` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) On a PostgreSQL backend, the resulting SQL from the above statement would render as:: diff --git a/doc/build/core/type_api.rst b/doc/build/core/type_api.rst index 0dd1b492053..2586b2b732a 100644 --- a/doc/build/core/type_api.rst +++ b/doc/build/core/type_api.rst @@ -18,6 +18,8 @@ Base Type API .. autoclass:: NullType +.. autoclass:: ExternalType + :members: .. autoclass:: Variant :members: with_variant, __init__ diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index b938cc5eee4..ed4d928e6d0 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -1,34 +1,178 @@ -Column and Data Types +The Type Hierarchy ===================== .. 
module:: sqlalchemy.types

 SQLAlchemy provides abstractions for most common database data types,
-and a mechanism for specifying your own custom data types.
+as well as several techniques for customization of datatypes.
+
+Database types are represented using Python classes, all of which ultimately
+extend from the base type class known as :class:`_types.TypeEngine`. There are
+two general categories of datatypes, each of which expresses itself within
+the typing hierarchy in a different way. The category used by an individual
+datatype class can be identified based on the use of two different naming
+conventions, which are "CamelCase" and "UPPERCASE".
+
+.. seealso::
+
+    :ref:`tutorial_core_metadata` - in the :ref:`unified_tutorial`. Illustrates
+    the most rudimental use of :class:`_types.TypeEngine` type objects to
+    define :class:`_schema.Table` metadata and introduces the concept
+    of type objects in tutorial form.
+
+The "CamelCase" datatypes
+-------------------------
+
+The rudimental types have "CamelCase" names such as :class:`_types.String`,
+:class:`_types.Numeric`, :class:`_types.Integer`, and :class:`_types.DateTime`.
+All of the immediate subclasses of :class:`_types.TypeEngine` are
+"CamelCase" types. The "CamelCase" types are to the greatest degree possible
+**database agnostic**, meaning they can all be used on any database backend
+where they will behave in the way that is appropriate to that backend in order
+to produce the desired behavior.
+
+An example of a straightforward "CamelCase" datatype is :class:`_types.String`.
+On most backends, using this datatype in a
+:ref:`table specification ` will correspond to the
+``VARCHAR`` database type being used on the target backend, delivering string
+values to and from the database, as in the example below::
+
+    from sqlalchemy import MetaData
+    from sqlalchemy import Table, Column, Integer, String
+
+    metadata_obj = MetaData()
+
+    user = Table(
+        "user",
+        metadata_obj,
+        Column("user_name", String, primary_key=True),
+        Column("email_address", String(60)),
+    )
+
+When using a particular :class:`_types.TypeEngine` class in a
+:class:`_schema.Table` definition or in any SQL expression overall, if no
+arguments are required it may be passed as the class itself, that is, without
+instantiating it with ``()``. If arguments are needed, such as the length
+argument of 60 in the ``"email_address"`` column above, the type may be
+instantiated.
+
+Another "CamelCase" datatype that expresses more backend-specific behavior
+is the :class:`_types.Boolean` datatype. Unlike :class:`_types.String`,
+which represents a string datatype that all databases have,
+not every backend has a real "boolean" datatype; some make use of integers
+or BIT values 0 and 1, and some have boolean literal constants ``true`` and
+``false`` while others don't. For this datatype, :class:`_types.Boolean`
+may render ``BOOLEAN`` on a backend such as PostgreSQL, ``BIT`` on the
+MySQL backend and ``SMALLINT`` on Oracle; a brief sketch of this rendering
+appears at the end of this section. As data is sent to and received from
+the database using this type, the dialect in use converts between Python
+boolean values and the backend's own representation as needed.
+
+The typical SQLAlchemy application will likely wish to use primarily
+"CamelCase" types in the general case, as they will generally provide the best
+basic behavior and be automatically portable to all backends.
+
+Reference for the general set of "CamelCase" datatypes is below at
+:ref:`types_generic`.
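As a minimal sketch of the per-backend rendering described above (the exact DDL strings are indicative only and may vary by backend and SQLAlchemy version), the same :class:`_types.Boolean` column can be compiled against several dialects to observe the type name changing::

    from sqlalchemy import Boolean, Column, MetaData, Table
    from sqlalchemy.dialects import mysql, oracle, postgresql
    from sqlalchemy.schema import CreateTable

    metadata_obj = MetaData()

    # a single database-agnostic "CamelCase" Boolean column
    account = Table("account", metadata_obj, Column("is_active", Boolean))

    # the same Table renders different DDL depending on the compiling dialect,
    # e.g. a native BOOLEAN on PostgreSQL versus an integer-like type elsewhere
    print(CreateTable(account).compile(dialect=postgresql.dialect()))
    print(CreateTable(account).compile(dialect=mysql.dialect()))
    print(CreateTable(account).compile(dialect=oracle.dialect()))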
+ +The "UPPERCASE" datatypes +------------------------- + +In contrast to the "CamelCase" types are the "UPPERCASE" datatypes. These +datatypes are always inherited from a particular "CamelCase" datatype, and +always represent an **exact** datatype. When using an "UPPERCASE" datatype, +the name of the type is always rendered exactly as given, without regard for +whether or not the current backend supports it. Therefore the use +of "UPPERCASE" types in a SQLAlchemy application indicates that specific +datatypes are required, which then implies that the application would normally, +without additional steps taken, +be limited to those backends which use the type exactly as given. Examples +of UPPERCASE types include :class:`_types.VARCHAR`, :class:`_types.NUMERIC`, +:class:`_types.INTEGER`, and :class:`_types.TIMESTAMP`, which inherit directly +from the previously mentioned "CamelCase" types +:class:`_types.String`, +:class:`_types.Numeric`, :class:`_types.Integer`, and :class:`_types.DateTime`, +respectively. + +The "UPPERCASE" datatypes that are part of ``sqlalchemy.types`` are common +SQL types that typically expect to be available on at least two backends +if not more. + +Reference for the general set of "UPPERCASE" datatypes is below at +:ref:`types_sqlstandard`. + + + +.. _types_vendor: + +Backend-specific "UPPERCASE" datatypes +-------------------------------------- -The methods and attributes of type objects are rarely used directly. -Type objects are supplied to :class:`~sqlalchemy.schema.Table` definitions -and can be supplied as type hints to `functions` for occasions where -the database driver returns an incorrect type. +Most databases also have their own datatypes that +are either fully specific to those databases, or add additional arguments +that are specific to those databases. For these datatypes, specific +SQLAlchemy dialects provide **backend-specific** "UPPERCASE" datatypes, for a +SQL type that has no analogue on other backends. Examples of backend-specific +uppercase datatypes include PostgreSQL's :class:`_postgresql.JSONB`, SQL Server's +:class:`_mssql.IMAGE` and MySQL's :class:`_mysql.TINYTEXT`. + +Specific backends may also include "UPPERCASE" datatypes that extend the +arguments available from that same "UPPERCASE" datatype as found in the +``sqlalchemy.types`` module. An example is when creating a MySQL string +datatype, one might want to specify MySQL-specific arguments such as ``charset`` +or ``national``, which are available from the MySQL version +of :class:`_mysql.VARCHAR` as the MySQL-only parameters +:paramref:`_mysql.VARCHAR.charset` and :paramref:`_mysql.VARCHAR.national`. + +API documentation for backend-specific types are in the dialect-specific +documentation, listed at :ref:`dialect_toplevel`. + + +.. _types_with_variant: + +Using "UPPERCASE" and Backend-specific types for multiple backends +------------------------------------------------------------------ + +Reviewing the presence of "UPPERCASE" and "CamelCase" types leads to the natural +use case of how to make use of "UPPERCASE" datatypes for backend-specific +options, but only when that backend is in use. To tie together the +database-agnostic "CamelCase" and backend-specific "UPPERCASE" systems, one +makes use of the :meth:`_types.TypeEngine.with_variant` method in order to +**compose** types together to work with specific behaviors on specific backends. 
+ +Such as, to use the :class:`_types.String` datatype, but when running on MySQL +to make use of the :paramref:`_mysql.VARCHAR.charset` parameter of +:class:`_mysql.VARCHAR` when the table is created on MySQL, +:meth:`_types.TypeEngine.with_variant` may be used as below:: + + from sqlalchemy import MetaData + from sqlalchemy import Table, Column, Integer, String + from sqlalchemy.dialects.mysql import VARCHAR + + metadata_obj = MetaData() + + user = Table( + "user", + metadata_obj, + Column("user_name", String(100), primary_key=True), + Column( + "bio", + String(255).with_variant(VARCHAR(255, charset="utf8"), "mysql"), + ), + ) -.. code-block:: pycon +In the above table definition, the ``"bio"`` column will have string-behaviors +on all backends. On most backends it will render in DDL as ``VARCHAR``. However +on MySQL (indicated by database URLs that start with ``mysql``), it will +render as ``VARCHAR(255) CHARACTER SET utf8``. - >>> users = Table('users', metadata, - ... Column('id', Integer, primary_key=True), - ... Column('login', String(32)) - ... ) +.. seealso:: -SQLAlchemy will use the ``Integer`` and ``String(32)`` type -information when issuing a ``CREATE TABLE`` statement and will use it -again when reading back rows ``SELECTed`` from the database. -Functions that accept a type (such as :func:`~sqlalchemy.schema.Column`) will -typically accept a type class or instance; ``Integer`` is equivalent -to ``Integer()`` with no construction arguments in this case. + :meth:`_types.TypeEngine.with_variant` - additional usage examples and notes .. _types_generic: -Generic Types -------------- +Generic "CamelCase" Types +------------------------- Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best @@ -97,8 +241,8 @@ type is emitted in ``CREATE TABLE``, such as ``VARCHAR`` see .. _types_sqlstandard: -SQL Standard and Multiple Vendor Types --------------------------------------- +SQL Standard and Multiple Vendor "UPPERCASE" Types +-------------------------------------------------- This category of types refers to types that are either part of the SQL standard, or are potentially found within a subset of database backends. @@ -181,59 +325,3 @@ its exact name in DDL with ``CREATE TABLE`` is issued. .. autoclass:: VARCHAR -.. _types_vendor: - -Vendor-Specific Types ---------------------- - -Database-specific types are also available for import from each -database's dialect module. See the :ref:`dialect_toplevel` -reference for the database you're interested in. - -For example, MySQL has a ``BIGINT`` type and PostgreSQL has an -``INET`` type. 
To use these, import them from the module explicitly:: - - from sqlalchemy.dialects import mysql - - table = Table('foo', metadata, - Column('id', mysql.BIGINT), - Column('enumerates', mysql.ENUM('a', 'b', 'c')) - ) - -Or some PostgreSQL types:: - - from sqlalchemy.dialects import postgresql - - table = Table('foo', metadata, - Column('ipaddress', postgresql.INET), - Column('elements', postgresql.ARRAY(String)) - ) - -Each dialect provides the full set of typenames supported by -that backend within its `__all__` collection, so that a simple -`import *` or similar will import all supported types as -implemented for that backend:: - - from sqlalchemy.dialects.postgresql import * - - t = Table('mytable', metadata, - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(300)), - Column('inetaddr', INET) - ) - -Where above, the INTEGER and VARCHAR types are ultimately from -sqlalchemy.types, and INET is specific to the PostgreSQL dialect. - -Some dialect level types have the same name as the SQL standard type, -but also provide additional arguments. For example, MySQL implements -the full range of character and string types including additional arguments -such as `collation` and `charset`:: - - from sqlalchemy.dialects.mysql import VARCHAR, TEXT - - table = Table('foo', meta, - Column('col1', VARCHAR(200, collation='binary')), - Column('col2', TEXT(charset='latin1')) - ) - diff --git a/doc/build/core/types.rst b/doc/build/core/types.rst index 762105646cb..d569bdee77e 100644 --- a/doc/build/core/types.rst +++ b/doc/build/core/types.rst @@ -1,6 +1,6 @@ .. _types_toplevel: -Column and Data Types +SQL Datatype Objects ===================== .. toctree:: diff --git a/doc/build/core/visitors.rst b/doc/build/core/visitors.rst index 6ef466265d4..06d839d54cb 100644 --- a/doc/build/core/visitors.rst +++ b/doc/build/core/visitors.rst @@ -23,4 +23,5 @@ as well as when building out custom SQL expressions using the .. automodule:: sqlalchemy.sql.visitors :members: - :private-members: \ No newline at end of file + :private-members: + diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index d632026dc32..ebcb72eaaad 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -22,8 +22,8 @@ Included Dialects oracle mssql -Support Levels for Included Dialects -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Supported versions for Included Dialects +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following table summarizes the support level for each included dialect. @@ -33,21 +33,20 @@ The following table summarizes the support level for each included dialect. Support Definitions ^^^^^^^^^^^^^^^^^^^ -.. glossary:: + .. Fully tested in CI + .. **Fully tested in CI** indicates a version that is tested in the sqlalchemy + .. CI system and passes all the tests in the test suite. - Fully tested in CI - **Fully tested in CI** indicates a version that is tested in the sqlalchemy - CI system and passes all the tests in the test suite. +.. glossary:: - Normal support - **Normal support** indicates that most features should work, - but not all versions are tested in the ci configuration so there may - be some not supported edge cases. We will try to fix issues that affect - these versions. + Supported version + **Supported version** indicates that most SQLAlchemy features should work + for the mentioned database version. Since not all database versions may be + tested in the ci there may be some not working edge cases. 
Best effort - **Best effort** indicates that we try to support basic features on them, - but most likely there will be unsupported features or errors in some use cases. + **Best effort** indicates that SQLAlchemy tries to support basic features on these + versions, but most likely there will be unsupported features or errors in some use cases. Pull requests with associated issues may be accepted to continue supporting older versions, which are reviewed on a case-by-case basis. @@ -76,75 +75,83 @@ External Dialects Currently maintained external dialect projects for SQLAlchemy include: -+-----------------------------------------+---------------------------------------+ -| Database | Dialect | -+=========================================+=======================================+ -| Amazon Redshift (via psycopg2) | sqlalchemy-redshift_ | -+-----------------------------------------+---------------------------------------+ -| Apache Drill | sqlalchemy-drill_ | -+-----------------------------------------+---------------------------------------+ -| Apache Druid | pydruid_ | -+-----------------------------------------+---------------------------------------+ -| Apache Hive and Presto | PyHive_ | -+-----------------------------------------+---------------------------------------+ -| Apache Solr | sqlalchemy-solr_ | -+-----------------------------------------+---------------------------------------+ -| CockroachDB | sqlalchemy-cockroachdb_ | -+-----------------------------------------+---------------------------------------+ -| CrateDB [1]_ | crate-python_ | -+-----------------------------------------+---------------------------------------+ -| EXASolution | sqlalchemy_exasol_ | -+-----------------------------------------+---------------------------------------+ -| Elasticsearch (readonly) | elasticsearch-dbapi_ | -+-----------------------------------------+---------------------------------------+ -| Firebird | sqlalchemy-firebird_ | -+-----------------------------------------+---------------------------------------+ -| Firebolt | firebolt-sqlalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Google BigQuery | pybigquery_ | -+-----------------------------------------+---------------------------------------+ -| Google Sheets | gsheets_ | -+-----------------------------------------+---------------------------------------+ -| IBM DB2 and Informix | ibm-db-sa_ | -+-----------------------------------------+---------------------------------------+ -| IBM Netezza Performance Server [1]_ | nzalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft Access (via pyodbc) | sqlalchemy-access_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft SQL Server (via python-tds) | sqlalchemy-tds_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft SQL Server (via turbodbc) | sqlalchemy-turbodbc_ | -+-----------------------------------------+---------------------------------------+ -| MonetDB [1]_ | sqlalchemy-monetdb_ | -+-----------------------------------------+---------------------------------------+ -| SAP ASE (fork of former Sybase dialect) | sqlalchemy-sybase_ | -+-----------------------------------------+---------------------------------------+ -| SAP Hana [1]_ | sqlalchemy-hana_ | -+-----------------------------------------+---------------------------------------+ -| SAP Sybase SQL Anywhere | sqlalchemy-sqlany_ | 
-+-----------------------------------------+---------------------------------------+ -| Snowflake | snowflake-sqlalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Teradata Vantage | teradatasqlalchemy_ | -+-----------------------------------------+---------------------------------------+ ++------------------------------------------------+---------------------------------------+ +| Database | Dialect | ++================================================+=======================================+ +| Actian Avalanche, Vector, Actian X, and Ingres | sqlalchemy-ingres_ | ++------------------------------------------------+---------------------------------------+ +| Amazon Athena | pyathena_ | ++------------------------------------------------+---------------------------------------+ +| Amazon Redshift (via psycopg2) | sqlalchemy-redshift_ | ++------------------------------------------------+---------------------------------------+ +| Apache Drill | sqlalchemy-drill_ | ++------------------------------------------------+---------------------------------------+ +| Apache Druid | pydruid_ | ++------------------------------------------------+---------------------------------------+ +| Apache Hive and Presto | PyHive_ | ++------------------------------------------------+---------------------------------------+ +| Apache Solr | sqlalchemy-solr_ | ++------------------------------------------------+---------------------------------------+ +| CockroachDB | sqlalchemy-cockroachdb_ | ++------------------------------------------------+---------------------------------------+ +| CrateDB | sqlalchemy-cratedb_ | ++------------------------------------------------+---------------------------------------+ +| EXASolution | sqlalchemy_exasol_ | ++------------------------------------------------+---------------------------------------+ +| Elasticsearch (readonly) | elasticsearch-dbapi_ | ++------------------------------------------------+---------------------------------------+ +| Firebird | sqlalchemy-firebird_ | ++------------------------------------------------+---------------------------------------+ +| Firebolt | firebolt-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Google BigQuery | sqlalchemy-bigquery_ | ++------------------------------------------------+---------------------------------------+ +| Google Sheets | gsheets_ | ++------------------------------------------------+---------------------------------------+ +| IBM DB2 and Informix | ibm-db-sa_ | ++------------------------------------------------+---------------------------------------+ +| IBM Netezza Performance Server [1]_ | nzalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft Access (via pyodbc) | sqlalchemy-access_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft SQL Server (via python-tds) | sqlalchemy-tds_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft SQL Server (via turbodbc) | sqlalchemy-turbodbc_ | ++------------------------------------------------+---------------------------------------+ +| MonetDB | sqlalchemy-monetdb_ | ++------------------------------------------------+---------------------------------------+ +| OpenGauss | openGauss-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ +| SAP ASE 
(fork of former Sybase dialect) | sqlalchemy-sybase_ | ++------------------------------------------------+---------------------------------------+ +| SAP Hana [1]_ | sqlalchemy-hana_ | ++------------------------------------------------+---------------------------------------+ +| SAP Sybase SQL Anywhere | sqlalchemy-sqlany_ | ++------------------------------------------------+---------------------------------------+ +| Snowflake | snowflake-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Teradata Vantage | teradatasqlalchemy_ | ++------------------------------------------------+---------------------------------------+ .. [1] Supports version 1.3.x only at the moment. +.. _openGauss-sqlalchemy: https://gitee.com/opengauss/openGauss-sqlalchemy +.. _sqlalchemy-ingres: https://github.com/clach04/ingres_sa_dialect .. _nzalchemy: https://pypi.org/project/nzalchemy/ .. _ibm-db-sa: https://pypi.org/project/ibm-db-sa/ .. _PyHive: https://github.com/dropbox/PyHive#sqlalchemy .. _teradatasqlalchemy: https://pypi.org/project/teradatasqlalchemy/ -.. _pybigquery: https://github.com/mxmzdlv/pybigquery/ +.. _sqlalchemy-bigquery: https://pypi.org/project/sqlalchemy-bigquery/ .. _sqlalchemy-redshift: https://pypi.org/project/sqlalchemy-redshift .. _sqlalchemy-drill: https://github.com/JohnOmernik/sqlalchemy-drill .. _sqlalchemy-hana: https://github.com/SAP/sqlalchemy-hana .. _sqlalchemy-solr: https://github.com/aadel/sqlalchemy-solr .. _sqlalchemy_exasol: https://github.com/blue-yonder/sqlalchemy_exasol .. _sqlalchemy-sqlany: https://github.com/sqlanywhere/sqlalchemy-sqlany -.. _sqlalchemy-monetdb: https://github.com/gijzelaerr/sqlalchemy-monetdb +.. _sqlalchemy-monetdb: https://github.com/MonetDB/sqlalchemy-monetdb .. _snowflake-sqlalchemy: https://github.com/snowflakedb/snowflake-sqlalchemy .. _sqlalchemy-tds: https://github.com/m32/sqlalchemy-tds -.. _crate-python: https://github.com/crate/crate-python +.. _sqlalchemy-cratedb: https://github.com/crate/sqlalchemy-cratedb .. _sqlalchemy-access: https://pypi.org/project/sqlalchemy-access/ .. _elasticsearch-dbapi: https://github.com/preset-io/elasticsearch-dbapi/ .. _pydruid: https://github.com/druid-io/pydruid @@ -154,3 +161,4 @@ Currently maintained external dialect projects for SQLAlchemy include: .. _sqlalchemy-turbodbc: https://pypi.org/project/sqlalchemy-turbodbc/ .. _sqlalchemy-sybase: https://pypi.org/project/sqlalchemy-sybase/ .. _firebolt-sqlalchemy: https://pypi.org/project/firebolt-sqlalchemy/ +.. 
_pyathena: https://github.com/laughingman7743/PyAthena/ diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index 2bad5c9e2c2..6fd573c8d3d 100644 --- a/doc/build/dialects/mssql.rst +++ b/doc/build/dialects/mssql.rst @@ -19,16 +19,46 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.mssql import \ - BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ - DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \ - NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ - SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ - TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR + from sqlalchemy.dialects.mssql import ( + BIGINT, + BINARY, + BIT, + CHAR, + DATE, + DATETIME, + DATETIME2, + DATETIMEOFFSET, + DECIMAL, + FLOAT, + IMAGE, + INTEGER, + JSON, + MONEY, + NCHAR, + NTEXT, + NUMERIC, + NVARCHAR, + REAL, + SMALLDATETIME, + SMALLINT, + SMALLMONEY, + SQL_VARIANT, + TEXT, + TIME, + TIMESTAMP, + TINYINT, + UNIQUEIDENTIFIER, + VARBINARY, + VARCHAR, + ) Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: +.. note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.mssql .. autoclass:: BIT @@ -37,6 +67,7 @@ construction arguments, are as follows: .. autoclass:: CHAR :members: __init__ + :noindex: .. autoclass:: DATETIME2 @@ -61,6 +92,7 @@ construction arguments, are as follows: .. autoclass:: NCHAR :members: __init__ + :noindex: .. autoclass:: NTEXT @@ -69,7 +101,7 @@ construction arguments, are as follows: .. autoclass:: NVARCHAR :members: __init__ - + :noindex: .. autoclass:: REAL :members: __init__ @@ -91,7 +123,7 @@ construction arguments, are as follows: .. autoclass:: TEXT :members: __init__ - + :noindex: .. autoclass:: TIME :members: __init__ @@ -108,8 +140,13 @@ construction arguments, are as follows: :members: __init__ +.. autoclass:: VARBINARY + :members: __init__ + :noindex: + .. autoclass:: VARCHAR :members: __init__ + :noindex: .. autoclass:: XML diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index 9eb7f5a7405..52dd45cfac2 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -19,16 +19,50 @@ MySQL Data Types As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: - from sqlalchemy.dialects.mysql import \ - BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ - LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ - TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR + from sqlalchemy.dialects.mysql import ( + BIGINT, + BINARY, + BIT, + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + DECIMAL, + DOUBLE, + ENUM, + FLOAT, + INTEGER, + LONGBLOB, + LONGTEXT, + MEDIUMBLOB, + MEDIUMINT, + MEDIUMTEXT, + NCHAR, + NUMERIC, + NVARCHAR, + REAL, + SET, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + TINYBLOB, + TINYINT, + TINYTEXT, + VARBINARY, + VARCHAR, + YEAR, + ) Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: +.. 
note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.mysql .. autoclass:: BIGINT @@ -36,6 +70,7 @@ construction arguments, are as follows: .. autoclass:: BINARY + :noindex: :members: __init__ @@ -45,10 +80,12 @@ construction arguments, are as follows: .. autoclass:: BLOB :members: __init__ + :noindex: .. autoclass:: BOOLEAN :members: __init__ + :noindex: .. autoclass:: CHAR @@ -57,6 +94,7 @@ construction arguments, are as follows: .. autoclass:: DATE :members: __init__ + :noindex: .. autoclass:: DATETIME @@ -69,7 +107,7 @@ construction arguments, are as follows: .. autoclass:: DOUBLE :members: __init__ - + :noindex: .. autoclass:: ENUM :members: __init__ @@ -131,6 +169,7 @@ construction arguments, are as follows: .. autoclass:: TEXT :members: __init__ + :noindex: .. autoclass:: TIME @@ -155,6 +194,7 @@ construction arguments, are as follows: .. autoclass:: VARBINARY :members: __init__ + :noindex: .. autoclass:: VARCHAR @@ -184,6 +224,11 @@ PyMySQL .. automodule:: sqlalchemy.dialects.mysql.pymysql +MariaDB-Connector +------------------ + +.. automodule:: sqlalchemy.dialects.mysql.mariadbconnector + MySQL-Connector --------------- diff --git a/doc/build/dialects/oracle.rst b/doc/build/dialects/oracle.rst index 988a698e827..d992a2f83b0 100644 --- a/doc/build/dialects/oracle.rst +++ b/doc/build/dialects/oracle.rst @@ -12,11 +12,26 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.oracle import \ - BFILE, BLOB, CHAR, CLOB, DATE, \ - DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \ - NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ - VARCHAR2 + from sqlalchemy.dialects.oracle import ( + BFILE, + BLOB, + CHAR, + CLOB, + DATE, + DOUBLE_PRECISION, + FLOAT, + INTERVAL, + LONG, + NCLOB, + NCHAR, + NUMBER, + NVARCHAR, + NVARCHAR2, + RAW, + TIMESTAMP, + VARCHAR, + VARCHAR2, + ) .. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes exported by the Oracle dialect. @@ -55,6 +70,7 @@ construction arguments, are as follows: .. autoclass:: RAW :members: __init__ +.. 
_cx_oracle: cx_Oracle --------- diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index 34cdabc1039..c591ab00066 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -12,16 +12,52 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with PostgreSQL are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.postgresql import \ - ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ - DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ - INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \ - TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ - DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR + from sqlalchemy.dialects.postgresql import ( + ARRAY, + BIGINT, + BIT, + BOOLEAN, + BYTEA, + CHAR, + CIDR, + DATE, + DOUBLE_PRECISION, + ENUM, + FLOAT, + HSTORE, + INET, + INTEGER, + INTERVAL, + JSON, + JSONB, + MACADDR, + MACADDR8, + MONEY, + NUMERIC, + OID, + REAL, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + UUID, + VARCHAR, + INT4RANGE, + INT8RANGE, + NUMRANGE, + DATERANGE, + TSRANGE, + TSTZRANGE, + TSVECTOR, + ) Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: +.. note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.postgresql .. autoclass:: aggregate_order_by @@ -47,6 +83,7 @@ construction arguments, are as follows: .. autoclass:: DOUBLE_PRECISION :members: __init__ + :noindex: .. autoclass:: ENUM @@ -74,15 +111,24 @@ construction arguments, are as follows: .. autoclass:: MACADDR +.. autoclass:: MACADDR8 + .. autoclass:: MONEY .. autoclass:: OID .. autoclass:: REAL :members: __init__ + :noindex: .. autoclass:: REGCLASS +.. autoclass:: TIMESTAMP + :members: __init__ + +.. autoclass:: TIME + :members: __init__ + .. autoclass:: TSVECTOR .. autoclass:: UUID @@ -167,16 +213,15 @@ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE + class RoomBooking(Base): - __tablename__ = 'room_booking' + __tablename__ = "room_booking" room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) - __table_args__ = ( - ExcludeConstraint(('room', '='), ('during', '&&')), - ) + __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),) PostgreSQL DML Constructs ------------------------- diff --git a/doc/build/dialects/sqlite.rst b/doc/build/dialects/sqlite.rst index 6d40daf5fe2..d25301fa53f 100644 --- a/doc/build/dialects/sqlite.rst +++ b/doc/build/dialects/sqlite.rst @@ -12,10 +12,23 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.sqlite import \ - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ - INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \ - VARCHAR + from sqlalchemy.dialects.sqlite import ( + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + FLOAT, + INTEGER, + NUMERIC, + JSON, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + VARCHAR, + ) .. 
module:: sqlalchemy.dialects.sqlite diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 2b163ec2692..f270ee3202b 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -33,351 +33,6 @@ Within this section, the goal is to try to provide background on some of the most common runtime errors as well as programming time errors. -Legacy API Features -=================== - -.. the reason we need this section here distinct from the migration notes - is because this is actually an ArgumentError that's raised by select() - when the "legacy" and "future" mode styles are used together. - -.. _error_c9ae: - -select() construct created in "legacy" mode; keyword arguments, etc. --------------------------------------------------------------------- - -The :func:`_expression.select` construct has been updated as of SQLAlchemy -1.4 to support the newer calling style that will be standard in -:ref:`SQLAlchemy 2.0 `. For backwards compatibility in the -interim, the construct accepts arguments in both the "legacy" style as well -as the "new" style. - -The "new" style features that column and table expressions are passed -positionally to the :func:`_expression.select` construct only; any other -modifiers to the object must be passed using subsequent method chaining:: - - # this is the way to do it going forward - stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid) - -For comparison, a :func:`_expression.select` in legacy forms of SQLAlchemy, -before methods like :meth:`.Select.where` were even added, would like:: - - # this is how it was documented in original SQLAlchemy versions - # many years ago - stmt = select([table1.c.myid], whereclause=table1.c.myid == table2.c.otherid) - -Or even that the "whereclause" would be passed positionally:: - - # this is also how it was documented in original SQLAlchemy versions - # many years ago - stmt = select([table1.c.myid], table1.c.myid == table2.c.otherid) - -For some years now, the additional "whereclause" and other arguments that are -accepted have been removed from most narrative documentation, leading to a -calling style that is most familiar as the list of column arguments passed -as a list, but no further arguments:: - - # this is how it's been documented since around version 1.0 or so - stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid) - -The document at :ref:`migration_20_5284` describes this change in terms -of :ref:`2.0 Migration `. - -.. seealso:: - - :ref:`migration_20_5284` - - :ref:`migration_20_toplevel` - - - -.. _error_b8d9: - -The in SQLAlchemy 2.0 will no longer --------------------------------------------------------------------------------------------- - -SQLAlchemy 2.0 is expected to be a major shift for a wide variety of key -SQLAlchemy usage patterns in both the Core and ORM components. The goal -of this release is to make a slight readjustment in some of the most -fundamental assumptions of SQLAlchemy since its early beginnings, and -to deliver a newly streamlined usage model that is hoped to be significantly -more minimalist and consistent between the Core and ORM components, as well as -more capable. - -Introduced at :ref:`migration_20_toplevel`, the SQLAlchemy 2.0 project includes -a comprehensive future compatibility system that is to be integrated into the -1.4 series of SQLAlchemy, such that applications will have a clear, -unambiguous, and incremental upgrade path in order to migrate applications to -being fully 2.0 compatible. 
The :class:`.exc.RemovedIn20Warning` deprecation -warning is at the base of this system to provide guidance on what behaviors in -an existing codebase will need to be modified. An overview of how to enable -this warning is at :ref:`deprecation_20_mode`. - -.. seealso:: - - :ref:`migration_20_toplevel` - An overview of the upgrade process from - the 1.x series, as well as the current goals and progress of SQLAlchemy - 2.0. - - - :ref:`deprecation_20_mode` - specific guidelines on how to use - "2.0 deprecations mode" in SQLAlchemy 1.4. - -.. _error_c9bf: - -A bind was located via legacy bound metadata, but since future=True is set on this Session, this bind is ignored. -------------------------------------------------------------------------------------------------------------------- - -The concept of "bound metadata" is being removed in SQLAlchemy 2.0. This -refers to the :paramref:`_schema.MetaData.bind` parameter on the -:class:`_schema.MetaData` object that in turn allows objects like the ORM -:class:`_orm.Session` to associate a particular mapped class with an -:class:`_orm.Engine`. In SQLAlchemy 2.0, the :class:`_orm.Session` must be -linked to each :class:`_orm.Engine` directly. That is, instead of instantiating -the :class:`_orm.Session` or -:class:`_orm.sessionmaker` without any arguments, and associating the -:class:`_engine.Engine` with the :class:`_schema.MetaData`:: - - engine = create_engine("sqlite://") - Session = sessionmaker() - metadata_obj = MetaData(bind=engine) - Base = declarative_base(metadata=metadata_obj) - - class MyClass(Base): - # ... - - - session = Session() - session.add(MyClass()) - session.commit() - -The :class:`_engine.Engine` must instead be associated directly with the -:class:`_orm.sessionmaker` or :class:`_orm.Session`. The -:class:`_schema.MetaData` object should no longer be associated with any -engine:: - - - engine = create_engine("sqlite://") - Session = sessionmaker(engine) - Base = declarative_base() - - class MyClass(Base): - # ... - - - session = Session() - session.add(MyClass()) - session.commit() - -In SQLAlchemy 1.4, this :term:`2.0 style` behavior is enabled when the -:paramref:`_orm.Session.future` flag is set on :class:`_orm.sessionmaker` -or :class:`_orm.Session`. - -.. _error_s9r1: - -Object is being merged into a Session along the backref cascade ---------------------------------------------------------------- - -This message refers to the "backref cascade" behavior of SQLAlchemy, -which is described at :ref:`backref_cascade`. This refers to the action of -an object being added into a :class:`_orm.Session` as a result of another -object that's already present in that session being associated with it. -As this behavior has been shown to be more confusing than helpful, -the :paramref:`_orm.relationship.cascade_backrefs` and -:paramref:`_orm.backref.cascade_backrefs` parameters were added, which can -be set to ``False`` to disable it, and in SQLAlchemy 2.0 the "cascade backrefs" -behavior will be disabled completely. - -To set :paramref:`_orm.relationship.cascade_backrefs` to ``False`` on a -backref that is currently configured using the -:paramref:`_orm.relationship.backref` string parameter, the backref must -be declared using the :func:`_orm.backref` function first so that the -:paramref:`_orm.backref.cascade_backrefs` parameter may be passed. 
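For illustration, a minimal sketch of such a declaration as it might look under SQLAlchemy 1.4, using a hypothetical ``User`` / ``Address`` mapping invented for this example::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.orm import backref, declarative_base, relationship

    Base = declarative_base()


    class User(Base):
        __tablename__ = "user_account"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))


    class Address(Base):
        __tablename__ = "address"
        id = Column(Integer, primary_key=True)
        user_id = Column(ForeignKey("user_account.id"))

        # declare the backref explicitly with backref() so that
        # cascade_backrefs=False can be applied to the reverse
        # "User.addresses" side as well as to this side
        user = relationship(
            "User",
            backref=backref("addresses", cascade_backrefs=False),
            cascade_backrefs=False,
        )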
- -Alternatively, the entire "cascade backrefs" behavior can be turned off -across the board by using the :class:`_orm.Session` in "future" mode, -by passing ``True`` for the :paramref:`_orm.Session.future` parameter. - -.. seealso:: - - :ref:`backref_cascade` - complete description of the cascade backrefs - behavior - - :ref:`change_5150` - background on the change for SQLAlchemy 2.0. - -.. _error_xaj1: - -An alias is being generated automatically for raw clauseelement ----------------------------------------------------------------- - -.. versionadded:: 1.4.26 - -This deprecation warning refers to a very old and likely not well known pattern -that applies to the legacy :meth:`_orm.Query.join` method as well as the -:term:`2.0 style` :meth:`_sql.Select.join` method, where a join can be stated -in terms of a :func:`_orm.relationship` but the target is the -:class:`_schema.Table` or other Core selectable to which the class is mapped, -rather than an ORM entity such as a mapped class or :func:`_orm.aliased` -construct:: - - a1 = Address.__table__ - - q = s.query(User).\ - join(a1, User.addresses).\ - filter(Address.email_address == 'ed@foo.com').all() - - -The above pattern also allows an arbitrary selectable, such as -a Core :class:`_sql.Join` or :class:`_sql.Alias` object, -however there is no automatic adaptation of this element, meaning the -Core element would need to be referred towards directly:: - - a1 = Address.__table__.alias() - - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.c.email_address == 'ed@foo.com').all() - -The correct way to specify a join target is always by using the mapped -class itself or an :class:`_orm.aliased` object, in the latter case using the -:meth:`_orm.PropComparator.of_type` modifier to set up an alias:: - - # normal join to relationship entity - q = s.query(User).\ - join(User.addresses).\ - filter(Address.email_address == 'ed@foo.com') - - # name Address target explicitly, not necessary but legal - q = s.query(User).\ - join(Address, User.addresses).\ - filter(Address.email_address == 'ed@foo.com') - -Join to an alias:: - - from sqlalchemy.orm import aliased - - a1 = aliased(Address) - - # of_type() form; recommended - q = s.query(User).\ - join(User.addresses.of_type(a1)).\ - filter(a1.email_address == 'ed@foo.com') - - # target, onclause form - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.email_address == 'ed@foo.com') - - -.. _error_xaj2: - -An alias is being generated automatically due to overlapping tables -------------------------------------------------------------------- - -.. versionadded:: 1.4.26 - -This warning is typically generated when querying using the -:meth:`_sql.Select.join` method or the legacy :meth:`_orm.Query.join` method -with mappings that involve joined table inheritance. The issue is that when -joining between two joined inheritance models that share a common base table, a -proper SQL JOIN between the two entities cannot be formed without applying an -alias to one side or the other; SQLAlchemy applies an alias to the right side -of the join. 
For example given a joined inheritance mapping as:: - - class Employee(Base): - __tablename__ = 'employee' - id = Column(Integer, primary_key=True) - manager_id = Column(ForeignKey("manager.id")) - name = Column(String(50)) - type = Column(String(50)) - - reports_to = relationship("Manager", foreign_keys=manager_id) - - __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, - } - - class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'inherit_condition': id == Employee.id - } - -The above mapping includes a relationship between the ``Employee`` and -``Manager`` classes. Since both classes make use of the "employee" database -table, from a SQL perspective this is a -:ref:`self referential relationship `. If we wanted to -query from both the ``Employee`` and ``Manager`` models using a join, at the -SQL level the "employee" table needs to be included twice in the query, which -means it must be aliased. When we create such a join using the SQLAlchemy -ORM, we get SQL that looks like the following: - -.. sourcecode:: pycon+sql - - >>> stmt = select(Employee, Manager).join(Employee.reports_to) - >>> print(stmt) - {opensql}SELECT employee.id, employee.manager_id, employee.name, - employee.type, manager_1.id AS id_1, employee_1.id AS id_2, - employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, - employee_1.type AS type_1 - FROM employee JOIN - (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) - ON manager_1.id = employee.manager_id - -Above, the SQL selects FROM the ``employee`` table, representing the -``Employee`` entity in the query. It then joins to a right-nested join of -``employee AS employee_1 JOIN manager AS manager_1``, where the ``employee`` -table is stated again, except as an anonymous alias ``employee_1``. This is the -"automatic generation of an alias" that the warning message refers towards. - -When SQLAlchemy loads ORM rows that each contain an ``Employee`` and a -``Manager`` object, the ORM must adapt rows from what above is the -``employee_1`` and ``manager_1`` table aliases into those of the un-aliased -``Manager`` class. This process is internally complex and does not accommodate -for all API features, notably when trying to use eager loading features such as -:func:`_orm.contains_eager` with more deeply nested queries than are shown -here. As the pattern is unreliable for more complex scenarios and involves -implicit decisionmaking that is difficult to anticipate and follow, -the warning is emitted and this pattern may be considered a legacy feature. The -better way to write this query is to use the same patterns that apply to any -other self-referential relationship, which is to use the :func:`_orm.aliased` -construct explicitly. For joined-inheritance and other join-oriented mappings, -it is usually desirable to add the use of the :paramref:`_orm.aliased.flat` -parameter, which will allow a JOIN of two or more tables to be aliased by -applying an alias to the individual tables within the join, rather than -embedding the join into a new subquery: - -.. 
sourcecode:: pycon+sql - - >>> from sqlalchemy.orm import aliased - >>> manager_alias = aliased(Manager, flat=True) - >>> stmt = select(Employee, manager_alias).join(Employee.reports_to.of_type(manager_alias)) - >>> print(stmt) - {opensql}SELECT employee.id, employee.manager_id, employee.name, - employee.type, manager_1.id AS id_1, employee_1.id AS id_2, - employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, - employee_1.type AS type_1 - FROM employee JOIN - (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) - ON manager_1.id = employee.manager_id - -If we then wanted to use :func:`_orm.contains_eager` to populate the -``reports_to`` attribute, we refer to the alias:: - - >>> stmt =select(Employee).join( - ... Employee.reports_to.of_type(manager_alias) - ... ).options( - ... contains_eager(Employee.reports_to.of_type(manager_alias)) - ... ) - -Without using the explicit :func:`_orm.aliased` object, in some more nested -cases the :func:`_orm.contains_eager` option does not have enough context to -know where to get its data from, in the case that the ORM is "auto-aliasing" -in a very nested context. Therefore it's best not to rely on this feature -and instead keep the SQL construction as explicit as possible. Connections and Transactions ============================ @@ -547,56 +202,6 @@ method. When a connection is invalidated, any :class:`_engine.Transaction` that was in progress is now in an invalid state, and must be explicitly rolled back in order to remove it from the :class:`_engine.Connection`. -.. _error_8s2a: - -This connection is on an inactive transaction. Please rollback() fully before proceeding ------------------------------------------------------------------------------------------- - -This error condition was added to SQLAlchemy as of version 1.4. The error -refers to the state where a :class:`_engine.Connection` is placed into a -transaction using a method like :meth:`_engine.Connection.begin`, and then a -further "marker" transaction is created within that scope; the "marker" -transaction is then rolled back using :meth:`.Transaction.rollback` or closed -using :meth:`.Transaction.close`, however the outer transaction is still -present in an "inactive" state and must be rolled back. - -The pattern looks like:: - - engine = create_engine(...) - - connection = engine.connect() - transaction1 = connection.begin() - - # this is a "sub" or "marker" transaction, a logical nesting - # structure based on "real" transaction transaction1 - transaction2 = connection.begin() - transaction2.rollback() - - # transaction1 is still present and needs explicit rollback, - # so this will raise - connection.execute(text("select 1")) - -Above, ``transaction2`` is a "marker" transaction, which indicates a logical -nesting of transactions within an outer one; while the inner transaction -can roll back the whole transaction via its rollback() method, its commit() -method has no effect except to close the scope of the "marker" transaction -itself. The call to ``transaction2.rollback()`` has the effect of -**deactivating** transaction1 which means it is essentially rolled back -at the database level, however is still present in order to accommodate -a consistent nesting pattern of transactions. - -The correct resolution is to ensure the outer transaction is also -rolled back:: - - transaction1.rollback() - -This pattern is not commonly used in Core. 
Within the ORM, a similar issue can -occur which is the product of the ORM's "logical" transaction structure; this -is described in the FAQ entry at :ref:`faq_session_rollback`. - -The "subtransaction" pattern is to be removed in SQLAlchemy 2.0 so that this -particular programming pattern will no longer be available and this -error message will no longer occur in Core. .. _error_dbapi: @@ -727,6 +332,95 @@ the database driver (DBAPI), not SQLAlchemy itself. SQL Expression Language ======================= +.. _error_cprf: +.. _caching_caveats: + +Object will not produce a cache key, Performance Implications +-------------------------------------------------------------- + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +Core and ORM SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement, allowing the +relatively expensive string compilation process to be skipped when another +structurally equivalent construct is next used. This system +relies upon functionality that is implemented for all SQL constructs, including +objects such as :class:`_schema.Column`, +:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a +**cache key** which fully represents their state to the degree that it affects +the SQL compilation process. + +If the warnings in question refer to widely used objects such as +:class:`_schema.Column` objects, and are shown to be affecting the majority of +SQL constructs being emitted (using the estimation techniques described at +:ref:`sql_caching_logging`) such that caching is generally not enabled for an +application, this will negatively impact performance and can in some cases +effectively produce a **performance degradation** compared to prior SQLAlchemy +versions. The FAQ at :ref:`faq_new_caching` covers this in additional detail. + +Caching disables itself if there's any doubt +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Caching relies on being able to generate a cache key that accurately represents +the **complete structure** of a statement in a **consistent** fashion. If a particular +SQL construct (or type) does not have the appropriate directives in place which +allow it to generate a proper cache key, then caching cannot be safely enabled: + +* The cache key must represent the **complete structure**: If the usage of two + separate instances of that construct may result in different SQL being + rendered, caching the SQL against the first instance of the element using a + cache key that does not capture the distinct differences between the first and + second elements will result in incorrect SQL being cached and rendered for the + second instance. + +* The cache key must be **consistent**: If a construct represents state that + changes every time, such as a literal value, producing unique SQL for every + instance of it, this construct is also not safe to cache, as repeated use of + the construct will quickly fill up the statement cache with unique SQL strings + that will likely not be used again, defeating the purpose of the cache. + +For the above two reasons, SQLAlchemy's caching system is **extremely +conservative** about deciding to cache the SQL corresponding to an object. + +Assertion attributes for caching +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The warning is emitted based on the criteria below. For further detail on +each, see the section :ref:`faq_new_caching`. + +* The :class:`.Dialect` itself (i.e. 
the module that is specified by the + first part of the URL we pass to :func:`_sa.create_engine`, like + ``postgresql+psycopg2://``), must indicate it has been reviewed and tested + to support caching correctly, which is indicated by the + :attr:`.Dialect.supports_statement_cache` attribute being set to ``True``. + When using third party dialects, consult with the maintainers of the dialect + so that they may follow the :ref:`steps to ensure caching may be enabled + ` in their dialect and publish a new release. + +* Third party or user defined types that inherit from either + :class:`.TypeDecorator` or :class:`.UserDefinedType` must include the + :attr:`.ExternalType.cache_ok` attribute in their definition, including for + all derived subclasses, following the guidelines described in the docstring + for :attr:`.ExternalType.cache_ok`. As before, if these datatypes are + imported from third party libraries, consult with the maintainers of that + library so that they may provide the necessary changes to their library and + publish a new release. + +* Third party or user defined SQL constructs that subclass from classes such + as :class:`.ClauseElement`, :class:`_schema.Column`, :class:`_dml.Insert` + etc, including simple subclasses as well as those which are designed to + work with the :ref:`sqlalchemy.ext.compiler_toplevel`, should normally + include the :attr:`.HasCacheKey.inherit_cache` attribute set to ``True`` + or ``False`` based on the design of the construct, following the guidelines + described at :ref:`compilerext_caching`. + +.. seealso:: + + :ref:`sql_caching_logging` - background on observing cache behavior + and efficiency + + :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section + .. _error_l7de: @@ -747,7 +441,7 @@ Normally, a Core SQL construct or ORM :class:`_query.Query` object can be string directly, such as when we use ``print()``:: >>> from sqlalchemy import column - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 When the above SQL expression is stringified, the :class:`.StrSQLCompiler` @@ -761,11 +455,9 @@ to turn into a string, such as the PostgreSQL >>> from sqlalchemy.dialects.postgresql import insert >>> from sqlalchemy import table, column - >>> my_table = table('my_table', column('x'), column('y')) - >>> insert_stmt = insert(my_table).values(x='foo') - >>> insert_stmt = insert_stmt.on_conflict_do_nothing( - ... index_elements=['y'] - ... ) + >>> my_table = table("my_table", column("x"), column("y")) + >>> insert_stmt = insert(my_table).values(x="foo") + >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"]) >>> print(insert_stmt) Traceback (most recent call last): @@ -807,14 +499,12 @@ This often occurs when attempting to use a :func:`.column_property` or declarative such as:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop > 5), - ) + __table_args__ = (CheckConstraint(cprop > 5),) Above, the ``cprop`` attribute is used inline before it has been mapped, however this ``cprop`` attribute is not a :class:`_schema.Column`, @@ -833,56 +523,12 @@ The solution is to access the :class:`_schema.Column` directly using the :attr:`.ColumnProperty.expression` attribute:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop.expression > 5), - ) - -.. 
_error_2afi: - -This Compiled object is not bound to any Engine or Connection -------------------------------------------------------------- - -This error refers to the concept of "bound metadata", described at -:ref:`dbengine_implicit`. The issue occurs when one invokes the -:meth:`.Executable.execute` method directly off of a Core expression object -that is not associated with any :class:`_engine.Engine`:: - - metadata_obj = MetaData() - table = Table('t', metadata_obj, Column('q', Integer)) - - stmt = select(table) - result = stmt.execute() # <--- raises - -What the logic is expecting is that the :class:`_schema.MetaData` object has -been **bound** to a :class:`_engine.Engine`:: - - engine = create_engine("mysql+pymysql://user:pass@host/db") - metadata_obj = MetaData(bind=engine) - -Where above, any statement that derives from a :class:`_schema.Table` which -in turn derives from that :class:`_schema.MetaData` will implicitly make use of -the given :class:`_engine.Engine` in order to invoke the statement. - -Note that the concept of bound metadata is a **legacy pattern** and in most -cases is **highly discouraged**. The best way to invoke the statement is -to pass it to the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: - - with engine.connect() as conn: - result = conn.execute(stmt) - -When using the ORM, a similar facility is available via the :class:`.Session`:: - - result = session.execute(stmt) - -.. seealso:: - - :ref:`dbengine_implicit` - + __table_args__ = (CheckConstraint(cprop.expression > 5),) .. _error_cd3x: @@ -893,7 +539,7 @@ This error occurs when a statement makes use of :func:`.bindparam` either implicitly or explicitly and does not provide a value when the statement is executed:: - stmt = select(table.c.column).where(table.c.id == bindparam('my_param')) + stmt = select(table.c.column).where(table.c.id == bindparam("my_param")) result = conn.execute(stmt) @@ -940,18 +586,17 @@ this error is generated:: Since "b" is required, pass it as ``None`` so that the INSERT may proceed:: e.execute( - t.insert(), [ + t.insert(), + [ {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": None, "c": 4}, {"a": 3, "b": 4, "c": 5}, - ] + ], ) .. seealso:: - :ref:`coretutorial_bind_param` - - :ref:`execute_multiple` + :ref:`tutorial_sending_parameters` .. _error_89ve: @@ -968,12 +613,7 @@ Core and the full rationale is discussed at :ref:`change_4617`. Given an example as:: m = MetaData() - t = Table( - 't', m, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer)) stmt = select(t) Above, ``stmt`` represents a SELECT statement. The error is produced when we want @@ -999,15 +639,195 @@ Beyond the above practical reasons, there are a lot of other SQLAlchemy-oriented reasons the change is being made. The correct form of the above two statements therefore requires that :meth:`_expression.SelectBase.subquery` is used:: - subq = stmt.subquery() + subq = stmt.subquery() + + new_stmt_1 = select(subq) + + new_stmt_2 = select(some_table).select_from(some_table.join(subq)) + +.. seealso:: + + :ref:`change_4617` + +.. _error_xaj1: + +An alias is being generated automatically for raw clauseelement +---------------------------------------------------------------- + +.. 
versionadded:: 1.4.26 + +This deprecation warning refers to a very old and likely not well known pattern +that applies to the legacy :meth:`_orm.Query.join` method as well as the +:term:`2.0 style` :meth:`_sql.Select.join` method, where a join can be stated +in terms of a :func:`_orm.relationship` but the target is the +:class:`_schema.Table` or other Core selectable to which the class is mapped, +rather than an ORM entity such as a mapped class or :func:`_orm.aliased` +construct:: + + a1 = Address.__table__ + + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(Address.email_address == "ed@foo.com") + .all() + ) + +The above pattern also allows an arbitrary selectable, such as +a Core :class:`_sql.Join` or :class:`_sql.Alias` object, +however there is no automatic adaptation of this element, meaning the +Core element would need to be referred towards directly:: + + a1 = Address.__table__.alias() + + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(a1.c.email_address == "ed@foo.com") + .all() + ) + +The correct way to specify a join target is always by using the mapped +class itself or an :class:`_orm.aliased` object, in the latter case using the +:meth:`_orm.PropComparator.of_type` modifier to set up an alias:: + + # normal join to relationship entity + q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com") + + # name Address target explicitly, not necessary but legal + q = ( + s.query(User) + .join(Address, User.addresses) + .filter(Address.email_address == "ed@foo.com") + ) + +Join to an alias:: + + from sqlalchemy.orm import aliased + + a1 = aliased(Address) + + # of_type() form; recommended + q = ( + s.query(User) + .join(User.addresses.of_type(a1)) + .filter(a1.email_address == "ed@foo.com") + ) + + # target, onclause form + q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com") + +.. _error_xaj2: + +An alias is being generated automatically due to overlapping tables +------------------------------------------------------------------- + +.. versionadded:: 1.4.26 + +This warning is typically generated when querying using the +:meth:`_sql.Select.join` method or the legacy :meth:`_orm.Query.join` method +with mappings that involve joined table inheritance. The issue is that when +joining between two joined inheritance models that share a common base table, a +proper SQL JOIN between the two entities cannot be formed without applying an +alias to one side or the other; SQLAlchemy applies an alias to the right side +of the join. For example given a joined inheritance mapping as:: + + class Employee(Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + manager_id = Column(ForeignKey("manager.id")) + name = Column(String(50)) + type = Column(String(50)) + + reports_to = relationship("Manager", foreign_keys=manager_id) + + __mapper_args__ = { + "polymorphic_identity": "employee", + "polymorphic_on": type, + } + + + class Manager(Employee): + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + + __mapper_args__ = { + "polymorphic_identity": "manager", + "inherit_condition": id == Employee.id, + } + +The above mapping includes a relationship between the ``Employee`` and +``Manager`` classes. Since both classes make use of the "employee" database +table, from a SQL perspective this is a +:ref:`self referential relationship `. 
If we wanted to +query from both the ``Employee`` and ``Manager`` models using a join, at the +SQL level the "employee" table needs to be included twice in the query, which +means it must be aliased. When we create such a join using the SQLAlchemy +ORM, we get SQL that looks like the following: + +.. sourcecode:: pycon+sql + + >>> stmt = select(Employee, Manager).join(Employee.reports_to) + >>> print(stmt) + {opensql}SELECT employee.id, employee.manager_id, employee.name, + employee.type, manager_1.id AS id_1, employee_1.id AS id_2, + employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, + employee_1.type AS type_1 + FROM employee JOIN + (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) + ON manager_1.id = employee.manager_id + +Above, the SQL selects FROM the ``employee`` table, representing the +``Employee`` entity in the query. It then joins to a right-nested join of +``employee AS employee_1 JOIN manager AS manager_1``, where the ``employee`` +table is stated again, except as an anonymous alias ``employee_1``. This is the +"automatic generation of an alias" that the warning message refers towards. + +When SQLAlchemy loads ORM rows that each contain an ``Employee`` and a +``Manager`` object, the ORM must adapt rows from what above is the +``employee_1`` and ``manager_1`` table aliases into those of the un-aliased +``Manager`` class. This process is internally complex and does not accommodate +for all API features, notably when trying to use eager loading features such as +:func:`_orm.contains_eager` with more deeply nested queries than are shown +here. As the pattern is unreliable for more complex scenarios and involves +implicit decisionmaking that is difficult to anticipate and follow, +the warning is emitted and this pattern may be considered a legacy feature. The +better way to write this query is to use the same patterns that apply to any +other self-referential relationship, which is to use the :func:`_orm.aliased` +construct explicitly. For joined-inheritance and other join-oriented mappings, +it is usually desirable to add the use of the :paramref:`_orm.aliased.flat` +parameter, which will allow a JOIN of two or more tables to be aliased by +applying an alias to the individual tables within the join, rather than +embedding the join into a new subquery: + +.. sourcecode:: pycon+sql - new_stmt_1 = select(subq) + >>> from sqlalchemy.orm import aliased + >>> manager_alias = aliased(Manager, flat=True) + >>> stmt = select(Employee, manager_alias).join(Employee.reports_to.of_type(manager_alias)) + >>> print(stmt) + {opensql}SELECT employee.id, employee.manager_id, employee.name, + employee.type, manager_1.id AS id_1, employee_1.id AS id_2, + employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, + employee_1.type AS type_1 + FROM employee JOIN + (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) + ON manager_1.id = employee.manager_id - new_stmt_2 = select(some_table).select_from(some_table.join(subq)) +If we then wanted to use :func:`_orm.contains_eager` to populate the +``reports_to`` attribute, we refer to the alias:: -.. seealso:: + >>> stmt = ( + ... select(Employee) + ... .join(Employee.reports_to.of_type(manager_alias)) + ... .options(contains_eager(Employee.reports_to.of_type(manager_alias))) + ... 
) - :ref:`change_4617` +Without using the explicit :func:`_orm.aliased` object, in some more nested +cases the :func:`_orm.contains_eager` option does not have enough context to +know where to get its data from, in the case that the ORM is "auto-aliasing" +in a very nested context. Therefore it's best not to rely on this feature +and instead keep the SQL construction as explicit as possible. Object Relational Mapping ========================= @@ -1043,9 +863,9 @@ method. The objects will then live on to be accessed further, very often within web applications where they are delivered to a server-side templating engine and are asked for further attributes which they cannot load. -Mitigation of this error is via two general techniques: +Mitigation of this error is via these techniques: -* **Don't close the session prematurely** - Often, applications will close +* **Try not to have detached objects; don't close the session prematurely** - Often, applications will close out a transaction before passing off related objects to some other system which then fails due to this error. Sometimes the transaction doesn't need to be closed so soon; an example is the web application closes out @@ -1057,20 +877,26 @@ Mitigation of this error is via two general techniques: :class:`.Session` can be held open until the lifespan of the objects are done, this is the best approach. -* **Load everything that's needed up front** - It is very often impossible to +* **Otherwise, load everything that's needed up front** - It is very often impossible to keep the transaction open, especially in more complex applications that need to pass objects off to other systems that can't run in the same context even though they're in the same process. In this case, the application - should try to make appropriate use of :term:`eager loading` to ensure + should prepare to deal with :term:`detached` objects, + and should try to make appropriate use of :term:`eager loading` to ensure that objects have what they need up front. - When using this approach, it is usually necessary that the - :paramref:`_orm.Session.expire_on_commit` parameter be set to ``False``, so - that after a :meth:`_orm.Session.commit` operation, the objects within the - session aren't :term:`expired`, which would incur a lazy load if their - attributes were subsequently accessed. Additionally, the - :meth:`_orm.Session.rollback` method unconditionally expires all contents in - the :class:`_orm.Session` and should also be avoided in non-error scenarios. +* **And importantly, set expire_on_commit to False** - When using detached objects, the + most common reason objects need to re-load data is because they were expired + from the last call to :meth:`_orm.Session.commit`. This expiration should + not be used when dealing with detached objects; so the + :paramref:`_orm.Session.expire_on_commit` parameter should be set to ``False``. + By preventing the objects from becoming expired outside of the transaction, + the data which was loaded will remain present and will not incur additional + lazy loads when that data is accessed. + + Note also that the :meth:`_orm.Session.rollback` method unconditionally expires + all contents in the :class:`_orm.Session` and should also be avoided in + non-error scenarios. ..
seealso:: @@ -1127,6 +953,7 @@ is set on a many-to-one or many-to-many relationship, such as:: # configuration step occurs a = relationship("A", back_populates="bs", cascade="all, delete-orphan") + configure_mappers() Above, the "delete-orphan" setting on ``B.a`` indicates the intent that @@ -1389,12 +1216,12 @@ items in each case:: "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)", backref="parent", - overlaps="c2, parent" + overlaps="c2, parent", ) c2 = relationship( "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)", - overlaps="c1, parent" + overlaps="c1, parent", ) @@ -1405,7 +1232,6 @@ items in each case:: flag = Column(Integer) - Above, the ORM will know that the overlap between ``Parent.c1``, ``Parent.c2`` and ``Child.parent`` is intentional. @@ -1456,8 +1282,7 @@ the ``prebuffer_rows`` execution option may be used as follows:: # result internally pre-fetches all objects result = sess.execute( - select(User).where(User.id == 7), - execution_options={"prebuffer_rows": True} + select(User).where(User.id == 7), execution_options={"prebuffer_rows": True} ) # context manager is closed, so session_obj above is closed, identity @@ -1562,3 +1387,269 @@ See :ref:`orm_exceptions_toplevel` for ORM exception classes. +Legacy Exceptions +================= + +Exceptions in this section are not generated by current SQLAlchemy +versions, however are provided here to suit exception message hyperlinks. + +.. _error_b8d9: + +The in SQLAlchemy 2.0 will no longer +-------------------------------------------------------------------------------------------- + +SQLAlchemy 2.0 represents a major shift for a wide variety of key +SQLAlchemy usage patterns in both the Core and ORM components. The goal +of the 2.0 release is to make a slight readjustment in some of the most +fundamental assumptions of SQLAlchemy since its early beginnings, and +to deliver a newly streamlined usage model that is hoped to be significantly +more minimalist and consistent between the Core and ORM components, as well as +more capable. + +Introduced at :ref:`migration_20_toplevel`, the SQLAlchemy 2.0 project includes +a comprehensive future compatibility system that's integrated into the +1.4 series of SQLAlchemy, such that applications will have a clear, +unambiguous, and incremental upgrade path in order to migrate applications to +being fully 2.0 compatible. The :class:`.exc.RemovedIn20Warning` deprecation +warning is at the base of this system to provide guidance on what behaviors in +an existing codebase will need to be modified. An overview of how to enable +this warning is at :ref:`deprecation_20_mode`. + +.. seealso:: + + :ref:`migration_20_toplevel` - An overview of the upgrade process from + the 1.x series, as well as the current goals and progress of SQLAlchemy + 2.0. + + + :ref:`deprecation_20_mode` - specific guidelines on how to use + "2.0 deprecations mode" in SQLAlchemy 1.4. + + +.. _error_s9r1: + +Object is being merged into a Session along the backref cascade +--------------------------------------------------------------- + +This message refers to the "backref cascade" behavior of SQLAlchemy, +removed in version 2.0. This refers to the action of +an object being added into a :class:`_orm.Session` as a result of another +object that's already present in that session being associated with it. 
+As this behavior has been shown to be more confusing than helpful, +the :paramref:`_orm.relationship.cascade_backrefs` and +:paramref:`_orm.backref.cascade_backrefs` parameters were added, which can +be set to ``False`` to disable it, and in SQLAlchemy 2.0 the "cascade backrefs" +behavior has been removed entirely. + +For older SQLAlchemy versions, to set +:paramref:`_orm.relationship.cascade_backrefs` to ``False`` on a backref that +is currently configured using the :paramref:`_orm.relationship.backref` string +parameter, the backref must be declared using the :func:`_orm.backref` function +first so that the :paramref:`_orm.backref.cascade_backrefs` parameter may be +passed. + +Alternatively, the entire "cascade backrefs" behavior can be turned off +across the board by using the :class:`_orm.Session` in "future" mode, +by passing ``True`` for the :paramref:`_orm.Session.future` parameter. + +.. seealso:: + + :ref:`change_5150` - background on the change for SQLAlchemy 2.0. + + +.. _error_c9ae: + +select() construct created in "legacy" mode; keyword arguments, etc. +-------------------------------------------------------------------- + +The :func:`_expression.select` construct has been updated as of SQLAlchemy +1.4 to support the newer calling style that is standard in +SQLAlchemy 2.0. For backwards compatibility within +the 1.4 series, the construct accepts arguments in both the "legacy" style as well +as the "new" style. + +The "new" style features that column and table expressions are passed +positionally to the :func:`_expression.select` construct only; any other +modifiers to the object must be passed using subsequent method chaining:: + + # this is the way to do it going forward + stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid) + +For comparison, a :func:`_expression.select` in legacy forms of SQLAlchemy, +before methods like :meth:`.Select.where` were even added, would look like:: + + # this is how it was documented in original SQLAlchemy versions + # many years ago + stmt = select([table1.c.myid], whereclause=table1.c.myid == table2.c.otherid) + +Or even that the "whereclause" would be passed positionally:: + + # this is also how it was documented in original SQLAlchemy versions + # many years ago + stmt = select([table1.c.myid], table1.c.myid == table2.c.otherid) + +For some years now, the additional "whereclause" and other arguments that are +accepted have been removed from most narrative documentation, leading to a +calling style that is most familiar as the list of column arguments passed +as a list, but no further arguments:: + + # this is how it's been documented since around version 1.0 or so + stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid) + +The document at :ref:`migration_20_5284` describes this change in terms +of :ref:`2.0 Migration `. + +.. seealso:: + + :ref:`migration_20_5284` + + :ref:`migration_20_toplevel` + +.. _error_c9bf: + +A bind was located via legacy bound metadata, but since future=True is set on this Session, this bind is ignored. +------------------------------------------------------------------------------------------------------------------- + +The concept of "bound metadata" is present up until SQLAlchemy 1.4; as +of SQLAlchemy 2.0 it's been removed. + +This error refers to the :paramref:`_schema.MetaData.bind` parameter on the +:class:`_schema.MetaData` object that in turn allows objects like the ORM +:class:`_orm.Session` to associate a particular mapped class with an +:class:`_orm.Engine`.
In SQLAlchemy 2.0, the :class:`_orm.Session` must be +linked to each :class:`_orm.Engine` directly. That is, instead of instantiating +the :class:`_orm.Session` or :class:`_orm.sessionmaker` without any arguments, +and associating the :class:`_engine.Engine` with the +:class:`_schema.MetaData`:: + + engine = create_engine("sqlite://") + Session = sessionmaker() + metadata_obj = MetaData(bind=engine) + Base = declarative_base(metadata=metadata_obj) + + class MyClass(Base): + # ... + + + session = Session() + session.add(MyClass()) + session.commit() + +The :class:`_engine.Engine` must instead be associated directly with the +:class:`_orm.sessionmaker` or :class:`_orm.Session`. The +:class:`_schema.MetaData` object should no longer be associated with any +engine:: + + + engine = create_engine("sqlite://") + Session = sessionmaker(engine) + Base = declarative_base() + + class MyClass(Base): + # ... + + + session = Session() + session.add(MyClass()) + session.commit() + +In SQLAlchemy 1.4, this :term:`2.0 style` behavior is enabled when the +:paramref:`_orm.Session.future` flag is set on :class:`_orm.sessionmaker` +or :class:`_orm.Session`. + + +.. _error_2afi: + +This Compiled object is not bound to any Engine or Connection +------------------------------------------------------------- + +This error refers to the concept of "bound metadata", which is a legacy +SQLAlchemy pattern present only in 1.x versions. The issue occurs when one invokes +the :meth:`.Executable.execute` method directly off of a Core expression object +that is not associated with any :class:`_engine.Engine`:: + + metadata_obj = MetaData() + table = Table("t", metadata_obj, Column("q", Integer)) + + stmt = select(table) + result = stmt.execute() # <--- raises + +What the logic is expecting is that the :class:`_schema.MetaData` object has +been **bound** to a :class:`_engine.Engine`:: + + engine = create_engine("mysql+pymysql://user:pass@host/db") + metadata_obj = MetaData(bind=engine) + +Where above, any statement that derives from a :class:`_schema.Table` which +in turn derives from that :class:`_schema.MetaData` will implicitly make use of +the given :class:`_engine.Engine` in order to invoke the statement. + +Note that the concept of bound metadata is **not present in SQLAlchemy 2.0**. +The correct way to invoke statements is via +the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: + + with engine.connect() as conn: + result = conn.execute(stmt) + +When using the ORM, a similar facility is available via the :class:`.Session`:: + + result = session.execute(stmt) + +.. seealso:: + + :ref:`tutorial_statement_execution` + +.. _error_8s2a: + +This connection is on an inactive transaction. Please rollback() fully before proceeding +------------------------------------------------------------------------------------------ + +This error condition was added to SQLAlchemy as of version 1.4, and does not +apply to SQLAlchemy 2.0. The error +refers to the state where a :class:`_engine.Connection` is placed into a +transaction using a method like :meth:`_engine.Connection.begin`, and then a +further "marker" transaction is created within that scope; the "marker" +transaction is then rolled back using :meth:`.Transaction.rollback` or closed +using :meth:`.Transaction.close`, however the outer transaction is still +present in an "inactive" state and must be rolled back. + +The pattern looks like:: + + engine = create_engine(...) 
+ + connection = engine.connect() + transaction1 = connection.begin() + + # this is a "sub" or "marker" transaction, a logical nesting + # structure based on "real" transaction transaction1 + transaction2 = connection.begin() + transaction2.rollback() + + # transaction1 is still present and needs explicit rollback, + # so this will raise + connection.execute(text("select 1")) + +Above, ``transaction2`` is a "marker" transaction, which indicates a logical +nesting of transactions within an outer one; while the inner transaction +can roll back the whole transaction via its rollback() method, its commit() +method has no effect except to close the scope of the "marker" transaction +itself. The call to ``transaction2.rollback()`` has the effect of +**deactivating** transaction1 which means it is essentially rolled back +at the database level, however is still present in order to accommodate +a consistent nesting pattern of transactions. + +The correct resolution is to ensure the outer transaction is also +rolled back:: + + transaction1.rollback() + +This pattern is not commonly used in Core. Within the ORM, a similar issue can +occur which is the product of the ORM's "logical" transaction structure; this +is described in the FAQ entry at :ref:`faq_session_rollback`. + +The "subtransaction" pattern is removed in SQLAlchemy 2.0 so that this +particular programming pattern is no longer be available, preventing +this error message. + + + diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 1bee24c3247..fe8e56f815d 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -27,8 +27,9 @@ How do I pass custom connect arguments to my database API? The :func:`_sa.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: - e = create_engine("mysql://scott:tiger@localhost/test", - connect_args={"encoding": "utf8"}) + e = create_engine( + "mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"} + ) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: @@ -167,18 +168,12 @@ a new transaction when it is first used that remains in effect for subsequent statements, until the DBAPI-level ``connection.commit()`` or ``connection.rollback()`` method is invoked. -As discussed at :ref:`autocommit`, there is a library level "autocommit" -feature which is deprecated in 1.4 that causes :term:`DML` and :term:`DDL` -executions to commit automatically after individual statements are executed; -however, outside of this deprecated case, modern use of SQLAlchemy works with -this transaction in all cases and does not commit any data unless explicitly -told to commit. - -At the ORM level, a similar situation where the ORM -:class:`_orm.Session` object also presents a legacy "autocommit" operation is -present; however even if this legacy mode of operation is used, the -:class:`_orm.Session` still makes use of transactions internally, -particularly within the :meth:`_orm.Session.flush` process. +In modern use of SQLAlchemy, a series of SQL statements are always invoked +within this transactional state, assuming +:ref:`DBAPI autocommit mode ` is not enabled (more on that in +the next section), meaning that no single statement is automatically committed; +if an operation fails, the effects of all statements within the current +transaction will be lost. 
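+
+For illustration, a minimal sketch of this behavior, assuming a hypothetical
+``some_table`` table and a placeholder database URL purely for demonstration,
+might look like::
+
+    from sqlalchemy import create_engine, text
+
+    engine = create_engine("postgresql://scott:tiger@localhost/test")
+
+    # begin() opens a transaction that spans the block
+    with engine.begin() as conn:
+        conn.execute(
+            text("INSERT INTO some_table (data) VALUES (:data)"), {"data": "a"}
+        )
+        conn.execute(
+            text("INSERT INTO some_table (data) VALUES (:data)"), {"data": "b"}
+        )
+
+    # the transaction commits only when the block exits without error; if
+    # either statement had raised, both INSERTs would have been rolled back
+
+The same principle applies at the ORM level, where the :class:`_orm.Session`
+persists nothing until :meth:`_orm.Session.commit` is called explicitly.
+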
The implication that this has for the notion of "retrying" a statement is that in the default case, when a connection is lost, **the entire transaction is @@ -188,9 +183,10 @@ SQLAlchemy does not have a transparent "reconnection" feature that works mid-transaction, for the case when the database connection has disconnected while being used. The canonical approach to dealing with mid-operation disconnects is to **retry the entire operation from the start of the -transaction**, often by using a Python "retry" decorator, or to otherwise +transaction**, often by using a custom Python decorator that will +"retry" a particular function several times until it succeeds, or to otherwise architect the application in such a way that it is resilient against -transactions that are dropped. +transactions that are dropped that then cause operations to fail. There is also the notion of extensions that can keep track of all of the statements that have proceeded within a transaction and then replay them all in @@ -255,15 +251,13 @@ statement executions:: def reconnecting_engine(engine, num_retries, retry_interval): - def _run_with_retries(fn, context, cursor, statement, *arg, **kw): + def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw): for retry in range(num_retries + 1): try: - fn(cursor, statement, context=context, *arg) + fn(cursor_obj, statement, context=context, *arg) except engine.dialect.dbapi.Error as raw_dbapi_err: connection = context.root_connection - if engine.dialect.is_disconnect( - raw_dbapi_err, connection, cursor - ): + if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj): if retry > num_retries: raise engine.logger.error( @@ -321,9 +315,7 @@ using the following proof of concept script. Once run, it will emit a time.sleep(5) e = reconnecting_engine( - create_engine( - "mysql://scott:tiger@localhost/test", echo_pool=True - ), + create_engine("mysql://scott:tiger@localhost/test", echo_pool=True), num_retries=5, retry_interval=2, ) @@ -379,7 +371,10 @@ configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool - engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) + engine = create_engine( + "mysql://scott:tiger@localhost/myisam_database", + pool=QueuePool(reset_on_return=False), + ) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -388,8 +383,9 @@ I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: - engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) - + engine = create_engine( + "mssql://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit") + ) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! ---------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/doc/build/faq/index.rst b/doc/build/faq/index.rst index 810a0401157..4b2397d5b8d 100644 --- a/doc/build/faq/index.rst +++ b/doc/build/faq/index.rst @@ -10,6 +10,7 @@ observed questions to well-known issues. .. 
toctree:: :maxdepth: 2 + installation connections metadata_schema sqlexpressions diff --git a/doc/build/faq/installation.rst b/doc/build/faq/installation.rst new file mode 100644 index 00000000000..500198df8a4 --- /dev/null +++ b/doc/build/faq/installation.rst @@ -0,0 +1,29 @@ +Installation +================= + +.. contents:: + :local: + :class: faq + :backlinks: none + +.. _faq_asyncio_installation: + +I'm getting an error about greenlet not being installed when I try to use asyncio +---------------------------------------------------------------------------------- + +The ``greenlet`` dependency does not install by default for CPU architectures +for which ``greenlet`` does not supply a `pre-built binary wheel `_. +Notably, **this includes Apple M1**. To install including ``greenlet``, +add the ``asyncio`` `setuptools extra `_ +to the ``pip install`` command:: + + pip install sqlalchemy[asyncio] + +For more background, see :ref:`asyncio_install`. + + +.. seealso:: + + :ref:`asyncio_install` + + diff --git a/doc/build/faq/metadata_schema.rst b/doc/build/faq/metadata_schema.rst index 2556db60c1a..2eab0033a5e 100644 --- a/doc/build/faq/metadata_schema.rst +++ b/doc/build/faq/metadata_schema.rst @@ -88,9 +88,12 @@ metadata creation sequence as a string, using this recipe:: from sqlalchemy import create_mock_engine + def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect)) - engine = create_mock_engine('postgresql://', dump) + + + engine = create_mock_engine("postgresql://", dump) metadata_obj.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports diff --git a/doc/build/faq/ormconfiguration.rst b/doc/build/faq/ormconfiguration.rst index 3eab2185471..1059354ed84 100644 --- a/doc/build/faq/ormconfiguration.rst +++ b/doc/build/faq/ormconfiguration.rst @@ -48,7 +48,7 @@ applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { - 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] + "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` @@ -142,16 +142,18 @@ Given the example as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) As of SQLAlchemy version 0.9.5, the above condition is detected, and will warn that the ``id`` column of ``A`` and ``B`` is being combined under @@ -161,33 +163,33 @@ that a ``B`` object's primary key will always mirror that of its ``A``. A mapping which resolves this is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" - b_id = Column('id', Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + b_id = Column("id", Integer, primary_key=True) + a_id = Column(Integer, ForeignKey("a.id")) Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite the fact that ``B.a_id`` is where ``A.id`` is related. 
We could combine them together using :func:`.column_property`:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" # probably not what you want, but this is a demonstration id = column_property(Column(Integer, primary_key=True), A.id) - a_id = Column(Integer, ForeignKey('a.id')) - - + a_id = Column(Integer, ForeignKey("a.id")) I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -197,21 +199,27 @@ Are you doing this?:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) + foo = relationship( + "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar") + ) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") + foo = relationship( + "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)" + ) Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... - foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) + foo = relationship( + Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar) + ) The same idea applies to all the other arguments, such as ``foreign_keys``:: @@ -234,25 +242,22 @@ The same idea applies to all the other arguments, such as ``foreign_keys``:: .. _faq_subqueryload_limit_sort: -Why is ``ORDER BY`` required with ``LIMIT`` (especially with ``subqueryload()``)? ---------------------------------------------------------------------------------- +Why is ``ORDER BY`` recommended with ``LIMIT`` (especially with ``subqueryload()``)? +------------------------------------------------------------------------------------ -A relational database can return rows in any -arbitrary order, when an explicit ordering is not set. -While this ordering very often corresponds to the natural -order of rows within a table, this is not the case for all databases and -all queries. The consequence of this is that any query that limits rows -using ``LIMIT`` or ``OFFSET`` should **always** specify an ``ORDER BY``. -Otherwise, it is not deterministic which rows will actually be returned. +When ORDER BY is not used for a SELECT statement that returns rows, the +relational database is free to return matched rows in any arbitrary +order. While this ordering very often corresponds to the natural +order of rows within a table, this is not the case for all databases and all +queries.
The consequence of this is that any query that limits rows using +``LIMIT`` or ``OFFSET``, or which merely selects the first row of the result, +discarding the rest, will not be deterministic in terms of what result row is +returned, assuming there's more than one row that matches the query's criteria. -When we use a SQLAlchemy method like :meth:`_query.Query.first`, we are in fact -applying a ``LIMIT`` of one to the query, so without an explicit ordering -it is not deterministic what row we actually get back. While we may not notice this for simple queries on databases that usually -returns rows in their natural -order, it becomes much more of an issue if we also use :func:`_orm.subqueryload` -to load related collections, and we may not be loading the collections -as intended. +returns rows in their natural order, it becomes more of an issue if we +also use :func:`_orm.subqueryload` to load related collections, and we may not +be loading the collections as intended. SQLAlchemy implements :func:`_orm.subqueryload` by issuing a separate query, the results of which are matched up to the results from the first query. diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 6e144072131..91061c85927 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -8,6 +8,166 @@ Performance :class: faq :backlinks: none +.. _faq_new_caching: + +Why is my application slow after upgrading to 1.4 and/or 2.x? +-------------------------------------------------------------- + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +Core and ORM SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement, allowing the +relatively expensive string compilation process to be skipped when another +structurally equivalent construct is next used. This system +relies upon functionality that is implemented for all SQL constructs, including +objects such as :class:`_schema.Column`, +:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a +**cache key** which fully represents their state to the degree that it affects +the SQL compilation process. + +The caching system allows SQLAlchemy 1.4 and above to be more performant than +SQLAlchemy 1.3 with regards to the time spent converting SQL constructs into +strings repeatedly. However, this only works if caching is enabled for the +dialect and SQL constructs in use; if not, string compilation is usually +similar to that of SQLAlchemy 1.3, with a slight decrease in speed in some +cases. + +There is one case however where if SQLAlchemy's new caching system has been +disabled (for reasons below), performance for the ORM may be in fact +significantly poorer than that of 1.3 or other prior releases which is due to +the lack of caching within ORM lazy loaders and object refresh queries, which +in the 1.3 and earlier releases used the now-legacy ``BakedQuery`` system. If +an application is seeing significant (30% or higher) degradations in +performance (measured in time for operations to complete) when switching to +1.4, this is the likely cause of the issue, with steps to mitigate below. + +.. seealso:: + + :ref:`sql_caching` - overview of the caching system + + :ref:`caching_caveats` - additional information regarding the warnings + generated for elements that don't enable caching. 
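+
+As a concrete starting point for the steps below, SQL logging can be enabled
+so that the cache status of each statement becomes visible; a minimal sketch,
+assuming the standard ``echo`` flag and an in-memory SQLite database purely
+for demonstration, might look like::
+
+    from sqlalchemy import create_engine, literal, select
+
+    # echo=True logs each statement along with a cache indicator such as
+    # "[generated in 0.00011s]" on first use, then "[cached since ...]"
+    engine = create_engine("sqlite://", echo=True)
+
+    stmt = select(literal(1))
+
+    with engine.connect() as conn:
+        conn.execute(stmt)  # logged as "generated"
+        conn.execute(stmt)  # logged as "cached since ..."
+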
+ +Step one - turn on SQL logging and confirm whether or not caching is working +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here, we want to use the technique described at +:ref:`engine logging `, looking for statements with the +``[no key]`` indicator or even ``[dialect does not support caching]``. +The indicators we would see for SQL statements that are successfully participating +in the caching system would be indicating ``[generated in Xs]`` when +statements are invoked for the first time and then +``[cached since Xs ago]`` for the vast majority of statements subsequent. +If ``[no key]`` is prevalent in particular for SELECT statements, or +if caching is disabled entirely due to ``[dialect does not support caching]``, +this can be the cause of significant performance degradation. + +.. seealso:: + + :ref:`sql_caching_logging` + + +Step two - identify what constructs are blocking caching from being enabled +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Assuming statements are not being cached, there should be warnings emitted +early in the application's log (SQLAlchemy 1.4.28 and above only) indicating +dialects, :class:`.TypeEngine` objects, and SQL constructs that are not +participating in caching. + +For user defined datatypes such as those which extend :class:`_types.TypeDecorator` +and :class:`_types.UserDefinedType`, the warnings will look like:: + + sqlalchemy.ext.SAWarning: MyType will not produce a cache key because the + ``cache_ok`` attribute is not set to True. This can have significant + performance implications including some performance degradations in + comparison to prior SQLAlchemy versions. Set this attribute to True if this + type object's state is safe to use in a cache key, or False to disable this + warning. + +For custom and third party SQL elements, such as those constructed using +the techniques described at :ref:`sqlalchemy.ext.compiler_toplevel`, these +warnings will look like:: + + sqlalchemy.exc.SAWarning: Class MyClass will not make use of SQL + compilation caching as it does not set the 'inherit_cache' attribute to + ``True``. This can have significant performance implications including some + performance degradations in comparison to prior SQLAlchemy versions. Set + this attribute to True if this object can make use of the cache key + generated by the superclass. Alternatively, this attribute may be set to + False which will disable this warning. + +For custom and third party dialects which make use of the :class:`.Dialect` +class hierarchy, the warnings will look like:: + + sqlalchemy.exc.SAWarning: Dialect database:driver will not make use of SQL + compilation caching as it does not set the 'supports_statement_cache' + attribute to ``True``. This can have significant performance implications + including some performance degradations in comparison to prior SQLAlchemy + versions. Dialect maintainers should seek to set this attribute to True + after appropriate development and testing for SQLAlchemy 1.4 caching + support. Alternatively, this attribute may be set to False which will + disable this warning. 
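+
+If the log output alone does not make it clear where a non-caching construct
+originates, these warnings can be escalated to exceptions so that a traceback
+points at the offending element; a minimal sketch using the standard library
+warnings filter, intended for debugging only, might look like::
+
+    import warnings
+
+    from sqlalchemy import exc
+
+    # turn all SAWarning warnings (including the caching-related ones quoted
+    # above) into exceptions while debugging, then remove once resolved
+    warnings.filterwarnings("error", category=exc.SAWarning)
+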
+ + +Step three - enable caching for the given objects and/or seek alternatives +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Steps to mitigate the lack of caching include: + +* Review and set :attr:`.ExternalType.cache_ok` to ``True`` for all custom types + which extend from :class:`_types.TypeDecorator`, + :class:`_types.UserDefinedType`, as well as subclasses of these such as + :class:`_types.PickleType`. Set this **only** if the custom type does not + include any additional state attributes which affect how it renders SQL:: + + class MyCustomType(TypeDecorator): + cache_ok = True + impl = String + + If the types in use are from a third-party library, consult with the + maintainers of that library so that it may be adjusted and released. + + .. seealso:: + + :attr:`.ExternalType.cache_ok` - background on requirements to enable + caching for custom datatypes. + +* Make sure third party dialects set :attr:`.Dialect.supports_statement_cache` + to ``True``. What this indicates is that the maintainers of a third party + dialect have made sure their dialect works with SQLAlchemy 1.4 or greater, + and that their dialect doesn't include any compilation features which may get + in the way of caching. As there are some common compilation patterns which + can in fact interfere with caching, it's important that dialect maintainers + check and test this carefully, adjusting for any of the legacy patterns + which won't work with caching. + + .. seealso:: + + :ref:`engine_thirdparty_caching` - background and examples for third-party + dialects to participate in SQL statement caching. + +* Custom SQL classes, including all DQL / DML constructs one might create + using the :ref:`sqlalchemy.ext.compiler_toplevel`, as well as ad-hoc + subclasses of objects such as :class:`_schema.Column` or + :class:`_schema.Table`. The :attr:`.HasCacheKey.inherit_cache` attribute + may be set to ``True`` for trivial subclasses, which do not contain any + subclass-specific state information which affects the SQL compilation. + + .. seealso:: + + :ref:`compilerext_caching` - guidelines for applying the + :attr:`.HasCacheKey.inherit_cache` attribute. + + +.. seealso:: + + :ref:`sql_caching` - caching system overview + + :ref:`caching_caveats` - background on warnings emitted when caching + is not enabled for specific constructs and/or dialects. + + .. _faq_how_to_profile: How can I profile a SQLAlchemy powered application? 
@@ -55,16 +215,16 @@ using a recipe like the following:: logger = logging.getLogger("myapp.sqltime") logger.setLevel(logging.DEBUG) + @event.listens_for(Engine, "before_cursor_execute") - def before_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - conn.info.setdefault('query_start_time', []).append(time.time()) + def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): + conn.info.setdefault("query_start_time", []).append(time.time()) logger.debug("Start Query: %s", statement) + @event.listens_for(Engine, "after_cursor_execute") - def after_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - total = time.time() - conn.info['query_start_time'].pop(-1) + def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): + total = time.time() - conn.info["query_start_time"].pop(-1) logger.debug("Query Complete!") logger.debug("Total Time: %f", total) @@ -95,6 +255,7 @@ Below is a simple recipe which works profiling into a context manager:: import pstats import contextlib + @contextlib.contextmanager def profiled(): pr = cProfile.Profile() @@ -102,7 +263,7 @@ Below is a simple recipe which works profiling into a context manager:: yield pr.disable() s = io.StringIO() - ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') + ps = pstats.Stats(pr, stream=s).sort_stats("cumulative") ps.print_stats() # uncomment this to see who's calling what # ps.print_callers() @@ -111,7 +272,7 @@ Below is a simple recipe which works profiling into a context manager:: To profile a section of code:: with profiled(): - Session.query(FooClass).filter(FooClass.somevalue==8).all() + Session.query(FooClass).filter(FooClass.somevalue == 8).all() The output of profiling can be used to give an idea where time is being spent. A section of profiling output looks like this:: @@ -197,12 +358,13 @@ this:: from sqlalchemy import TypeDecorator import time + class Foo(TypeDecorator): impl = String def process_result_value(self, value, thing): # intentionally add slowness for illustration purposes - time.sleep(.001) + time.sleep(0.001) return value the profiling output of this intentionally slow operation can be seen like this:: diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index dc1336dad00..43f3673bbd9 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -91,12 +91,14 @@ does not properly handle the exception. For example:: from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base(create_engine('sqlite://')) + Base = declarative_base(create_engine("sqlite://")) + class Foo(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + Base.metadata.create_all() session = sessionmaker()() @@ -113,7 +115,6 @@ does not properly handle the exception. For example:: # continue using session without rolling back session.commit() - The usage of the :class:`.Session` should fit within a structure similar to this:: try: @@ -186,7 +187,7 @@ point of view there is still a transaction that is now in an inactive state. Given a block such as:: - sess = Session() # begins a logical transaction + sess = Session() # begins a logical transaction try: sess.flush() @@ -237,7 +238,7 @@ will **deduplicate the objects based on primary key**. 
That is, if we for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`, and we had a SQL query like the following:: - q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack') + q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack") Above, the sample data used in the tutorial has two rows in the ``addresses`` table for the ``users`` row with the name ``'jack'``, primary key value 5. @@ -257,7 +258,9 @@ This is because when the :class:`_query.Query` object returns full entities, the are **deduplicated**. This does not occur if we instead request individual columns back:: - >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all() + >>> session.query(User.id, User.name).outerjoin(User.addresses).filter( + ... User.name == "jack" + ... ).all() [(5, 'jack'), (5, 'jack')] There are two main reasons the :class:`_query.Query` will deduplicate: @@ -338,6 +341,7 @@ one:: print("ITER!") return iter([1, 2, 3, 4, 5]) + list(Iterates()) output:: @@ -350,7 +354,7 @@ How Do I use Textual SQL with ORM Queries? See: -* :ref:`orm_tutorial_literal_sql` - Ad-hoc textual blocks with :class:`_query.Query` +* :ref:`orm_queryguide_selecting_text` - Ad-hoc textual blocks with :class:`_query.Query` * :ref:`session_sql_expressions` - Using :class:`.Session` with textual SQL directly. @@ -396,14 +400,21 @@ an "expire" event of the :func:`_orm.relationship` in which it's involved. This that for the following sequence:: o = Session.query(SomeClass).first() - assert o.foo is None # accessing an un-set attribute sets it to None + + # assume the existing o.foo_id value is None; + # accessing o.foo will reconcile this as ``None``, but will effectively + # "load" the value of None + assert o.foo is None + + # now set foo_id to something. o.foo will not be immediately affected o.foo_id = 7 -``o.foo`` is initialized to ``None`` when we first accessed it. Setting -``o.foo_id = 7`` will have the value of "7" as pending, but no flush +``o.foo`` is loaded with its effective database value of ``None`` when it +is first accessed. 
Setting +``o.foo_id = 7`` will have the value of "7" as a pending change, but no flush has occurred - so ``o.foo`` is still ``None``:: - # attribute is already set to None, has not been + # attribute is already "loaded" as None, has not been # reconciled with o.foo_id = 7 yet assert o.foo is None @@ -411,18 +422,19 @@ For ``o.foo`` to load based on the foreign key mutation is usually achieved naturally after the commit, which both flushes the new foreign key value and expires all state:: - Session.commit() # expires all attributes + session.commit() # expires all attributes foo_7 = Session.query(Foo).get(7) - assert o.foo is foo_7 # o.foo lazyloads on access + # o.foo will lazyload again, this time getting the new object + assert o.foo is foo_7 A more minimal operation is to expire the attribute individually - this can be performed for any :term:`persistent` object using :meth:`.Session.expire`:: o = Session.query(SomeClass).first() o.foo_id = 7 - Session.expire(o, ['foo']) # object must be persistent for this + Session.expire(o, ["foo"]) # object must be persistent for this foo_7 = Session.query(Foo).get(7) @@ -438,17 +450,15 @@ have meaning until the row is inserted; otherwise there is no row yet:: Session.add(new_obj) - # accessing an un-set attribute sets it to None + # returns None but this is not a "lazyload", as the object is not + # persistent in the DB yet, and the None value is not part of the + # object's state assert new_obj.foo is None Session.flush() # emits INSERT - # expire this because we already set .foo to None - Session.expire(o, ['foo']) - assert new_obj.foo is foo_7 # now it loads - .. topic:: Attribute loading for non-persistent objects One variant on the "pending" behavior above is if we use the flag @@ -504,21 +514,21 @@ The function can be demonstrated as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - c_id = Column(ForeignKey('c.id')) + a_id = Column(ForeignKey("a.id")) + c_id = Column(ForeignKey("c.id")) c = relationship("C", backref="bs") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index 93653a10ced..287647a7936 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -19,7 +19,7 @@ function (note the Python ``print`` function also calls ``str()`` automatically if we don't use it explicitly):: >>> from sqlalchemy import table, column, select - >>> t = table('my_table', column('x')) + >>> t = table("my_table", column("x")) >>> statement = select(t) >>> print(str(statement)) SELECT my_table.x @@ -31,7 +31,7 @@ The ``str()`` builtin, or an equivalent, can be invoked on ORM as:: >>> from sqlalchemy import column - >>> print(column('x') == 'some value') + >>> print(column("x") == "some value") x = :x_1 Stringifying for Specific Databases @@ -59,8 +59,16 @@ instantiate a :class:`.Dialect` object directly, as below where we use a PostgreSQL dialect:: from sqlalchemy.dialects import postgresql + print(statement.compile(dialect=postgresql.dialect())) +Note that any dialect can be assembled using :func:`_sa.create_engine` itself +with a dummy URL and then accessing the :attr:`_engine.Engine.dialect` attribute, +such as if we wanted a dialect object for psycopg2:: + + e = 
create_engine("postgresql+psycopg2://") + psycopg2_dialect = e.dialect + When given an ORM :class:`~.orm.query.Query` object, in order to get at the :meth:`_expression.ClauseElement.compile` method we only need access the :attr:`~.orm.query.Query.statement` @@ -72,7 +80,7 @@ accessor first:: Rendering Bound Parameters Inline ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. warning:: **Never** use this technique with string content received from +.. warning:: **Never** use these techniques with string content received from untrusted input, such as from web forms or other user-input applications. SQLAlchemy's facilities to coerce Python values into direct SQL string values are **not secure against untrusted input and do not validate the type @@ -91,51 +99,199 @@ flag, passed to ``compile_kwargs``:: from sqlalchemy.sql import table, column, select - t = table('t', column('x')) + t = table("t", column("x")) s = select(t).where(t.c.x == 5) # **do not use** with untrusted input!!! print(s.compile(compile_kwargs={"literal_binds": True})) -The above approach has the caveats that it is only supported for basic -types, such as ints and strings, and furthermore if a :func:`.bindparam` -without a pre-set value is used directly, it won't be able to -stringify that either. + # to render for a specific dialect + print(s.compile(dialect=dialect, compile_kwargs={"literal_binds": True})) -This functionality is provided mainly for -logging or debugging purposes, where having the raw sql string of a query -may prove useful. Note that the ``dialect`` parameter should also -passed to the :meth:`_expression.ClauseElement.compile` method to render -the query that will be sent to the database. + # or if you have an Engine, pass as first argument + print(s.compile(some_engine, compile_kwargs={"literal_binds": True})) -To support inline literal rendering for types not supported, implement -a :class:`.TypeDecorator` for the target type which includes a -:meth:`.TypeDecorator.process_literal_param` method:: +This functionality is provided mainly for logging or debugging purposes, where +having the raw sql string of a query may prove useful. - from sqlalchemy import TypeDecorator, Integer +The above approach has the caveats that it is only supported for basic types, +such as ints and strings, and furthermore if a :func:`.bindparam` without a +pre-set value is used directly, it won't be able to stringify that either. +Methods of stringifying all parameters unconditionally are detailed below. +.. tip:: - class MyFancyType(TypeDecorator): - impl = Integer + The reason SQLAlchemy does not support full stringification of all + datatypes is threefold: - def process_literal_param(self, value, dialect): - return "my_fancy_formatting(%s)" % value + 1. This is a functionality that is already supported by the DBAPI in use + when the DBAPI is used normally. The SQLAlchemy project cannot be + tasked with duplicating this functionality for every datatype for + all backends, as this is redundant work which also incurs significant + testing and ongoing support overhead. + + 2. Stringifying with bound parameters inlined for specific databases + suggests a usage that is actually passing these fully stringified + statements onto the database for execution. This is unnecessary and + insecure, and SQLAlchemy does not want to encourage this use in any + way. + + 3. The area of rendering literal values is the most likely area for + security issues to be reported. 
SQLAlchemy tries to keep the area of + safe parameter stringification an issue for the DBAPI drivers as much + as possible where the specifics for each DBAPI can be handled + appropriately and securely. + +As SQLAlchemy intentionally does not support full stringification of literal +values, techniques to do so within specific debugging scenarios include the +following. As an example, we will use the PostgreSQL :class:`_postgresql.UUID` +datatype:: + + import uuid + + from sqlalchemy import Column + from sqlalchemy import create_engine + from sqlalchemy import Integer + from sqlalchemy import select + from sqlalchemy.dialects.postgresql import UUID + from sqlalchemy.orm import declarative_base + + + Base = declarative_base() + + + class A(Base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + data = Column(UUID) + + + stmt = select(A).where(A.data == uuid.uuid4()) + +Given the above model and statement which will compare a column to a single +UUID value, options for stringifying this statement with inline values +include: + +* Some DBAPIs such as psycopg2 support helper functions like + `mogrify() `_ which + provide access to their literal-rendering functionality. To use such + features, render the SQL string without using ``literal_binds`` and pass + the parameters separately via the :attr:`.SQLCompiler.params` accessor:: + + e = create_engine("postgresql+psycopg2://scott:tiger@localhost/test") + + with e.connect() as conn: + cursor = conn.connection.cursor() + compiled = stmt.compile(e) + + print(cursor.mogrify(str(compiled), compiled.params)) + + The above code will produce psycopg2's raw bytestring:: + + b"SELECT a.id, a.data \nFROM a \nWHERE a.data = 'a511b0fc-76da-4c47-a4b4-716a8189b7ac'::uuid" + +* Render the :attr:`.SQLCompiler.params` directly into the statement, using + the appropriate `paramstyle `_ + of the target DBAPI. For example, the psycopg2 DBAPI uses the named ``pyformat`` + style. The meaning of ``render_postcompile`` will be discussed in the next + section. **WARNING this is NOT secure, do NOT use untrusted input**:: + + e = create_engine("postgresql+psycopg2://") + + # will use pyformat style, i.e. %(paramname)s for param + compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) + + print(str(compiled) % compiled.params) + + This will produce a non-working string, that nonetheless is suitable for + debugging:: + + SELECT a.id, a.data + FROM a + WHERE a.data = 9eec1209-50b4-4253-b74b-f82461ed80c1 + + Another example using a positional paramstyle such as ``qmark``, we can render + our above statement in terms of SQLite by also using the + :attr:`.SQLCompiler.positiontup` collection in conjunction with + :attr:`.SQLCompiler.params`, in order to retrieve the parameters in + their positional order for the statement as compiled:: - from sqlalchemy import Table, Column, MetaData + import re - tab = Table('mytable', MetaData(), Column('x', MyFancyType())) + e = create_engine("sqlite+pysqlite://") - stmt = tab.select().where(tab.c.x > 5) - print(stmt.compile(compile_kwargs={"literal_binds": True})) + # will use qmark style, i.e. ? 
for param + compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) -producing output like:: + # params in positional order + params = (repr(compiled.params[name]) for name in compiled.positiontup) - SELECT mytable.x - FROM mytable - WHERE mytable.x > my_fancy_formatting(5) + print(re.sub(r"\?", lambda m: next(params), str(compiled))) + The above snippet prints:: + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('1bd70375-db17-4d8c-94f1-fc2ef3aada26') + +* Use the :ref:`sqlalchemy.ext.compiler_toplevel` extension to render + :class:`_sql.BindParameter` objects in a custom way when a user-defined + flag is present. This flag is sent through the ``compile_kwargs`` + dictionary like any other flag:: + + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.sql.expression import BindParameter + + + @compiles(BindParameter) + def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw): + if not use_my_literal_recipe: + # use normal bindparam processing + return compiler.visit_bindparam(element, **kw) + + # if use_my_literal_recipe was passed to compiler_kwargs, + # render the value directly + return repr(element.value) + + + e = create_engine("postgresql+psycopg2://") + print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True})) + + The above recipe will print:: + + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('47b154cd-36b2-42ae-9718-888629ab9857') + +* For type-specific stringification that's built into a model or a statement, the + :class:`_types.TypeDecorator` class may be used to provide custom stringification + of any datatype using the :meth:`.TypeDecorator.process_literal_param` method:: + + from sqlalchemy import TypeDecorator + + + class UUIDStringify(TypeDecorator): + impl = UUID + + def process_literal_param(self, value, dialect): + return repr(value) + + The above datatype needs to be used either explicitly within the model + or locally within the statement using :func:`_sql.type_coerce`, such as :: + + from sqlalchemy import type_coerce + + stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4()) + + print(stmt.compile(e, compile_kwargs={"literal_binds": True})) + + Again printing the same form:: + + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('47b154cd-36b2-42ae-9718-888629ab9857') Rendering "POSTCOMPILE" Parameters as Bound Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -149,23 +305,59 @@ known values are passed. 
"Expanding" parameters are used for string can be safely cached independently of the actual lists of values being passed to a particular invocation of :meth:`_sql.ColumnOperators.in_`:: - >>> from sqlalchemy import column - >>> expr = column('x').in_([1, 2, 3]) - >>> print(expr) - x IN ([POSTCOMPILE_x_1]) + >>> stmt = select(A).where(A.id.in_[1, 2, 3]) To render the IN clause with real bound parameter symbols, use the ``render_postcompile=True`` flag with :meth:`_sql.ClauseElement.compile`:: - >>> print(expr.compile(compile_kwargs={"render_postcompile": True})) - x IN (:x_1_1, :x_1_2, :x_1_3) + >>> e = create_engine("postgresql+psycopg2://") + >>> print(stmt.compile(e, compile_kwargs={"render_postcompile": True})) + SELECT a.id, a.data + FROM a + WHERE a.id IN (%(id_1_1)s, %(id_1_2)s, %(id_1_3)s) + +The ``literal_binds`` flag, described in the previous section regarding +rendering of bound parameters, automatically sets ``render_postcompile`` to +True, so for a statement with simple ints/strings, these can be stringified +directly:: + + # render_postcompile is implied by literal_binds + >>> print(stmt.compile(e, compile_kwargs={"literal_binds": True})) + SELECT a.id, a.data + FROM a + WHERE a.id IN (1, 2, 3) + +The :attr:`.SQLCompiler.params` and :attr:`.SQLCompiler.positiontup` are +also compatible with ``render_postcompile``, so that +the previous recipes for rendering inline bound parameters will work here +in the same way, such as SQLite's positional form:: + + >>> u1, u2, u3 = uuid.uuid4(), uuid.uuid4(), uuid.uuid4() + >>> stmt = select(A).where(A.data.in_([u1, u2, u3])) + + >>> import re + >>> e = create_engine("sqlite+pysqlite://") + >>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) + >>> params = (repr(compiled.params[name]) for name in compiled.positiontup) + >>> print(re.sub(r"\?", lambda m: next(params), str(compiled))) + SELECT a.id, a.data + FROM a + WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa')) + +.. warning:: + + Remember, **all** of the above code recipes which stringify literal + values, bypassing the use of bound parameters when sending statements + to the database, are **only to be used when**: + + 1. the use is **debugging purposes only** -As described in the previous section, the ``literal_binds`` flag works here -by automatically setting ``render_postcompile`` to True:: + 2. the string **is not to be passed to a live production database** - >>> print(expr.compile(compile_kwargs={"literal_binds": True})) - x IN (1, 2, 3) + 3. only with **local, trusted input** + The above recipes for stringification of literal values are **not secure in + any way and should never be used against production databases**. .. _faq_sql_expression_percent_signs: @@ -230,13 +422,13 @@ I'm using op() to generate a custom operator and my parenthesis are not coming o The :meth:`.Operators.op` method allows one to create a custom database operator otherwise not known by SQLAlchemy:: - >>> print(column('q').op('->')(column('p'))) + >>> print(column("q").op("->")(column("p"))) q -> p However, when using it on the right side of a compound expression, it doesn't generate parenthesis as we expect:: - >>> print((column('q1') + column('q2')).op('->')(column('p'))) + >>> print((column("q1") + column("q2")).op("->")(column("p"))) q1 + q2 -> p Where above, we probably want ``(q1 + q2) -> p``. 
@@ -246,14 +438,14 @@ the :paramref:`.Operators.op.precedence` parameter, to a high number, where 100 is the maximum value, and the highest number used by any SQLAlchemy operator is currently 15:: - >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p'))) + >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p"))) (q1 + q2) -> p We can also usually force parenthesization around a binary expression (e.g. an expression that has left/right operands and an operator) using the :meth:`_expression.ColumnElement.self_group` method:: - >>> print((column('q1') + column('q2')).self_group().op('->')(column('p'))) + >>> print((column("q1") + column("q2")).self_group().op("->")(column("p"))) (q1 + q2) -> p Why are the parentheses rules like this? @@ -265,7 +457,7 @@ generate parenthesis based on groupings, it uses operator precedence and if the operator is known to be associative, so that parenthesis are generated minimally. Otherwise, an expression like:: - column('a') & column('b') & column('c') & column('d') + column("a") & column("b") & column("c") & column("d") would produce:: @@ -275,7 +467,7 @@ which is fine but would probably annoy people (and be reported as a bug). In other cases, it leads to things that are more likely to confuse databases or at the very least readability, such as:: - column('q', ARRAY(Integer, dimensions=2))[5][6] + column("q", ARRAY(Integer, dimensions=2))[5][6] would produce:: @@ -292,16 +484,16 @@ What if we defaulted the value of :paramref:`.Operators.op.precedence` to 100, e.g. the highest? Then this expression makes more parenthesis, but is otherwise OK, that is, these two are equivalent:: - >>> print((column('q') - column('y')).op('+', precedence=100)(column('z'))) + >>> print((column("q") - column("y")).op("+", precedence=100)(column("z"))) (q - y) + z - >>> print((column('q') - column('y')).op('+')(column('z'))) + >>> print((column("q") - column("y")).op("+")(column("z"))) q - y + z but these two are not:: - >>> print(column('q') - column('y').op('+', precedence=100)(column('z'))) + >>> print(column("q") - column("y").op("+", precedence=100)(column("z"))) q - y + z - >>> print(column('q') - column('y').op('+')(column('z'))) + >>> print(column("q") - column("y").op("+")(column("z"))) q - (y + z) For now, it's not clear that as long as we are doing parenthesization based on diff --git a/doc/build/faq/thirdparty.rst b/doc/build/faq/thirdparty.rst index 27c8fbf7434..4b8bb7c556c 100644 --- a/doc/build/faq/thirdparty.rst +++ b/doc/build/faq/thirdparty.rst @@ -28,17 +28,18 @@ by queries. This may be illustrated from code based on the following:: import numpy + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(Integer) + # .. 
later session.add(A(data=numpy.int64(10))) session.commit() - In the latter case, the issue is due to the ``numpy.int64`` datatype overriding the ``__eq__()`` method and enforcing that the return type of an expression is ``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression @@ -47,9 +48,9 @@ expressions from Python equality comparisons:: >>> import numpy >>> from sqlalchemy import column, Integer - >>> print(column('x', Integer) == numpy.int64(10)) # works + >>> print(column("x", Integer) == numpy.int64(10)) # works x = :x_1 - >>> print(numpy.int64(10) == column('x', Integer)) # breaks + >>> print(numpy.int64(10) == column("x", Integer)) # breaks False These errors are both solved in the same way, which is that special numpy @@ -61,9 +62,7 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and session.add(A(data=int(data))) - result = session.execute( - select(A.data).where(int(data) == A.data) - ) + result = session.execute(select(A.data).where(int(data) == A.data)) session.commit() @@ -72,4 +71,4 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and SQL expression for WHERE/HAVING role expected, got True ------------------------------------------------------- -See :ref:`numpy_int64`. \ No newline at end of file +See :ref:`numpy_int64`. diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index f979df1476d..51d98f4655f 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -74,6 +74,33 @@ Glossary # Session returns a Result that has ORM entities list_of_users = result.scalars().all() + reflection + reflected + In SQLAlchemy, this term refers to the feature of querying a database's + schema catalogs in order to load information about existing tables, + columns, constraints, and other constructs. SQLAlchemy includes + features that can both provide raw data for this information, as well + as that it can construct Core/ORM usable :class:`.Table` objects + from database schema catalogs automatically. + + .. seealso:: + + :ref:`metadata_reflection_toplevel` - complete background on + database reflection. + + + imperative + declarative + + In the SQLAlchemy ORM, these terms refer to two different styles of + mapping Python classes to database tables. + + .. seealso:: + + :ref:`orm_declarative_mapping` + + :ref:`orm_imperative_mapping` + facade An object that serves as a front-facing interface masking more complex @@ -146,7 +173,7 @@ Glossary `bind parameters `_ - at Use The Index, Luke! - + :ref:`tutorial_sending_parameters` - in the :ref:`unified_tutorial` selectable A term used in SQLAlchemy to describe a SQL construct that represents @@ -163,7 +190,7 @@ Glossary dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:: - some_column = Column('some_column', Integer) + some_column = Column("some_column", Integer) some_column_annotated = some_column._annotate({"entity": User}) The annotation system differs from the public dictionary :attr:`_schema.Column.info` @@ -179,8 +206,9 @@ Glossary within the join expression. plugin + plugin-enabled plugin-specific - "plugin-specific" generally indicates a function or method in + "plugin-enabled" or "plugin-specific" generally indicates a function or method in SQLAlchemy Core which will behave differently when used in an ORM context. @@ -236,7 +264,7 @@ Glossary on mapped classes. 
When a class is mapped as such:: class MyClass(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) data = Column(String) @@ -285,8 +313,8 @@ Glossary An acronym for **Data Manipulation Language**. DML is the subset of SQL that relational databases use to *modify* the data in tables. DML typically refers to the three widely familiar statements of INSERT, - UPDATE and DELETE, otherwise known as :term:`CRUD` (acronym for "CReate, - Update, Delete"). + UPDATE and DELETE, otherwise known as :term:`CRUD` (acronym for "Create, + Read, Update, Delete"). .. seealso:: @@ -407,6 +435,11 @@ Glossary class each of which represents a particular database column or relationship to a related class. + identity key + A key associated with ORM-mapped objects that identifies their + primary key identity within the database, as well as their unique + identity within a :class:`_orm.Session` :term:`identity map`. + identity map A mapping between Python objects and their database identities. The identity map is a collection that's associated with an @@ -625,17 +658,28 @@ Glossary `Domain Model (via Wikipedia) `_ unit of work - This pattern is where the system transparently keeps - track of changes to objects and periodically flushes all those - pending changes out to the database. SQLAlchemy's Session - implements this pattern fully in a manner similar to that of - Hibernate. + A software architecture where a persistence system such as an object + relational mapper maintains a list of changes made to a series of + objects, and periodically flushes all those pending changes out to the + database. + + SQLAlchemy's :class:`_orm.Session` implements the unit of work pattern, + where objects that are added to the :class:`_orm.Session` using methods + like :meth:`_orm.Session.add` will then participate in unit-of-work + style persistence. + + For a walk-through of what unit of work persistence looks like in + SQLAlchemy, start with the section :ref:`tutorial_orm_data_manipulation` + in the :ref:`unified_tutorial`. Then for more detail, see + :ref:`session_basics` in the general reference documentation. .. seealso:: `Unit of Work (via Martin Fowler) `_ - :doc:`orm/session` + :ref:`tutorial_orm_data_manipulation` + + :ref:`session_basics` expire expired @@ -864,7 +908,6 @@ Glossary isolation isolated - Isolation isolation level The isolation property of the :term:`ACID` model ensures that the concurrent execution @@ -1017,16 +1060,17 @@ Glossary single department. A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) .. seealso:: @@ -1068,15 +1112,16 @@ Glossary single department. 
A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) department = relationship("Department") .. seealso:: @@ -1101,16 +1146,17 @@ Glossary used in :term:`one to many` as follows:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee", backref="department") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) A backref can be applied to any relationship, including one to many, many to one, and :term:`many to many`. @@ -1162,26 +1208,27 @@ Glossary specified using plain table metadata:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) projects = relationship( "Project", - secondary=Table('employee_project', Base.metadata, - Column("employee_id", Integer, ForeignKey('employee.id'), - primary_key=True), - Column("project_id", Integer, ForeignKey('project.id'), - primary_key=True) - ), - backref="employees" - ) + secondary=Table( + "employee_project", + Base.metadata, + Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True), + Column("project_id", Integer, ForeignKey("project.id"), primary_key=True), + ), + backref="employees", + ) + class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) Above, the ``Employee.projects`` and back-referencing ``Project.employees`` @@ -1275,30 +1322,29 @@ Glossary A SQLAlchemy declarative mapping for the above might look like:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) class EmployeeProject(Base): - __tablename__ = 'employee_project' + __tablename__ = "employee_project" - employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - project_id = Column(Integer, ForeignKey('project.id'), primary_key=True) + employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + project_id = Column(Integer, ForeignKey("project.id"), primary_key=True) role_name = Column(String(30)) project = relationship("Project", backref="project_employees") employee = relationship("Employee", backref="employee_projects") - Employees can be added to a project given a role name:: proj = Project(name="Client A") @@ -1306,10 +1352,12 @@ Glossary emp1 = Employee(name="emp1") emp2 = Employee(name="emp2") - proj.project_employees.extend([ - EmployeeProject(employee=emp1, role="tech lead"), - EmployeeProject(employee=emp2, role="account executive") - ]) + 
proj.project_employees.extend( + [ + EmployeeProject(employee=emp1, role_name="tech lead"), + EmployeeProject(employee=emp2, role_name="account executive"), + ] + ) .. seealso:: @@ -1502,3 +1550,11 @@ Glossary :ref:`session_object_states` + attached + Indicates an ORM object that is presently associated with a specific + :term:`Session`. + + .. seealso:: + + :ref:`session_object_states` + diff --git a/doc/build/index.rst b/doc/build/index.rst index 35005872f43..361ccfa5d46 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -16,16 +16,11 @@ SQLAlchemy Documentation .. container:: - A high level view and getting set up. + New to SQLAlchemy? Start here: - :doc:`Overview ` | - :ref:`Installation Guide ` | - :doc:`Frequently Asked Questions ` | - :doc:`Migration from 1.3 ` | - :doc:`Glossary ` | - :doc:`Error Messages ` | - :doc:`Changelog catalog ` + * **For Python Beginners:** :ref:`Installation Guide ` - basic guidance on installing with pip and similar + * **For Python Veterans:** :doc:`SQLAlchemy Overview ` - brief architectural overview .. container:: left_right_container @@ -37,20 +32,19 @@ SQLAlchemy Documentation .. container:: - **SQLAlchemy 1.4 / 2.0 Transitional** + **SQLAlchemy 1.4 / 2.0** SQLAlchemy 2.0 is functionally available as part of SQLAlchemy 1.4, and integrates Core and ORM working styles more closely than ever. The new tutorial introduces - both concepts in parallel. New users and those starting new projects should start here! + both concepts in parallel. - * :doc:`/tutorial/index` - SQLAlchemy 2.0's main tutorial - - * :doc:`Migrating to SQLAlchemy 2.0 ` - Complete background on migrating from 1.3 or 1.4 to 2.0 + * **For a quick glance:** :doc:`/orm/quickstart` - a glimpse at what working with the ORM looks like + * **For all users:** :doc:`/tutorial/index` - The new SQLAlchemy 1.4/2.0 Tutorial .. container:: - **SQLAlchemy 1.x Releases** + **Legacy SQLAlchemy 1.x Tutorials** The 1.x Object Relational Tutorial and Core Tutorial are the legacy tutorials that should be consulted for existing SQLAlchemy codebases. @@ -60,6 +54,21 @@ SQLAlchemy Documentation * :doc:`core/tutorial` +.. container:: left_right_container + + .. container:: leftmost + + .. rst-class:: h2 + + Migration Notes + + .. container:: + + * :doc:`Migration from 1.3 ` - Migration notes for SQLAlchemy Version 1.4 + * :doc:`SQLAlchemy 2.0 Preview ` - Background on preparing a SQLAlchemy 1.4 application for SQLAlchemy 2.0 + * :doc:`Changelog catalog ` - Detailed changelogs for all SQLAlchemy Versions + + .. container:: left_right_container .. container:: leftmost @@ -74,7 +83,7 @@ SQLAlchemy Documentation **SQLAlchemy ORM** * **ORM Configuration:** - :doc:`Mapper Configuration ` | + :doc:`Mapped Class Configuration ` | :doc:`Relationship Configuration ` * **ORM Usage:** @@ -150,3 +159,18 @@ SQLAlchemy Documentation :doc:`More Dialects ... ` +.. container:: left_right_container + + .. container:: leftmost + + .. rst-class:: h2 + + Supplementary + + .. 
container:: + + * :doc:`Frequently Asked Questions ` - A collection of common problems and solutions + * :doc:`Glossary ` - Terms used in SQLAlchemy's documentation + * :doc:`Error Message Guide ` - Explanations of many SQLAlchemy Errors + * :doc:`Complete table of contents ` + * :ref:`Index ` diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 01e33df0346..46255e79f9d 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -22,7 +22,7 @@ Core contains the breadth of SQLAlchemy's SQL and database integration and description services, the most prominent part of this being the **SQL Expression Language**. -The SQL Expression Language is a toolkit all its own, independent of the ORM +The SQL Expression Language is a toolkit on its own, independent of the ORM package, which provides a system of constructing SQL expressions represented by composable objects, which can then be "executed" against a target database within the scope of a specific transaction, returning a result set. @@ -42,7 +42,7 @@ augmented by ORM-specific automations and object-centric querying capabilities. Whereas working with Core and the SQL Expression language presents a schema-centric view of the database, along with a programming paradigm that is oriented around immutability, the ORM builds on top of this a domain-centric -view of the database with a programming paradigm that is more explcitly +view of the database with a programming paradigm that is more explicitly object-oriented and reliant upon mutability. Since a relational database is itself a mutable service, the difference is that Core/SQL Expression language is command oriented whereas the ORM is state oriented. @@ -203,9 +203,15 @@ Python prompt like this: .. sourcecode:: python+sql >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest: +SKIP + >>> sqlalchemy.__version__  # doctest: +SKIP 1.4.0 +Next Steps +---------- + +With SQLAlchemy installed, new and old users alike can +:ref:`Proceed to the SQLAlchemy Tutorial `. + .. _migration: 1.3 to 1.4 Migration diff --git a/doc/build/orm/backref.rst b/doc/build/orm/backref.rst index 65d19eb185c..edc87cd19dd 100644 --- a/doc/build/orm/backref.rst +++ b/doc/build/orm/backref.rst @@ -1,148 +1,137 @@ .. _relationships_backref: -Linking Relationships with Backref ----------------------------------- +Using the legacy 'backref' relationship parameter +-------------------------------------------------- + +.. note:: The :paramref:`_orm.relationship.backref` keyword should be considered + legacy, and use of :paramref:`_orm.relationship.back_populates` with explicit + :func:`_orm.relationship` constructs should be preferred. Using + individual :func:`_orm.relationship` constructs provides advantages + including that both ORM mapped classes will include their attributes + up front as the class is constructed, rather than as a deferred step, + and configuration is more straightforward as all arguments are explicit. + New :pep:`484` features in SQLAlchemy 2.0 also take advantage of + attributes being explicitly present in source code rather than + using dynamic attribute generation. -The :paramref:`_orm.relationship.backref` keyword argument was first introduced in :ref:`ormtutorial_toplevel`, and has been -mentioned throughout many of the examples here. What does it actually do ? Let's start -with the canonical ``User`` and ``Address`` scenario:: +.. 
seealso:: + + For general information about bidirectional relationships, see the + following sections: + + :ref:`tutorial_orm_related_objects` - in the :ref:`unified_tutorial`, + presents an overview of bi-directional relationship configuration + and behaviors using :paramref:`_orm.relationship.back_populates` + + :ref:`back_populates_cascade` - notes on bi-directional :func:`_orm.relationship` + behavior regarding :class:`_orm.Session` cascade behaviors. + + :paramref:`_orm.relationship.back_populates` + + +The :paramref:`_orm.relationship.backref` keyword argument on the +:func:`_orm.relationship` construct allows the +automatic generation of a new :func:`_orm.relationship` that will automatically +be added to the ORM mapping for the related class. It will then be +placed into a :paramref:`_orm.relationship.back_populates` configuration +against the current :func:`_orm.relationship` being configured, with both +:func:`_orm.relationship` constructs referring to each other. - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship +Starting with the following example:: + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) The above configuration establishes a collection of ``Address`` objects on ``User`` called ``User.addresses``. It also establishes a ``.user`` attribute on ``Address`` which will -refer to the parent ``User`` object. +refer to the parent ``User`` object. Using :paramref:`_orm.relationship.back_populates` +it's equivalent to the following:: -In fact, the :paramref:`_orm.relationship.backref` keyword is only a common shortcut for placing a second -:func:`_orm.relationship` onto the ``Address`` mapping, including the establishment -of an event listener on both sides which will mirror attribute operations -in both directions. The above configuration is equivalent to:: - - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) user = relationship("User", back_populates="addresses") -Above, we add a ``.user`` relationship to ``Address`` explicitly. On -both relationships, the :paramref:`_orm.relationship.back_populates` directive tells each relationship -about the other one, indicating that they should establish "bidirectional" -behavior between each other. 
The primary effect of this configuration -is that the relationship adds event handlers to both attributes -which have the behavior of "when an append or set event occurs here, set ourselves -onto the incoming attribute using this particular attribute name". -The behavior is illustrated as follows. Start with a ``User`` and an ``Address`` -instance. The ``.addresses`` collection is empty, and the ``.user`` attribute -is ``None``:: - - >>> u1 = User() - >>> a1 = Address() - >>> u1.addresses - [] - >>> print(a1.user) - None - -However, once the ``Address`` is appended to the ``u1.addresses`` collection, -both the collection and the scalar attribute have been populated:: - - >>> u1.addresses.append(a1) - >>> u1.addresses - [<__main__.Address object at 0x12a6ed0>] - >>> a1.user - <__main__.User object at 0x12a6590> - -This behavior of course works in reverse for removal operations as well, as well -as for equivalent operations on both sides. Such as -when ``.user`` is set again to ``None``, the ``Address`` object is removed -from the reverse collection:: - - >>> a1.user = None - >>> u1.addresses - [] - -The manipulation of the ``.addresses`` collection and the ``.user`` attribute -occurs entirely in Python without any interaction with the SQL database. -Without this behavior, the proper state would be apparent on both sides once the -data has been flushed to the database, and later reloaded after a commit or -expiration operation occurs. The :paramref:`_orm.relationship.backref`/:paramref:`_orm.relationship.back_populates` behavior has the advantage -that common bidirectional operations can reflect the correct state without requiring -a database round trip. - -Remember, when the :paramref:`_orm.relationship.backref` keyword is used on a single relationship, it's -exactly the same as if the above two relationships were created individually -using :paramref:`_orm.relationship.back_populates` on each. - -Backref Arguments -~~~~~~~~~~~~~~~~~ - -We've established that the :paramref:`_orm.relationship.backref` keyword is merely a shortcut for building -two individual :func:`_orm.relationship` constructs that refer to each other. Part of -the behavior of this shortcut is that certain configurational arguments applied to -the :func:`_orm.relationship` -will also be applied to the other direction - namely those arguments that describe -the relationship at a schema level, and are unlikely to be different in the reverse -direction. The usual case -here is a many-to-many :func:`_orm.relationship` that has a :paramref:`_orm.relationship.secondary` argument, -or a one-to-many or many-to-one which has a :paramref:`_orm.relationship.primaryjoin` argument (the -:paramref:`_orm.relationship.primaryjoin` argument is discussed in :ref:`relationship_primaryjoin`). Such -as if we limited the list of ``Address`` objects to those which start with "tony":: - - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship +The behavior of the ``User.addresses`` and ``Address.user`` relationships +is that they now behave in a **bi-directional** way, indicating that +changes on one side of the relationship impact the other. An example +and discussion of this behavior is in the :ref:`unified_tutorial` +at :ref:`tutorial_orm_related_objects`. 
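As a brief illustration of this bi-directional behavior, the following minimal sketch reuses the ``User`` / ``Address`` mapping above and mirrors the doctest that formerly appeared in this section; appending on the collection side populates the scalar side purely in Python, before anything is flushed to the database::

    >>> u1 = User()
    >>> a1 = Address()
    >>> u1.addresses.append(a1)  # append on the collection side...
    >>> a1.user                  # ...the scalar attribute is populated
    <__main__.User object at ...>
    >>> a1.user = None           # setting the scalar side back to None...
    >>> u1.addresses             # ...removes the object from the collection
    []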
+ + +Backref Default Arguments +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since :paramref:`_orm.relationship.backref` generates a whole new +:func:`_orm.relationship`, the generation process by default +will attempt to include corresponding arguments in the new +:func:`_orm.relationship` that correspond to the original arguments. +As an example, below is a :func:`_orm.relationship` that includes a +:ref:`custom join condition ` +which also includes the :paramref:`_orm.relationship.backref` keyword:: + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.email.startswith('tony'))", - backref="user") + addresses = relationship( + "Address", + primaryjoin=( + "and_(User.id==Address.user_id, Address.email.startswith('tony'))" + ), + backref="user", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) -We can observe, by inspecting the resulting property, that both sides -of the relationship have this join condition applied:: +When the "backref" is generated, the :paramref:`_orm.relationship.primaryjoin` +condition is copied to the new :func:`_orm.relationship` as well:: >>> print(User.addresses.property.primaryjoin) "user".id = address.user_id AND address.email LIKE :email_1 || '%%' @@ -151,33 +140,40 @@ of the relationship have this join condition applied:: "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> -This reuse of arguments should pretty much do the "right thing" - it -uses only arguments that are applicable, and in the case of a many-to- -many relationship, will reverse the usage of +Other arguments that are transferrable include the +:paramref:`_orm.relationship.secondary` parameter that refers to a +many-to-many association table, as well as the "join" arguments :paramref:`_orm.relationship.primaryjoin` and -:paramref:`_orm.relationship.secondaryjoin` to correspond to the other -direction (see the example in :ref:`self_referential_many_to_many` for -this). +:paramref:`_orm.relationship.secondaryjoin`; "backref" is smart enough to know +that these two arguments should also be "reversed" when generating +the opposite side. -It's very often the case however that we'd like to specify arguments -that are specific to just the side where we happened to place the -"backref". This includes :func:`_orm.relationship` arguments like +Specifying Backref Arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Lots of other arguments for a "backref" are not implicit, and +include arguments like :paramref:`_orm.relationship.lazy`, :paramref:`_orm.relationship.remote_side`, :paramref:`_orm.relationship.cascade` and :paramref:`_orm.relationship.cascade_backrefs`. 
For this case we use -the :func:`.backref` function in place of a string:: +the :func:`.backref` function in place of a string; this will store +a specific set of arguments that will be transferred to the new +:func:`_orm.relationship` when generated:: # from sqlalchemy.orm import backref + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses = relationship("Address", - backref=backref("user", lazy="joined")) + addresses = relationship( + "Address", + backref=backref("user", lazy="joined"), + ) Where above, we placed a ``lazy="joined"`` directive only on the ``Address.user`` side, indicating that when a query against ``Address`` is made, a join to the ``User`` @@ -186,139 +182,3 @@ returned ``Address``. The :func:`.backref` function formatted the arguments we it into a form that is interpreted by the receiving :func:`_orm.relationship` as additional arguments to be applied to the new relationship it creates. -Setting cascade for backrefs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A key behavior that occurs in the 1.x series of SQLAlchemy regarding backrefs -is that :ref:`cascades ` will occur bidirectionally by -default. This basically means, if one starts with an ``User`` object -that's been persisted in the :class:`.Session`:: - - user = session.query(User).filter(User.id == 1).first() - -The above ``User`` is :term:`persistent` in the :class:`.Session`. It usually -is intuitive that if we create an ``Address`` object and append to the -``User.addresses`` collection, it is automatically added to the -:class:`.Session` as in the example below:: - - user = session.query(User).filter(User.id == 1).first() - address = Address(email_address='foo') - user.addresses.append(address) - -The above behavior is known as the "save update cascade" and is described -in the section :ref:`unitofwork_cascades`. - -However, if we instead created a new ``Address`` object, and associated the -``User`` object with the ``Address`` as follows:: - - address = Address(email_address='foo', user=user) - -In the above example, it is **not** as intuitive that the ``Address`` would -automatically be added to the :class:`.Session`. However, the backref behavior -of ``Address.user`` indicates that the ``Address`` object is also appended to -the ``User.addresses`` collection. This in turn initiates a **cascade** -operation which indicates that this ``Address`` should be placed into the -:class:`.Session` as a :term:`pending` object. - -Since this behavior has been identified as counter-intuitive to most people, -it can be disabled by setting :paramref:`_orm.relationship.cascade_backrefs` -to False, as in:: - - - class User(Base): - # ... - - addresses = relationship("Address", back_populates="user", cascade_backrefs=False) - -See the example in :ref:`backref_cascade` for further information. - -.. seealso:: - - :ref:`backref_cascade`. - - -One Way Backrefs -~~~~~~~~~~~~~~~~ - -An unusual case is that of the "one way backref". This is where the -"back-populating" behavior of the backref is only desirable in one -direction. An example of this is a collection which contains a -filtering :paramref:`_orm.relationship.primaryjoin` condition. We'd -like to append items to this collection as needed, and have them -populate the "parent" object on the incoming object. However, we'd -also like to have items that are not part of the collection, but still -have the same "parent" association - these items should never be in -the collection. 
- -Taking our previous example, where we established a -:paramref:`_orm.relationship.primaryjoin` that limited the collection -only to ``Address`` objects whose email address started with the word -``tony``, the usual backref behavior is that all items populate in -both directions. We wouldn't want this behavior for a case like the -following:: - - >>> u1 = User() - >>> a1 = Address(email='mary') - >>> a1.user = u1 - >>> u1.addresses - [<__main__.Address object at 0x1411910>] - -Above, the ``Address`` object that doesn't match the criterion of "starts with 'tony'" -is present in the ``addresses`` collection of ``u1``. After these objects are flushed, -the transaction committed and their attributes expired for a re-load, the ``addresses`` -collection will hit the database on next access and no longer have this ``Address`` object -present, due to the filtering condition. But we can do away with this unwanted side -of the "backref" behavior on the Python side by using two separate :func:`_orm.relationship` constructs, -placing :paramref:`_orm.relationship.back_populates` only on one side:: - - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship - - Base = declarative_base() - - class User(Base): - __tablename__ = 'user' - id = Column(Integer, primary_key=True) - name = Column(String) - addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.email.startswith('tony'))", - back_populates="user") - - class Address(Base): - __tablename__ = 'address' - id = Column(Integer, primary_key=True) - email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) - user = relationship("User") - -With the above scenario, appending an ``Address`` object to the ``.addresses`` -collection of a ``User`` will always establish the ``.user`` attribute on that -``Address``:: - - >>> u1 = User() - >>> a1 = Address(email='tony') - >>> u1.addresses.append(a1) - >>> a1.user - <__main__.User object at 0x1411850> - -However, applying a ``User`` to the ``.user`` attribute of an ``Address``, -will not append the ``Address`` object to the collection:: - - >>> a2 = Address(email='mary') - >>> a2.user = u1 - >>> a2 in u1.addresses - False - -Of course, we've disabled some of the usefulness of -:paramref:`_orm.relationship.backref` here, in that when we do append an -``Address`` that corresponds to the criteria of -``email.startswith('tony')``, it won't show up in the -``User.addresses`` collection until the session is flushed, and the -attributes reloaded after a commit or expire operation. While we -could consider an attribute event that checks this criterion in -Python, this starts to cross the line of duplicating too much SQL -behavior in Python. The backref behavior itself is only a slight -transgression of this philosophy - SQLAlchemy tries to keep these to a -minimum overall. diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index 40b3590b6fa..f50e5045d6a 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -7,13 +7,11 @@ A quick walkthrough of the basic relational patterns. 
The imports used for each of the following sections is as follows:: - from sqlalchemy import Table, Column, Integer, ForeignKey - from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, Table + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() - .. _relationship_patterns_o2m: One To Many @@ -24,28 +22,30 @@ the parent. :func:`_orm.relationship` is then specified on the parent, as refer a collection of items represented by the child:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent_table.id")) To establish a bidirectional relationship in one-to-many, where the "reverse" side is a many to one, specify an additional :func:`_orm.relationship` and connect the two using the :paramref:`_orm.relationship.back_populates` parameter:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child", back_populates="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent_table.id")) parent = relationship("Parent", back_populates="children") ``Child`` will get a ``parent`` attribute with many-to-one semantics. @@ -55,7 +55,7 @@ on a single :func:`_orm.relationship` instead of using :paramref:`_orm.relationship.back_populates`:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") @@ -88,13 +88,14 @@ Many to one places a foreign key in the parent table referencing the child. 
attribute will be created:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) Bidirectional behavior is achieved by adding a second :func:`_orm.relationship` @@ -102,13 +103,14 @@ and applying the :paramref:`_orm.relationship.back_populates` parameter in both directions:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child", back_populates="parents") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) parents = relationship("Parent", back_populates="child") @@ -116,9 +118,9 @@ Alternatively, the :paramref:`_orm.relationship.backref` parameter may be applied to a single :func:`_orm.relationship`, such as ``Parent.child``:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child", backref="parents") .. _relationships_one_to_one: @@ -143,16 +145,17 @@ a :ref:`many-to-one ` (``Child.parent``) relationships:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) # one-to-many collection children = relationship("Child", back_populates="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent_table.id")) # many-to-one scalar parent = relationship("Parent", back_populates="children") @@ -164,17 +167,18 @@ is converted into a scalar relationship using the ``uselist=False`` flag, renaming ``Parent.children`` to ``Parent.child`` for clarity:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) # previously one-to-many Parent.children is now # one-to-one Parent.child child = relationship("Child", back_populates="parent", uselist=False) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent_table.id")) # many-to-one side remains, see tip below parent = relationship("Parent", back_populates="child") @@ -212,18 +216,18 @@ in this case the ``uselist`` parameter:: from sqlalchemy.orm import backref + class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent_table.id")) parent = relationship("Parent", backref=backref("child", uselist=False)) - - .. 
_relationships_many_to_many: Many To Many @@ -236,19 +240,22 @@ table is indicated by the :paramref:`_orm.relationship.secondary` argument to class, so that the :class:`_schema.ForeignKey` directives can locate the remote tables with which to link:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id')), - Column('right_id', ForeignKey('right.id')) + association_table = Table( + "association_table", + Base.metadata, + Column("left_id", ForeignKey("left_table.id")), + Column("right_id", ForeignKey("right_table.id")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=association_table) + children = relationship("Child", secondary=association_table) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) .. tip:: @@ -263,55 +270,61 @@ remote tables with which to link:: this ensures that duplicate rows won't be persisted within the table regardless of issues on the application side:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association_table", + Base.metadata, + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) For a bidirectional relationship, both sides of the relationship contain a collection. Specify using :paramref:`_orm.relationship.back_populates`, and for each :func:`_orm.relationship` specify the common association table:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association_table", + Base.metadata, + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship( - "Child", - secondary=association_table, - back_populates="parents") + "Child", secondary=association_table, back_populates="parents" + ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) parents = relationship( - "Parent", - secondary=association_table, - back_populates="children") + "Parent", secondary=association_table, back_populates="children" + ) When using the :paramref:`_orm.relationship.backref` parameter instead of :paramref:`_orm.relationship.back_populates`, the backref will automatically use the same :paramref:`_orm.relationship.secondary` argument for the reverse relationship:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association_table", + Base.metadata, + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=association_table, - backref="parents") + children = 
relationship("Child", secondary=association_table, backref="parents") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) The :paramref:`_orm.relationship.secondary` argument of @@ -321,21 +334,21 @@ can define the ``association_table`` at a later point, as long as it's available to the callable after all module initialization is complete:: class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=lambda: association_table, - backref="parents") + children = relationship( + "Child", + secondary=lambda: association_table, + backref="parents", + ) With the declarative extension in use, the traditional "string name of the table" is accepted as well, matching the name of the table as stored in ``Base.metadata.tables``:: class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary="association", - backref="parents") + children = relationship("Child", secondary="association_table", backref="parents") .. warning:: When passed as a Python-evaluable string, the :paramref:`_orm.relationship.secondary` argument is interpreted using Python's @@ -421,39 +434,43 @@ is stored along with each association between ``Parent`` and ``Child``:: class Association(Base): - __tablename__ = 'association' - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + __tablename__ = "association_table" + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Association") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) As always, the bidirectional version makes use of :paramref:`_orm.relationship.back_populates` or :paramref:`_orm.relationship.backref`:: class Association(Base): - __tablename__ = 'association' - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + __tablename__ = "association_table" + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", back_populates="parents") parent = relationship("Parent", back_populates="children") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Association", back_populates="parent") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) parents = relationship("Association", back_populates="child") @@ -494,23 +511,25 @@ associated object, and a second to a target attribute. 
after :meth:`.Session.commit`:: class Association(Base): - __tablename__ = 'association' + __tablename__ = "association_table" - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", backref="parent_associations") parent = relationship("Parent", backref="child_associations") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", secondary="association") + children = relationship("Child", secondary="association_table") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right_table" id = Column(Integer, primary_key=True) Additionally, just as changes to one relationship aren't reflected in the @@ -548,6 +567,7 @@ classes using a string name, rather than the class itself:: children = relationship("Child", back_populates="parent") + class Child(Base): # ... @@ -584,7 +604,7 @@ package, including expression functions like :func:`_sql.desc` and children = relationship( "Child", order_by="desc(Child.email_address)", - primaryjoin="Parent.id == Child.parent_id" + primaryjoin="Parent.id == Child.parent_id", ) For the case where more than one module contains a class of the same name, @@ -597,7 +617,7 @@ within any of these string expressions:: children = relationship( "myapp.mymodel.Child", order_by="desc(myapp.mymodel.Child.email_address)", - primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id" + primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id", ) The qualified path can be any partial path that removes ambiguity between @@ -611,7 +631,7 @@ we can specify ``model1.Child`` or ``model2.Child``:: children = relationship( "model1.Child", order_by="desc(mymodel1.Child.email_address)", - primaryjoin="Parent.id == model1.Child.parent_id" + primaryjoin="Parent.id == model1.Child.parent_id", ) The :func:`_orm.relationship` construct also accepts Python functions or @@ -622,17 +642,20 @@ A Python functional approach might look like the following:: from sqlalchemy import desc + def _resolve_child_model(): - from myapplication import Child - return Child + from myapplication import Child + + return Child + class Parent(Base): # ... children = relationship( - _resolve_child_model(), + _resolve_child_model, order_by=lambda: desc(_resolve_child_model().email_address), - primaryjoin=lambda: Parent.id == _resolve_child_model().parent_id + primaryjoin=lambda: Parent.id == _resolve_child_model().parent_id, ) The full list of parameters which accept Python functions/lambdas or strings @@ -674,23 +697,23 @@ class were available, we could also apply it afterwards:: # first, module A, where Child has not been created yet, # we create a Parent class which knows nothing about Child + class Parent(Base): - # ... + ... - #... later, in Module B, which is imported after module A: + # ... later, in Module B, which is imported after module A: + class Child(Base): - # ... + ... + from module_a import Parent # assign the User.addresses relationship as a class variable. The # declarative base class will intercept this and map the relationship. - Parent.children = relationship( - Child, - primaryjoin=Child.parent_id==Parent.id - ) + Parent.children = relationship(Child, primaryjoin=Child.parent_id == Parent.id) .. 
note:: assignment of mapped properties to a declaratively mapped class will only function correctly if the "declarative base" class is used, which also @@ -718,15 +741,17 @@ declarative base and its :class:`_orm.registry`. We can then refer to this parameter:: keyword_author = Table( - 'keyword_author', Base.metadata, - Column('author_id', Integer, ForeignKey('authors.id')), - Column('keyword_id', Integer, ForeignKey('keywords.id')) - ) + "keyword_author_table", + Base.metadata, + Column("author_id", Integer, ForeignKey("authors_table.id")), + Column("keyword_id", Integer, ForeignKey("keywords_table.id")), + ) + class Author(Base): - __tablename__ = 'authors' + __tablename__ = "authors_table" id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary="keyword_author") + keywords = relationship("Keyword", secondary="keyword_author_table") For additional detail on many-to-many relationships see the section :ref:`relationships_many_to_many`. diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 1a2a7804c21..7cfd5d19dd5 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -22,7 +22,7 @@ Cascade behavior is configured using the :func:`~sqlalchemy.orm.relationship`:: class Order(Base): - __tablename__ = 'order' + __tablename__ = "order" items = relationship("Item", cascade="all, delete-orphan") customer = relationship("User", cascade="save-update") @@ -32,11 +32,11 @@ To set cascades on a backref, the same flag can be used with the its arguments back into :func:`~sqlalchemy.orm.relationship`:: class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" - order = relationship("Order", - backref=backref("items", cascade="all, delete-orphan") - ) + order = relationship( + "Order", backref=backref("items", cascade="all, delete-orphan") + ) .. sidebar:: The Origins of Cascade @@ -109,10 +109,10 @@ and added to another:: >>> user1 = sess1.query(User).filter_by(id=1).first() >>> address1 = user1.addresses[0] - >>> sess1.close() # user1, address1 no longer associated with sess1 + >>> sess1.close() # user1, address1 no longer associated with sess1 >>> user1.addresses.remove(address1) # address1 no longer associated with user1 >>> sess2 = Session() - >>> sess2.add(user1) # ... but it still gets added to the new session, + >>> sess2.add(user1) # ... 
but it still gets added to the new session, >>> address1 in sess2 # because it's still "pending" for flush True @@ -226,23 +226,27 @@ The following example adapts that of :ref:`relationships_many_to_many` to illustrate the ``cascade="all, delete"`` setting on **one** side of the association:: - association_table = Table('association', Base.metadata, - Column('left_id', Integer, ForeignKey('left.id')), - Column('right_id', Integer, ForeignKey('right.id')) + association_table = Table( + "association", + Base.metadata, + Column("left_id", Integer, ForeignKey("left.id")), + Column("right_id", Integer, ForeignKey("right.id")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship( "Child", secondary=association_table, back_populates="parents", - cascade="all, delete" + cascade="all, delete", ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship( "Parent", @@ -305,18 +309,20 @@ on the relevant ``FOREIGN KEY`` constraint as well:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship( - "Child", back_populates="parent", + "Child", + back_populates="parent", cascade="all, delete", - passive_deletes=True + passive_deletes=True, ) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id', ondelete="CASCADE")) + parent_id = Column(Integer, ForeignKey("parent.id", ondelete="CASCADE")) parent = relationship("Parent", back_populates="children") The behavior of the above configuration when a parent row is deleted @@ -455,13 +461,16 @@ on the parent->child side of the relationship, and we can then configure ``passive_deletes=True`` on the **other** side of the bidirectional relationship as illustrated below:: - association_table = Table('association', Base.metadata, - Column('left_id', Integer, ForeignKey('left.id', ondelete="CASCADE")), - Column('right_id', Integer, ForeignKey('right.id', ondelete="CASCADE")) + association_table = Table( + "association", + Base.metadata, + Column("left_id", Integer, ForeignKey("left.id", ondelete="CASCADE")), + Column("right_id", Integer, ForeignKey("right.id", ondelete="CASCADE")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship( "Child", @@ -470,14 +479,15 @@ relationship as illustrated below:: cascade="all, delete", ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship( "Parent", secondary=association_table, back_populates="children", - passive_deletes=True + passive_deletes=True, ) Using the above configuration, the deletion of a ``Parent`` object proceeds @@ -560,6 +570,8 @@ expunge from the :class:`.Session` using :meth:`.Session.expunge`, the operation should be propagated down to referred objects. +.. _back_populates_cascade: + .. _backref_cascade: Controlling Cascade on Backrefs @@ -576,9 +588,9 @@ default takes place on attribute change events emitted from backrefs. 
This is probably a confusing statement more easily described through demonstration; it means that, given a mapping such as this:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order') - }) + mapper_registry.map_imperatively( + Order, order_table, properties={"items": relationship(Item, backref="order")} + ) If an ``Order`` is already in the session, and is assigned to the ``order`` attribute of an ``Item``, the backref appends the ``Item`` to the ``items`` @@ -599,9 +611,11 @@ place:: This behavior can be disabled using the :paramref:`_orm.relationship.cascade_backrefs` flag:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order', cascade_backrefs=False) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={"items": relationship(Item, backref="order", cascade_backrefs=False)}, + ) So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items`` collection of ``o1``, but will not add ``i1`` to the session. You can, of @@ -616,11 +630,17 @@ parameter may be set to ``False`` on the backref side by using the :func:`_orm.backref` function instead of a string. For example, the above relationship could be declared:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship( - Item, backref=backref('order', cascade_backrefs=False), cascade_backrefs=False - ) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={ + "items": relationship( + Item, + backref=backref("order", cascade_backrefs=False), + cascade_backrefs=False, + ) + }, + ) This sets the ``cascade_backrefs=False`` behavior on both relationships. @@ -682,12 +702,12 @@ parent collection. The ``delete-orphan`` cascade accomplishes this, as illustrated in the example below:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" # ... - addresses = relationship( - "Address", cascade="all, delete-orphan") + addresses = relationship("Address", cascade="all, delete-orphan") + # ... @@ -709,9 +729,8 @@ that this related object is not to shared with any other parent simultaneously:: # ... preference = relationship( - "Preference", cascade="all, delete-orphan", - single_parent=True) - + "Preference", cascade="all, delete-orphan", single_parent=True + ) Above, if a hypothetical ``Preference`` object is removed from a ``User``, it will be deleted on flush:: diff --git a/doc/build/orm/classical.rst b/doc/build/orm/classical.rst index 3fd149f9285..a0bc70d890a 100644 --- a/doc/build/orm/classical.rst +++ b/doc/build/orm/classical.rst @@ -1,5 +1,5 @@ :orphan: -Moved! :ref:`classical_mapping` +Moved! :ref:`orm_imperative_mapping` diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index bc98b4b41d8..da50b3f8dbf 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -32,10 +32,10 @@ loading of child items both at load time as well as deletion time. Dynamic Relationship Loaders ---------------------------- -.. note:: This is a legacy feature. Using the :func:`_orm.with_parent` - filter in conjunction with :func:`_sql.select` is the :term:`2.0 style` - method of use. For relationships that shouldn't load, set - :paramref:`_orm.relationship.lazy` to ``noload``. +.. note:: SQLAlchemy 2.0 will have a slightly altered pattern for "dynamic" + loaders that does not rely upon the :class:`_orm.Query` object, which + will be legacy in 2.0. 
For current migration strategies, + see :ref:`migration_20_dynamic_loaders`. .. note:: This loader is in the general case not compatible with the :ref:`asyncio_toplevel` extension. It can be used with some limitations, as indicated in :ref:`Asyncio dynamic guidelines `. @@ -48,14 +48,15 @@ when accessed. Filtering criterion may be applied as well as limits and offsets, either explicitly or via array slices:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" posts = relationship(Post, lazy="dynamic") - jack = session.query(User).get(id) + + jack = session.get(User, id) # filter Jack's blog posts - posts = jack.posts.filter(Post.headline=='this is a post') + posts = jack.posts.filter(Post.headline == "this is a post") # apply array slices posts = jack.posts[5:20] @@ -63,10 +64,10 @@ offsets, either explicitly or via array slices:: The dynamic relationship supports limited write operations, via the :meth:`_orm.AppenderQuery.append` and :meth:`_orm.AppenderQuery.remove` methods:: - oldpost = jack.posts.filter(Post.headline=='old post').one() + oldpost = jack.posts.filter(Post.headline == "old post").one() jack.posts.remove(oldpost) - jack.posts.append(Post('new post')) + jack.posts.append(Post("new post")) Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible @@ -81,9 +82,7 @@ function in conjunction with ``lazy='dynamic'``:: class Post(Base): __table__ = posts_table - user = relationship(User, - backref=backref('posts', lazy='dynamic') - ) + user = relationship(User, backref=backref("posts", lazy="dynamic")) Note that eager/lazy loading options cannot be used in conjunction dynamic relationships at this time. @@ -111,9 +110,9 @@ A "noload" relationship never loads from the database, even when accessed. It is configured using ``lazy='noload'``:: class MyClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" - children = relationship(MyOtherClass, lazy='noload') + children = relationship(MyOtherClass, lazy="noload") Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the @@ -127,9 +126,9 @@ Alternatively, a "raise"-loaded relationship will raise an emit a lazy load:: class MyClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" - children = relationship(MyOtherClass, lazy='raise') + children = relationship(MyOtherClass, lazy="raise") Above, attribute access on the ``children`` collection will raise an exception if it was not previously eagerloaded. This includes read access but for @@ -166,11 +165,12 @@ values accessible through an attribute on the parent instance. By default, this collection is a ``list``:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" parent_id = Column(Integer, primary_key=True) children = relationship(Child) + parent = Parent() parent.children.append(Child()) print(parent.children[0]) @@ -181,12 +181,13 @@ default list, by specifying the :paramref:`_orm.relationship.collection_class` o :func:`~sqlalchemy.orm.relationship`:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" parent_id = Column(Integer, primary_key=True) # use a set children = relationship(Child, collection_class=set) + parent = Parent() child = Child() parent.children.add(child) @@ -203,24 +204,27 @@ to achieve a simple dictionary collection. It produces a dictionary class that of the mapped class as a key. 
Below we map an ``Item`` class containing a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: - from sqlalchemy import Column, Integer, String, ForeignKey - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection - from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=attribute_mapped_collection('keyword'), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=attribute_mapped_collection("keyword"), + cascade="all, delete-orphan", + ) + class Note(Base): - __tablename__ = 'note' + __tablename__ = "note" id = Column(Integer, primary_key=True) - item_id = Column(Integer, ForeignKey('item.id'), nullable=False) + item_id = Column(Integer, ForeignKey("item.id"), nullable=False) keyword = Column(String) text = Column(String) @@ -231,7 +235,7 @@ a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: ``Item.notes`` is then a dictionary:: >>> item = Item() - >>> item.notes['a'] = Note('a', 'atext') + >>> item.notes["a"] = Note("a", "atext") >>> item.notes.items() {'a': <__main__.Note object at 0x2eaaf0>} @@ -242,9 +246,9 @@ key we supply must match that of the actual ``Note`` object:: item = Item() item.notes = { - 'a': Note('a', 'atext'), - 'b': Note('b', 'btext') - } + "a": Note("a", "atext"), + "b": Note("b", "btext"), + } The attribute which :func:`.attribute_mapped_collection` uses as a key does not need to be mapped at all! Using a regular Python ``@property`` allows virtually @@ -253,17 +257,20 @@ below when we establish it as a tuple of ``Note.keyword`` and the first ten lett of the ``Note.text`` field:: class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=attribute_mapped_collection('note_key'), - backref="item", - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=attribute_mapped_collection("note_key"), + backref="item", + cascade="all, delete-orphan", + ) + class Note(Base): - __tablename__ = 'note' + __tablename__ = "note" id = Column(Integer, primary_key=True) - item_id = Column(Integer, ForeignKey('item.id'), nullable=False) + item_id = Column(Integer, ForeignKey("item.id"), nullable=False) keyword = Column(String) text = Column(String) @@ -290,12 +297,15 @@ object directly:: from sqlalchemy.orm.collections import column_mapped_collection + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=column_mapped_collection(Note.__table__.c.keyword), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=column_mapped_collection(Note.__table__.c.keyword), + cascade="all, delete-orphan", + ) as well as :func:`.mapped_collection` which is passed any callable function. 
Note that it's usually easier to use :func:`.attribute_mapped_collection` along @@ -303,12 +313,15 @@ with a ``@property`` as mentioned earlier:: from sqlalchemy.orm.collections import mapped_collection + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=mapped_collection(lambda note: note.text[0:10]), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=mapped_collection(lambda note: note.text[0:10]), + cascade="all, delete-orphan", + ) Dictionary mappings are often combined with the "Association Proxy" extension to produce streamlined dictionary views. See :ref:`proxying_dictionaries` and :ref:`composite_association_proxy` @@ -357,7 +370,7 @@ if the value of ``B.data`` is not set yet, the key will be ``None``:: Setting ``b1.data`` after the fact does not update the collection:: - >>> b1.data = 'the key' + >>> b1.data = "the key" >>> a1.bs {None: } @@ -365,14 +378,14 @@ Setting ``b1.data`` after the fact does not update the collection:: This can also be seen if one attempts to set up ``B()`` in the constructor. The order of arguments changes the result:: - >>> B(a=a1, data='the key') + >>> B(a=a1, data="the key") >>> a1.bs {None: } vs:: - >>> B(data='the key', a=a1) + >>> B(data="the key", a=a1) >>> a1.bs {'the key': } @@ -384,9 +397,9 @@ An event handler such as the following may also be used to track changes in the collection as well:: from sqlalchemy import event - from sqlalchemy.orm import attributes + @event.listens_for(B.data, "set") def set_item(obj, value, previous, initiator): if obj.a is not None: @@ -394,8 +407,6 @@ collection as well:: obj.a.bs[value] = obj obj.a.bs.pop(previous) - - .. autofunction:: attribute_mapped_collection .. autofunction:: column_mapped_collection @@ -456,16 +467,21 @@ interface are detected and instrumented via duck-typing: class ListLike(object): def __init__(self): self.data = [] + def append(self, item): self.data.append(item) + def remove(self, item): self.data.remove(item) + def extend(self, items): self.data.extend(items) + def __iter__(self): return iter(self.data) + def foo(self): - return 'foo' + return "foo" ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't @@ -480,10 +496,13 @@ explicit about the interface you are implementing by providing an def __init__(self): self.data = set() + def append(self, item): self.data.add(item) + def remove(self, item): self.data.remove(item) + def __iter__(self): return iter(self.data) @@ -511,6 +530,7 @@ get the job done. from sqlalchemy.orm.collections import collection + class SetLike(object): __emulates__ = set @@ -569,6 +589,7 @@ collection support to other classes. 
It uses a keying function to delegate to from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection + class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" @@ -585,8 +606,8 @@ from within an already instrumented call can cause events to be fired off repeatedly, or inappropriately, leading to internal state corruption in rare cases:: - from sqlalchemy.orm.collections import MappedCollection,\ - collection + from sqlalchemy.orm.collections import MappedCollection, collection + class MyMappedCollection(MappedCollection): """Use @internally_instrumented when your methods @@ -611,20 +632,6 @@ must decorate appender and remover methods, however- there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through ``itervalues()`` unless otherwise decorated. -.. note:: - - Due to a bug in MappedCollection prior to version 0.7.6, this - workaround usually needs to be called before a custom subclass - of :class:`.MappedCollection` which uses :meth:`.collection.internally_instrumented` - can be used:: - - from sqlalchemy.orm.collections import _instrument_class, MappedCollection - _instrument_class(MappedCollection) - - This will ensure that the :class:`.MappedCollection` has been properly - initialized with custom ``__setitem__()`` and ``__delitem__()`` - methods before used in a custom subclass. - .. autoclass:: sqlalchemy.orm.collections.MappedCollection :members: @@ -646,6 +653,7 @@ to restrict the decorations to just your usage in relationships. For example: class MyAwesomeList(some.great.library.AwesomeList): pass + # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial @@ -658,8 +666,6 @@ Various internal methods. .. autofunction:: bulk_replace -.. autoclass:: collection - .. autodata:: collection_adapter .. autoclass:: CollectionAdapter diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index fb3ca476783..670ae871fd1 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -21,12 +21,10 @@ A simple example represents pairs of columns as a ``Point`` object. return self.x, self.y def __repr__(self): - return "Point(x=%r, y=%r)" % (self.x, self.y) + return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return isinstance(other, Point) and \ - other.x == self.x and \ - other.y == self.y + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -44,13 +42,13 @@ objects. 
Then, the :func:`.composite` function is used to assign new attributes that will represent sets of columns via the ``Point`` class:: from sqlalchemy import Column, Integer - from sqlalchemy.orm import composite - from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.orm import composite, declarative_base Base = declarative_base() + class Vertex(Base): - __tablename__ = 'vertices' + __tablename__ = "vertices" id = Column(Integer, primary_key=True) x1 = Column(Integer) @@ -64,10 +62,14 @@ attributes that will represent sets of columns via the ``Point`` class:: A classical mapping above would define each :func:`.composite` against the existing table:: - mapper_registry.map_imperatively(Vertex, vertices_table, properties={ - 'start':composite(Point, vertices_table.c.x1, vertices_table.c.y1), - 'end':composite(Point, vertices_table.c.x2, vertices_table.c.y2), - }) + mapper_registry.map_imperatively( + Vertex, + vertices_table, + properties={ + "start": composite(Point, vertices_table.c.x1, vertices_table.c.y1), + "end": composite(Point, vertices_table.c.x2, vertices_table.c.y2), + }, + ) We can now persist and use ``Vertex`` instances, as well as query for them, using the ``.start`` and ``.end`` attributes against ad-hoc ``Point`` instances: @@ -118,19 +120,27 @@ to define existing or new operations. Below we illustrate the "greater than" operator, implementing the same expression that the base "greater than" does:: - from sqlalchemy.orm.properties import CompositeProperty from sqlalchemy import sql + from sqlalchemy.orm.properties import CompositeProperty + class PointComparator(CompositeProperty.Comparator): def __gt__(self, other): """redefine the 'greater than' operation""" - return sql.and_(*[a>b for a, b in - zip(self.__clause_element__().clauses, - other.__composite_values__())]) + return sql.and_( + *[ + a > b + for a, b in zip( + self.__clause_element__().clauses, + other.__composite_values__(), + ) + ] + ) + class Vertex(Base): - ___tablename__ = 'vertices' + __tablename__ = "vertices" id = Column(Integer, primary_key=True) x1 = Column(Integer) @@ -138,10 +148,8 @@ the same expression that the base "greater than" does:: x2 = Column(Integer) y2 = Column(Integer) - start = composite(Point, x1, y1, - comparator_factory=PointComparator) - end = composite(Point, x2, y2, - comparator_factory=PointComparator) + start = composite(Point, x1, y1, comparator_factory=PointComparator) + end = composite(Point, x2, y2, comparator_factory=PointComparator) Nesting Composites ------------------- @@ -155,7 +163,8 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: from sqlalchemy.orm import composite - class Point(object): + + class Point: def __init__(self, x, y): self.x = x self.y = y @@ -164,17 +173,16 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return self.x, self.y def __repr__(self): - return "Point(x=%r, y=%r)" % (self.x, self.y) + return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return isinstance(other, Point) and \ - other.x == self.x and \ - other.y == self.y + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) - class Vertex(object): + + class Vertex: def __init__(self, start, end): self.start = start self.end = end @@ -182,18 +190,14 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: @classmethod def _generate(self, x1, y1, x2, y2): """generate a Vertex from a row""" - return 
Vertex( - Point(x1, y1), - Point(x2, y2) - ) + return Vertex(Point(x1, y1), Point(x2, y2)) def __composite_values__(self): - return \ - self.start.__composite_values__() + \ - self.end.__composite_values__() + return self.start.__composite_values__() + self.end.__composite_values__() + class HasVertex(Base): - __tablename__ = 'has_vertex' + __tablename__ = "has_vertex" id = Column(Integer, primary_key=True) x1 = Column(Integer) y1 = Column(Integer) @@ -209,7 +213,10 @@ We can then use the above mapping as:: s.add(hv) s.commit() - hv = s.query(HasVertex).filter( - HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))).first() + hv = ( + s.query(HasVertex) + .filter(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))) + .first() + ) print(hv.vertex.start) print(hv.vertex.end) diff --git a/doc/build/orm/constructors.rst b/doc/build/orm/constructors.rst index b78b0f0cb97..50ae218c2fe 100644 --- a/doc/build/orm/constructors.rst +++ b/doc/build/orm/constructors.rst @@ -1,3 +1,5 @@ +:orphan: + .. currentmodule:: sqlalchemy.orm .. _mapping_constructors: @@ -5,58 +7,6 @@ Constructors and Object Initialization ====================================== -Mapping imposes no restrictions or requirements on the constructor -(``__init__``) method for the class. You are free to require any arguments for -the function that you wish, assign attributes to the instance that are unknown -to the ORM, and generally do anything else you would normally do when writing -a constructor for a Python class. - -The SQLAlchemy ORM does not call ``__init__`` when recreating objects from -database rows. The ORM's process is somewhat akin to the Python standard -library's ``pickle`` module, invoking the low level ``__new__`` method and -then quietly restoring attributes directly on the instance rather than calling -``__init__``. - -If you need to do some setup on database-loaded instances before they're ready -to use, there is an event hook known as :meth:`.InstanceEvents.load` which -can achieve this; it is also available via a class-specific decorator called -:func:`_orm.reconstructor`. When using :func:`_orm.reconstructor`, -the mapper will invoke a single decorated method with no -arguments every time it loads or reconstructs an instance of the -class. This is -useful for recreating transient properties that are normally assigned in -``__init__``:: - - from sqlalchemy import orm - - class MyMappedClass(object): - def __init__(self, data): - self.data = data - # we need stuff on all instances, but not in the database. - self.stuff = [] - - @orm.reconstructor - def init_on_load(self): - self.stuff = [] - -Above, when ``obj = MyMappedClass()`` is executed, the ``__init__`` constructor -is invoked normally and the ``data`` argument is required. When instances are -loaded during a :class:`~sqlalchemy.orm.query.Query` operation as in -``query(MyMappedClass).one()``, ``init_on_load`` is called. - -Any method may be tagged as the :func:`_orm.reconstructor`, even -the ``__init__`` method itself, but only one method may be tagged as such. It is invoked after all immediate -column-level attributes are loaded as well as after eagerly-loaded scalar -relationships. Eagerly loaded collections may be only partially populated -or not populated at all, depending on the kind of eager loading used. - -ORM state changes made to objects at this stage will not be recorded for the -next flush operation, so the activity within a reconstructor should be -conservative. 
- -:func:`_orm.reconstructor` is a shortcut into a larger system -of "instance level" events, which can be subscribed to using the -event API - see :class:`.InstanceEvents` for the full API description -of these events. +This document has been removed. See :ref:`orm_mapped_class_behavior` +as well as :meth:`_orm.InstanceEvents.load` for what was covered here. -.. autofunction:: reconstructor diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index 2e4dbd93b08..adafc4bab61 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -19,6 +19,21 @@ The object is the :class:`.scoped_session` object, and it represents a registry pattern, a good introduction can be found in `Patterns of Enterprise Architecture `_. +.. warning:: + + The :class:`.scoped_session` registry by default uses a Python + ``threading.local()`` + in order to track :class:`_orm.Session` instances. **This is not + necessarily compatible with all application servers**, particularly those + which make use of greenlets or other alternative forms of concurrency + control, which may lead to race conditions (e.g. randomly occurring + failures) when used in moderate to high concurrency scenarios. + Please read :ref:`unitofwork_contextual_threadlocal` and + :ref:`session_lifespan` below to more fully understand the implications + of using ``threading.local()`` to track :class:`_orm.Session` objects + and consider more explicit means of scoping when using application servers + which are not based on traditional threads. + .. note:: The :class:`.scoped_session` object is a very popular and useful object @@ -103,6 +118,8 @@ underlying :class:`.Session` being maintained by the registry:: The above code accomplishes the same task as that of acquiring the current :class:`.Session` by calling upon the registry, then using that :class:`.Session`. +.. _unitofwork_contextual_threadlocal: + Thread-Local Scope ------------------ @@ -236,6 +253,7 @@ this in conjunction with a hypothetical event marker provided by the web framewo Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) + @on_request_end def remove_session(req): Session.remove() @@ -251,7 +269,7 @@ otherwise self-managed. Contextual Session API ---------------------- -.. autoclass:: sqlalchemy.orm.scoping.scoped_session +.. autoclass:: sqlalchemy.orm.scoped_session :members: :inherited-members: diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst new file mode 100644 index 00000000000..4e9943e76ee --- /dev/null +++ b/doc/build/orm/dataclasses.rst @@ -0,0 +1,517 @@ +.. _orm_dataclasses_toplevel: + +====================================== +Integration with dataclasses and attrs +====================================== + +SQLAlchemy 1.4 has limited support for ORM mappings that are established +against classes that have already been pre-instrumented using either Python's +built-in dataclasses_ library or the attrs_ third party integration library. + +.. tip:: SQLAlchemy 2.0 will include a new dataclass integration feature which + allows for a particular class to be mapped and converted into a Python + dataclass simultaneously, with full support for SQLAlchemy's declarative + syntax. Within the scope of the 1.4 release, the ``@dataclass`` decorator + is used separately as documented in this section. + +.. 
_orm_declarative_dataclasses: + +Applying ORM Mappings to an existing dataclass +---------------------------------------------- + +The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class +decorator to automatically generate boilerplate definitions of common object +methods including ``__init__()``, ``__repr__()``, and other methods. SQLAlchemy +supports the application of ORM mappings to a class after it has been processed +with the ``@dataclass`` decorator, by using either the +:meth:`_orm.registry.mapped` class decorator, or the +:meth:`_orm.registry.map_imperatively` method to apply ORM mappings to the +class imperatively. + +.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses + +To map an existing dataclass, SQLAlchemy's "inline" declarative directives +cannot be used directly; ORM directives are assigned using one of three +techniques: + +* Using "Declarative with Imperative Table", the table / column to be mapped + is defined using a :class:`_schema.Table` object assigned to the + ``__table__`` attribute of the class; relationships are defined within + the ``__mapper_args__`` dictionary. The class is mapped using the + :meth:`_orm.registry.mapped` decorator. An example is below at + :ref:`orm_declarative_dataclasses_imperative_table`. + +* Using full "Declarative", the Declarative-interpreted directives such as + :class:`_schema.Column`, :func:`_orm.relationship` are added to the + ``.metadata`` dictionary of the ``dataclasses.field()`` construct, where + they are consumed by the declarative process. The class is again + mapped using the :meth:`_orm.registry.mapped` decorator. See the example + below at :ref:`orm_declarative_dataclasses_declarative_table`. + +* An "Imperative" mapping can be applied to an existing dataclass using + the :meth:`_orm.registry.map_imperatively` method to produce the mapping + in exactly the same way as described at :ref:`orm_imperative_mapping`. + This is illustrated below at :ref:`orm_imperative_dataclasses`. + +The general process by which SQLAlchemy applies mappings to a dataclass +is the same as that of an ordinary class, but also includes that +SQLAlchemy will detect class-level attributes that were part of the +dataclasses declaration process and replace them at runtime with +the usual SQLAlchemy ORM mapped attributes. The ``__init__`` method that +would have been generated by dataclasses is left intact, as are +all the other methods that dataclasses generates, such as +``__eq__()``, ``__repr__()``, etc. + +.. _orm_declarative_dataclasses_imperative_table: + +Mapping dataclasses using Declarative With Imperative Table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An example of a mapping using ``@dataclass`` with +:ref:`orm_imperative_table_configuration` is below. A complete +:class:`_schema.Table` object is constructed explicitly and assigned to the +``__table__`` attribute. Instance fields are defined using normal dataclass +syntax.
Additional :class:`.MapperProperty` +definitions such as :func:`.relationship`, are placed in the +:ref:`__mapper_args__ ` class-level +dictionary underneath the ``properties`` key, corresponding to the +:paramref:`_orm.mapper.properties` parameter:: + + from __future__ import annotations + + from dataclasses import dataclass, field + from typing import List, Optional + + from sqlalchemy import Column, ForeignKey, Integer, String, Table + from sqlalchemy.orm import registry, relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id: int = field(init=False) + name: Optional[str] = None + fullname: Optional[str] = None + nickname: Optional[str] = None + addresses: List[Address] = field(default_factory=list) + + __mapper_args__ = { # type: ignore + "properties": { + "addresses": relationship("Address"), + } + } + + + @mapper_registry.mapped + @dataclass + class Address: + __table__ = Table( + "address", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + id: int = field(init=False) + user_id: int = field(init=False) + email_address: Optional[str] = None + +In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` +attributes are defined as ``field(init=False)``. This means that parameters for +these won't be added to ``__init__()`` methods, but +:class:`.Session` will still be able to set them after getting their values +during flush from autoincrement or other default value generator. To +allow them to be specified in the constructor explicitly, they would instead +be given a default value of ``None``. + +For a :func:`_orm.relationship` to be declared separately, it needs to be +specified directly within the :paramref:`_orm.mapper.properties` dictionary +which itself is specified within the ``__mapper_args__`` dictionary, so that it +is passed to the :func:`_orm.mapper` construction function. An alternative to this +approach is in the next example. + +.. _orm_declarative_dataclasses_declarative_table: + +Mapping dataclasses using Declarative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The fully declarative approach requires that :class:`_schema.Column` objects +are declared as class attributes, which when using dataclasses would conflict +with the dataclass-level attributes. An approach to combine these together +is to make use of the ``metadata`` attribute on the ``dataclass.field`` +object, where SQLAlchemy-specific mapping information may be supplied. +Declarative supports extraction of these parameters when the class +specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also +provides a more succinct method of indicating the :func:`_orm.relationship` +association:: + + + from __future__ import annotations + + from dataclasses import dataclass, field + from typing import List + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import registry, relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + name: str = field(default=None, metadata={"sa": Column(String(50))}) + fullname: str = field(default=None, metadata={"sa": Column(String(50))}) + nickname: str = field(default=None, metadata={"sa": Column(String(12))}) + addresses: List[Address] = field( + default_factory=list, metadata={"sa": relationship("Address")} + ) + + + @mapper_registry.mapped + @dataclass + class Address: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + user_id: int = field(init=False, metadata={"sa": Column(ForeignKey("user.id"))}) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) + +.. _orm_imperative_dataclasses: + +Mapping dataclasses using Imperative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As described previously, a class which is set up as a dataclass using the +``@dataclass`` decorator can then be further decorated using the +:meth:`_orm.registry.mapped` decorator in order to apply declarative-style +mapping to the class. As an alternative to using the +:meth:`_orm.registry.mapped` decorator, we may also pass the class through the +:meth:`_orm.registry.map_imperatively` method instead, so that we may pass all +:class:`_schema.Table` and :func:`_orm.mapper` configuration imperatively to +the function rather than having them defined on the class itself as class +variables:: + + from __future__ import annotations + + from dataclasses import dataclass + from dataclasses import field + from typing import List + + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @dataclass + class User: + id: int = field(init=False) + name: str = None + fullname: str = None + nickname: str = None + addresses: List[Address] = field(default_factory=list) + + + @dataclass + class Address: + id: int = field(init=False) + user_id: int = field(init=False) + email_address: str = None + + + metadata_obj = MetaData() + + user = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) + + mapper_registry.map_imperatively(Address, address) + +.. 
_orm_declarative_dataclasses_mixin: + +Using Declarative Mixins with Dataclasses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes +are introduced. One requirement of declarative mixins is that certain +constructs that can't be easily duplicated must be given as callables, +using the :class:`_orm.declared_attr` decorator, such as in the +example at :ref:`orm_declarative_mixins_relationships`:: + + class RefTargetMixin: + @declared_attr + def target_id(cls): + return Column("target_id", ForeignKey("target.id")) + + @declared_attr + def target(cls): + return relationship("Target") + +This form is supported within the Dataclasses ``field()`` object by using +a lambda to indicate the SQLAlchemy construct inside the ``field()``. +Using :func:`_orm.declared_attr` to surround the lambda is optional. +If we wanted to produce our ``User`` class above where the ORM fields +came from a mixin that is itself a dataclass, the form would be:: + + @dataclass + class UserMixin: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + + addresses: List[Address] = field( + default_factory=list, metadata={"sa": lambda: relationship("Address")} + ) + + + @dataclass + class AddressMixin: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + user_id: int = field( + init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} + ) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) + + + @mapper_registry.mapped + class User(UserMixin): + pass + + + @mapper_registry.mapped + class Address(AddressMixin): + pass + +.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, + namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` + objects with foreign key declarations, to be used within "Dataclasses + with Declarative Table" style mappings. + + + +.. _orm_declarative_attrs_imperative_table: + +Applying ORM mappings to an existing attrs class +------------------------------------------------- + +The attrs_ library is a popular third party library that provides similar +features as dataclasses, with many additional features provided not +found in ordinary dataclasses. + +A class augmented with attrs_ uses the ``@define`` decorator. This decorator +initiates a process to scan the class for attributes that define the class' +behavior, which are then used to generate methods, documentation, and +annotations. + +The SQLAlchemy ORM supports mapping an attrs_ class using **Declarative with +Imperative Table** or **Imperative** mapping. The general form of these two +styles is fully equivalent to the +:ref:`orm_declarative_dataclasses_declarative_table` and +:ref:`orm_declarative_dataclasses_imperative_table` mapping forms used with +dataclasses, where the inline attribute directives used by dataclasses or attrs +are unchanged, and SQLAlchemy's table-oriented instrumentation is applied at +runtime. + +The ``@define`` decorator of attrs_ by default replaces the annotated class +with a new __slots__ based class, which is not supported. When using the old +style annotation ``@attr.s`` or using ``define(slots=False)``, the class +does not get replaced. 
Furthermore attrs removes its own class-bound attributes +after the decorator runs, so that SQLAlchemy's mapping process takes over these +attributes without any issue. Both decorators, ``@attr.s`` and ``@define(slots=False)`` +work with SQLAlchemy. + +Mapping attrs with Declarative "Imperative Table" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the "Declarative with Imperative Table" style, a :class:`_schema.Table` +object is declared inline with the declarative class. The +``@define`` decorator is applied to the class first, then the +:meth:`_orm.registry.mapped` decorator second:: + + + from __future__ import annotations + + from typing import List + + from attrs import define + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @define(slots=False) + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id: int + name: str + fullname: str + nickname: str + addresses: List[Address] + + __mapper_args__ = { # type: ignore + "properties": { + "addresses": relationship("Address"), + } + } + + + @mapper_registry.mapped + @define(slots=False) + class Address: + __table__ = Table( + "address", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + id: int + user_id: int + email_address: Optional[str] + +.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on + a mapped class, cannot be used with SQLAlchemy mappings without fully + implementing alternative + :ref:`attribute instrumentation `, as mapped + classes normally rely upon direct access to ``__dict__`` for state storage. + Behavior is undefined when this option is present. 
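Since the text above states that the older ``@attr.s`` decorator is also supported, a minimal sketch of the same ``Address`` mapping written with that form (assuming the same ``mapper_registry`` and ``user`` table as in the preceding example; this variant is not part of the diff itself) might look like::

    import attr
    from sqlalchemy import Column, ForeignKey, Integer, String, Table
    from sqlalchemy.orm import registry

    mapper_registry = registry()


    @mapper_registry.mapped
    @attr.s(auto_attribs=True)
    class Address:
        # table metadata is declared inline, exactly as in the
        # ``@define(slots=False)`` version above
        __table__ = Table(
            "address",
            mapper_registry.metadata,
            Column("id", Integer, primary_key=True),
            Column("user_id", Integer, ForeignKey("user.id")),
            Column("email_address", String(50)),
        )
        # annotated fields are collected by attrs to generate
        # __init__(), __repr__(), and friends
        id: int
        user_id: int
        email_address: str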
+ + + +Mapping attrs with Imperative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Just as is the case with dataclasses, we can make use of +:meth:`_orm.registry.map_imperatively` to map an existing ``attrs`` class +as well:: + + from __future__ import annotations + + from typing import List + + from attrs import define + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @define(slots=False) + class User: + id: int + name: str + fullname: str + nickname: str + addresses: List[Address] + + + @define(slots=False) + class Address: + id: int + user_id: int + email_address: Optional[str] + + + metadata_obj = MetaData() + + user = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) + + mapper_registry.map_imperatively(Address, address) + +The above form is equivalent to the previous example using +Declarative with Imperative Table. + + + +.. _dataclasses: https://docs.python.org/3/library/dataclasses.html +.. _attrs: https://pypi.org/project/attrs/ diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 9240d9011b2..9f031bd6e1d 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -42,14 +42,19 @@ objects but also relationships and SQL expressions:: # mapping attributes using declarative with declarative table # i.e. __tablename__ - from sqlalchemy import Column, Integer, String, Text, ForeignKey - from sqlalchemy.orm import column_property, relationship, deferred - from sqlalchemy.orm import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, String, Text + from sqlalchemy.orm import ( + column_property, + declarative_base, + deferred, + relationship, + ) Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -60,8 +65,9 @@ objects but also relationships and SQL expressions:: addresses = relationship("Address", back_populates="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -90,13 +96,17 @@ hybrid table style:: # mapping attributes using declarative with imperative table # i.e. 
__table__ - from sqlalchemy import Table - from sqlalchemy import Column, Integer, String, Text, ForeignKey - from sqlalchemy.orm import column_property, relationship, deferred - from sqlalchemy.orm import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, String, Table, Text + from sqlalchemy.orm import ( + column_property, + declarative_base, + deferred, + relationship, + ) Base = declarative_base() + class User(Base): __table__ = Table( "user", @@ -104,13 +114,14 @@ hybrid table style:: Column("id", Integer, primary_key=True), Column("name", String), Column("firstname", String(50)), - Column("lastname", String(50)) + Column("lastname", String(50)), ) fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname) addresses = relationship("Address", back_populates="user") + class Address(Base): __table__ = Table( "address", @@ -118,7 +129,7 @@ hybrid table style:: Column("id", Integer, primary_key=True), Column("user_id", ForeignKey("user.id")), Column("email_address", String), - Column("address_statistics", Text) + Column("address_statistics", Text), ) address_statistics = deferred(__table__.c.address_statistics) @@ -161,54 +172,140 @@ using the ``__mapper_args__`` declarative class variable, which is a dictionary that is passed as keyword arguments to the :func:`_orm.mapper` function. Some examples: +**Map Specific Primary Key Columns** + +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.primary_key` parameter, which establishes +particular columns as part of what the ORM should consider to be a primary +key for the class, independently of schema-level primary key constraints:: + + class GroupUsers(Base): + __tablename__ = "group_users" + + user_id = Column(String(40)) + group_id = Column(String(40)) + + __mapper_args__ = {"primary_key": [user_id, group_id]} + +.. seealso:: + + :ref:`mapper_primary_key` - further background on ORM mapping of explicit + columns as primary key columns + **Version ID Column** -The :paramref:`_orm.mapper.version_id_col` and -:paramref:`_orm.mapper.version_id_generator` parameters:: +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.version_id_col` and +:paramref:`_orm.mapper.version_id_generator` parameters, which configure +an ORM-maintained version counter that is updated and checked within the +:term:`unit of work` flush process:: from datetime import datetime + class Widget(Base): - __tablename__ = 'widgets' + __tablename__ = "widgets" id = Column(Integer, primary_key=True) timestamp = Column(DateTime, nullable=False) __mapper_args__ = { - 'version_id_col': timestamp, - 'version_id_generator': lambda v:datetime.now() + "version_id_col": timestamp, + "version_id_generator": lambda v: datetime.now(), } +.. 
seealso:: + + :ref:`mapper_version_counter` - background on the ORM version counter feature + **Single Table Inheritance** -The :paramref:`_orm.mapper.polymorphic_on` and -:paramref:`_orm.mapper.polymorphic_identity` parameters:: +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.polymorphic_on` and +:paramref:`_orm.mapper.polymorphic_identity` parameters, which are used when +configuring a single-table inheritance mapping:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" person_id = Column(Integer, primary_key=True) type = Column(String, nullable=False) __mapper_args__ = dict( polymorphic_on=type, - polymorphic_identity="person" + polymorphic_identity="person", ) + class Employee(Person): __mapper_args__ = dict( - polymorphic_identity="employee" + polymorphic_identity="employee", ) +.. seealso:: + + :ref:`single_inheritance` - background on the ORM single table inheritance + mapping feature. + +Constructing mapper arguments dynamically +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + The ``__mapper_args__`` dictionary may be generated from a class-bound descriptor method rather than from a fixed dictionary by making use of the -:func:`_orm.declared_attr` construct. The section :ref:`orm_mixins_toplevel` -discusses this concept further. +:func:`_orm.declared_attr` construct. This is useful to create arguments +for mappers that are programmatically derived from the table configuration +or other aspects of the mapped class. A dynamic ``__mapper_args__`` +attribute will typically be useful when using a Declarative Mixin or +abstract base class. + +For example, to omit from the mapping +any columns that have a special :attr:`.Column.info` value, a mixin +can use a ``__mapper_args__`` method that scans for these columns from the +``cls.__table__`` attribute and passes them to the :paramref:`_orm.mapper.exclude_properties` +collection:: + + from sqlalchemy import Column + from sqlalchemy import Integer + from sqlalchemy import select + from sqlalchemy import String + from sqlalchemy.orm import declarative_base + from sqlalchemy.orm import declared_attr + + + class ExcludeColsWFlag: + @declared_attr + def __mapper_args__(cls): + return { + "exclude_properties": [ + column.key + for column in cls.__table__.c + if column.info.get("exclude", False) + ] + } + + + Base = declarative_base() + + + class SomeClass(ExcludeColsWFlag, Base): + __tablename__ = "some_table" + + id = Column(Integer, primary_key=True) + data = Column(String) + not_needed = Column(String, info={"exclude": True}) + +Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__`` +hook that will scan for :class:`.Column` objects that include the key/value +``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then +add their string "key" name to the :paramref:`_orm.mapper.exclude_properties` +collection which will prevent the resulting :class:`.Mapper` from considering +these columns for any SQL operations. .. 
seealso:: :ref:`orm_mixins_toplevel` + Other Declarative Mapping Directives -------------------------------------- @@ -223,7 +320,7 @@ assumed to be completed and the 'configure' step has finished:: class MyClass(Base): @classmethod def __declare_last__(cls): - "" + """ """ # do something with mappings ``__declare_first__()`` @@ -235,7 +332,7 @@ configuration via the :meth:`.MapperEvents.before_configured` event:: class MyClass(Base): @classmethod def __declare_first__(cls): - "" + """ """ # do something before mappings are configured .. versionadded:: 0.9.3 @@ -268,31 +365,35 @@ be illustrated using :meth:`_orm.registry.mapped` as follows:: reg = registry() + class BaseOne: metadata = MetaData() + class BaseTwo: metadata = MetaData() + @reg.mapped class ClassOne: - __tablename__ = 't1' # will use reg.metadata + __tablename__ = "t1" # will use reg.metadata id = Column(Integer, primary_key=True) + @reg.mapped class ClassTwo(BaseOne): - __tablename__ = 't1' # will use BaseOne.metadata + __tablename__ = "t1" # will use BaseOne.metadata id = Column(Integer, primary_key=True) + @reg.mapped class ClassThree(BaseTwo): - __tablename__ = 't1' # will use BaseTwo.metadata + __tablename__ = "t1" # will use BaseTwo.metadata id = Column(Integer, primary_key=True) - .. versionchanged:: 1.4.3 The :meth:`_orm.registry.mapped` decorator will honor an attribute named ``.metadata`` on the class as an alternate :class:`_schema.MetaData` collection to be used in place of the @@ -322,24 +423,27 @@ subclasses to extend just from the special class:: __abstract__ = True def some_helpful_method(self): - "" + """ """ @declared_attr def __mapper_args__(cls): - return {"helpful mapper arguments":True} + return {"helpful mapper arguments": True} + class MyMappedClass(SomeAbstractBase): - "" + pass One possible use of ``__abstract__`` is to use a distinct :class:`_schema.MetaData` for different bases:: Base = declarative_base() + class DefaultBase(Base): __abstract__ = True metadata = MetaData() + class OtherBase(Base): __abstract__ = True metadata = MetaData() @@ -352,7 +456,6 @@ created perhaps within distinct databases:: DefaultBase.metadata.create_all(some_engine) OtherBase.metadata.create_all(some_other_engine) - ``__table_cls__`` ~~~~~~~~~~~~~~~~~ @@ -363,10 +466,7 @@ to a :class:`_schema.Table` that one generates here:: class MyMixin(object): @classmethod def __table_cls__(cls, name, metadata_obj, *arg, **kw): - return Table( - "my_" + name, - metadata_obj, *arg, **kw - ) + return Table(f"my_{name}", metadata_obj, *arg, **kw) The above mixin would cause all :class:`_schema.Table` objects generated to include the prefix ``"my_"``, followed by the name normally specified using the @@ -386,15 +486,18 @@ such as, define as single-inheritance if there is no primary key present:: @classmethod def __table_cls__(cls, *arg, **kw): for obj in arg[1:]: - if (isinstance(obj, Column) and obj.primary_key) or \ - isinstance(obj, PrimaryKeyConstraint): + if (isinstance(obj, Column) and obj.primary_key) or isinstance( + obj, PrimaryKeyConstraint + ): return Table(*arg, **kw) return None + class Person(AutoTable, Base): id = Column(Integer, primary_key=True) + class Employee(Person): employee_name = Column(String) diff --git a/doc/build/orm/declarative_mapping.rst b/doc/build/orm/declarative_mapping.rst index 9d2f3af40a2..1bb07e6af4a 100644 --- a/doc/build/orm/declarative_mapping.rst +++ b/doc/build/orm/declarative_mapping.rst @@ -12,6 +12,7 @@ top level introduction. .. 
toctree:: :maxdepth: 3 + declarative_styles declarative_tables declarative_config declarative_mixins diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 9bb4c782e4d..29ac56b97d6 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -14,22 +14,30 @@ usage of mixin classes, as well as via augmenting the declarative base produced by either the :meth:`_orm.registry.generate_base` method or :func:`_orm.declarative_base` functions. +When using mixins or abstract base classes with Declarative, a decorator +known as :func:`_orm.declared_attr` is frequently used. This decorator +allows the creation of class methods that produce a parameter or ORM construct that will be +part of a declarative mapping. Generating constructs using a callable +allows for Declarative to get a new copy of a particular kind of object +each time it calls upon the mixin or abstract base on behalf of a new +class that's being mapped. + An example of some commonly mixed-in idioms is below:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class MyMixin: - @declared_attr def __tablename__(cls): return cls.__name__.lower() - __table_args__ = {'mysql_engine': 'InnoDB'} - __mapper_args__= {'always_refresh': True} + __table_args__ = {"mysql_engine": "InnoDB"} + __mapper_args__ = {"always_refresh": True} + + id = Column(Integer, primary_key=True) - id = Column(Integer, primary_key=True) class MyModel(MyMixin, Base): name = Column(String(1000)) @@ -37,7 +45,11 @@ An example of some commonly mixed-in idioms is below:: Where above, the class ``MyModel`` will contain an "id" column as the primary key, a ``__tablename__`` attribute that derives from the name of the class itself, as well as ``__table_args__`` -and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. +and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. The +:func:`_orm.declared_attr` decorator applied to a class method called +``def __tablename__(cls):`` has the effect of turning the method into a class +method while also indicating to Declarative that this attribute is significant +within the mapping. .. tip:: @@ -69,21 +81,22 @@ section can also be applied to the base class itself, for patterns that should apply to all classes derived from a particular base. 
This is achieved using the ``cls`` argument of the :func:`_orm.declarative_base` function:: - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_base, declared_attr + class Base: @declared_attr def __tablename__(cls): return cls.__name__.lower() - __table_args__ = {'mysql_engine': 'InnoDB'} + __table_args__ = {"mysql_engine": "InnoDB"} - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) - from sqlalchemy.orm import declarative_base Base = declarative_base(cls=Base) + class MyModel(Base): name = Column(String(1000)) @@ -101,10 +114,11 @@ declaration:: class TimestampMixin: created_at = Column(DateTime, default=func.now()) + class MyModel(TimestampMixin, Base): - __tablename__ = 'test' + __tablename__ = "test" - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) name = Column(String(1000)) Where above, all declarative classes that include ``TimestampMixin`` @@ -135,14 +149,16 @@ patterns common to many classes can be defined as callables:: from sqlalchemy.orm import declared_attr + @declarative_mixin class ReferenceAddressMixin: @declared_attr def address_id(cls): - return Column(Integer, ForeignKey('address.id')) + return Column(Integer, ForeignKey("address.id")) + class User(ReferenceAddressMixin, Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) Where above, the ``address_id`` class-level callable is executed at the @@ -161,11 +177,12 @@ will resolve them at class construction time:: def type_(cls): return Column(String(50)) - __mapper_args__= {'polymorphic_on':type_} + __mapper_args__ = {"polymorphic_on": type_} + class MyModel(MyMixin, Base): - __tablename__='test' - id = Column(Integer, primary_key=True) + __tablename__ = "test" + id = Column(Integer, primary_key=True) .. _orm_declarative_mixins_relationships: @@ -184,25 +201,27 @@ reference a common target class via many-to-one:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): return relationship("Target") + class Foo(RefTargetMixin, Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + class Bar(RefTargetMixin, Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) + class Target(Base): - __tablename__ = 'target' + __tablename__ = "target" id = Column(Integer, primary_key=True) - Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -220,16 +239,17 @@ Declarative will be using as it calls the methods on its own, thus using The canonical example is the primaryjoin condition that depends upon another mixed-in column:: - @declarative_mixin - class RefTargetMixin: + @declarative_mixin + class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship(Target, - primaryjoin=Target.id==cls.target_id # this is *incorrect* + return relationship( + Target, + primaryjoin=Target.id == cls.target_id, # this is *incorrect* ) Mapping a class using the above mixin, we will get an error like:: @@ -247,13 +267,11 @@ The condition above is resolved using a lambda:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship(Target, - primaryjoin=lambda: Target.id==cls.target_id - ) + return relationship(Target, primaryjoin=lambda: Target.id == cls.target_id) or alternatively, the string form (which ultimately generates a lambda):: @@ -261,13 +279,11 @@ or alternatively, the string form (which ultimately generates a lambda):: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship("Target", - primaryjoin="Target.id==%s.target_id" % cls.__name__ - ) + return relationship(Target, primaryjoin=f"Target.id=={cls.__name__}.target_id") .. seealso:: @@ -285,11 +301,11 @@ requirement so that no reliance on copying is needed:: @declarative_mixin class SomethingMixin: - @declared_attr def dprop(cls): return deferred(Column(Integer)) + class Something(SomethingMixin, Base): __tablename__ = "something" @@ -300,96 +316,16 @@ the :class:`_orm.declared_attr` is invoked:: @declarative_mixin class SomethingMixin: x = Column(Integer) - y = Column(Integer) @declared_attr def x_plus_y(cls): return column_property(cls.x + cls.y) - .. versionchanged:: 1.0.0 mixin columns are copied to the final mapped class so that :class:`_orm.declared_attr` methods can access the actual column that will be mapped. -Mixing in Association Proxy and Other Attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Mixins can specify user-defined attributes as well as other extension -units such as :func:`.association_proxy`. The usage of -:class:`_orm.declared_attr` is required in those cases where the attribute must -be tailored specifically to the target subclass. An example is when -constructing multiple :func:`.association_proxy` attributes which each -target a different type of child object. 
Below is an -:func:`.association_proxy` mixin example which provides a scalar list of -string values to an implementing class:: - - from sqlalchemy import Column, Integer, ForeignKey, String - from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import declarative_base - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import relationship - - Base = declarative_base() - - @declarative_mixin - class HasStringCollection: - @declared_attr - def _strings(cls): - class StringAttribute(Base): - __tablename__ = cls.string_table_name - id = Column(Integer, primary_key=True) - value = Column(String(50), nullable=False) - parent_id = Column(Integer, - ForeignKey('%s.id' % cls.__tablename__), - nullable=False) - def __init__(self, value): - self.value = value - - return relationship(StringAttribute) - - @declared_attr - def strings(cls): - return association_proxy('_strings', 'value') - - class TypeA(HasStringCollection, Base): - __tablename__ = 'type_a' - string_table_name = 'type_a_strings' - id = Column(Integer(), primary_key=True) - - class TypeB(HasStringCollection, Base): - __tablename__ = 'type_b' - string_table_name = 'type_b_strings' - id = Column(Integer(), primary_key=True) - -Above, the ``HasStringCollection`` mixin produces a :func:`_orm.relationship` -which refers to a newly generated class called ``StringAttribute``. The -``StringAttribute`` class is generated with its own :class:`_schema.Table` -definition which is local to the parent class making usage of the -``HasStringCollection`` mixin. It also produces an :func:`.association_proxy` -object which proxies references to the ``strings`` attribute onto the ``value`` -attribute of each ``StringAttribute`` instance. - -``TypeA`` or ``TypeB`` can be instantiated given the constructor -argument ``strings``, a list of strings:: - - ta = TypeA(strings=['foo', 'bar']) - tb = TypeB(strings=['bat', 'bar']) - -This list will generate a collection -of ``StringAttribute`` objects, which are persisted into a table that's -local to either the ``type_a_strings`` or ``type_b_strings`` table:: - - >>> print(ta._strings) - [<__main__.StringAttribute object at 0x10151cd90>, - <__main__.StringAttribute object at 0x10151ce10>] - -When constructing the :func:`.association_proxy`, the -:class:`_orm.declared_attr` decorator must be used so that a distinct -:func:`.association_proxy` object is created for each of the ``TypeA`` -and ``TypeB`` classes. - .. _decl_mixin_inheritance: Controlling table inheritance with mixins @@ -411,8 +347,8 @@ correct answer for each. 
For example, to create a mixin that gives every class a simple table name based on class name:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class Tablename: @@ -420,14 +356,16 @@ name based on class name:: def __tablename__(cls): return cls.__name__.lower() + class Person(Tablename, Base): id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): __tablename__ = None - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} primary_language = Column(String(50)) Alternatively, we can modify our ``__tablename__`` function to return @@ -435,9 +373,12 @@ Alternatively, we can modify our ``__tablename__`` function to return the effect of those subclasses being mapped with single table inheritance against the parent:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import has_inherited_table + from sqlalchemy.orm import ( + declarative_mixin, + declared_attr, + has_inherited_table, + ) + @declarative_mixin class Tablename: @@ -447,14 +388,16 @@ against the parent:: return None return cls.__name__.lower() + class Person(Tablename, Base): id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} .. _mixin_inheritance_columns: @@ -473,17 +416,19 @@ a primary key:: class HasId: @declared_attr def id(cls): - return Column('id', Integer, primary_key=True) + return Column("id", Integer, primary_key=True) + class Person(HasId, Base): - __tablename__ = 'person' - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + __tablename__ = "person" + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): - __tablename__ = 'engineer' + __tablename__ = "engineer" primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} It is usually the case in joined-table inheritance that we want distinctly named columns on each subclass. 
However in this case, we may want to have @@ -498,19 +443,21 @@ function should be invoked **for each class in the hierarchy**, in *almost* @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('person.id'), primary_key=True) + return Column(ForeignKey("person.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class Person(HasIdMixin, Base): - __tablename__ = 'person' - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + __tablename__ = "person" + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): - __tablename__ = 'engineer' + __tablename__ = "engineer" primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} .. warning:: @@ -537,19 +484,21 @@ define on the class itself. The here to create user-defined collation routines that pull from multiple collections:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class MySQLSettings: - __table_args__ = {'mysql_engine':'InnoDB'} + __table_args__ = {"mysql_engine": "InnoDB"} + @declarative_mixin class MyOtherMixin: - __table_args__ = {'info':'foo'} + __table_args__ = {"info": "foo"} + class MyModel(MySQLSettings, MyOtherMixin, Base): - __tablename__='my_model' + __tablename__ = "my_model" @declared_attr def __table_args__(cls): @@ -558,7 +507,7 @@ from multiple collections:: args.update(MyOtherMixin.__table_args__) return args - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) Creating Indexes with Mixins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -569,13 +518,15 @@ establish it as part of ``__table_args__``:: @declarative_mixin class MyMixin: - a = Column(Integer) - b = Column(Integer) + a = Column(Integer) + b = Column(Integer) @declared_attr def __table_args__(cls): - return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),) + return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),) + class MyModel(MyMixin, Base): - __tablename__ = 'atable' - c = Column(Integer,primary_key=True) + __tablename__ = "atable" + c = Column(Integer, primary_key=True) + diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst new file mode 100644 index 00000000000..c1536a78ba3 --- /dev/null +++ b/doc/build/orm/declarative_styles.rst @@ -0,0 +1,191 @@ +.. _orm_declarative_styles_toplevel: + +========================== +Declarative Mapping Styles +========================== + +As introduced at :ref:`orm_declarative_mapping`, the **Declarative Mapping** is +the typical way that mappings are constructed in modern SQLAlchemy. This +section will provide an overview of forms that may be used for Declarative +mapper configuration. + + +.. 
_orm_declarative_generated_base_class: + +Using a Generated Base Class +---------------------------- + +The most common approach is to generate a "base" class using the +:func:`_orm.declarative_base` function:: + + from sqlalchemy.orm import declarative_base + + # declarative base class + Base = declarative_base() + +The declarative base class may also be created from an existing +:class:`_orm.registry`, by using the :meth:`_orm.registry.generate_base` +method:: + + from sqlalchemy.orm import registry + + reg = registry() + + # declarative base class + Base = reg.generate_base() + +With the declarative base class, new mapped classes are declared as subclasses +of the base:: + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base + + # declarative base class + Base = declarative_base() + + + # an example mapping using the base + class User(Base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String) + fullname = Column(String) + nickname = Column(String) + +Above, the :func:`_orm.declarative_base` function returns a new base class from +which new classes to be mapped may inherit from, as above a new mapped +class ``User`` is constructed. + +For each subclass constructed, the body of the class then follows the +declarative mapping approach which defines both a :class:`_schema.Table` +as well as a :class:`_orm.Mapper` object behind the scenes which comprise +a full mapping. + +.. seealso:: + + :ref:`orm_declarative_table_config_toplevel` + + :ref:`orm_declarative_mapper_config_toplevel` + + +.. _orm_explicit_declarative_base: + +Creating an Explicit Base Non-Dynamically (for use with mypy, similar) +---------------------------------------------------------------------- + +SQLAlchemy includes a :ref:`Mypy plugin ` that automatically +accommodates for the dynamically generated ``Base`` class delivered by +SQLAlchemy functions like :func:`_orm.declarative_base`. For the **SQLAlchemy +1.4 series only**, this plugin works along with a new set of typing stubs +published at `sqlalchemy2-stubs `_. + +When this plugin is not in use, or when using other :pep:`484` tools which +may not know how to interpret this class, the declarative base class may +be produced in a fully explicit fashion using the +:class:`_orm.DeclarativeMeta` directly as follows:: + + from sqlalchemy.orm import registry + from sqlalchemy.orm.decl_api import DeclarativeMeta + + mapper_registry = registry() + + + class Base(metaclass=DeclarativeMeta): + __abstract__ = True + + registry = mapper_registry + metadata = mapper_registry.metadata + + __init__ = mapper_registry.constructor + +The above ``Base`` is equivalent to one created using the +:meth:`_orm.registry.generate_base` method and will be fully understood by +type analysis tools without the use of plugins. + +.. seealso:: + + :ref:`mypy_toplevel` - background on the Mypy plugin which applies the + above structure automatically when running Mypy. + + +.. _orm_declarative_decorator: + +Declarative Mapping using a Decorator (no declarative base) +------------------------------------------------------------ + +As an alternative to using the "declarative base" class is to apply +declarative mapping to a class explicitly, using either an imperative technique +similar to that of a "classical" mapping, or more succinctly by using +a decorator. The :meth:`_orm.registry.mapped` function is a class decorator +that can be applied to any Python class with no hierarchy in place. 
The +Python class otherwise is configured in declarative style normally:: + + from sqlalchemy import Column, ForeignKey, Integer, String, Text + from sqlalchemy.orm import registry, relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + class User: + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String) + + addresses = relationship("Address", back_populates="user") + + + @mapper_registry.mapped + class Address: + __tablename__ = "address" + + id = Column(Integer, primary_key=True) + user_id = Column(ForeignKey("user.id")) + email_address = Column(String) + + user = relationship("User", back_populates="addresses") + +Above, the same :class:`_orm.registry` that we'd use to generate a declarative +base class via its :meth:`_orm.registry.generate_base` method may also apply +a declarative-style mapping to a class without using a base. When using +the above style, the mapping of a particular class will **only** proceed +if the decorator is applied to that class directly. For inheritance +mappings, the decorator should be applied to each subclass:: + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + + @mapper_registry.mapped + class Person: + __tablename__ = "person" + + person_id = Column(Integer, primary_key=True) + type = Column(String, nullable=False) + + __mapper_args__ = { + "polymorphic_on": type, + "polymorphic_identity": "person", + } + + + @mapper_registry.mapped + class Employee(Person): + __tablename__ = "employee" + + person_id = Column(ForeignKey("person.person_id"), primary_key=True) + + __mapper_args__ = { + "polymorphic_identity": "employee", + } + +Both the "declarative table" and "imperative table" styles of declarative +mapping may be used with the above mapping style. + +The decorator form of mapping is particularly useful when combining a +SQLAlchemy declarative mapping with other forms of class declaration, notably +the Python ``dataclasses`` module. See the next section. + diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index e935193c7d3..986205ec0d2 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -29,13 +29,14 @@ With the declarative base class, the typical form of mapping includes an attribute ``__tablename__`` that indicates the name of a :class:`_schema.Table` that should be generated along with the mapping:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -63,6 +64,11 @@ to produce a :class:`_schema.Table` that is equivalent to:: Column("nickname", String), ) +.. seealso:: + + :ref:`mapping_columns_toplevel` - contains additional notes on affecting + how :class:`_orm.Mapper` interprets incoming :class:`.Column` objects. + .. _orm_declarative_metadata: Accessing Table and Metadata @@ -114,29 +120,29 @@ The attribute can be specified in one of two forms. 
One is as a dictionary:: class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'mysql_engine':'InnoDB'} + __tablename__ = "sometable" + __table_args__ = {"mysql_engine": "InnoDB"} The other, a tuple, where each argument is positional (usually constraints):: class MyClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - ) + ForeignKeyConstraint(["id"], ["remote_table.id"]), + UniqueConstraint("foo"), + ) Keyword arguments can be specified with the above form by specifying the last argument as a dictionary:: class MyClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - {'autoload':True} - ) + ForeignKeyConstraint(["id"], ["remote_table.id"]), + UniqueConstraint("foo"), + {"autoload": True}, + ) A class may also specify the ``__table_args__`` declarative attribute, as well as the ``__tablename__`` attribute, in a dynamic style using the @@ -156,9 +162,8 @@ dictionary:: class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'schema': 'some_schema'} - + __tablename__ = "sometable" + __table_args__ = {"schema": "some_schema"} The schema name can also be applied to all :class:`_schema.Table` objects globally by using the :paramref:`_schema.MetaData.schema` parameter documented @@ -167,15 +172,15 @@ may be constructed separately and passed either to :func:`_orm.registry` or :func:`_orm.declarative_base`:: from sqlalchemy import MetaData + metadata_obj = MetaData(schema="some_schema") - Base = declarative_base(metadata = metadata_obj) + Base = declarative_base(metadata=metadata_obj) class MyClass(Base): # will use "some_schema" by default - __tablename__ = 'sometable' - + __tablename__ = "sometable" .. seealso:: @@ -183,36 +188,26 @@ or :func:`_orm.declarative_base`:: .. _orm_declarative_table_adding_columns: -Adding New Columns -^^^^^^^^^^^^^^^^^^^ - -The declarative table configuration allows the addition of new -:class:`_schema.Column` objects under two scenarios. The most basic -is that of simply assigning new :class:`_schema.Column` objects to the -class:: - - MyClass.some_new_column = Column('data', Unicode) +Appending additional columns to an existing Declarative mapped class +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The above operation performed against a declarative class that has been -mapped using the declarative base (note, not the decorator form of declarative) -will add the above :class:`_schema.Column` to the :class:`_schema.Table` -using the :meth:`_schema.Table.append_column` method and will also add the -column to the :class:`_orm.Mapper` to be fully mapped. +A declarative table configuration allows the addition of new +:class:`_schema.Column` objects to an existing mapping after the :class:`.Table` +metadata has already been generated. -.. note:: assignment of new columns to an existing declaratively mapped class - will only function correctly if the "declarative base" class is used, which - also provides for a metaclass-driven ``__setattr__()`` method which will - intercept these operations. It will **not** work if the declarative - decorator provided by - :meth:`_orm.registry.mapped` is used, nor will it work for an imperatively - mapped class mapped by :meth:`_orm.registry.map_imperatively`. 
+For a declarative class that is declared using a declarative base class, +the underlying metaclass :class:`.DeclarativeMeta` includes a ``__setattr__()`` +method that will intercept additional :class:`.Column` objects and +add them to both the :class:`.Table` using :meth:`.Table.append_column` +as well as to the existing :class:`.Mapper` using :meth:`.Mapper.add_property`:: + MyClass.some_new_column = Column("data", Unicode) -The other scenario where a :class:`_schema.Column` is added on the fly is -when an inheriting subclass that has no table of its own indicates -additional columns; these columns will be added to the superclass table. -The section :ref:`single_inheritance` discusses single table inheritance. - +Additional :class:`_schema.Column` objects may also be added to a mapping +in the specific circumstance of using single table inheritance, where +additional columns are present on mapped subclasses that have +no :class:`.Table` of their own. This is illustrated in the section +:ref:`single_inheritance`. .. _orm_imperative_table_configuration: @@ -231,9 +226,8 @@ object is produced separately and passed to the declarative process directly:: + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base - from sqlalchemy import Column, Integer, String, ForeignKey - Base = declarative_base() @@ -250,6 +244,7 @@ directly:: Column("nickname", String), ) + # construct the User class using this table. class User(Base): __table__ = user_table @@ -278,33 +273,40 @@ mapper configuration:: class Person(Base): __table__ = Table( - 'person', + "person", Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('type', String(50)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("type", String(50)), ) __mapper_args__ = { "polymorphic_on": __table__.c.type, - "polymorhpic_identity": "person" + "polymorphic_identity": "person", } The "imperative table" form is also used when a non-:class:`_schema.Table` construct, such as a :class:`_sql.Join` or :class:`_sql.Subquery` object, is to be mapped. 
An example below:: - from sqlalchemy import select, func + from sqlalchemy import func, select - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select @@ -337,26 +339,62 @@ use a declarative hybrid mapping, passing the :paramref:`_schema.Table.autoload_with` parameter to the :class:`_schema.Table`:: - engine = create_engine("postgresql://user:pass@hostname/my_existing_database") + from sqlalchemy import create_engine + from sqlalchemy import Table + from sqlalchemy.orm import declarative_base + + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") + + Base = declarative_base() + class MyClass(Base): __table__ = Table( - 'mytable', + "mytable", Base.metadata, - autoload_with=engine + autoload_with=engine, ) -A major downside of the above approach however is that it requires the database +A variant on the above pattern that scales much better is to use the +:meth:`.MetaData.reflect` method to reflect a full set of :class:`.Table` +objects at once, then refer to them from the :class:`.MetaData`:: + + + from sqlalchemy import create_engine + from sqlalchemy import Table + from sqlalchemy.orm import declarative_base + + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") + + Base = declarative_base() + + Base.metadata.reflect(engine) + + + class MyClass(Base): + __table__ = Base.metadata.tables["mytable"] + +.. seealso:: + + :ref:`mapper_automated_reflection_schemes` - further notes on using + table reflection with mapped classes + +A major downside to the above approach is that the mapped classes cannot +be declared until the tables have been reflected, which requires the database connectivity source to be present while the application classes are being declared; it's typical that classes are declared as the modules of an application are being imported, but database connectivity isn't available until the application starts running code so that it can consume configuration -information and create an engine. +information and create an engine. There are currently two approaches +to working around this. + +.. 
_orm_declarative_reflected_deferred_reflection: Using DeferredReflection ^^^^^^^^^^^^^^^^^^^^^^^^^ -To accommodate this case, a simple extension called the +To accommodate the use case of declaring mapped classes where reflection of +table metadata can occur afterwards, a simple extension called the :class:`.DeferredReflection` mixin is available, which alters the declarative mapping process to be delayed until a special class-level :meth:`.DeferredReflection.prepare` method is called, which will perform @@ -364,22 +402,25 @@ the reflection process against a target database, and will integrate the results with the declarative table mapping process, that is, classes which use the ``__tablename__`` attribute:: - from sqlalchemy.orm import declarative_base from sqlalchemy.ext.declarative import DeferredReflection + from sqlalchemy.orm import declarative_base Base = declarative_base() + class Reflected(DeferredReflection): __abstract__ = True + class Foo(Reflected, Base): - __tablename__ = 'foo' + __tablename__ = "foo" bars = relationship("Bar") + class Bar(Reflected, Base): - __tablename__ = 'bar' + __tablename__ = "bar" - foo_id = Column(Integer, ForeignKey('foo.id')) + foo_id = Column(Integer, ForeignKey("foo.id")) Above, we create a mixin class ``Reflected`` that will serve as a base for classes in our declarative hierarchy that should become mapped when @@ -387,23 +428,28 @@ the ``Reflected.prepare`` method is called. The above mapping is not complete until we do so, given an :class:`_engine.Engine`:: - engine = create_engine("postgresql://user:pass@hostname/my_existing_database") + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Reflected.prepare(engine) The purpose of the ``Reflected`` class is to define the scope at which classes should be reflectively mapped. The plugin will search among the subclass tree of the target against which ``.prepare()`` is called and reflect -all tables. +all tables which are named by declared classes; tables in the target database +that are not part of mappings and are not related to the target tables +via foreign key constraint will not be reflected. Using Automap ^^^^^^^^^^^^^^ -A more automated solution to mapping against an existing database where -table reflection is to be used is to use the :ref:`automap_toplevel` -extension. This extension will generate entire mapped classes from a -database schema, and allows several hooks for customization including the -ability to explicitly map some or all classes while still making use of -reflection to fill in the remaining columns. +A more automated solution to mapping against an existing database where table +reflection is to be used is to use the :ref:`automap_toplevel` extension. This +extension will generate entire mapped classes from a database schema, including +relationships between classes based on observed foreign key constraints. While +it includes hooks for customization, such as hooks that allow custom +class naming and relationship naming schemes, automap is oriented towards an +expedient zero-configuration style of working. If an application wishes to have +a fully explicit model that makes use of table reflection, the +:ref:`orm_declarative_reflected_deferred_reflection` may be preferable. .. 
seealso:: diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index de2001e6f58..214338a0b7a 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -15,6 +15,7 @@ the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns. +.. _associationproxy_scalar_collections: Simplifying Scalar Collections ------------------------------ @@ -23,47 +24,50 @@ Consider a many-to-many mapping between two classes, ``User`` and ``Keyword``. Each ``User`` can have any number of ``Keyword`` objects, and vice-versa (the many-to-many pattern is described at :ref:`relationships_many_to_many`):: - from sqlalchemy import Column, Integer, String, ForeignKey, Table + from sqlalchemy import Column, ForeignKey, Integer, String, Table from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - kw = relationship("Keyword", secondary=lambda: userkeywords_table) + kw = relationship("Keyword", secondary=lambda: user_keyword_table) def __init__(self, name): self.name = name + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword - userkeywords_table = Table('userkeywords', Base.metadata, - Column('user_id', Integer, ForeignKey("user.id"), - primary_key=True), - Column('keyword_id', Integer, ForeignKey("keyword.id"), - primary_key=True) + + user_keyword_table = Table( + "user_keyword", + Base.metadata, + Column("user_id", Integer, ForeignKey("user.id"), primary_key=True), + Column("keyword_id", Integer, ForeignKey("keyword.id"), primary_key=True), ) Reading and manipulating the collection of "keyword" strings associated with ``User`` requires traversal from each collection element to the ``.keyword`` attribute, which can be awkward:: - >>> user = User('jek') - >>> user.kw.append(Keyword('cheese inspector')) + >>> user = User("jek") + >>> user.kw.append(Keyword("cheese-inspector")) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) - cheese inspector + cheese-inspector >>> print([keyword.keyword for keyword in user.kw]) - ['cheese inspector'] + ['cheese-inspector'] The ``association_proxy`` is applied to the ``User`` class to produce a "view" of the ``kw`` relationship, which only exposes the string @@ -71,27 +75,28 @@ value of ``.keyword`` associated with each ``Keyword`` object:: from sqlalchemy.ext.associationproxy import association_proxy + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - kw = relationship("Keyword", secondary=lambda: userkeywords_table) + kw = relationship("Keyword", secondary=lambda: user_keyword_table) def __init__(self, name): self.name = name # proxy the 'keyword' attribute from the 'kw' relationship - keywords = association_proxy('kw', 'keyword') + keywords = association_proxy("kw", "keyword") We can now reference the ``.keywords`` collection as a listing of strings, which is both readable and writable. 
New ``Keyword`` objects are created for us transparently:: - >>> user = User('jek') - >>> user.keywords.append('cheese inspector') + >>> user = User("jek") + >>> user.keywords.append("cheese-inspector") >>> user.keywords - ['cheese inspector'] - >>> user.keywords.append('snack ninja') + ['cheese-inspector'] + >>> user.keywords.append("snack ninja") >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] @@ -120,11 +125,11 @@ assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: - user.keywords.append('cheese inspector') + user.keywords.append("cheese-inspector") Is translated by the association proxy into the operation:: - user.kw.append(Keyword('cheese inspector')) + user.kw.append(Keyword("cheese-inspector")) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. For those cases where a @@ -137,8 +142,9 @@ singular argument. Below we illustrate this using a lambda as is typical:: # ... # use Keyword(keyword=kw) on append() events - keywords = association_proxy('kw', 'keyword', - creator=lambda kw: Keyword(keyword=kw)) + keywords = association_proxy( + "kw", "keyword", creator=lambda kw: Keyword(keyword=kw) + ) The ``creator`` function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. In the case of a dictionary-based @@ -153,47 +159,51 @@ relationship, and is described at :ref:`association_pattern`. Association proxies are useful for keeping "association objects" out of the way during regular use. -Suppose our ``userkeywords`` table above had additional columns +Suppose our ``user_keyword`` table above had additional columns which we'd like to map explicitly, but in most cases we don't require direct access to these attributes. Below, we illustrate -a new mapping which introduces the ``UserKeyword`` class, which -is mapped to the ``userkeywords`` table illustrated earlier. +a new mapping which introduces the ``UserKeywordAssociation`` class, which +is mapped to the ``user_keyword`` table illustrated earlier. This class adds an additional column ``special_key``, a value which we occasionally want to access, but not in the usual case. 
We create an association proxy on the ``User`` class called -``keywords``, which will bridge the gap from the ``user_keywords`` +``keywords``, which will bridge the gap from the ``user_keyword_associations`` collection of ``User`` to the ``.keyword`` attribute present on each -``UserKeyword``:: +``UserKeywordAssociation``:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" + id = Column(Integer, primary_key=True) name = Column(String(64)) - # association proxy of "user_keywords" collection + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + cascade="all, delete-orphan", + ) + # association proxy of "user_keyword_associations" collection # to "keyword" attribute - keywords = association_proxy('user_keywords', 'keyword') + keywords = association_proxy("user_keyword_associations", "keyword") def __init__(self, name): self.name = name - class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + + class UserKeywordAssociation(Base): + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) special_key = Column(String(50)) - # bidirectional attribute/collection of "user"/"user_keywords" - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship(User, back_populates="user_keyword_associations") # reference to the "Keyword" object keyword = relationship("Keyword") @@ -203,46 +213,57 @@ collection of ``User`` to the ``.keyword`` attribute present on each self.keyword = keyword self.special_key = special_key + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): - return 'Keyword(%s)' % repr(self.keyword) + return "Keyword(%s)" % repr(self.keyword) -With the above configuration, we can operate upon the ``.keywords`` -collection of each ``User`` object, and the usage of ``UserKeyword`` -is concealed:: +With the above configuration, we can operate upon the ``.keywords`` collection +of each ``User`` object, each of which exposes a collection of ``Keyword`` +objects that are obtained from the underlying ``UserKeywordAssociation`` elements:: - >>> user = User('log') - >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')): + + >>> user = User("log") + >>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")): ... user.keywords.append(kw) - ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] -Where above, each ``.keywords.append()`` operation is equivalent to:: - - >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy'))) - -The ``UserKeyword`` association object has two attributes here which are populated; -the ``.keyword`` attribute is populated directly as a result of passing -the ``Keyword`` object as the first argument. 
The ``.user`` argument is then -assigned as the ``UserKeyword`` object is appended to the ``User.user_keywords`` -collection, where the bidirectional relationship configured between ``User.user_keywords`` -and ``UserKeyword.user`` results in a population of the ``UserKeyword.user`` attribute. -The ``special_key`` argument above is left at its default value of ``None``. +This example is in contrast to the example illustrated previously at +:ref:`associationproxy_scalar_collections`, where the association proxy exposed +a collection of strings, rather than a collection of composed objects. +In this case, each ``.keywords.append()`` operation is equivalent to:: + + >>> user.user_keyword_associations.append(UserKeywordAssociation(Keyword("its_heavy"))) + +The ``UserKeywordAssociation`` object has two attributes that are both +populated within the scope of the ``append()`` operation of the association +proxy; ``.keyword``, which refers to the +``Keyword` object, and ``.user``, which refers to the ``User``. +The ``.keyword`` attribute is populated first, as the association proxy +generates a new ``UserKeywordAssociation`` object in response to the ``.append()`` +operation, assigning the given ``Keyword`` instance to the ``.keyword`` +attribute. Then, as the ``UserKeywordAssociation`` object is appended to the +``User.user_keyword_associations`` collection, the ``UserKeywordAssociation.user`` attribute, +configured as ``back_populates`` for ``User.user_keyword_associations``, is initialized +upon the given ``UserKeywordAssociation`` instance to refer to the parent ``User`` +receiving the append operation. The ``special_key`` +argument above is left at its default value of ``None``. For those cases where we do want ``special_key`` to have a value, we -create the ``UserKeyword`` object explicitly. Below we assign all three -attributes, where the assignment of ``.user`` has the effect of the ``UserKeyword`` -being appended to the ``User.user_keywords`` collection:: +create the ``UserKeywordAssociation`` object explicitly. Below we assign all +three attributes, wherein the assignment of ``.user`` during +construction, has the effect of appending the new ``UserKeywordAssociation`` to +the ``User.user_keyword_associations`` collection (via the relationship):: - >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key') + >>> UserKeywordAssociation(Keyword("its_wood"), user, special_key="my special key") The association proxy returns to us a collection of ``Keyword`` objects represented by all these operations:: @@ -267,69 +288,77 @@ arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the ``creator`` argument. -Below, we modify our ``UserKeyword`` example such that the ``User.user_keywords`` -collection will now be mapped using a dictionary, where the ``UserKeyword.special_key`` -argument will be used as the key for the dictionary. We then apply a ``creator`` +Below, we modify our ``UserKeywordAssociation`` example such that the ``User.user_keyword_associations`` +collection will now be mapped using a dictionary, where the ``UserKeywordAssociation.special_key`` +argument will be used as the key for the dictionary. 
We also apply a ``creator`` argument to the ``User.keywords`` proxy so that these values are assigned appropriately when new elements are added to the dictionary:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - # proxy to 'user_keywords', instantiating UserKeyword - # assigning the new key to 'special_key', values to - # 'keyword'. - keywords = association_proxy('user_keywords', 'keyword', - creator=lambda k, v: - UserKeyword(special_key=k, keyword=v) - ) + # user/user_keyword_associations relationship, mapping + # user_keyword_associations with a dictionary against "special_key" as key. + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + collection_class=attribute_mapped_collection("special_key"), + cascade="all, delete-orphan", + ) + # proxy to 'user_keyword_associations', instantiating + # UserKeywordAssociation assigning the new key to 'special_key', + # values to 'keyword'. + keywords = association_proxy( + "user_keyword_associations", + "keyword", + creator=lambda k, v: UserKeywordAssociation(special_key=k, keyword=v), + ) def __init__(self, name): self.name = name - class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + + class UserKeywordAssociation(Base): + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) special_key = Column(String) - # bidirectional user/user_keywords relationships, mapping - # user_keywords with a dictionary against "special_key" as key. 
- user = relationship(User, backref=backref( - "user_keywords", - collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan" - ) - ) + user = relationship( + User, + back_populates="user_keyword_associations", + ) keyword = relationship("Keyword") + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): - return 'Keyword(%s)' % repr(self.keyword) + return "Keyword(%s)" % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the -``UserKeyword.special_key`` value to ``Keyword`` objects:: +``UserKeywordAssociation.special_key`` value to ``Keyword`` objects:: - >>> user = User('log') + >>> user = User("log") - >>> user.keywords['sk1'] = Keyword('kw1') - >>> user.keywords['sk2'] = Keyword('kw2') + >>> user.keywords["sk1"] = Keyword("kw1") + >>> user.keywords["sk2"] = Keyword("kw2") >>> print(user.keywords) {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')} @@ -343,49 +372,50 @@ Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give ``User`` a ``keywords`` dictionary that deals strictly with the string value -of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeyword`` +of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeywordAssociation`` and ``Keyword`` classes are entirely concealed. This is achieved by building an association proxy on ``User`` that refers to an association proxy -present on ``UserKeyword``:: +present on ``UserKeywordAssociation``:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - # the same 'user_keywords'->'keyword' proxy as in + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + collection_class=attribute_mapped_collection("special_key"), + cascade="all, delete-orphan", + ) + # the same 'user_keyword_associations'->'keyword' proxy as in # the basic dictionary example. 
keywords = association_proxy( - 'user_keywords', - 'keyword', - creator=lambda k, v: UserKeyword(special_key=k, keyword=v) + "user_keyword_associations", + "keyword", + creator=lambda k, v: UserKeywordAssociation(special_key=k, keyword=v), ) - # another proxy that is directly column-targeted - special_keys = association_proxy("user_keywords", "special_key") - def __init__(self, name): self.name = name - class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(ForeignKey('user.id'), primary_key=True) - keyword_id = Column(ForeignKey('keyword.id'), primary_key=True) + + class UserKeywordAssociation(Base): + __tablename__ = "user_keyword" + user_id = Column(ForeignKey("user.id"), primary_key=True) + keyword_id = Column(ForeignKey("keyword.id"), primary_key=True) special_key = Column(String) user = relationship( User, - backref=backref( - "user_keywords", - collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan" - ) + back_populates="user_keyword_associations", ) # the relationship to Keyword is now called @@ -394,38 +424,35 @@ present on ``UserKeyword``:: # 'keyword' is changed to be a proxy to the # 'keyword' attribute of 'Keyword' - keyword = association_proxy('kw', 'keyword') + keyword = association_proxy("kw", "keyword") + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword - ``User.keywords`` is now a dictionary of string to string, where -``UserKeyword`` and ``Keyword`` objects are created and removed for us +``UserKeywordAssociation`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: - >>> user = User('log') - >>> user.keywords = { - ... 'sk1':'kw1', - ... 'sk2':'kw2' - ... } + >>> user = User("log") + >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"} >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} - >>> user.keywords['sk3'] = 'kw3' - >>> del user.keywords['sk2'] + >>> user.keywords["sk3"] = "kw3" + >>> del user.keywords["sk2"] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage - ... print(user.user_keywords['sk3'].kw) + ... print(user.user_keyword_associations["sk3"].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created @@ -442,10 +469,10 @@ Querying with Association Proxies --------------------------------- The :class:`.AssociationProxy` features simple SQL construction capabilities -which work at the class level in a similar way as other ORM-mapped attributes. -Class-bound attributes such as ``User.keywords`` and ``User.special_keys`` -in the preceding example will provide for a SQL generating construct -when accessed at the class level. +which work at the class level in a similar way as other ORM-mapped attributes, +and provide rudimentary filtering support primarily based on the +SQL ``EXISTS`` keyword. + .. note:: The primary purpose of the association proxy extension is to allow for improved persistence and object-access patterns with mapped object @@ -454,6 +481,51 @@ when accessed at the class level. attributes when constructing SQL queries with JOINs, eager loading options, etc. 
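As a quick, minimal sketch of the ``EXISTS``-oriented filtering mentioned above (using the ``User.keywords`` and ``User.special_keys`` proxies from the example mapping shown next, with illustrative literal values only), the class-bound proxy attributes can be used directly as WHERE criteria::

    from sqlalchemy import select

    # object-targeted proxy: renders a correlated EXISTS against the
    # user_keyword table, joined out to the keyword table
    stmt = select(User).where(User.keywords.any(Keyword.keyword == "cheese-inspector"))

    # column-targeted proxy: renders a correlated EXISTS comparing the
    # special_key column of user_keyword directly
    stmt = select(User).where(User.special_keys.contains("my special key"))
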
+For this section, assume a class with both an association proxy +that refers to a column, as well as an association proxy that refers +to a related object, as in the example mapping below:: + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.ext.associationproxy import association_proxy + from sqlalchemy.orm import declarative_base, relationship + from sqlalchemy.orm.collections import attribute_mapped_collection + + Base = declarative_base() + + + class User(Base): + __tablename__ = "user" + id = Column(Integer, primary_key=True) + name = Column(String(64)) + + user_keyword_associations = relationship( + "UserKeywordAssociation", + cascade="all, delete-orphan", + ) + + # object-targeted association proxy + keywords = association_proxy( + "user_keyword_associations", + "keyword", + ) + + # column-targeted association proxy + special_keys = association_proxy("user_keyword_associations", "special_key") + + + class UserKeywordAssociation(Base): + __tablename__ = "user_keyword" + user_id = Column(ForeignKey("user.id"), primary_key=True) + keyword_id = Column(ForeignKey("keyword.id"), primary_key=True) + special_key = Column(String) + keyword = relationship("Keyword") + + + class Keyword(Base): + __tablename__ = "keyword" + id = Column(Integer, primary_key=True) + keyword = Column("keyword", String(64)) + The SQL generated takes the form of a correlated subquery against the EXISTS SQL operator so that it can be used in a WHERE clause without the need for additional modifications to the enclosing query. If the @@ -514,23 +586,22 @@ Cascading Scalar Deletes Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index fcaf104467c..4bf087e0583 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -8,7 +8,10 @@ included, using asyncio-compatible dialects. .. versionadded:: 1.4 -.. note:: The asyncio extension as of SQLAlchemy 1.4.3 can now be considered to +.. warning:: Please read :ref:`asyncio_install` for important platform + installation notes for many platforms, including **Apple M1 Architecture**. + +.. tip:: The asyncio extension as of SQLAlchemy 1.4.3 can now be considered to be **beta level** software. API details are subject to change however at this point it is unlikely for there to be significant backwards-incompatible changes. @@ -22,22 +25,33 @@ included, using asyncio-compatible dialects. .. 
_asyncio_install: -Asyncio Platform Installation Notes ------------------------------------- +Asyncio Platform Installation Notes (Including Apple M1) +--------------------------------------------------------- -The asyncio extension requires at least Python version 3.6. It also depends +The asyncio extension requires Python 3 only. It also depends upon the `greenlet `_ library. This dependency is installed by default on common machine platforms including:: x86_64 aarch64 ppc64le amd64 win32 For the above platforms, ``greenlet`` is known to supply pre-built wheel files. -To ensure the ``greenlet`` dependency is present on other platforms, the -``[asyncio]`` extra may be installed as follows, which will include an attempt -to build and install ``greenlet``:: +For other platforms, **greenlet does not install by default**; +the current file listing for greenlet can be seen at +`Greenlet - Download Files `_. +Note that **there are many architectures omitted, including Apple M1**. + +To install SQLAlchemy while ensuring the ``greenlet`` dependency is present +regardless of what platform is in use, the +``[asyncio]`` `setuptools extra `_ +may be installed +as follows, which will include also instruct ``pip`` to install ``greenlet``:: pip install sqlalchemy[asyncio] +Note that installation of ``greenlet`` on platforms that do not have a pre-built +wheel file means that ``greenlet`` will be built from source, which requires +that Python's development libraries also be present. + Synopsis - Core --------------- @@ -57,9 +71,11 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: from sqlalchemy.ext.asyncio import create_async_engine + async def async_main(): engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test", echo=True, + "postgresql+asyncpg://scott:tiger@localhost/test", + echo=True, ) async with engine.begin() as conn: @@ -71,7 +87,6 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: ) async with engine.connect() as conn: - # select a Result, which will be delivered with buffered # results result = await conn.execute(select(t1).where(t1.c.name == "some name 1")) @@ -82,6 +97,7 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: # clean-up pooled connections await engine.dispose() + asyncio.run(async_main()) Above, the :meth:`_asyncio.AsyncConnection.run_sync` method may be used to @@ -109,7 +125,7 @@ cursor and provides an async/await API, such as an async iterator:: async_result = await conn.stream(select(t1)) async for row in async_result: - print("row: %s" % (row, )) + print("row: %s" % (row,)) .. _asyncio_orm: @@ -176,9 +192,7 @@ illustrates a complete example including mapper and session configuration:: # expire_on_commit=False will prevent attributes from being expired # after commit. - async_session = sessionmaker( - engine, expire_on_commit=False, class_=AsyncSession - ) + async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession) async with async_session() as session: async with session.begin(): @@ -275,7 +289,6 @@ prevent this: ) async with async_session() as session: - result = await session.execute(select(A).order_by(A.id)) a1 = result.scalars().first() @@ -343,6 +356,10 @@ Other guidelines include: ) addresses_filter = (await session.scalars(stmt)).all() + .. seealso:: + + :ref:`migration_20_dynamic_loaders` - notes on migration to 2.0 style + .. 
_session_run_sync: Running Synchronous Methods and Functions under asyncio @@ -376,8 +393,9 @@ attribute accesses within a separate function:: import asyncio - from sqlalchemy.ext.asyncio import create_async_engine - from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy import select + from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine + def fetch_and_update_objects(session): """run traditional sync-style ORM code in a function that will be @@ -406,7 +424,8 @@ attribute accesses within a separate function:: async def async_main(): engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test", echo=True, + "postgresql+asyncpg://scott:tiger@localhost/test", + echo=True, ) async with engine.begin() as conn: await conn.run_sync(Base.metadata.drop_all) @@ -430,6 +449,7 @@ attribute accesses within a separate function:: # clean-up pooled connections await engine.dispose() + asyncio.run(async_main()) The above approach of running certain functions within a "sync" runner @@ -506,18 +526,15 @@ constructs are illustrated below:: import asyncio - from sqlalchemy import text + from sqlalchemy import event, text from sqlalchemy.engine import Engine - from sqlalchemy import event - from sqlalchemy.ext.asyncio import AsyncSession - from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine from sqlalchemy.orm import Session ## Core events ## - engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost:5432/test" - ) + engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost:5432/test") + # connect event on instance of Engine @event.listens_for(engine.sync_engine, "connect") @@ -529,10 +546,15 @@ constructs are illustrated below:: cursor.execute("select 'execute from event'") print(cursor.fetchone()[0]) + # before_execute event on all Engine instances @event.listens_for(Engine, "before_execute") def my_before_execute( - conn, clauseelement, multiparams, params, execution_options + conn, + clauseelement, + multiparams, + params, + execution_options, ): print("before execute!") @@ -541,6 +563,7 @@ constructs are illustrated below:: session = AsyncSession(engine) + # before_commit event on instance of Session @event.listens_for(session.sync_session, "before_commit") def my_before_commit(session): @@ -553,11 +576,13 @@ constructs are illustrated below:: result = connection.execute(text("select 'execute from event'")) print(result.first()) + # after_commit event on all Session instances @event.listens_for(Session, "after_commit") def my_after_commit(session): print("after commit!") + async def go(): await session.execute(text("select 1")) await session.commit() @@ -565,6 +590,7 @@ constructs are illustrated below:: await session.close() await engine.dispose() + asyncio.run(go()) The above example prints something along the lines of:: @@ -618,12 +644,66 @@ The above example prints something along the lines of:: to sync, and outgoing messages to the database API will be converted to asyncio transparently. +.. 
_asyncio_events_run_async: + +Using awaitable-only driver methods in connection pool and other events +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As discussed in the above section, event handlers such as those oriented +around the :class:`.PoolEvents` event handlers receive a sync-style "DBAPI" connection, +which is a wrapper object supplied by SQLAlchemy asyncio dialects to adapt +the underlying asyncio "driver" connection into one that can be used by +SQLAlchemy's internals. A special use case arises when the user-defined +implementation for such an event handler needs to make use of the +ultimate "driver" connection directly, using awaitable only methods on that +driver connection. One such example is the ``.set_type_codec()`` method +supplied by the asyncpg driver. + +To accommodate this use case, SQLAlchemy's :class:`.AdaptedConnection` +class provides a method :meth:`.AdaptedConnection.run_async` that allows +an awaitable function to be invoked within the "synchronous" context of +an event handler or other SQLAlchemy internal. This method is directly +analogous to the :meth:`_asyncio.AsyncConnection.run_sync` method that +allows a sync-style method to run under async. + +:meth:`.AdaptedConnection.run_async` should be passed a function that will +accept the innermost "driver" connection as a single argument, and return +an awaitable that will be invoked by the :meth:`.AdaptedConnection.run_async` +method. The given function itself does not need to be declared as ``async``; +it's perfectly fine for it to be a Python ``lambda:``, as the return awaitable +value will be invoked after being returned:: + + from sqlalchemy import event + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine(...) + + + @event.listens_for(engine.sync_engine, "connect") + def register_custom_types(dbapi_connection, ...): + dbapi_connection.run_async( + lambda connection: connection.set_type_codec( + "MyCustomType", encoder, decoder, ... + ) + ) + +Above, the object passed to the ``register_custom_types`` event handler +is an instance of :class:`.AdaptedConnection`, which provides a DBAPI-like +interface to an underlying async-only driver-level connection object. +The :meth:`.AdaptedConnection.run_async` method then provides access to an +awaitable environment where the underlying driver level connection may be +acted upon. + +.. versionadded:: 1.4.30 + + Using multiple asyncio event loops ---------------------------------- -An application that makes use of multiple event loops, for example by combining asyncio -with multithreading, should not share the same :class:`_asyncio.AsyncEngine` -with different event loops when using the default pool implementation. +An application that makes use of multiple event loops, for example in the +uncommon case of combining asyncio with multithreading, should not share the +same :class:`_asyncio.AsyncEngine` with different event loops when using the +default pool implementation. 
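As a minimal sketch of the per-event-loop pattern implied here (the ``per_loop_main`` name and connection URL are illustrative only), the engine can be created, used, and disposed entirely within the loop that runs it::

    import asyncio

    from sqlalchemy.ext.asyncio import create_async_engine


    async def per_loop_main():
        # the AsyncEngine and its pool are created inside the running loop,
        # so pooled connections are only ever awaited on this loop
        engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")
        try:
            async with engine.connect() as conn:
                ...  # run statements with conn here
        finally:
            # return pooled connections before the loop is torn down
            await engine.dispose()


    asyncio.run(per_loop_main())
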
If an :class:`_asyncio.AsyncEngine` is be passed from one event loop to another, the method :meth:`_asyncio.AsyncEngine.dispose()` should be called before it's @@ -635,21 +715,34 @@ If the same engine must be shared between different loop, it should be configure to disable pooling using :class:`~sqlalchemy.pool.NullPool`, preventing the Engine from using any connection more than once:: + from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.pool import NullPool + engine = create_async_engine( - "postgresql+asyncpg://user:pass@host/dbname", poolclass=NullPool + "postgresql+asyncpg://user:pass@host/dbname", + poolclass=NullPool, ) - .. _asyncio_scoped_session: Using asyncio scoped session ---------------------------- -The usage of :class:`_asyncio.async_scoped_session` is mostly similar to -:class:`.scoped_session`. However, since there's no "thread-local" concept in -the asyncio context, the "scopefunc" parameter must be provided to the -constructor:: +The "scoped session" pattern used in threaded SQLAlchemy with the +:class:`.scoped_session` object is also available in asyncio, using +an adapted version called :class:`_asyncio.async_scoped_session`. + +.. tip:: SQLAlchemy generally does not recommend the "scoped" pattern + for new development as it relies upon mutable global state that must also be + explicitly torn down when work within the thread or task is complete. + Particularly when using asyncio, it's likely a better idea to pass the + :class:`_asyncio.AsyncSession` directly to the awaitable functions that need + it. + +When using :class:`_asyncio.async_scoped_session`, as there's no "thread-local" +concept in the asyncio context, the "scopefunc" parameter must be provided to +the constructor. The example below illustrates using the +``asyncio.current_task()`` function for this purpose:: from asyncio import current_task @@ -657,26 +750,40 @@ constructor:: from sqlalchemy.ext.asyncio import async_scoped_session from sqlalchemy.ext.asyncio import AsyncSession - async_session_factory = sessionmaker(some_async_engine, class_=_AsyncSession) - AsyncSession = async_scoped_session(async_session_factory, scopefunc=current_task) + async_session_factory = sessionmaker(some_async_engine, class_=AsyncSession) + AsyncScopedSession = async_scoped_session(async_session_factory, scopefunc=current_task) + + some_async_session = AsyncScopedSession() - some_async_session = AsyncSession() +.. warning:: The "scopefunc" used by :class:`_asyncio.async_scoped_session` + is invoked **an arbitrary number of times** within a task, once for each + time the underlying :class:`_asyncio.AsyncSession` is accessed. The function + should therefore be **idempotent** and lightweight, and should not attempt + to create or mutate any state, such as establishing callbacks, etc. -:class:`_asyncio.async_scoped_session` also includes **proxy +.. warning:: Using ``current_task()`` for the "key" in the scope requires that + the :meth:`_asyncio.async_scoped_session.remove` method is called from + within the outermost awaitable, to ensure the key is removed from the + registry when the task is complete, otherwise the task handle as well as + the :class:`_asyncio.AsyncSession` will remain in memory, essentially + creating a memory leak. See the following example which illustrates + the correct use of :meth:`_asyncio.async_scoped_session.remove`. 
+ +:class:`_asyncio.async_scoped_session` includes **proxy behavior** similar to that of :class:`.scoped_session`, which means it can be treated as a :class:`_asyncio.AsyncSession` directly, keeping in mind that the usual ``await`` keywords are necessary, including for the :meth:`_asyncio.async_scoped_session.remove` method:: async def some_function(some_async_session, some_object): - # use the AsyncSession directly - some_async_session.add(some_object) + # use the AsyncSession directly + some_async_session.add(some_object) - # use the AsyncSession via the context-local proxy - await AsyncSession.commit() + # use the AsyncSession via the context-local proxy + await AsyncScopedSession.commit() - # "remove" the current proxied AsyncSession for the local context - await AsyncSession.remove() + # "remove" the current proxied AsyncSession for the local context + await AsyncScopedSession.remove() .. versionadded:: 1.4.19 @@ -696,13 +803,11 @@ leveraging the :meth:`_asyncio.AsyncConnection.run_sync` method of import asyncio - from sqlalchemy.ext.asyncio import create_async_engine - from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy import inspect + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test") - engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test" - ) def use_inspector(conn): inspector = inspect(conn) @@ -711,10 +816,12 @@ leveraging the :meth:`_asyncio.AsyncConnection.run_sync` method of # return any value to the caller return inspector.get_table_names() + async def async_main(): async with engine.connect() as conn: tables = await conn.run_sync(use_inspector) + asyncio.run(async_main()) .. seealso:: @@ -728,6 +835,8 @@ Engine API Documentation .. autofunction:: create_async_engine +.. autofunction:: async_engine_from_config + .. autoclass:: AsyncEngine :members: @@ -748,12 +857,15 @@ cursor. .. autoclass:: AsyncResult :members: + :inherited-members: .. autoclass:: AsyncScalarResult :members: + :inherited-members: .. 
autoclass:: AsyncMappingResult :members: + :inherited-members: ORM Session API Documentation ----------------------------- diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index 4751fef3638..60bf06b2a14 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -57,15 +57,15 @@ query build-up looks like the following:: from sqlalchemy import bindparam - def search_for_user(session, username, email=None): + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -130,7 +130,7 @@ compared to the equivalent "baked" query:: s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery(lambda s: s.query(Customer)) - q += lambda q: q.filter(Customer.id == bindparam('id')) + q += lambda q: q.filter(Customer.id == bindparam("id")) q(s).params(id=id_).one() The difference in Python function call count for an iteration of 10000 @@ -178,9 +178,10 @@ just building up the query, and removing its :class:`.Session` by calling my_simple_cache = {} + def lookup(session, id_argument): if "my_key" not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) my_simple_cache["my_key"] = query.with_session(None) else: query = my_simple_cache["my_key"].with_session(session) @@ -212,10 +213,10 @@ Our example becomes:: my_simple_cache = {} - def lookup(session, id_argument): + def lookup(session, id_argument): if "my_key" not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) my_simple_cache["my_key"] = query.with_session(None).bake() else: query = my_simple_cache["my_key"].with_session(session) @@ -231,9 +232,10 @@ a simple improvement upon the simple "reuse a query" approach:: bakery = baked.bakery() + def lookup(session, id_argument): def create_model_query(session): - return session.query(Model).filter(Model.id == bindparam('id')) + return session.query(Model).filter(Model.id == bindparam("id")) parameterized_query = bakery.bake(create_model_query) return parameterized_query(session).params(id=id_argument).all() @@ -256,6 +258,7 @@ query on a conditional basis:: my_simple_cache = {} + def lookup(session, id_argument, include_frobnizzle=False): if include_frobnizzle: cache_key = "my_key_with_frobnizzle" @@ -263,7 +266,7 @@ query on a conditional basis:: cache_key = "my_key_without_frobnizzle" if cache_key not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) if include_frobnizzle: query = query.filter(Model.frobnizzle == True) @@ -284,18 +287,21 @@ into a direct use of "bakery" as follows:: bakery = baked.bakery() + def lookup(session, id_argument, include_frobnizzle=False): def create_model_query(session): - return session.query(Model).filter(Model.id == bindparam('id')) + return session.query(Model).filter(Model.id == bindparam("id")) parameterized_query = 
bakery.bake(create_model_query) if include_frobnizzle: + def include_frobnizzle_in_query(query): return query.filter(Model.frobnizzle == True) parameterized_query = parameterized_query.with_criteria( - include_frobnizzle_in_query) + include_frobnizzle_in_query + ) return parameterized_query(session).params(id=id_argument).all() @@ -315,10 +321,11 @@ means to reduce verbosity:: bakery = baked.bakery() + def lookup(session, id_argument, include_frobnizzle=False): parameterized_query = bakery.bake( - lambda s: s.query(Model).filter(Model.id == bindparam('id')) - ) + lambda s: s.query(Model).filter(Model.id == bindparam("id")) + ) if include_frobnizzle: parameterized_query += lambda q: q.filter(Model.frobnizzle == True) @@ -357,11 +364,9 @@ statement compilation time:: bakery = baked.bakery() baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter( - User.name.in_(bindparam('username', expanding=True))) + baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True))) - result = baked_query.with_session(session).params( - username=['ed', 'fred']).all() + result = baked_query.with_session(session).params(username=["ed", "fred"]).all() .. seealso:: @@ -388,8 +393,7 @@ of the baked query:: # select a correlated subquery in the top columns list, # we have the "session" argument, pass that - my_q = bakery( - lambda s: s.query(Address.id, my_subq.to_query(s).as_scalar())) + my_q = bakery(lambda s: s.query(Address.id, my_subq.to_query(s).as_scalar())) # use a correlated subquery in some of the criteria, we have # the "query" argument, pass that. @@ -413,12 +417,11 @@ alter the query differently each time. To allow a still to allow the result to be cached, the event can be registered passing the ``bake_ok=True`` flag:: - @event.listens_for( - Query, "before_compile", retval=True, bake_ok=True) + @event.listens_for(Query, "before_compile", retval=True, bake_ok=True) def my_event(query): for desc in query.column_descriptions: - if desc['type'] is User: - entity = desc['entity'] + if desc["type"] is User: + entity = desc["entity"] query = query.filter(entity.deleted == False) return query @@ -472,4 +475,5 @@ API Documentation .. autoclass:: Result :members: + :noindex: diff --git a/doc/build/orm/extensions/declarative/api.rst b/doc/build/orm/extensions/declarative/api.rst index e41e735d37b..98924c2e275 100644 --- a/doc/build/orm/extensions/declarative/api.rst +++ b/doc/build/orm/extensions/declarative/api.rst @@ -1,3 +1,5 @@ +:orphan: + .. automodule:: sqlalchemy.ext.declarative =============== diff --git a/doc/build/orm/extensions/declarative/basic_use.rst b/doc/build/orm/extensions/declarative/basic_use.rst index f1ce1d4a026..49903559d5c 100644 --- a/doc/build/orm/extensions/declarative/basic_use.rst +++ b/doc/build/orm/extensions/declarative/basic_use.rst @@ -1,3 +1,5 @@ +:orphan: + ========= Basic Use ========= @@ -20,11 +22,7 @@ This section has moved to :ref:`orm_declarative_metadata`. Class Constructor ================= -As a convenience feature, the :func:`declarative_base` sets a default -constructor on classes which takes keyword arguments, and assigns them -to the named attributes:: - - e = Engineer(primary_language='python') +This section has moved to :ref:`orm_mapper_configuration_overview`. 
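For quick reference, the behavior covered by the moved section can be sketched briefly (a minimal sketch, assuming an ``Engineer`` class mapped with a ``primary_language`` attribute): the default constructor supplied by :func:`declarative_base` accepts keyword arguments and assigns them to the correspondingly named mapped attributes::

    # the default constructor assigns keyword arguments to the
    # mapped attributes of the same name
    e = Engineer(primary_language="python")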
Mapper Configuration ==================== diff --git a/doc/build/orm/extensions/declarative/index.rst b/doc/build/orm/extensions/declarative/index.rst index 7ef2551c619..6cf1a60a1c6 100644 --- a/doc/build/orm/extensions/declarative/index.rst +++ b/doc/build/orm/extensions/declarative/index.rst @@ -22,15 +22,3 @@ mapping API. .. autoclass:: DeferredReflection :members: -.. these pages have all been integrated into the main ORM documentation - however are still here as placeholder docs with links to where they've moved - -.. toctree:: - :hidden: - - api - basic_use - inheritance - mixins - relationships - table_config \ No newline at end of file diff --git a/doc/build/orm/extensions/declarative/inheritance.rst b/doc/build/orm/extensions/declarative/inheritance.rst index d65cafd355a..849664a3c33 100644 --- a/doc/build/orm/extensions/declarative/inheritance.rst +++ b/doc/build/orm/extensions/declarative/inheritance.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_inheritance: Declarative Inheritance diff --git a/doc/build/orm/extensions/declarative/mixins.rst b/doc/build/orm/extensions/declarative/mixins.rst index 221e8f8f8c9..7a18f07a7f3 100644 --- a/doc/build/orm/extensions/declarative/mixins.rst +++ b/doc/build/orm/extensions/declarative/mixins.rst @@ -1,6 +1,8 @@ +:orphan: + .. _declarative_mixins: Mixin and Custom Base Classes ============================= -See :ref:`orm_mixins_toplevel` for this section. \ No newline at end of file +See :ref:`orm_mixins_toplevel` for this section. diff --git a/doc/build/orm/extensions/declarative/relationships.rst b/doc/build/orm/extensions/declarative/relationships.rst index c5c83b1711c..c0df8b49cff 100644 --- a/doc/build/orm/extensions/declarative/relationships.rst +++ b/doc/build/orm/extensions/declarative/relationships.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_configuring_relationships: ========================= diff --git a/doc/build/orm/extensions/declarative/table_config.rst b/doc/build/orm/extensions/declarative/table_config.rst index d51fb1831d7..05ad46d6ccc 100644 --- a/doc/build/orm/extensions/declarative/table_config.rst +++ b/doc/build/orm/extensions/declarative/table_config.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_table_args: =================== diff --git a/doc/build/orm/extensions/hybrid.rst b/doc/build/orm/extensions/hybrid.rst index 16cdafebcca..96214845937 100644 --- a/doc/build/orm/extensions/hybrid.rst +++ b/doc/build/orm/extensions/hybrid.rst @@ -15,7 +15,7 @@ API Reference :members: .. autoclass:: Comparator - + .. autodata:: HYBRID_METHOD diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index b710d1f4430..0b7e332c533 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -4,15 +4,67 @@ Mypy / Pep-484 Support for ORM Mappings ======================================== Support for :pep:`484` typing annotations as well as the -`Mypy `_ type checking tool. +MyPy_ type checking tool. +.. deprecated:: 2.0 + + **The SQLAlchemy Mypy Plugin is DEPRECATED, and will be removed possibly + as early as the SQLAlchemy 2.1 release. We would urge users to please + migrate away from it ASAP. The mypy plugin also works only up until + mypy version 1.10.1. version 1.11.0 and greater may not work properly.** + + This plugin cannot be maintained across constantly changing releases + of mypy and its stability going forward CANNOT be guaranteed. 
+ + Modern SQLAlchemy now offers fully pep-484 compliant mapping syntaxes; see + the migration guide in the SQLAlchemy 2.0 documentation for + details. + +.. topic:: SQLAlchemy Mypy Plugin Status Update + + **Updated July 2024** + + The mypy plugin is supported **only up until mypy 1.10.1, and it will have + issues running with 1.11.0 or greater**. Use with mypy 1.11.0 or greater + may have error conditions which currently cannot be resolved. + + For SQLAlchemy 2.0, the Mypy plugin continues to work at the level at which + it reached in the SQLAlchemy 1.4 release. SQLAlchemy 2.0 however features + an all new typing system for ORM Declarative models that removes the need + for the Mypy plugin and delivers much more consistent behavior with + generally superior capabilities. Note that this new capability is **not part + of SQLAlchemy 1.4, it is only in SQLAlchemy 2.0**. + + The SQLAlchemy Mypy plugin, while it has technically never left the "alpha" + stage, should **now be considered as deprecated in SQLAlchemy 2.0, even + though it is still necessary for full Mypy support when using + SQLAlchemy 1.4**. + + The Mypy plugin itself does not solve the issue of supplying correct typing + with other typing tools such as Pylance/Pyright, Pytype, Pycharm, etc, which + cannot make use of Mypy plugins. Additionally, Mypy plugins are extremely + difficult to develop, maintain and test, as a Mypy plugin must be deeply + integrated with Mypy's internal datastructures and processes, which itself + are not stable within the Mypy project itself. The SQLAlchemy Mypy plugin + has lots of limitations when used with code that deviates from very basic + patterns which are reported regularly. + + For these reasons, new non-regression issues reported against the Mypy + plugin are unlikely to be fixed. **Existing code that passes Mypy checks + using the plugin with SQLAlchemy 1.4 installed will continue to pass all + checks in SQLAlchemy 2.0 without any changes required, provided the plugin + is still used. SQLAlchemy 2.0's API is fully + backwards compatible with the SQLAlchemy 1.4 API and Mypy plugin behavior.** + + End-user code that passes all checks under SQLAlchemy 1.4 with the Mypy + plugin may incrementally migrate to the new structures, once + that code is running exclusively on SQLAlchemy 2.0. + + Code that is running exclusively on SQLAlchemy version + 2.0 and has fully migrated to the new declarative constructs will enjoy full + compliance with pep-484 as well as working correctly within IDEs and other + typing tools, without the need for plugins. -.. note:: The Mypy plugin and typing annotations should be regarded as - **alpha level** for the - early 1.4 releases of SQLAlchemy. The plugin has not been tested in real world - scenarios and may have many unhandled cases and error conditions. - Specifics of the new typing stubs are also **subject to change** during - the 1.4 series. Installation ------------ @@ -21,7 +73,7 @@ The Mypy plugin depends upon new stubs for SQLAlchemy packaged at `sqlalchemy2-stubs `_. These stubs necessarily fully replace the previous ``sqlalchemy-stubs`` typing annotations published by Dropbox, as they occupy the same ``sqlalchemy-stubs`` -namespace as specified by :pep:`561`. The `Mypy `_ +namespace as specified by :pep:`561`. The Mypy_ package itself is also a dependency. Both packages may be installed using the "mypy" extras hook using pip:: @@ -54,32 +106,31 @@ alter classes dynamically at runtime. 
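As a minimal illustration of enabling the plugin once both packages are installed (assuming a ``setup.cfg``-style Mypy configuration; ``mypy.ini`` and ``pyproject.toml`` follow the same pattern), the plugin is named in the ``plugins`` setting::

    [mypy]
    plugins = sqlalchemy.ext.mypy.plugin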
To cover the major areas where this occurs, consider the following ORM mapping, using the typical example of the ``User`` class:: - from sqlalchemy import Column - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import select + from sqlalchemy import Column, Integer, String, select from sqlalchemy.orm import declarative_base # "Base" is a class that is created dynamically from the # declarative_base() function Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + # "some_user" is an instance of the User class, which # accepts "id" and "name" kwargs based on the mapping - some_user = User(id=5, name='user') + some_user = User(id=5, name="user") # it has an attribute called .name that's a string print(f"Username: {some_user.name}") # a select() construct makes use of SQL expressions derived from the # User class itself - select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains('s')) + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) Above, the steps that the Mypy extension can take include: @@ -105,35 +156,31 @@ When the Mypy plugin processes the above file, the resulting static class definition and Python code passed to the Mypy tool is equivalent to the following:: - from sqlalchemy import Column - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import select - from sqlalchemy.orm import declarative_base - from sqlalchemy.orm.decl_api import DeclarativeMeta + from sqlalchemy import Column, Integer, String, select from sqlalchemy.orm import Mapped + from sqlalchemy.orm.decl_api import DeclarativeMeta + class Base(metaclass=DeclarativeMeta): __abstract__ = True + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[Optional[int]] = Mapped._special_method( Column(Integer, primary_key=True) ) - name: Mapped[Optional[str]] = Mapped._special_method( - Column(String) - ) + name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: - ... + def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: ... - some_user = User(id=5, name='user') + + some_user = User(id=5, name="user") print(f"Username: {some_user.name}") - select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains('s')) + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) The key steps which have been taken above include: @@ -213,6 +260,7 @@ and convert them to include the ``Mapped[]`` type surrounding them. The from sqlalchemy.orm import Mapped + class MyClass(Base): # ... 
@@ -269,14 +317,16 @@ needs an explicit type to be sent:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -293,7 +343,7 @@ To resolve, apply an explicit type annotation to the ``Address.user_id`` column:: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -314,7 +364,7 @@ the attributes can be explicitly stated with a complete annotation that Base.metadata, Column(Integer, primary_key=True), Column("employee_name", String(50), nullable=False), - Column(String(50)) + Column(String(50)), ) id: Mapped[int] @@ -341,13 +391,14 @@ present, as well as if the target type of the :func:`_orm.relationship` is a string or callable, and not a class:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -366,7 +417,7 @@ The error can be resolved either by using ``relationship(User, uselist=False)`` or by providing the type, in this case the scalar ``User`` object:: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -381,7 +432,8 @@ by pep-484, ensuring the class is imported with in the `TYPE_CHECKING block `_ as appropriate:: - from typing import List, TYPE_CHECKING + from typing import TYPE_CHECKING, List + from .mymodel import Base if TYPE_CHECKING: @@ -389,8 +441,9 @@ as appropriate:: # that cannot normally be imported at runtime from .myaddressmodel import Address + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -400,15 +453,16 @@ As is the case with columns, the :class:`_orm.Mapped` class may also be applied explicitly:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -431,8 +485,8 @@ such as :meth:`_orm.registry.mapped`) should be decorated with the :func:`_orm.declarative_mixin` decorator, which provides a hint to the Mypy plugin that a particular class intends to serve as a declarative mixin:: - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import declarative_mixin + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class HasUpdatedAt: @@ -440,9 +494,9 @@ plugin that a particular class intends to serve as a declarative mixin:: def updated_at(cls) -> Column[DateTime]: # uses Column return Column(DateTime) + @declarative_mixin class HasCompany: - @declared_attr def company_id(cls) -> Mapped[int]: # uses Mapped return Column(ForeignKey("company.id")) @@ -451,8 +505,9 @@ plugin that a particular class intends to serve as a declarative mixin:: def company(cls) -> Mapped["Company"]: return relationship("Company") + class 
Employee(HasUpdatedAt, HasCompany, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String) @@ -467,7 +522,6 @@ this complexity:: company_id: Mapped[int] company: Mapped["Company"] - Combining with Dataclasses or Other Type-Sensitive Attribute Systems ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -477,7 +531,7 @@ use to build the class, and the value given in each assignment statement is significant. That is, a class as follows has to be stated exactly as it is in order to be accepted by dataclasses:: - mapper_registry : registry = registry() + mapper_registry: registry = registry() @mapper_registry.mapped @@ -498,9 +552,7 @@ as it is in order to be accepted by dataclasses:: addresses: List[Address] = field(default_factory=list) __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } + "properties": {"addresses": relationship("Address")} } We can't apply our ``Mapped[]`` types to the attributes ``id``, ``name``, @@ -540,12 +592,12 @@ This attribute can be conditional within the ``TYPE_CHECKING`` variable:: _mypy_mapped_attrs = [id, name, "fullname", "nickname", addresses] __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } + "properties": {"addresses": relationship("Address")} } With the above recipe, the attributes listed in ``_mypy_mapped_attrs`` will be applied with the :class:`_orm.Mapped` typing information so that the ``User`` class will behave as a SQLAlchemy mapped class when used in a class-bound context. + +.. _Mypy: https://mypy.readthedocs.io/ diff --git a/doc/build/orm/index.rst b/doc/build/orm/index.rst index 8434df62c7d..ee0eaf80547 100644 --- a/doc/build/orm/index.rst +++ b/doc/build/orm/index.rst @@ -11,6 +11,7 @@ tutorial. .. toctree:: :maxdepth: 2 + quickstart tutorial mapper_config relationships @@ -19,3 +20,4 @@ tutorial. extending extensions/index examples + diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index eafbba342ac..9c64668da1c 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -45,14 +45,14 @@ additional arguments that will refer to the polymorphic discriminator column as well as the identifier for the base class:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } Above, an additional column ``type`` is established to act as the @@ -82,21 +82,22 @@ they represent. 
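As a brief sketch of the effect (assuming a configured :class:`.Session` named ``session`` and the joined inheritance mappings shown in this section), the value of the discriminator column determines the class of each object returned when the base class is queried::

    for employee in session.query(Employee):
        # rows whose "type" column is "engineer" come back as Engineer
        # instances, rows marked "manager" come back as Manager instances
        print(type(employee).__name__, employee.name)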
Each table also must contain a primary key column (or columns), as well as a foreign key reference to the parent table:: class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_identity':'engineer', + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } In the above example, each mapping specifies the @@ -159,29 +160,32 @@ the ``company`` table, the relationships are set up between ``Company`` and ``Employee``:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - # ... + ... + class Engineer(Employee): - # ... + ... If the foreign key constraint is on a table corresponding to a subclass, the relationship should target that subclass instead. In the example @@ -190,36 +194,39 @@ key constraint from ``manager`` to ``company``, so the relationships are established between the ``Manager`` and ``Company`` classes:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) managers = relationship("Manager", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="managers") __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } + class Engineer(Employee): - # ... + ... 
Above, the ``Manager`` class will have a ``Manager.company`` attribute; ``Company`` will have a ``Company.managers`` attribute that always @@ -263,28 +270,30 @@ subclasses, indicating that the column is to be mapped only to that subclass; the :class:`_schema.Column` will be applied to the same base :class:`_schema.Table` object:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Manager(Employee): manager_data = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'manager' + "polymorphic_identity": "manager", } + class Engineer(Employee): engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' + "polymorphic_identity": "engineer", } Note that the mappers for the derived classes Manager and Engineer omit the @@ -302,22 +311,28 @@ declaration on a subclass that has no table of its own. A tricky case comes up when two subclasses want to specify *the same* column, as below:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } start_date = Column(DateTime) + class Manager(Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } start_date = Column(DateTime) Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager`` @@ -335,32 +350,39 @@ if it already exists:: from sqlalchemy.orm import declared_attr + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } @declared_attr def start_date(cls): "Start date column, if not present already." - return Employee.__table__.c.get('start_date', Column(DateTime)) + return Employee.__table__.c.get("start_date", Column(DateTime)) + class Manager(Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } @declared_attr def start_date(cls): "Start date column, if not present already." 
- return Employee.__table__.c.get('start_date', Column(DateTime)) + return Employee.__table__.c.get("start_date", Column(DateTime)) Above, when ``Manager`` is mapped, the ``start_date`` column is already present on the ``Employee`` class; by returning the existing @@ -372,26 +394,33 @@ to define a particular series of columns and/or other mapped attributes from a reusable mixin class:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class HasStartDate: @declared_attr def start_date(cls): - return cls.__table__.c.get('start_date', Column(DateTime)) + return cls.__table__.c.get("start_date", Column(DateTime)) + class Engineer(HasStartDate, Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } + class Manager(HasStartDate, Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } Relationships with Single Table Inheritance +++++++++++++++++++++++++++++++++++++++++++ @@ -402,22 +431,23 @@ attribute should be on the same class that's the "foreign" side of the relationship:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } @@ -425,14 +455,15 @@ relationship:: manager_data = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'manager' + "polymorphic_identity": "manager", } + class Engineer(Employee): engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' + "polymorphic_identity": "engineer", } Also, like the case of joined inheritance, we can create relationships @@ -441,31 +472,32 @@ include a WHERE clause that limits the class selection to that subclass or subclasses:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) managers = relationship("Manager", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } class Manager(Employee): manager_name = Column(String(30)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="managers") __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } @@ -473,7 +505,7 @@ or subclasses:: engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' 
+ "polymorphic_identity": "engineer", } Above, the ``Manager`` class will have a ``Manager.company`` attribute; @@ -533,31 +565,33 @@ This indicates to Declarative as well as the mapping that the superclass table should not be considered as part of the mapping:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(50)) __mapper_args__ = { - 'concrete': True + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(50)) __mapper_args__ = { - 'concrete': True + "concrete": True, } Two critical points should be noted: @@ -603,37 +637,43 @@ Using :class:`.ConcreteBase`, we can set up our concrete mapping in almost the same way as we do other forms of inheritance mappings:: from sqlalchemy.ext.declarative import ConcreteBase + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + class Employee(ConcreteBase, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } Above, Declarative sets up the polymorphic selectable for the @@ -686,6 +726,12 @@ The above UNION query needs to manufacture "NULL" columns for each subtable in order to accommodate for those columns that aren't members of that particular subclass. +.. seealso:: + + :class:`.ConcreteBase` + +.. _abstract_concrete_base: + Abstract Concrete Classes +++++++++++++++++++++++++ @@ -700,29 +746,28 @@ tables, and leave the base class unmapped, this can be achieved very easily. When using Declarative, just declare the base class with the ``__abstract__`` indicator:: + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + + class Employee(Base): __abstract__ = True + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) - __mapper_args__ = { - 'polymorphic_identity': 'manager', - } class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) - __mapper_args__ = { - 'polymorphic_identity': 'engineer', - } - Above, we are not actually making use of SQLAlchemy's inheritance mapping facilities; we can load and persist instances of ``Manager`` and ``Engineer`` normally. 
The situation changes however when we need to **query polymorphically**, @@ -731,14 +776,6 @@ of ``Manager`` and ``Engineer`` instances. This brings us back into the domain of concrete inheritance, and we must build a special mapper against ``Employee`` in order to achieve this. -.. topic:: Mappers can always SELECT - - In SQLAlchemy, a mapper for a class always has to refer to some - "selectable", which is normally a :class:`_schema.Table` but may also refer to any - :func:`_expression.select` object as well. While it may appear that a "single table - inheritance" mapper does not map to a table, these mappers in fact - implicitly refer to the table that is mapped by a superclass. - To modify our concrete inheritance example to illustrate an "abstract" base that is capable of polymorphic loading, we will have only an ``engineer`` and a ``manager`` table and no ``employee`` @@ -750,39 +787,55 @@ To help with this, Declarative offers a variant of the :class:`.ConcreteBase` class called :class:`.AbstractConcreteBase` which achieves this automatically:: from sqlalchemy.ext.declarative import AbstractConcreteBase + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + class Employee(AbstractConcreteBase, Base): pass + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } -The :class:`.AbstractConcreteBase` helper class has a more complex internal -process than that of :class:`.ConcreteBase`, in that the entire mapping + + Base.registry.configure() + +Above, the :meth:`_orm.registry.configure` method is invoked, which will +trigger the ``Employee`` class to be actually mapped; before the configuration +step, the class has no mapping as the sub-tables which it will query from +have not yet been defined. This process is more complex than that of +:class:`.ConcreteBase`, in that the entire mapping of the base class must be delayed until all the subclasses have been declared. With a mapping like the above, only instances of ``Manager`` and ``Engineer`` may be persisted; querying against the ``Employee`` class will always produce ``Manager`` and ``Engineer`` objects. +.. 
seealso:: + + :class:`.AbstractConcreteBase` + Classical and Semi-Classical Concrete Polymorphic Configuration +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -801,34 +854,41 @@ establishes the :class:`_schema.Table` objects separately:: metadata_obj = Base.metadata employees_table = Table( - 'employee', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), + "employee", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) managers_table = Table( - 'manager', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('manager_data', String(50)), + "manager", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("manager_data", String(50)), ) engineers_table = Table( - 'engineer', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('engineer_info', String(50)), + "engineer", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("engineer_info", String(50)), ) Next, the UNION is produced using :func:`.polymorphic_union`:: from sqlalchemy.orm import polymorphic_union - pjoin = polymorphic_union({ - 'employee': employees_table, - 'manager': managers_table, - 'engineer': engineers_table - }, 'type', 'pjoin') + pjoin = polymorphic_union( + { + "employee": employees_table, + "manager": managers_table, + "engineer": engineers_table, + }, + "type", + "pjoin", + ) With the above :class:`_schema.Table` objects, the mappings can be produced using "semi-classical" style, where we use Declarative in conjunction with the ``__table__`` argument; @@ -838,22 +898,26 @@ the :paramref:`.mapper.with_polymorphic` parameter:: class Employee(Base): __table__ = employee_table __mapper_args__ = { - 'polymorphic_on': pjoin.c.type, - 'with_polymorphic': ('*', pjoin), - 'polymorphic_identity': 'employee' + "polymorphic_on": pjoin.c.type, + "with_polymorphic": ("*", pjoin), + "polymorphic_identity": "employee", } + class Engineer(Employee): __table__ = engineer_table __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True} + "polymorphic_identity": "engineer", + "concrete": True, + } + class Manager(Employee): __table__ = manager_table __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True} + "polymorphic_identity": "manager", + "concrete": True, + } Alternatively, the same :class:`_schema.Table` objects can be used in fully "classical" style, without using Declarative at all. @@ -864,16 +928,19 @@ A constructor similar to that supplied by Declarative is illustrated:: for k in kw: setattr(self, k, kw[k]) + class Manager(Employee): pass + class Engineer(Employee): pass + employee_mapper = mapper_registry.map_imperatively( Employee, pjoin, - with_polymorphic=('*', pjoin), + with_polymorphic=("*", pjoin), polymorphic_on=pjoin.c.type, ) manager_mapper = mapper_registry.map_imperatively( @@ -881,18 +948,16 @@ A constructor similar to that supplied by Declarative is illustrated:: managers_table, inherits=employee_mapper, concrete=True, - polymorphic_identity='manager', + polymorphic_identity="manager", ) engineer_mapper = mapper_registry.map_imperatively( Engineer, engineers_table, inherits=employee_mapper, concrete=True, - polymorphic_identity='engineer', + polymorphic_identity="engineer", ) - - The "abstract" example can also be mapped using "semi-classical" or "classical" style. 
The difference is that instead of applying the "polymorphic union" to the :paramref:`.mapper.with_polymorphic` parameter, we apply it directly @@ -901,37 +966,46 @@ mapping is illustrated below:: from sqlalchemy.orm import polymorphic_union - pjoin = polymorphic_union({ - 'manager': managers_table, - 'engineer': engineers_table - }, 'type', 'pjoin') + pjoin = polymorphic_union( + { + "manager": managers_table, + "engineer": engineers_table, + }, + "type", + "pjoin", + ) + class Employee(Base): __table__ = pjoin __mapper_args__ = { - 'polymorphic_on': pjoin.c.type, - 'with_polymorphic': '*', - 'polymorphic_identity': 'employee' + "polymorphic_on": pjoin.c.type, + "with_polymorphic": "*", + "polymorphic_identity": "employee", } + class Engineer(Employee): __table__ = engineer_table __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True} + "polymorphic_identity": "engineer", + "concrete": True, + } + class Manager(Employee): __table__ = manager_table __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True} + "polymorphic_identity": "manager", + "concrete": True, + } Above, we use :func:`.polymorphic_union` in the same manner as before, except that we omit the ``employee`` table. .. seealso:: - :ref:`classical_mapping` - background information on "classical" mappings + :ref:`orm_imperative_mapping` - background information on imperative, or "classical" mappings @@ -955,47 +1029,47 @@ such a configuration is as follows:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee") class Employee(ConcreteBase, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } The next complexity with concrete inheritance and relationships involves @@ -1015,50 +1089,50 @@ each of the relationships:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") class Employee(ConcreteBase, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 
'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } The above limitation is related to the current implementation, including diff --git a/doc/build/orm/inheritance_loading.rst b/doc/build/orm/inheritance_loading.rst index daf60b7f834..281a43a5c52 100644 --- a/doc/build/orm/inheritance_loading.rst +++ b/doc/build/orm/inheritance_loading.rst @@ -104,7 +104,7 @@ subclasses: entity = with_polymorphic(Employee, [Engineer, Manager]) # include columns for all mapped subclasses - entity = with_polymorphic(Employee, '*') + entity = with_polymorphic(Employee, "*") .. tip:: @@ -135,18 +135,15 @@ with the same name: .. sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], aliased=True) - manager_employee = with_polymorphic( - Employee, [Manager], aliased=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True) + manager_employee = with_polymorphic(Employee, [Manager], aliased=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -195,18 +192,15 @@ is necessary: .. 
sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], flat=True) - manager_employee = with_polymorphic( - Employee, [Manager], flat=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], flat=True) + manager_employee = with_polymorphic(Employee, [Manager], flat=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -260,11 +254,11 @@ specific to ``Engineer`` as well as ``Manager`` in terms of ``eng_plus_manager`` eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager).filter( - or_( - eng_plus_manager.Engineer.engineer_info=='x', - eng_plus_manager.Manager.manager_data=='y' - ) - ) + or_( + eng_plus_manager.Engineer.engineer_info == "x", + eng_plus_manager.Manager.manager_data == "y", + ) + ) A query as above would generate SQL resembling the following: @@ -307,15 +301,15 @@ default. We can add the parameter to our ``Employee`` mapping first introduced at :ref:`joined_inheritance`:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, - 'with_polymorphic': '*' + "polymorphic_identity": "employee", + "polymorphic_on": type, + "with_polymorphic": "*", } Above is a common setting for :paramref:`.mapper.with_polymorphic`, @@ -339,22 +333,17 @@ that they should individually participate in polymorphic loading by default using the :paramref:`.mapper.polymorphic_load` parameter:: class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'engineer', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "engineer", "polymorphic_load": "inline"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "manager", "polymorphic_load": "inline"} Setting the :paramref:`.mapper.polymorphic_load` parameter to the value ``"inline"`` means that the ``Engineer`` and ``Manager`` classes above @@ -374,14 +363,9 @@ that entity, so that the entity (and its subclasses) can be referred to directly, rather than using an alias object. 
For simple cases it might be considered to be more succinct:: - session.query(Employee).\ - with_polymorphic([Engineer, Manager]).\ - filter( - or_( - Engineer.engineer_info=='w', - Manager.manager_data=='q' - ) - ) + session.query(Employee).with_polymorphic([Engineer, Manager]).filter( + or_(Engineer.engineer_info == "w", Manager.manager_data == "q") + ) The :meth:`_query.Query.with_polymorphic` method has a more complicated job than the :func:`_orm.with_polymorphic` function, as it needs to correctly @@ -445,37 +429,35 @@ by default by specifying the :paramref:`.mapper.polymorphic_load` parameter, using the value ``"selectin"`` on a per-subclass basis:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'polymorphic_on': type - } + __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type} + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'engineer', + "polymorphic_load": "selectin", + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } - Unlike when using :func:`_orm.with_polymorphic`, when using the :func:`_orm.selectin_polymorphic` style of loading, we do **not** have the ability to refer to the ``Engineer`` or ``Manager`` entities within our main @@ -491,8 +473,7 @@ loading via the :func:`_orm.joinedload` function:: from sqlalchemy.orm import selectin_polymorphic query = session.query(Employee).options( - selectin_polymorphic(Employee, [Manager, Engineer]), - joinedload(Manager.paperwork) + selectin_polymorphic(Employee, [Manager, Engineer]), joinedload(Manager.paperwork) ) Using the query above, we get three SELECT statements emitted, however @@ -541,24 +522,22 @@ a load of ``Manager`` also fully loads ``VicePresident`` subtypes at the same ti # use "Employee" example from the enclosing section + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } + class VicePresident(Manager): vp_info = Column(String(30)) - __mapper_args__ = { - "polymorphic_load": "inline", - "polymorphic_identity": "vp" - } - + __mapper_args__ = {"polymorphic_load": "inline", "polymorphic_identity": "vp"} Above, we add a ``vp_info`` column to the ``manager`` table, local to the ``VicePresident`` subclass. 
This subclass is linked to the polymorphic @@ -592,8 +571,7 @@ set up, we could get the same result as follows:: manager_poly = with_polymorphic(Manager, [VicePresident]) - s.query(Employee).options( - selectin_polymorphic(Employee, [manager_poly])).all() + s.query(Employee).options(selectin_polymorphic(Employee, [manager_poly])).all() .. _inheritance_of_type: @@ -619,33 +597,35 @@ with a ``Company`` object. We'll add a ``company_id`` column to the .. sourcecode:: python class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) - employees = relationship("Employee", - backref='company') + employees = relationship("Employee", backref="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) type = Column(String(20)) - company_id = Column(Integer, ForeignKey('company.id')) + company_id = Column(Integer, ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee', + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'manager'} + __mapper_args__ = {"polymorphic_identity": "manager"} When querying from ``Company`` onto the ``Employee`` relationship, the :meth:`_query.Query.join` method as well as operators like :meth:`.PropComparator.any` @@ -656,34 +636,29 @@ against the ``Engineer`` class, we can tell those methods to join or subquery against the set of columns representing the subclass using the :meth:`~.orm.interfaces.PropComparator.of_type` operator:: - session.query(Company).\ - join(Company.employees.of_type(Engineer)).\ - filter(Engineer.engineer_info=='someinfo') + session.query(Company).join(Company.employees.of_type(Engineer)).filter( + Engineer.engineer_info == "someinfo" + ) Similarly, to join from ``Company`` to the polymorphic entity that includes both ``Engineer`` and ``Manager`` columns:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer]) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer]) - session.query(Company).\ - join(Company.employees.of_type(manager_and_engineer)).\ - filter( - or_( - manager_and_engineer.Engineer.engineer_info == 'someinfo', - manager_and_engineer.Manager.manager_data == 'somedata' - ) + session.query(Company).join(Company.employees.of_type(manager_and_engineer)).filter( + or_( + manager_and_engineer.Engineer.engineer_info == "someinfo", + manager_and_engineer.Manager.manager_data == "somedata", ) + ) The :meth:`.PropComparator.any` and :meth:`.PropComparator.has` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`, such as when the embedded criterion is in terms of a subclass:: - session.query(Company).\ - filter( - Company.employees.of_type(Engineer). 
- any(Engineer.engineer_info=='someinfo') - ).all() + session.query(Company).filter( + Company.employees.of_type(Engineer).any(Engineer.engineer_info == "someinfo") + ).all() .. _eagerloading_polymorphic_subtypes: @@ -708,16 +683,11 @@ can be used to combine eager loading and :func:`_orm.with_polymorphic`, so that all sub-attributes of all referenced subtypes can be loaded:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer], - flat=True) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer], flat=True) - session.query(Company).\ - options( - joinedload( - Company.employees.of_type(manager_and_engineer) - ) - ) + session.query(Company).options( + joinedload(Company.employees.of_type(manager_and_engineer)) + ) .. note:: @@ -866,9 +836,7 @@ In our example from :ref:`single_inheritance`, the ``Manager`` mapping for examp class Manager(Employee): manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager' - } + __mapper_args__ = {"polymorphic_identity": "manager"} Above, there would be no ``Employee.manager_data`` attribute, even though the ``employee`` table has a ``manager_data`` column. @@ -914,13 +882,10 @@ inheritance in the case of single inheritance; it allows both for eager loading of subclass attributes as well as specification of subclasses in a query, just without the overhead of using OUTER JOIN:: - employee_poly = with_polymorphic(Employee, '*') + employee_poly = with_polymorphic(Employee, "*") q = session.query(employee_poly).filter( - or_( - employee_poly.name == 'a', - employee_poly.Manager.manager_data == 'b' - ) + or_(employee_poly.name == "a", employee_poly.Manager.manager_data == "b") ) Above, our query remains against a single table however we can refer to the diff --git a/doc/build/orm/internals.rst b/doc/build/orm/internals.rst index 8520fd07c14..54e0dd59cf9 100644 --- a/doc/build/orm/internals.rst +++ b/doc/build/orm/internals.rst @@ -16,7 +16,6 @@ sections, are listed here. .. autoclass:: ClassManager :members: - :inherited-members: .. 
autoclass:: ColumnProperty :members: diff --git a/doc/build/orm/join_conditions.rst b/doc/build/orm/join_conditions.rst index af314f221eb..e9ab6a39eef 100644 --- a/doc/build/orm/join_conditions.rst +++ b/doc/build/orm/join_conditions.rst @@ -25,8 +25,9 @@ class:: Base = declarative_base() + class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -36,8 +37,9 @@ class:: billing_address = relationship("Address") shipping_address = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) street = Column(String) city = Column(String) @@ -64,7 +66,7 @@ by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -127,18 +129,21 @@ load those ``Address`` objects which specify a city of "Boston":: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - boston_addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.city=='Boston')") + boston_addresses = relationship( + "Address", + primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) street = Column(String) city = Column(String) @@ -208,19 +213,21 @@ type of the other:: Base = declarative_base() + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The above relationship will produce a join like:: @@ -241,8 +248,9 @@ SQL expressions:: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -250,11 +258,10 @@ SQL expressions:: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. _relationship_custom_operator: @@ -264,35 +271,29 @@ Using custom operators in join conditions Another use case for relationships is the use of custom operators, such as PostgreSQL's "is contained within" ``<<`` operator when joining with types such as :class:`_postgresql.INET` and :class:`_postgresql.CIDR`. 
-For custom operators we use the :meth:`.Operators.op` function:: +For custom boolean operators we use the :meth:`.Operators.bool_op` function:: - inet_column.op("<<")(cidr_column) + inet_column.bool_op("<<")(cidr_column) -However, if we construct a :paramref:`_orm.relationship.primaryjoin` using this -operator, :func:`_orm.relationship` will still need more information. This is because -when it examines our primaryjoin condition, it specifically looks for operators -used for **comparisons**, and this is typically a fixed list containing known -comparison operators such as ``==``, ``<``, etc. So for our custom operator -to participate in this system, we need it to register as a comparison operator -using the :paramref:`~.Operators.op.is_comparison` parameter:: - - inet_column.op("<<", is_comparison=True)(cidr_column) - -A complete example:: +A comparison like the above may be used directly with +:paramref:`_orm.relationship.primaryjoin` when constructing +a :func:`_orm.relationship`:: class IPA(Base): - __tablename__ = 'ip_address' + __tablename__ = "ip_address" id = Column(Integer, primary_key=True) v4address = Column(INET) - network = relationship("Network", - primaryjoin="IPA.v4address.op('<<', is_comparison=True)" - "(foreign(Network.v4representation))", - viewonly=True - ) + network = relationship( + "Network", + primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))", + viewonly=True, + ) + + class Network(Base): - __tablename__ = 'network' + __tablename__ = "network" id = Column(Integer, primary_key=True) v4representation = Column(CIDR) @@ -306,10 +307,6 @@ Will render as:: SELECT ip_address.id AS ip_address_id, ip_address.v4address AS ip_address_v4address FROM ip_address JOIN network ON ip_address.v4address << network.v4representation -.. versionadded:: 0.9.2 - Added the :paramref:`.Operators.op.is_comparison` - flag to assist in the creation of :func:`_orm.relationship` constructs using - custom operators. - .. _relationship_custom_operator_sql_function: Custom operators based on SQL functions @@ -329,6 +326,7 @@ two expressions. The below example illustrates this with the from sqlalchemy import Column, Integer, func from sqlalchemy.orm import relationship, foreign + class Polygon(Base): __tablename__ = "polygon" id = Column(Integer, primary_key=True) @@ -339,6 +337,7 @@ two expressions. 
The below example illustrates this with the viewonly=True, ) + class Point(Base): __tablename__ = "point" id = Column(Integer, primary_key=True) @@ -368,35 +367,34 @@ for both; then to make ``Article`` refer to ``Writer`` as well, ``Article.magazine`` and ``Article.writer``:: class Magazine(Base): - __tablename__ = 'magazine' + __tablename__ = "magazine" id = Column(Integer, primary_key=True) class Article(Base): - __tablename__ = 'article' + __tablename__ = "article" article_id = Column(Integer) - magazine_id = Column(ForeignKey('magazine.id')) + magazine_id = Column(ForeignKey("magazine.id")) writer_id = Column() magazine = relationship("Magazine") writer = relationship("Writer") __table_args__ = ( - PrimaryKeyConstraint('article_id', 'magazine_id'), + PrimaryKeyConstraint("article_id", "magazine_id"), ForeignKeyConstraint( - ['writer_id', 'magazine_id'], - ['writer.id', 'writer.magazine_id'] + ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"] ), ) class Writer(Base): - __tablename__ = 'writer' + __tablename__ = "writer" id = Column(Integer, primary_key=True) - magazine_id = Column(ForeignKey('magazine.id'), primary_key=True) + magazine_id = Column(ForeignKey("magazine.id"), primary_key=True) magazine = relationship("Magazine") When the above mapping is configured, we will see this warning emitted:: @@ -443,7 +441,7 @@ To get just #1 and #2, we could specify only ``Article.writer_id`` as the class Article(Base): # ... - writer = relationship("Writer", foreign_keys='Article.writer_id') + writer = relationship("Writer", foreign_keys="Article.writer_id") However, this has the effect of ``Article.writer`` not taking ``Article.magazine_id`` into account when querying against ``Writer``: @@ -468,7 +466,8 @@ annotating with :func:`_orm.foreign`:: writer = relationship( "Writer", primaryjoin="and_(Writer.id == foreign(Article.writer_id), " - "Writer.magazine_id == Article.magazine_id)") + "Writer.magazine_id == Article.magazine_id)", + ) .. versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used as the synchronization target from more than one relationship @@ -494,16 +493,16 @@ is considered to be "many to one". 
For the comparison we'll use here, we'll be dealing with collections so we keep things configured as "one to many":: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" path = Column(String, primary_key=True) - descendants = relationship('Element', - primaryjoin= - remote(foreign(path)).like( - path.concat('/%')), - viewonly=True, - order_by=path) + descendants = relationship( + "Element", + primaryjoin=remote(foreign(path)).like(path.concat("/%")), + viewonly=True, + order_by=path, + ) Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``, we seek for a load of ``Element.descendants`` to look like:: @@ -542,20 +541,24 @@ is when establishing a many-to-many relationship from a class to itself, as show Base = declarative_base() - node_to_node = Table("node_to_node", Base.metadata, + node_to_node = Table( + "node_to_node", + Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) + class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary=node_to_node, - primaryjoin=id==node_to_node.c.left_node_id, - secondaryjoin=id==node_to_node.c.right_node_id, - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary=node_to_node, + primaryjoin=id == node_to_node.c.left_node_id, + secondaryjoin=id == node_to_node.c.right_node_id, + backref="left_nodes", ) Where above, SQLAlchemy can't know automatically which columns should connect @@ -573,14 +576,15 @@ When referring to a plain :class:`_schema.Table` object in a declarative string, use the string name of the table as it is present in the :class:`_schema.MetaData`:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary="node_to_node", - primaryjoin="Node.id==node_to_node.c.left_node_id", - secondaryjoin="Node.id==node_to_node.c.right_node_id", - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary="node_to_node", + primaryjoin="Node.id==node_to_node.c.left_node_id", + secondaryjoin="Node.id==node_to_node.c.right_node_id", + backref="left_nodes", ) .. 
warning:: When passed as a Python-evaluable string, the @@ -600,26 +604,38 @@ to ``node.c.id``:: metadata_obj = MetaData() mapper_registry = registry() - node_to_node = Table("node_to_node", metadata_obj, + node_to_node = Table( + "node_to_node", + metadata_obj, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) - node = Table("node", metadata_obj, - Column('id', Integer, primary_key=True), - Column('label', String) + node = Table( + "node", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("label", String), ) + + class Node(object): pass - mapper_registry.map_imperatively(Node, node, properties={ - 'right_nodes':relationship(Node, - secondary=node_to_node, - primaryjoin=node.c.id==node_to_node.c.left_node_id, - secondaryjoin=node.c.id==node_to_node.c.right_node_id, - backref="left_nodes" - )}) + mapper_registry.map_imperatively( + Node, + node, + properties={ + "right_nodes": relationship( + Node, + secondary=node_to_node, + primaryjoin=node.c.id == node_to_node.c.left_node_id, + secondaryjoin=node.c.id == node_to_node.c.right_node_id, + backref="left_nodes", + ) + }, + ) Note that in both examples, the :paramref:`_orm.relationship.backref` keyword specifies a ``left_nodes`` backref - when @@ -661,35 +677,38 @@ target consisting of multiple tables. Below is an example of such a join condition (requires version 0.9.2 at least to function as is):: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + + d = relationship( + "D", + secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)", + primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", + secondaryjoin="D.id == B.d_id", + uselist=False, + viewonly=True, + ) - d = relationship("D", - secondary="join(B, D, B.d_id == D.id)." 
- "join(C, C.d_id == D.id)", - primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", - secondaryjoin="D.id == B.d_id", - uselist=False, - viewonly=True - ) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - d_id = Column(ForeignKey('d.id')) + a_id = Column(ForeignKey("a.id")) + d_id = Column(ForeignKey("d.id")) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -761,33 +780,37 @@ entities ``C`` and ``D``, which also must have rows that line up with the rows in both ``A`` and ``B`` simultaneously:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) some_c_value = Column(String) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) - b_id = Column(ForeignKey('b.id')) + c_id = Column(ForeignKey("c.id")) + b_id = Column(ForeignKey("b.id")) some_d_value = Column(String) + # 1. set up the join() as a variable, so we can refer # to it in the mapping multiple times. j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) @@ -839,9 +862,10 @@ so in terms of ``B_viacd_subquery`` rather than ``B`` directly: .. sourcecode:: python+sql ( - sess.query(A).join(A.b). - filter(B_viacd_subquery.some_b_column == "some b"). - order_by(B_viacd_subquery.id) + sess.query(A) + .join(A.b) + .filter(B_viacd_subquery.some_b_column == "some b") + .order_by(B_viacd_subquery.id) ).all() {opensql}SELECT a.id AS a_id, a.b_id AS a_b_id @@ -863,35 +887,32 @@ illustrates a non-primary mapper relationship that will load the first ten items for each collection:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) + partition = select( - B, - func.row_number().over( - order_by=B.id, partition_by=B.a_id - ).label('index') + B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index") ).alias() partitioned_b = aliased(B, partition) A.partitioned_bs = relationship( - partitioned_b, - primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) + partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) ) We can use the above ``partitioned_bs`` relationship with most of the loader strategies, such as :func:`.selectinload`:: for a1 in s.query(A).options(selectinload(A.partitioned_bs)): - print(a1.partitioned_bs) # <-- will be no more than ten objects + print(a1.partitioned_bs) # <-- will be no more than ten objects Where above, the "selectinload" query looks like: @@ -933,7 +954,7 @@ conjunction with :class:`_query.Query` as follows: .. sourcecode:: python class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) @property @@ -946,4 +967,4 @@ of special Python attributes. .. 
seealso:: - :ref:`mapper_hybrids` \ No newline at end of file + :ref:`mapper_hybrids` diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 0aca6cd0c97..fdb27806f47 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -1,3 +1,3 @@ :orphan: -Moved! :doc:`/orm/loading_relationships` \ No newline at end of file +Moved! :doc:`/orm/loading_relationships` diff --git a/doc/build/orm/loading_columns.rst b/doc/build/orm/loading_columns.rst index de10901e463..a50ac07b971 100644 --- a/doc/build/orm/loading_columns.rst +++ b/doc/build/orm/loading_columns.rst @@ -26,8 +26,9 @@ attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column + class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) @@ -38,9 +39,9 @@ attribute is first referenced on the individual object instance:: Classical mappings as always place the usage of :func:`_orm.deferred` in the ``properties`` dictionary against the table-bound :class:`_schema.Column`:: - mapper_registry.map_imperatively(Book, book_table, properties={ - 'photo':deferred(book_table.c.photo) - }) + mapper_registry.map_imperatively( + Book, book_table, properties={"photo": deferred(book_table.c.photo)} + ) Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a @@ -49,15 +50,15 @@ photos will be loaded in one SELECT statement. The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) - photo1 = deferred(Column(Binary), group='photos') - photo2 = deferred(Column(Binary), group='photos') - photo3 = deferred(Column(Binary), group='photos') + photo1 = deferred(Column(Binary), group="photos") + photo2 = deferred(Column(Binary), group="photos") + photo3 = deferred(Column(Binary), group="photos") .. _deferred_options: @@ -73,7 +74,7 @@ basic query options are :func:`_orm.defer` and from sqlalchemy.orm import undefer query = session.query(Book) - query = query.options(defer('summary'), undefer('excerpt')) + query = query.options(defer("summary"), undefer("excerpt")) query.all() Above, the "summary" column will not load until accessed, and the "excerpt" @@ -85,7 +86,7 @@ using :func:`_orm.undefer_group`, sending in the group name:: from sqlalchemy.orm import undefer_group query = session.query(Book) - query.options(undefer_group('photos')).all() + query.options(undefer_group("photos")).all() .. 
_deferred_loading_w_multiple: @@ -117,8 +118,8 @@ those explicitly specified:: query = session.query(Author) query = query.options( - joinedload(Author.books).load_only(Book.summary, Book.excerpt), - ) + joinedload(Author.books).load_only(Book.summary, Book.excerpt), + ) Option structures as above can also be organized in more complex ways, such as hierarchically using the :meth:`_orm.Load.options` @@ -132,14 +133,13 @@ may be used:: query = session.query(Author) query = query.options( - joinedload(Author.book).options( - load_only(Book.summary, Book.excerpt), - joinedload(Book.citations).options( - joinedload(Citation.author), - defer(Citation.fulltext) - ) - ) - ) + joinedload(Author.book).options( + load_only(Book.summary, Book.excerpt), + joinedload(Book.citations).options( + joinedload(Citation.author), defer(Citation.fulltext) + ), + ) + ) .. versionadded:: 1.3.6 Added :meth:`_orm.Load.options` to allow easier construction of hierarchies of loader options. @@ -154,7 +154,7 @@ to create the same structure as we did above using :meth:`_orm.Load.options` as: query = query.options( joinedload(Author.book).load_only(Book.summary, Book.excerpt), defaultload(Author.book).joinedload(Book.citations).joinedload(Citation.author), - defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext) + defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext), ) .. seealso:: @@ -173,8 +173,7 @@ the "summary" and "excerpt" columns, we could say:: from sqlalchemy.orm import defer from sqlalchemy.orm import undefer - session.query(Book).options( - defer('*'), undefer("summary"), undefer("excerpt")) + session.query(Book).options(defer("*"), undefer("summary"), undefer("excerpt")) Above, the :func:`.defer` option is applied using a wildcard to all column attributes on the ``Book`` class. Then, the :func:`.undefer` option is used @@ -208,9 +207,7 @@ both at once. 
Using :class:`_orm.Load` looks like:: from sqlalchemy.orm import Load query = session.query(Book, Author).join(Book.author) - query = query.options( - Load(Book).load_only(Book.summary, Book.excerpt) - ) + query = query.options(Load(Book).load_only(Book.summary, Book.excerpt)) Above, :class:`_orm.Load` is used in conjunction with the exclusionary option :func:`.load_only` so that the deferral of all other columns only takes @@ -246,16 +243,15 @@ Deferred "raiseload" can be configured at the mapper level via class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) - book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() - + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() Column Deferral API ------------------- @@ -286,8 +282,8 @@ The bundle allows columns to be grouped together:: from sqlalchemy.orm import Bundle - bn = Bundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): + bn = Bundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): print(row.mybundle.data1, row.mybundle.data2) The bundle can be subclassed to provide custom behaviors when results @@ -300,13 +296,14 @@ return structure with a straight Python dictionary:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. note:: @@ -322,9 +319,9 @@ return structure with a straight Python dictionary:: A result from the above bundle will return dictionary values:: - bn = DictBundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): - print(row.mybundle['data1'], row.mybundle['data2']) + bn = DictBundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): + print(row.mybundle["data1"], row.mybundle["data2"]) The :class:`.Bundle` construct is also integrated into the behavior of :func:`.composite`, where it is used to return composite attributes as objects diff --git a/doc/build/orm/loading_objects.rst b/doc/build/orm/loading_objects.rst index 956ef2f6995..3f6c84bf1d2 100644 --- a/doc/build/orm/loading_objects.rst +++ b/doc/build/orm/loading_objects.rst @@ -24,5 +24,4 @@ sections are currently mixed as far as which style they are using. 
loading_columns loading_relationships inheritance_loading - constructors query diff --git a/doc/build/orm/loading_relationships.rst b/doc/build/orm/loading_relationships.rst index 5a1d5151d42..ad77f6e0de5 100644 --- a/doc/build/orm/loading_relationships.rst +++ b/doc/build/orm/loading_relationships.rst @@ -88,10 +88,10 @@ For example, to configure a relationship to use joined eager loading when the parent object is queried:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - children = relationship("Child", lazy='joined') + children = relationship("Child", lazy="joined") Above, whenever a collection of ``Parent`` objects are loaded, each ``Parent`` will also have its ``children`` collection populated, using @@ -128,16 +128,16 @@ The loader options can also be "chained" using **method chaining** to specify how loading should occur further levels deep:: session.query(Parent).options( - joinedload(Parent.children). - subqueryload(Child.subelements)).all() + joinedload(Parent.children).subqueryload(Child.subelements) + ).all() Chained loader options can be applied against a "lazy" loaded collection. This means that when a collection or association is lazily loaded upon access, the specified option will then take effect:: session.query(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() Above, the query will return ``Parent`` objects without the ``children`` collections loaded. When the ``children`` collection on a particular @@ -149,9 +149,7 @@ The above examples, using :class:`_orm.Query`, are now referred to as :term:`1.x style` queries. The options system is available as well for :term:`2.0 style` queries using the :meth:`_sql.Select.options` method:: - stmt = select(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements)) + stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements)) result = session.execute(stmt) @@ -191,18 +189,14 @@ Using method chaining, the loader style of each link in the path is explicitly stated. To navigate along a path without changing the existing loader style of a particular attribute, the :func:`.defaultload` method/function may be used:: - session.query(A).options( - defaultload(A.atob). - joinedload(B.btoc)).all() + session.query(A).options(defaultload(A.atob).joinedload(B.btoc)).all() A similar approach can be used to specify multiple sub-options at once, using the :meth:`_orm.Load.options` method:: session.query(A).options( - defaultload(A.atob).options( - joinedload(B.btoc), - joinedload(B.btod) - )).all() + defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod)) + ).all() .. versionadded:: 1.3.6 added :meth:`_orm.Load.options` @@ -219,8 +213,8 @@ the :meth:`_orm.Load.options` method:: memory. For example, given the previous example:: session.query(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() if the ``children`` collection on a particular ``Parent`` object loaded by the above query is expired (such as when a :class:`.Session` object's @@ -235,8 +229,8 @@ the :meth:`_orm.Load.options` method:: # change the options on Parent objects that were already loaded session.query(Parent).populate_existing().options( - lazyload(Parent.children). 
- lazyload(Child.subelements)).all() + lazyload(Parent.children).lazyload(Child.subelements) + ).all() If the objects loaded above are fully cleared from the :class:`.Session`, such as due to garbage collection or that :meth:`.Session.expunge_all` @@ -310,6 +304,7 @@ replaces the behavior of lazy loading with an informative error being raised:: from sqlalchemy.orm import raiseload + session.query(User).options(raiseload(User.addresses)) Above, a ``User`` object loaded from the above query will not have @@ -320,8 +315,7 @@ access this attribute, an ORM exception is raised. indicate that all relationships should use this strategy. For example, to set up only one attribute as eager loading, and all the rest as raise:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) The above wildcard will apply to **all** relationships not just on ``Order`` besides ``items``, but all those on the ``Item`` objects as well. To set up @@ -330,14 +324,11 @@ path with :class:`_orm.Load`:: from sqlalchemy.orm import Load - session.query(Order).options( - joinedload(Order.items), Load(Order).raiseload('*')) + session.query(Order).options(joinedload(Order.items), Load(Order).raiseload("*")) Conversely, to set up the raise for just the ``Item`` objects:: - session.query(Order).options( - joinedload(Order.items).raiseload('*')) - + session.query(Order).options(joinedload(Order.items).raiseload("*")) The :func:`.raiseload` option applies only to relationship attributes. For column-oriented attributes, the :func:`.defer` option supports the @@ -382,9 +373,9 @@ using the :func:`_orm.joinedload` loader option: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -409,13 +400,12 @@ at the mapping level via the :paramref:`_orm.relationship.innerjoin` flag:: class Address(Base): # ... - user_id = Column(ForeignKey('users.id'), nullable=False) + user_id = Column(ForeignKey("users.id"), nullable=False) user = relationship(User, lazy="joined", innerjoin=True) At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag:: - session.query(Address).options( - joinedload(Address.user, innerjoin=True)) + session.query(Address).options(joinedload(Address.user, innerjoin=True)) The JOIN will right-nest itself when applied in a chain that includes an OUTER JOIN: @@ -423,8 +413,8 @@ an OUTER JOIN: .. sourcecode:: python+sql >>> session.query(User).options( - ... joinedload(User.addresses). - ... joinedload(Address.widgets, innerjoin=True)).all() + ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True) + ... ).all() {opensql}SELECT widgets_1.id AS widgets_1_id, widgets_1.name AS widgets_1_name, @@ -519,10 +509,13 @@ named in the query: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... 
) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -544,10 +537,13 @@ address is to use :meth:`_query.Query.join`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql} SELECT users.id AS users_id, @@ -568,11 +564,14 @@ are ordering on, the other is used anonymously to load the contents of the .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -600,12 +599,14 @@ to see why :func:`joinedload` does what it does, consider if we were .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -634,12 +635,14 @@ into :func:`.subqueryload`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(subqueryload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(subqueryload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -688,9 +691,12 @@ the collection members to load them at once: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(subqueryload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User) + ... .options(subqueryload(User.addresses)) + ... .filter_by(name="jack") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -752,18 +758,15 @@ the same ordering as used by the parent query. 
Without it, there is a chance that the inner query could return the wrong rows:: # incorrect, no ORDER BY - session.query(User).options( - subqueryload(User.addresses)).first() + session.query(User).options(subqueryload(User.addresses)).first() # incorrect if User.name is not unique - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name).first() + session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first() # correct - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name, User.id).first() + session.query(User).options(subqueryload(User.addresses)).order_by( + User.name, User.id + ).first() .. seealso:: @@ -793,9 +796,12 @@ order to load related associations: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter(or_(User.name == 'jack', User.name == 'ed')).all() + >>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter(or_(User.name == "jack", User.name == "ed")) + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -829,8 +835,7 @@ value from the parent object is used: .. sourcecode:: python+sql - >>> session.query(Address).\ - ... options(selectinload(Address.user)).all() + >>> session.query(Address).options(selectinload(Address.user)).all() {opensql}SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1012,7 +1017,7 @@ attributes not otherwise specified in the :class:`_query.Query`. This feature is available by passing the string ``'*'`` as the argument to any of these options:: - session.query(MyClass).options(lazyload('*')) + session.query(MyClass).options(lazyload("*")) Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of all :func:`_orm.relationship` constructs in use for that query, @@ -1028,10 +1033,7 @@ query, such as :func:`.eagerload`, :func:`.subqueryload`, etc. The query below will still use joined loading for the ``widget`` relationship:: - session.query(MyClass).options( - lazyload('*'), - joinedload(MyClass.widget) - ) + session.query(MyClass).options(lazyload("*"), joinedload(MyClass.widget)) If multiple ``'*'`` options are passed, the last one overrides those previously passed. @@ -1045,8 +1047,7 @@ we can instruct all relationships on ``Address`` only to use lazy loading by first applying the :class:`_orm.Load` object, then specifying the ``*`` as a chained option:: - session.query(User, Address).options( - Load(Address).lazyload('*')) + session.query(User, Address).options(Load(Address).lazyload("*")) Above, all relationships on ``Address`` will be set to a lazy load. @@ -1073,18 +1074,18 @@ explicitly. Below, we specify a join between ``User`` and ``Address`` and additionally establish this as the basis for eager loading of ``User.addresses``:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) addresses = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... 
- q = session.query(User).join(User.addresses).\ - options(contains_eager(User.addresses)) + q = session.query(User).join(User.addresses).options(contains_eager(User.addresses)) If the "eager" portion of the statement is "aliased", the path should be specified using :meth:`.PropComparator.of_type`, which allows @@ -1096,9 +1097,11 @@ the specific :func:`_orm.aliased` construct to be passed: adalias = aliased(Address) # construct a Query object which expects the "addresses" results - query = session.query(User).\ - outerjoin(User.addresses.of_type(adalias)).\ - options(contains_eager(User.addresses.of_type(adalias))) + query = ( + session.query(User) + .outerjoin(User.addresses.of_type(adalias)) + .options(contains_eager(User.addresses.of_type(adalias))) + ) # get results normally r = query.all() @@ -1117,9 +1120,7 @@ The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the option would be used as:: - query(User).options( - contains_eager(User.orders). - contains_eager(Order.items)) + query(User).options(contains_eager(User.orders).contains_eager(Order.items)) Using contains_eager() to load a custom-filtered collection result ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1136,11 +1137,13 @@ routing it using :func:`_orm.contains_eager`, also using :meth:`_query.Query.populate_existing` to ensure any already-loaded collections are overwritten:: - q = session.query(User).\ - join(User.addresses).\ - filter(Address.email_address.like('%@aol.com')).\ - options(contains_eager(User.addresses)).\ - populate_existing() + q = ( + session.query(User) + .join(User.addresses) + .filter(Address.email_address.like("%@aol.com")) + .options(contains_eager(User.addresses)) + .populate_existing() + ) The above query will load only ``User`` objects which contain at least ``Address`` object that contains the substring ``'aol.com'`` in its @@ -1204,20 +1207,16 @@ Given the following mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - b = relationship( - "B", - backref=backref("a", uselist=False), - lazy='joined') + b_id = Column(ForeignKey("b.id")) + b = relationship("B", backref=backref("a", uselist=False), lazy="joined") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - If we query for an ``A`` row, and then ask it for ``a.b.a``, we will get an extra SELECT:: @@ -1232,10 +1231,11 @@ can create an on-load rule to populate this for us:: from sqlalchemy import event from sqlalchemy.orm import attributes + @event.listens_for(A, "load") def load_b(target, context): - if 'b' in target.__dict__: - attributes.set_committed_value(target.b, 'a', target) + if "b" in target.__dict__: + attributes.set_committed_value(target.b, "a", target) Now when we query for ``A``, we will get ``A.b`` from the joined eager load, and ``A.b.a`` from our event: @@ -1253,7 +1253,6 @@ and ``A.b.a`` from our event: (1, 0) {stop}assert a1.b.a is a1 - Relationship Loader API ----------------------- diff --git a/doc/build/orm/mapped_attributes.rst b/doc/build/orm/mapped_attributes.rst index a4fd3115d5d..5ee7d6498ff 100644 --- a/doc/build/orm/mapped_attributes.rst +++ b/doc/build/orm/mapped_attributes.rst @@ -19,15 +19,16 @@ issued when the ORM is populating the object:: from sqlalchemy.orm import validates + class EmailAddress(Base): - __tablename__ = 
'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - @validates('email') + @validates("email") def validate_email(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simple email validation") return address @@ -42,18 +43,18 @@ collection:: from sqlalchemy.orm import validates + class User(Base): # ... addresses = relationship("Address") - @validates('addresses') + @validates("addresses") def validate_address(self, key, address): - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address - The validation function by default does not get emitted for collection remove events, as the typical expectation is that a value being discarded doesn't require validation. However, :func:`.validates` supports reception @@ -63,18 +64,18 @@ argument which if ``True`` indicates that the operation is a removal:: from sqlalchemy.orm import validates + class User(Base): # ... addresses = relationship("Address") - @validates('addresses', include_removes=True) + @validates("addresses", include_removes=True) def validate_address(self, key, address, is_remove): if is_remove: - raise ValueError( - "not allowed to remove items from the collection") + raise ValueError("not allowed to remove items from the collection") else: - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address @@ -85,14 +86,15 @@ event occurs as a result of a backref:: from sqlalchemy.orm import validates + class User(Base): # ... - addresses = relationship("Address", backref='user') + addresses = relationship("Address", backref="user") - @validates('addresses', include_backrefs=False) + @validates("addresses", include_backrefs=False) def validate_address(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simplified email validation") return address @@ -131,7 +133,7 @@ plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -158,8 +160,9 @@ usable with :class:`_query.Query`. To provide these, we instead use the from sqlalchemy.ext.hybrid import hybrid_property + class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -180,6 +183,7 @@ that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session + session = Session() {sql}address = session.query(EmailAddress).\ @@ -189,14 +193,12 @@ that is, from the ``EmailAddress`` class directly: FROM address WHERE address.email = ? ('address@example.com',) - {stop} - address.email = 'otheraddress@example.com' + address.email = "otheraddress@example.com" {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? 
('otheraddress@example.com', 1) COMMIT - {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is @@ -206,7 +208,7 @@ host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -245,7 +247,6 @@ attribute, a SQL function is rendered which produces the same effect: FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') - {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. @@ -261,9 +262,10 @@ In the most basic sense, the synonym is an easy way to make a certain attribute available by an additional name:: from sqlalchemy.orm import synonym - + + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) job_status = Column(String(50)) @@ -274,19 +276,19 @@ The above class ``MyClass`` has two attributes, ``.job_status`` and ``.status`` that will behave as one attribute, both at the expression level:: - >>> print(MyClass.job_status == 'some_status') + >>> print(MyClass.job_status == "some_status") my_table.job_status = :job_status_1 - >>> print(MyClass.status == 'some_status') + >>> print(MyClass.status == "some_status") my_table.job_status = :job_status_1 and at the instance level:: - >>> m1 = MyClass(status='x') + >>> m1 = MyClass(status="x") >>> m1.status, m1.job_status ('x', 'x') - >>> m1.job_status = 'y' + >>> m1.job_status = "y" >>> m1.status, m1.job_status ('y', 'y') @@ -299,7 +301,7 @@ a user-defined :term:`descriptor`. We can supply our ``status`` synonym with a ``@property``:: class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) @@ -315,8 +317,9 @@ using the :func:`.synonym_for` decorator:: from sqlalchemy.ext.declarative import synonym_for + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst index eefd1d5d685..0e93e5e920c 100644 --- a/doc/build/orm/mapped_sql_expr.rst +++ b/doc/build/orm/mapped_sql_expr.rst @@ -21,8 +21,9 @@ will provide for us the ``fullname``, which is the string concatenation of the t from sqlalchemy.ext.hybrid import hybrid_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -51,8 +52,9 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -66,9 +68,12 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and @fullname.expression def fullname(cls): - return case([ - (cls.firstname != None, cls.firstname + " " + cls.lastname), - ], else_ = cls.lastname) + return case( + [ + (cls.firstname != None, cls.firstname + " " + cls.lastname), + ], + else_=cls.lastname, + ) .. 
_mapper_column_property_sql_expressions: @@ -95,8 +100,9 @@ follows:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -115,28 +121,30 @@ of ``Address`` objects available for a particular ``User``:: Base = declarative_base() + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) In the above example, we define a :func:`_expression.ScalarSelect` construct like the following:: stmt = ( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) Above, we first use :func:`_sql.select` to create a :class:`_sql.Select` @@ -166,9 +174,7 @@ to add an additional property after the fact:: # only works if a declarative base class is in use User.address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery() ) When using mapping styles that don't use :func:`_orm.declarative_base`, @@ -180,9 +186,10 @@ which can be obtained using :func:`_sa.inspect`:: reg = registry() + @reg.mapped class User: - __tablename__ = 'user' + __tablename__ = "user" # ... additional mapping directives @@ -191,11 +198,12 @@ which can be obtained using :func:`_sa.inspect`:: # works for any kind of mapping from sqlalchemy import inspect + inspect(User).add_property( column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == User.id) + .scalar_subquery() ) ) @@ -205,17 +213,19 @@ association table to both tables in a relationship:: from sqlalchemy import and_ + class Author(Base): # ... book_count = column_property( - select(func.count(books.c.id) - ).where( + select(func.count(books.c.id)) + .where( and_( - book_authors.c.author_id==authors.c.id, - book_authors.c.book_id==books.c.id + book_authors.c.author_id == authors.c.id, + book_authors.c.book_id == books.c.id, ) - ).scalar_subquery() + ) + .scalar_subquery() ) .. _mapper_column_property_sql_expressions_composed: @@ -238,21 +248,20 @@ attribute, which is itself a :class:`.ColumnProperty`:: class File(Base): - __tablename__ = 'file' + __tablename__ = "file" id = Column(Integer, primary_key=True) name = Column(String(64)) extension = Column(String(8)) - filename = column_property(name + '.' + extension) - path = column_property('C:/' + filename.expression) + filename = column_property(name + "." + extension) + path = column_property("C:/" + filename.expression) When the ``File`` class is used in expressions normally, the attributes assigned to ``filename`` and ``path`` are usable directly. 
The use of the :attr:`.ColumnProperty.expression` attribute is only necessary when using the :class:`.ColumnProperty` directly within the mapping definition:: - q = session.query(File.path).filter(File.filename == 'foo.txt') - + q = session.query(File.path).filter(File.filename == "foo.txt") Using a plain descriptor ------------------------ @@ -269,19 +278,18 @@ which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @property def address_count(self): - return object_session(self).\ - scalar( - select(func.count(Address.id)).\ - where(Address.user_id==self.id) - ) + return object_session(self).scalar( + select(func.count(Address.id)).where(Address.user_id == self.id) + ) The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that @@ -310,8 +318,9 @@ may be applied:: from sqlalchemy.orm import query_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -322,8 +331,8 @@ We can then query for objects of type ``A``, applying an arbitrary SQL expression to be populated into ``A.expr``:: from sqlalchemy.orm import with_expression - q = session.query(A).options( - with_expression(A.expr, A.x + A.y)) + + q = session.query(A).options(with_expression(A.expr, A.x + A.y)) The :func:`.query_expression` mapping has these caveats: @@ -341,8 +350,12 @@ The :func:`.query_expression` mapping has these caveats: To ensure the attribute is re-loaded, use :meth:`_orm.Query.populate_existing`:: - obj = session.query(A).populate_existing().options( - with_expression(A.expr, some_expr)).first() + obj = ( + session.query(A) + .populate_existing() + .options(with_expression(A.expr, some_expr)) + .first() + ) * The query_expression value **does not refresh when the object is expired**. Once the object is expired, either via :meth:`.Session.expire` @@ -352,23 +365,109 @@ The :func:`.query_expression` mapping has these caveats: a new :func:`.with_expression` directive will the attribute be set to a non-None value. -* The mapped attribute currently **cannot** be applied to other parts of the +* :func:`_orm.with_expression`, as an object loading option, only takes effect + on the **outermost part + of a query** and only for a query against a full entity, and not for arbitrary + column selects, within subqueries, or the elements of a compound + statement such as a UNION. See the next + section :ref:`mapper_querytime_expression_unions` for an example. + + .. versionchanged:: 1.4 This is new as of version 1.4. See the change notes + at :ref:`change_8879` for background. + +* The mapped attribute **cannot** be applied to other parts of the query, such as the WHERE clause, the ORDER BY clause, and make use of the ad-hoc expression; that is, this won't work:: # wont work - q = session.query(A).options( - with_expression(A.expr, A.x + A.y) - ).filter(A.expr > 5).order_by(A.expr) + q = ( + session.query(A) + .options(with_expression(A.expr, A.x + A.y)) + .filter(A.expr > 5) + .order_by(A.expr) + ) The ``A.expr`` expression will resolve to NULL in the above WHERE clause and ORDER BY clause. 
To use the expression throughout the query, assign to a variable and use that:: a_expr = A.x + A.y - q = session.query(A).options( - with_expression(A.expr, a_expr) - ).filter(a_expr > 5).order_by(a_expr) + q = ( + session.query(A) + .options(with_expression(A.expr, a_expr)) + .filter(a_expr > 5) + .order_by(a_expr) + ) .. versionadded:: 1.2 + +.. _mapper_querytime_expression_unions: + + +Using ``with_expression()`` with UNIONs, other subqueries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`_orm.with_expression` construct is an ORM loader option, and as +such may only be applied to the outermost level of a SELECT statement which +is to load a particular ORM entity. It does not have any effect if used +inside of a :func:`_sql.select` that will then be used as a subquery or +as an element within a compound statement such as a UNION. + +.. versionchanged:: 1.4 The behavior of column loader options applying + only at the outermost layer of an ORM SELECT statement was previously + not applied consistently; in 1.4 it applies to all loader options + for both columns as well as relationships. Background on this change + is at :ref:`change_8879`. + +In order to use arbitrary SQL expressions in subqueries, normal Core-style +means of adding expressions should be used. To assemble a subquery-derived +expression onto the ORM entity's :func:`_orm.query_expression` attributes, +:func:`_orm.with_expression` is used at the top layer of ORM object loading, +referencing the SQL expression within the subquery. + +.. note:: + + The example below uses :term:`2.0 style` queries in order to demonstrate a + UNION. ORM UNIONs may be assembled without ambiguity using this style + of query. + +In the example below, two :func:`_sql.select` constructs are used against +the ORM entity ``A`` with an additional SQL expression labeled in +``expr``, and combined using :func:`_sql.union_all`. Then, at the topmost +layer, the ``A`` entity is SELECTed from this UNION, using the +querying technique described at :ref:`orm_queryguide_unions`, adding an +option with :func:`_orm.with_expression` to extract this SQL expression +onto newly loaded instances of ``A``: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import union_all + >>> s1 = ( + ... select(User, func.count(Book.id).label("book_count")) + ... .join_from(User, Book) + ... .where(User.name == "spongebob") + ... ) + >>> s2 = ( + ... select(User, func.count(Book.id).label("book_count")) + ... .join_from(User, Book) + ... .where(User.name == "sandy") + ... ) + >>> union_stmt = union_all(s1, s2) + >>> orm_stmt = ( + ... select(User) + ... .from_statement(union_stmt) + ... .options(with_expression(User.book_count, union_stmt.c.book_count)) + ... ) + >>> for user in session.scalars(orm_stmt): + ... print(f"Username: {user.name} Number of books: {user.book_count}") + {execsql}SELECT user_account.id, user_account.name, user_account.fullname, count(book.id) AS book_count + FROM user_account JOIN book ON user_account.id = book.owner_id + WHERE user_account.name = ? + UNION ALL + SELECT user_account.id, user_account.name, user_account.fullname, count(book.id) AS book_count + FROM user_account JOIN book ON user_account.id = book.owner_id + WHERE user_account.name = ? + [...] 
('spongebob', 'sandy'){stop} + Username: spongebob Number of books: 3 + Username: sandy Number of books: 3 diff --git a/doc/build/orm/mapper_config.rst b/doc/build/orm/mapper_config.rst index 4de08690329..13d2ce860db 100644 --- a/doc/build/orm/mapper_config.rst +++ b/doc/build/orm/mapper_config.rst @@ -1,13 +1,20 @@ .. _mapper_config_toplevel: -==================== -Mapper Configuration -==================== +=============================== +ORM Mapped Class Configuration +=============================== -This section describes a variety of configurational patterns that are usable -with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and -know how to construct and use rudimentary mappers and relationships. +Detailed reference for ORM configuration, not including +relationships, which are detailed at +:ref:`relationship_config_toplevel`. + +For a quick look at a typical ORM configuration, start with +:ref:`orm_quickstart`. + +For an introduction to the concept of object relational mapping as implemented +in SQLAlchemy, it's first introduced in the :ref:`unified_tutorial` at +:ref:`tutorial_orm_table_metadata`. .. toctree:: @@ -15,6 +22,7 @@ know how to construct and use rudimentary mappers and relationships. mapping_styles declarative_mapping + dataclasses scalar_mapping inheritance nonstandard_mappings diff --git a/doc/build/orm/mapping_api.rst b/doc/build/orm/mapping_api.rst index 5d0b6c0d023..199d92d4315 100644 --- a/doc/build/orm/mapping_api.rst +++ b/doc/build/orm/mapping_api.rst @@ -34,6 +34,8 @@ Class Mapping API .. autofunction:: polymorphic_union +.. autofunction:: reconstructor + .. autoclass:: Mapper :members: diff --git a/doc/build/orm/mapping_columns.rst b/doc/build/orm/mapping_columns.rst index 596c64f7c5a..1ec8636b6d2 100644 --- a/doc/build/orm/mapping_columns.rst +++ b/doc/build/orm/mapping_columns.rst @@ -5,11 +5,30 @@ Mapping Table Columns ===================== -The default behavior of :func:`_orm.mapper` is to assemble all the columns in -the mapped :class:`_schema.Table` into mapped object attributes, each of which are -named according to the name of the column itself (specifically, the ``key`` -attribute of :class:`_schema.Column`). This behavior can be -modified in several ways. +Introductory background on mapping to columns falls under the subject of +:class:`.Table` configuration; the general form falls under one of three +forms: + +* :ref:`orm_declarative_table` - :class:`.Column` objects are associated with a + :class:`.Table` as well as with an ORM mapping in one step by declaring + them inline as class attributes. +* :ref:`orm_imperative_table_configuration` - :class:`.Column` objects are + associated directly with their :class:`.Table` object, as detailed at + :ref:`metadata_describing_toplevel`; the columns are then mapped by + the Declarative process by associating the :class:`.Table` with the + class to be mapped via the ``__table__`` attribute. +* :ref:`orm_imperative_mapping` - like "Imperative Table", :class:`.Column` + objects are associated directly with their :class:`.Table` object; the + columns are then mapped by the Imperative process using + :meth:`_orm.registry.map_imperatively`. + +In all of the above cases, the :class:`_orm.mapper` constructor is ultimately +invoked with a completed :class:`.Table` object passed as the selectable unit +to be mapped. 
The behavior of :class:`_orm.mapper` then is to assemble all the +columns in the mapped :class:`_schema.Table` into mapped object attributes, +each of which are named according to the name of the column itself +(specifically, the ``key`` attribute of :class:`_schema.Column`). This behavior +can be modified in several ways. .. _mapper_column_distinct_names: @@ -27,9 +46,9 @@ The name assigned to the Python attribute which maps to it that way, as we illustrate here in a Declarative mapping:: class User(Base): - __tablename__ = 'user' - id = Column('user_id', Integer, primary_key=True) - name = Column('user_name', String(50)) + __tablename__ = "user" + id = Column("user_id", Integer, primary_key=True) + name = Column("user_name", String(50)) Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. @@ -46,12 +65,14 @@ The corresponding technique for an :term:`imperative` mapping is to place the desired key in the :paramref:`_orm.mapper.properties` dictionary with the desired key:: - mapper_registry.map_imperatively(User, user_table, properties={ - 'id': user_table.c.user_id, - 'name': user_table.c.user_name, - }) - -In the next section we'll examine the usage of ``.key`` more closely. + mapper_registry.map_imperatively( + User, + user_table, + properties={ + "id": user_table.c.user_id, + "name": user_table.c.user_name, + }, + ) .. _mapper_automated_reflection_schemes: @@ -62,7 +83,7 @@ In the previous section :ref:`mapper_column_distinct_names`, we showed how a :class:`_schema.Column` explicitly mapped to a class can have a different attribute name than the column. But what if we aren't listing out :class:`_schema.Column` objects explicitly, and instead are automating the production of :class:`_schema.Table` -objects using reflection (e.g. as described in :ref:`metadata_reflection_toplevel`)? +objects using reflection (i.e. as described in :ref:`metadata_reflection_toplevel`)? In this case we can make use of the :meth:`_events.DDLEvents.column_reflect` event to intercept the production of :class:`_schema.Column` objects and provide them with the :attr:`_schema.Column.key` of our choice. The event is most easily @@ -73,42 +94,27 @@ instance:: @event.listens_for(Base.metadata, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" - column_info['key'] = "attr_%s" % column_info['name'].lower() + column_info["key"] = "attr_%s" % column_info["name"].lower() With the above event, the reflection of :class:`_schema.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): - __table__ = Table("some_table", Base.metadata, - autoload_with=some_engine) + __table__ = Table("some_table", Base.metadata, autoload_with=some_engine) -The approach also works with the :ref:`automap_toplevel` extension. See -the section :ref:`automap_intercepting_columns` for background. +The approach also works with both the :class:`.DeferredReflection` base class +as well as with the :ref:`automap_toplevel` extension. For automap +specifically, see the section :ref:`automap_intercepting_columns` for +background. .. seealso:: + :ref:`orm_declarative_reflected` + :meth:`_events.DDLEvents.column_reflect` :ref:`automap_intercepting_columns` - in the :ref:`automap_toplevel` documentation -.. 
_column_prefix: - -Naming All Columns with a Prefix --------------------------------- - -A quick approach to prefix column names, typically when mapping -to an existing :class:`_schema.Table` object, is to use ``column_prefix``:: - - class User(Base): - __table__ = user_table - __mapper_args__ = {'column_prefix':'_'} - -The above will place attribute names such as ``_user_id``, ``_user_name``, -``_password`` etc. on the mapped ``User`` class. - -This approach is uncommon in modern usage. For dealing with reflected -tables, a more flexible approach is to use that described in -:ref:`mapper_automated_reflection_schemes`. .. _column_property_options: @@ -127,8 +133,9 @@ result in the former value being loaded first:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = column_property(Column(String(50)), active_history=True) @@ -152,7 +159,7 @@ that is the string concatenation of the ``firstname`` and ``lastname`` columns:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -162,6 +169,73 @@ See examples of this usage at :ref:`mapper_sql_expressions`. .. autofunction:: column_property +.. _mapper_primary_key: + +Mapping to an Explicit Set of Primary Key Columns +------------------------------------------------- + +The :class:`.Mapper` construct in order to successfully map a table always +requires that at least one column be identified as the "primary key" for +that selectable. This is so that when an ORM object is loaded or persisted, +it can be placed in the :term:`identity map` with an appropriate +:term:`identity key`. + +To support this use case, all :class:`.FromClause` objects (where +:class:`.FromClause` is the common base for objects such as :class:`.Table`, +:class:`.Join`, :class:`.Subquery`, etc.) have an attribute +:attr:`.FromClause.primary_key` which returns a collection of those +:class:`.Column` objects that indicate they are part of a "primary key", +which is derived from each :class:`.Column` object being a member of a +:class:`.PrimaryKeyConstraint` collection that's associated with the +:class:`.Table` from which they ultimately derive. + +In those cases where the selectable being mapped does not include columns +that are explicitly part of the primary key constraint on their parent table, +a user-defined set of primary key columns must be defined. The +:paramref:`.mapper.primary_key` parameter is used for this purpose. 
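+By way of contrast, the following is a minimal sketch, using hypothetical
+table and column names that are not part of the surrounding examples, of a
+:class:`.Table` that does declare a primary key; here the
+:attr:`.FromClause.primary_key` collection is populated automatically and no
+additional mapper configuration is needed::
+
+    from sqlalchemy import Column, Integer, MetaData, String, Table
+
+    metadata = MetaData()
+
+    # hypothetical table for illustration only, with an explicitly
+    # declared primary key column
+    user = Table(
+        "user",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column("name", String(50)),
+    )
+
+    # the collection derives from the table's PrimaryKeyConstraint;
+    # printing it shows the "id" column, e.g.
+    # [Column('id', Integer(), table=<user>, primary_key=True, nullable=False)]
+    print(list(user.primary_key))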
+ +Consider the following example of an :ref:`Imperative Table ` +mapping against an existing :class:`.Table` object, as would occur when the +:class:`.Table` has been :term:`reflected` from an existing database and does +not have any declared primary key; such a table may be mapped as in the +following example:: + +    from sqlalchemy import Column +    from sqlalchemy import MetaData +    from sqlalchemy import String +    from sqlalchemy import Table +    from sqlalchemy import UniqueConstraint +    from sqlalchemy.orm import declarative_base + + +    metadata = MetaData() +    group_users = Table( +        "group_users", +        metadata, +        Column("user_id", String(40), nullable=False), +        Column("group_id", String(40), nullable=False), +        UniqueConstraint("user_id", "group_id"), +    ) + + +    Base = declarative_base() + + +    class GroupUsers(Base): +        __table__ = group_users +        __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]} + +Above, the ``group_users`` table is an association table of some kind +with string columns ``user_id`` and ``group_id``, but no primary key is set up; +instead, there is only a :class:`.UniqueConstraint` establishing that the +two columns represent a unique key. The :class:`.Mapper` does not automatically +inspect unique constraints for primary keys; instead, we make use of the +:paramref:`.mapper.primary_key` parameter, passing a collection of +``[group_users.c.user_id, group_users.c.group_id]``, indicating that these two +columns should be used in order to construct the identity key for instances +of the ``GroupUsers`` class. + + .. _include_exclude_cols: Mapping a Subset of Table Columns @@ -177,9 +251,7 @@ For example:: class User(Base): __table__ = user_table - __mapper_args__ = { - 'include_properties' :['user_id', 'user_name'] - } + __mapper_args__ = {"include_properties": ["user_id", "user_name"]} ...will map the ``User`` class to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. @@ -187,9 +259,7 @@ Similarly:: class Address(Base): __table__ = address_table - __mapper_args__ = { - 'exclude_properties' : ['street', 'city', 'state', 'zip'] - } + __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]} ...will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``. @@ -209,8 +279,8 @@ should be included or excluded:: class UserAddress(Base): __table__ = user_table.join(addresses_table) __mapper_args__ = { - 'exclude_properties' :[address_table.c.id], - 'primary_key' : [user_table.c.id] + "exclude_properties": [address_table.c.id], + "primary_key": [user_table.c.id], } .. note::
diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index e643cfce633..edec17c14bf 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -1,51 +1,61 @@ .. _orm_mapping_classes_toplevel: -======================= -Mapping Python Classes -======================= +========================== +ORM Mapped Class Overview +========================== + +Overview of ORM class mapping configuration. + +For readers new to the SQLAlchemy ORM and/or new to Python in general, +it's recommended to browse through the +:ref:`orm_quickstart` and preferably to work through the +:ref:`unified_tutorial`, where ORM configuration is first introduced at +:ref:`tutorial_orm_table_metadata`. 
+ + +ORM Mapping Styles +================== + +SQLAlchemy features two distinct styles of mapper configuration, which then +feature further sub-options for how they are set up. The variability in mapper +styles exists to suit a varied list of developer preferences, including +the degree of abstraction of a user-defined class from how it is to be +mapped to relational schema tables and columns, the kinds of class hierarchies +that are in use, including whether or not custom metaclass schemes are present, +and finally whether other class-instrumentation approaches, such as Python +dataclasses_, are in use simultaneously. + +In modern SQLAlchemy, the difference between these styles is mostly +superficial; when a particular SQLAlchemy configurational style is used to +express the intent to map a class, the internal process of mapping the class +proceeds in mostly the same way for each, where the end result is always a +user-defined class that has a :class:`_orm.Mapper` configured against a +selectable unit, typically represented by a :class:`_schema.Table` object, and +the class itself has been :term:`instrumented` to include behaviors linked to +relational operations both at the level of the class as well as on instances of +that class. As the process is basically the same in all cases, classes mapped +from different styles are always fully interoperable with each other. -SQLAlchemy historically features two distinct styles of mapper configuration. The original mapping API is commonly referred to as "classical" style, whereas the more automated style of mapping is known as "declarative" style. SQLAlchemy now refers to these two mapping styles as **imperative mapping** and **declarative mapping**. -Both styles may be used interchangeably, as the end result of each is exactly -the same - a user-defined class that has a :class:`_orm.Mapper` configured -against a selectable unit, typically represented by a :class:`_schema.Table` -object. - -Both imperative and declarative mapping begin with an ORM :class:`_orm.registry` -object, which maintains a set of classes that are mapped. This registry -is present for all mappings. +Regardless of which style of mapping is used, all ORM mappings as of SQLAlchemy 1.4 +originate from a single object known as :class:`_orm.registry`, which is a +registry of mapped classes. Using this registry, a set of mapper configurations +can be finalized as a group, and classes within a particular registry may refer +to each other by name within the configurational process. .. versionchanged:: 1.4 Declarative and classical mapping are now referred to as "declarative" and "imperative" mapping, and are unified internally, all originating from the :class:`_orm.registry` construct that represents a collection of related mappings. -The full suite of styles can be hierarchically organized as follows: - -* :ref:`orm_declarative_mapping` - * Using :func:`_orm.declarative_base` Base class w/ metaclass - * :ref:`orm_declarative_table` - * :ref:`Imperative Table (a.k.a. "hybrid table") ` - * Using :meth:`_orm.registry.mapped` Declarative Decorator - * :ref:`Declarative Table ` - combine :meth:`_orm.registry.mapped` - with ``__tablename__`` - * Imperative Table (Hybrid) - combine :meth:`_orm.registry.mapped` with ``__table__`` - * :ref:`orm_declarative_dataclasses` - * :ref:`orm_declarative_dataclasses_imperative_table` - * :ref:`orm_declarative_dataclasses_declarative_table` - * :ref:`orm_declarative_attrs_imperative_table` -* :ref:`Imperative (a.k.a. 
"classical" mapping) ` - * Using :meth:`_orm.registry.map_imperatively` - * :ref:`orm_imperative_dataclasses` - .. _orm_declarative_mapping: Declarative Mapping -=================== +------------------- The **Declarative Mapping** is the typical way that mappings are constructed in modern SQLAlchemy. The most common pattern @@ -62,7 +72,7 @@ used in a declarative table mapping:: # an example mapping using the base class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -73,11 +83,10 @@ Above, the :func:`_orm.declarative_base` callable returns a new base class from which new classes to be mapped may inherit from, as above a new mapped class ``User`` is constructed. -The base class refers to a -:class:`_orm.registry` object that maintains a collection of related mapped -classes. The :func:`_orm.declarative_base` function is in fact shorthand -for first creating the registry with the :class:`_orm.registry` -constructor, and then generating a base class using the +The base class refers to a :class:`_orm.registry` object that maintains a +collection of related mapped classes. The :func:`_orm.declarative_base` +function is in fact shorthand for first creating the registry with the +:class:`_orm.registry` constructor, and then generating a base class using the :meth:`_orm.registry.generate_base` method:: from sqlalchemy.orm import registry @@ -87,442 +96,34 @@ constructor, and then generating a base class using the mapper_registry = registry() Base = mapper_registry.generate_base() -The :class:`_orm.registry` is used directly in order to access a variety -of mapping styles to suit different use cases: +The major Declarative mapping styles are further detailed in the following +sections: + +* :ref:`orm_declarative_generated_base_class` - declarative mapping using a + base class generated by the :class:`_orm.registry` object. * :ref:`orm_declarative_decorator` - declarative mapping using a decorator, rather than a base class. -* :ref:`orm_imperative_mapping` - imperative mapping, specifying all mapping - arguments directly rather than scanning a class. - -Documentation for Declarative mapping continues at :ref:`declarative_config_toplevel`. - -.. seealso:: - - :ref:`declarative_config_toplevel` - -.. _orm_explicit_declarative_base: - -Creating an Explicit Base Non-Dynamically (for use with mypy, similar) ----------------------------------------------------------------------- - -SQLAlchemy includes a :ref:`Mypy plugin ` that automatically -accommodates for the dynamically generated ``Base`` class -delivered by SQLAlchemy functions like :func:`_orm.declarative_base`. -This plugin works along with a new set of typing stubs published at -`sqlalchemy2-stubs `_. 
- -When this plugin is not in use, or when using other :pep:`484` tools which -may not know how to interpret this class, the declarative base class may -be produced in a fully explicit fashion using the -:class:`_orm.DeclarativeMeta` directly as follows:: - - from sqlalchemy.orm import registry - from sqlalchemy.orm.decl_api import DeclarativeMeta - - mapper_registry = registry() - - class Base(metaclass=DeclarativeMeta): - __abstract__ = True - - # these are supplied by the sqlalchemy2-stubs, so may be omitted - # when they are installed - registry = mapper_registry - metadata = mapper_registry.metadata - - __init__ = mapper_registry.constructor - -The above ``Base`` is equivalent to one created using the -:meth:`_orm.registry.generate_base` method and will be fully understood by -type analysis tools without the use of plugins. - -.. seealso:: - - :ref:`mypy_toplevel` - background on the Mypy plugin which applies the - above structure automatically when running Mypy. - - -.. _orm_declarative_decorator: - -Declarative Mapping using a Decorator (no declarative base) ------------------------------------------------------------- - -As an alternative to using the "declarative base" class is to apply -declarative mapping to a class explicitly, using either an imperative technique -similar to that of a "classical" mapping, or more succinctly by using -a decorator. The :meth:`_orm.registry.mapped` function is a class decorator -that can be applied to any Python class with no hierarchy in place. The -Python class otherwise is configured in declarative style normally:: - - from sqlalchemy import Column, Integer, String, Text, ForeignKey - - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - @mapper_registry.mapped - class User: - __tablename__ = 'user' - - id = Column(Integer, primary_key=True) - name = Column(String) - - addresses = relationship("Address", back_populates="user") - - @mapper_registry.mapped - class Address: - __tablename__ = 'address' - - id = Column(Integer, primary_key=True) - user_id = Column(ForeignKey("user.id")) - email_address = Column(String) - - user = relationship("User", back_populates="addresses") - -Above, the same :class:`_orm.registry` that we'd use to generate a declarative -base class via its :meth:`_orm.registry.generate_base` method may also apply -a declarative-style mapping to a class without using a base. When using -the above style, the mapping of a particular class will **only** proceed -if the decorator is applied to that class directly. For inheritance -mappings, the decorator should be applied to each subclass:: - - from sqlalchemy.orm import registry - mapper_registry = registry() - - @mapper_registry.mapped - class Person: - __tablename__ = "person" - - person_id = Column(Integer, primary_key=True) - type = Column(String, nullable=False) - - __mapper_args__ = { - - "polymorphic_on": type, - "polymorphic_identity": "person" - } - - - @mapper_registry.mapped - class Employee(Person): - __tablename__ = "employee" - - person_id = Column(ForeignKey("person.person_id"), primary_key=True) - - __mapper_args__ = { - "polymorphic_identity": "employee" - } - -Both the "declarative table" and "imperative table" styles of declarative -mapping may be used with the above mapping style. - -The decorator form of mapping is particularly useful when combining a -SQLAlchemy declarative mapping with other forms of class declaration, notably -the Python ``dataclasses`` module. See the next section. - -.. 
_orm_declarative_dataclasses: - -Declarative Mapping with Dataclasses and Attrs ----------------------------------------------- - -The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class -decorator to automatically generate boilerplate definitions of ``__init__()``, -``__eq__()``, ``__repr()__``, etc. methods. Another very popular library that does -the same, and much more, is attrs_. Both libraries make use of class -decorators in order to scan a class for attributes that define the class' -behavior, which are then used to generate methods, documentation, and annotations. - -The :meth:`_orm.registry.mapped` class decorator allows the declarative mapping -of a class to occur after the class has been fully constructed, allowing the -class to be processed by other class decorators first. The ``@dataclass`` -and ``@attr.s`` decorators may therefore be applied first before the -ORM mapping process proceeds via the :meth:`_orm.registry.mapped` decorator -or via the :meth:`_orm.registry.map_imperatively` method discussed in a -later section. - -Mapping with ``@dataclass`` or ``@attr.s`` may be used in a straightforward -way with :ref:`orm_imperative_table_configuration` style, where the -the :class:`_schema.Table`, which means that it is defined separately and -associated with the class via the ``__table__``. For dataclasses specifically, -:ref:`orm_declarative_table` is also supported. - -.. versionadded:: 1.4.0b2 Added support for full declarative mapping when using - dataclasses. - -When attributes are defined using ``dataclasses``, the ``@dataclass`` -decorator consumes them but leaves them in place on the class. -SQLAlchemy's mapping process, when it encounters an attribute that normally -is to be mapped to a :class:`_schema.Column`, checks explicitly if the -attribute is part of a Dataclasses setup, and if so will **replace** -the class-bound dataclass attribute with its usual mapped -properties. The ``__init__`` method created by ``@dataclass`` is left -intact. In contrast, the ``@attr.s`` decorator actually removes its -own class-bound attributes after the decorator runs, so that SQLAlchemy's -mapping process takes over these attributes without any issue. - -.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses, - where the :class:`_orm.Mapper` will now detect attributes that are specific - to the ``@dataclasses`` module and replace them at mapping time, rather - than skipping them as is the default behavior for any class attribute - that's not part of the mapping. - -.. 
_orm_declarative_dataclasses_imperative_table: - -Example One - Dataclasses with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -An example of a mapping using ``@dataclass`` using -:ref:`orm_imperative_table_configuration` is as follows:: - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - from typing import Optional - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import Table - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id: int = field(init=False) - name: Optional[str] = None - fullname: Optional[str] = None - nickname: Optional[str] = None - addresses: List[Address] = field(default_factory=list) - - __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } - } - - @mapper_registry.mapped - @dataclass - class Address: - __table__ = Table( - "address", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("user.id")), - Column("email_address", String(50)), - ) - id: int = field(init=False) - user_id: int = field(init=False) - email_address: Optional[str] = None - -In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` -attributes are defined as ``field(init=False)``. This means that parameters for -these won't be added to ``__init__()`` methods, but -:class:`.Session` will still be able to set them after getting their values -during flush from autoincrement or other default value generator. To -allow them to be specified in the constructor explicitly, they would instead -be given a default value of ``None``. - -For a :func:`_orm.relationship` to be declared separately, it needs to -be specified directly within the :paramref:`_orm.mapper.properties` -dictionary passed to the :func:`_orm.mapper`. An alternative to this -approach is in the next example. - -.. _orm_declarative_dataclasses_declarative_table: - -Example Two - Dataclasses with Declarative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The fully declarative approach requires that :class:`_schema.Column` objects -are declared as class attributes, which when using dataclasses would conflict -with the dataclass-level attributes. An approach to combine these together -is to make use of the ``metadata`` attribute on the ``dataclass.field`` -object, where SQLAlchemy-specific mapping information may be supplied. -Declarative supports extraction of these parameters when the class -specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also -provides a more succinct method of indicating the :func:`_orm.relationship` -association:: - - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - name: str = field(default=None, metadata={"sa": Column(String(50))}) - fullname: str = field(default=None, metadata={"sa": Column(String(50))}) - nickname: str = field(default=None, metadata={"sa": Column(String(12))}) - addresses: List[Address] = field( - default_factory=list, metadata={"sa": relationship("Address")} - ) - - - @mapper_registry.mapped - @dataclass - class Address: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - -.. _orm_declarative_dataclasses_mixin: - -Using Declarative Mixins with Dataclasses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes -are introduced. One requirement of declarative mixins is that certain -constructs that can't be easily duplicated must be given as callables, -using the :class:`_orm.declared_attr` decorator, such as in the -example at :ref:`orm_declarative_mixins_relationships`:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target") - -This form is supported within the Dataclasses ``field()`` object by using -a lambda to indicate the SQLAlchemy construct inside the ``field()``. -Using :func:`_orm.declared_attr` to surround the lambda is optional. -If we wanted to produce our ``User`` class above where the ORM fields -came from a mixin that is itself a dataclass, the form would be:: - - @dataclass - class UserMixin: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - - addresses: List[Address] = field( - default_factory=list, metadata={"sa": lambda: relationship("Address")} - ) - - @dataclass - class AddressMixin: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - - @mapper_registry.mapped - class User(UserMixin): - pass - - @mapper_registry.mapped - class Address(AddressMixin): - pass - -.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, - namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` - objects with foreign key declarations, to be used within "Dataclasses - with Declarative Table" style mappings. - -.. 
_orm_declarative_attrs_imperative_table: - -Example Three - attrs with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A mapping using ``@attr.s``, in conjunction with imperative table:: +Within the scope of a Declarative mapped class, there are also two varieties +of how the :class:`_schema.Table` metadata may be declared. These include: - import attr +* :ref:`orm_declarative_table` - individual :class:`_schema.Column` definitions + are combined with a table name and additional arguments, where the Declarative + mapping process will construct a :class:`_schema.Table` object to be mapped. - # other imports +* :ref:`orm_imperative_table_configuration` - Instead of specifying table name + and attributes separately, an explicitly constructed :class:`_schema.Table` object + is associated with a class that is otherwise mapped declaratively. This + style of mapping is a hybrid of "declarative" and "imperative" mapping. - from sqlalchemy.orm import registry - - mapper_registry = registry() - - - @mapper_registry.mapped - @attr.s - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id = attr.ib() - name = attr.ib() - fullname = attr.ib() - nickname = attr.ib() - addresses = attr.ib() - - # other classes... - -``@dataclass`` and attrs_ mappings may also be used with classical mappings, i.e. -with the :meth:`_orm.registry.map_imperatively` function. See the section -:ref:`orm_imperative_dataclasses` for a similar example. - -.. _dataclasses: https://docs.python.org/3/library/dataclasses.html -.. _attrs: https://pypi.org/project/attrs/ - -.. _orm_imperative_mapping: +Documentation for Declarative mapping continues at :ref:`declarative_config_toplevel`. .. _classical_mapping: +.. _orm_imperative_mapping: -Imperative (a.k.a. Classical) Mappings -====================================== +Imperative Mapping +------------------- An **imperative** or **classical** mapping refers to the configuration of a mapped class using the :meth:`_orm.registry.map_imperatively` method, @@ -548,34 +149,40 @@ the :meth:`_orm.registry.map_imperatively` method:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) + class User: pass - mapper_registry.map_imperatively(User, user_table) - + mapper_registry.map_imperatively(User, user_table) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. 
The example below illustrates a second :class:`_schema.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`:: - address = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)) - ) + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses' : relationship(Address, backref='user', order_by=address.c.id) - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id) + }, + ) mapper_registry.map_imperatively(Address, address) @@ -593,83 +200,10 @@ user-defined class, linked together with a :func:`.mapper`. When we talk about as well - it's still used, just behind the scenes. - - -.. _orm_imperative_dataclasses: - -Imperative Mapping with Dataclasses and Attrs ---------------------------------------------- - -As described in the section :ref:`orm_declarative_dataclasses`, the -``@dataclass`` decorator and the attrs_ library both work as class -decorators that are applied to a class first, before it is passed to -SQLAlchemy for mapping. Just like we can use the -:meth:`_orm.registry.mapped` decorator in order to apply declarative-style -mapping to the class, we can also pass it to the :meth:`_orm.registry.map_imperatively` -method so that we may pass all :class:`_schema.Table` and :class:`_orm.Mapper` -configuration imperatively to the function rather than having them defined -on the class itself as declarative class variables:: - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import MetaData - from sqlalchemy import String - from sqlalchemy import Table - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - @dataclass - class User: - id: int = field(init=False) - name: str = None - fullname: str = None - nickname: str = None - addresses: List[Address] = field(default_factory=list) - - @dataclass - class Address: - id: int = field(init=False) - user_id: int = field(init=False) - email_address: str = None - - metadata_obj = MetaData() - - user = Table( - 'user', - metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), - ) - - address = Table( - 'address', - metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), - ) - - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) - - mapper_registry.map_imperatively(Address, address) - .. 
_orm_mapper_configuration_overview: -Mapper Configuration Overview -============================= +Mapped Class Essential Components +================================== With all mapping forms, the mapping of the class can be configured in many ways by passing construction arguments that become @@ -751,27 +285,19 @@ to :meth:`_orm.registry.map_imperatively`, which will pass it along to the Other mapper configuration parameters ------------------------------------- -These flags are documented at :func:`_orm.mapper`. - When mapping with the :ref:`declarative ` mapping style, additional mapper configuration arguments are configured via the -``__mapper_args__`` class attribute, documented at -:ref:`orm_declarative_mapper_options` +``__mapper_args__`` class attribute. Examples of use are available +at :ref:`orm_declarative_mapper_options`. When mapping with the :ref:`imperative ` style, keyword arguments are passed to the to :meth:`_orm.registry.map_imperatively` method which passes them along to the :func:`_orm.mapper` function. +The full range of parameters accepted are documented at :class:`_orm.mapper`. -.. [1] When running under Python 2, a Python 2 "old style" class is the only - kind of class that isn't compatible. When running code on Python 2, - all classes must extend from the Python ``object`` class. Under - Python 3 this is always the case. -.. [2] There is a legacy feature known as a "non primary mapper", where - additional :class:`_orm.Mapper` objects may be associated with a class - that's already mapped, however they don't apply instrumentation - to the class. This feature is deprecated as of SQLAlchemy 1.3. +.. _orm_mapped_class_behavior: Mapped Class Behavior @@ -795,8 +321,9 @@ all the attributes that are named. E.g.:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(...) name = Column(...) @@ -805,7 +332,7 @@ all the attributes that are named. E.g.:: An object of type ``User`` above will have a constructor which allows ``User`` objects to be created as:: - u1 = User(name='some name', fullname='some fullname') + u1 = User(name="some name", fullname="some fullname") The above constructor may be customized by passing a Python callable to the :paramref:`_orm.registry.constructor` parameter which provides the @@ -818,26 +345,30 @@ The constructor also applies to imperative mappings:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) + class User: pass + mapper_registry.map_imperatively(User, user_table) -The above class, mapped imperatively as described at :ref:`classical_mapping`, +The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`, will also feature the default constructor associated with the :class:`_orm.registry`. .. versionadded:: 1.4 classical mappings now support a standard configuration-level constructor when they are mapped via the :meth:`_orm.registry.map_imperatively` method. -Runtime Introspection of Mapped classes and Mappers ---------------------------------------------------- +.. 
_orm_mapper_inspection: + +Runtime Introspection of Mapped classes, Instances and Mappers +--------------------------------------------------------------- A class that is mapped using :class:`_orm.registry` will also feature a few attributes that are common to all mappings: @@ -857,12 +388,12 @@ attributes that are common to all mappings: .. * The ``__table__`` attribute will refer to the :class:`_schema.Table`, or - more generically to the :class:`_schema.FromClause` object, to which the + more generically to the :class:`.FromClause` object, to which the class is mapped:: table = User.__table__ - This :class:`_schema.FromClause` is also what's returned when using the + This :class:`.FromClause` is also what's returned when using the :attr:`_orm.Mapper.local_table` attribute of the :class:`_orm.Mapper`:: table = inspect(User).local_table @@ -877,8 +408,10 @@ attributes that are common to all mappings: .. -Mapper Inspection Features --------------------------- +.. _orm_mapper_inspection_mapper: + +Inspection of Mapper objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As illustrated in the previous section, the :class:`_orm.Mapper` object is available from any mapped class, regardless of method, using the @@ -921,8 +454,89 @@ As well as :attr:`_orm.Mapper.column_attrs`:: .. seealso:: - :ref:`core_inspection_toplevel` + :class:`.Mapper` + +.. _orm_mapper_inspection_instancestate: + +Inspection of Mapped Instances +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :func:`_sa.inspect` function also provides information about instances +of a mapped class. When applied to an instance of a mapped class, rather +than the class itself, the object returned is known as :class:`.InstanceState`, +which will provide links to not only the :class:`.Mapper` in use by the +class, but also a detailed interface that provides information on the state +of individual attributes within the instance including their current value +and how this relates to what their database-loaded value is. + +Given an instance of the ``User`` class loaded from the database:: + + >>> u1 = session.scalars(select(User)).first() + +The :func:`_sa.inspect` function will return to us an :class:`.InstanceState` +object:: + + >>> insp = inspect(u1) + >>> insp + + +With this object we can see elements such as the :class:`.Mapper`:: + + >>> insp.mapper + + +The :class:`_orm.Session` to which the object is :term:`attached`, if any:: + + >>> insp.session + + +Information about the current :ref:`persistence state ` +for the object:: + + >>> insp.persistent + True + >>> insp.pending + False - :class:`_orm.Mapper` +Attribute state information such as attributes that have not been loaded or +:term:`lazy loaded` (assume ``addresses`` refers to a :func:`_orm.relationship` +on the mapped class to a related class):: + + >>> insp.unloaded + {'addresses'} + +Information regarding the current in-Python status of attributes, such as +attributes that have not been modified since the last flush:: + + >>> insp.unmodified + {'nickname', 'name', 'fullname', 'id'} + +as well as specific history on modifications to attributes since the last flush:: + + >>> insp.attrs.nickname.value + 'nickname' + >>> u1.nickname = "new nickname" + >>> insp.attrs.nickname.history + History(added=['new nickname'], unchanged=(), deleted=['nickname']) + +.. seealso:: :class:`.InstanceState` + + :attr:`.InstanceState.attrs` + + :class:`.AttributeState` + + +.. _dataclasses: https://docs.python.org/3/library/dataclasses.html + +.. 
[1] When running under Python 2, a Python 2 "old style" class is the only + kind of class that isn't compatible. When running code on Python 2, + all classes must extend from the Python ``object`` class. Under + Python 3 this is always the case. + +.. [2] There is a legacy feature known as a "non primary mapper", where + additional :class:`_orm.Mapper` objects may be associated with a class + that's already mapped, however they don't apply instrumentation + to the class. This feature is deprecated as of SQLAlchemy 1.3. + diff --git a/doc/build/orm/nonstandard_mappings.rst b/doc/build/orm/nonstandard_mappings.rst index bf6b0f247d2..4bd2546e096 100644 --- a/doc/build/orm/nonstandard_mappings.rst +++ b/doc/build/orm/nonstandard_mappings.rst @@ -15,24 +15,27 @@ function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`_schema.Table`:: - from sqlalchemy import Table, Column, Integer, \ - String, MetaData, join, ForeignKey + from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata_obj = MetaData() # define two Table objects - user_table = Table('user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String), - ) - - address_table = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String) - ) + user_table = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String), + ) + + address_table = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String), + ) # define a join between them. 
This # takes place across the user.id and address.user_id @@ -104,9 +107,10 @@ may be used:: from sqlalchemy import event - @event.listens_for(PtoQ, 'before_update') + + @event.listens_for(PtoQ, "before_update") def receive_before_update(mapper, connection, target): - if target.some_required_attr_on_q is None: + if target.some_required_attr_on_q is None: connection.execute(q_table.insert(), {"id": target.id}) where above, a row is INSERTed into the ``q_table`` table by creating an @@ -128,15 +132,22 @@ includes a join to a subquery:: from sqlalchemy import select, func - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 38f289058b6..e6d4941f839 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -21,6 +21,7 @@ an attribute:: value = Column(Integer) + someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one @@ -89,10 +90,10 @@ This is most easily accomplished using the session = Session() # execute a string statement - result = session.execute("select * from table where id=:id", {'id':7}) + result = session.execute("select * from table where id=:id", {"id": 7}) # execute a SQL expression construct - result = session.execute(select(mytable).where(mytable.c.id==7)) + result = session.execute(select(mytable).where(mytable.c.id == 7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the @@ -118,13 +119,12 @@ proper context for the desired engine:: # need to specify mapper or class when executing result = session.execute( text("select * from table where id=:id"), - {'id':7}, - bind_arguments={'mapper': MyMappedClass} + {"id": 7}, + bind_arguments={"mapper": MyMappedClass}, ) result = session.execute( - select(mytable).where(mytable.c.id==7), - bind_arguments={'mapper': MyMappedClass} + select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass} ) connection = session.connection(MyMappedClass) @@ -144,14 +144,15 @@ The ORM considers any attribute that was never set on an object as a "default" case; the attribute will be omitted from the INSERT statement:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True) + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the NULL value + # itself will persist this as the NULL value Omitting a column from the INSERT means that the column will have the NULL value set, *unless* the column has a default set up, @@ -161,29 +162,31 @@ behavior of SQLAlchemy's insert behavior with both client-side and server-side defaults:: class MyObject(Base): - 
__tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the value 'default' + # itself will persist this as the value 'default' However, in the ORM, even if one assigns the Python value ``None`` explicitly to the object, this is treated the **same** as though the value were never assigned:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM still omits it from the statement and the - # database will still persist this as the value 'default' + # the ORM still omits it from the statement and the + # database will still persist this as the value 'default' The above operation will persist into the ``data`` column the server default value of ``"default"`` and not SQL NULL, even though ``None`` @@ -200,9 +203,9 @@ on a per-instance level, we assign the attribute using the obj = MyObject(id=1, data=null()) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set as null(); - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value The :obj:`_expression.null` SQL construct always translates into the SQL NULL value being directly present in the target INSERT statement. @@ -215,18 +218,21 @@ a type where the ORM should treat the value ``None`` the same as any other value and pass it through, rather than omitting it as a "missing" value:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column( - String(50).evaluates_none(), # indicate that None should always be passed - nullable=True, server_default="default") + String(50).evaluates_none(), # indicate that None should always be passed + nullable=True, + server_default="default", + ) + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value .. topic:: Evaluating None @@ -280,12 +286,13 @@ Case 1: non primary key, RETURNING or equivalent is supported In this case, columns should be marked as :class:`.FetchedValue` or with an explicit :paramref:`_schema.Column.server_default`. 
The -:paramref:`.orm.mapper.eager_defaults` flag may be used to indicate that these +:paramref:`_orm.mapper.eager_defaults` parameter +may be used to indicate that these columns should be fetched immediately upon INSERT and sometimes UPDATE:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -306,6 +313,7 @@ above table will look like: INSERT INTO my_table DEFAULT VALUES RETURNING my_table.id, my_table.timestamp, my_table.special_identifier + Case 2: non primary key, RETURNING or equivalent is not supported or not needed -------------------------------------------------------------------------------- @@ -313,7 +321,7 @@ This case is the same as case 1 above, except we don't specify :paramref:`.orm.mapper.eager_defaults`:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -364,7 +372,7 @@ For an explicit sequence as we use with Oracle, this just means we are using the :class:`.Sequence` construct:: class MyOracleModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, Sequence("my_sequence"), primary_key=True) data = Column(String(50)) @@ -383,7 +391,7 @@ by a trigger, we use :class:`.FetchedValue`. Below is a model that uses a SQL Server TIMESTAMP column as the primary key, which generates values automatically:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True) @@ -417,7 +425,7 @@ Using the example of a :class:`.DateTime` column for MySQL, we add an explicit pre-execute-supported default using the "NOW()" SQL function:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(DateTime(), default=func.now(), primary_key=True) @@ -443,13 +451,11 @@ into the column:: from sqlalchemy import cast, Binary + class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" - timestamp = Column( - TIMESTAMP(), - default=cast(func.now(), Binary), - primary_key=True) + timestamp = Column(TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True) Above, in addition to selecting the "NOW()" function, we additionally make use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that @@ -476,12 +482,13 @@ We therefore must also specify that we'd like to coerce the return value to by passing this as the ``type_`` parameter:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column( DateTime, - default=func.datetime('now', 'localtime', type_=DateTime), - primary_key=True) + default=func.datetime("now", "localtime", type_=DateTime), + primary_key=True, + ) The above mapping upon INSERT will look like: @@ -497,9 +504,67 @@ The above mapping upon INSERT will look like: :ref:`metadata_defaults_toplevel` +Notes on eagerly fetching client invoked SQL expressions used for INSERT or UPDATE +----------------------------------------------------------------------------------- + +The preceding examples indicate the use of :paramref:`_schema.Column.server_default` +to create tables that include default-generation functions within their +DDL. 
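+As an illustration of the distinction, the following is a minimal sketch,
+using hypothetical names not taken from the surrounding examples, of such a
+DDL-level default: the SQL function is rendered directly into the
+``CREATE TABLE`` statement, with the exact SQL emitted being backend-dependent::
+
+    from sqlalchemy import Column, DateTime, Integer, MetaData, Table, create_engine, func
+
+    metadata_obj = MetaData()
+
+    # illustrative table and column names only
+    my_table = Table(
+        "my_table",
+        metadata_obj,
+        Column("id", Integer, primary_key=True),
+        # the function becomes part of the table's DDL, not an ORM-level default
+        Column("timestamp", DateTime(), server_default=func.now()),
+    )
+
+    # emitting the schema renders the default inside CREATE TABLE; on SQLite
+    # this comes out roughly as: timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+    metadata_obj.create_all(create_engine("sqlite://"))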
+ +SQLAlchemy also supports non-DDL server side defaults, as documented at +:ref:`defaults_client_invoked_sql`; these "client invoked SQL expressions" +are set up using the :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate` parameters. + +These SQL expressions currently are subject to the same limitations within the +ORM as occurs for true server-side defaults; they won't be eagerly fetched with +RETURNING when using :paramref:`_orm.mapper.eager_defaults` unless the +:class:`.FetchedValue` directive is associated with the +:class:`_schema.Column`, even though these expressions are not DDL server +defaults and are actively rendered by SQLAlchemy itself. This limitation may be +addressed in future SQLAlchemy releases. + +The :class:`.FetchedValue` construct can be applied to +:paramref:`_schema.Column.server_default` or +:paramref:`_schema.Column.server_onupdate` at the same time that a SQL +expression is used with :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate`, such as in the example below where the +``func.now()`` construct is used as a client-invoked SQL expression +for :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate`. In order for the behavior of +:paramref:`_orm.mapper.eager_defaults` to include that it fetches these +values using RETURNING when available, :paramref:`_schema.Column.server_default` and +:paramref:`_schema.Column.server_onupdate` are used with :class:`.FetchedValue` +to ensure that the fetch occurs:: + + class MyModel(Base): + __tablename__ = "my_table" + + id = Column(Integer, primary_key=True) + + created = Column(DateTime(), default=func.now(), server_default=FetchedValue()) + updated = Column( + DateTime(), + onupdate=func.now(), + server_default=FetchedValue(), + server_onupdate=FetchedValue(), + ) + + __mapper_args__ = {"eager_defaults": True} + +With a mapping similar to the above, the SQL rendered by the ORM for +INSERT and UPDATE will include ``created`` and ``updated`` in the RETURNING +clause:: + + INSERT INTO my_table (created) VALUES (now()) RETURNING my_table.id, my_table.created, my_table.updated + + UPDATE my_table SET updated=now() WHERE my_table.id = %(my_table_id)s RETURNING my_table.updated + + .. _orm_dml_returning_objects: + Using INSERT, UPDATE and ON CONFLICT (i.e. 
upsert) to return ORM Objects ========================================================================== @@ -518,8 +583,12 @@ corresponding to all the rows which were matched by the criteria:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User.id) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User.id) + ) for row in session.execute(stmt): print(f"id: {row.id}") @@ -533,8 +602,12 @@ achieve this, we may combine the :class:`_dml.Update` construct which returns statement in an ORM context using the :meth:`_sql.Select.from_statement` method:: - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User) + ) orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) @@ -583,11 +656,7 @@ database, while simultaneously producing those objects as ORM instances:: index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname) ).returning(User) - orm_stmt = ( - select(User) - .from_statement(stmt) - .execution_options(populate_existing=True) - ) + orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) for user in session.execute( orm_stmt, ).scalars(): @@ -663,13 +732,13 @@ The dictionary is consulted whenever the :class:`.Session` needs to emit SQL on behalf of a particular kind of mapped class in order to locate the appropriate source of database connectivity:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker() # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -764,26 +833,25 @@ a custom :class:`.Session` which delivers the following rules: :: engines = { - 'leader':create_engine("sqlite:///leader.db"), - 'other':create_engine("sqlite:///other.db"), - 'follower1':create_engine("sqlite:///follower1.db"), - 'follower2':create_engine("sqlite:///follower2.db"), + "leader": create_engine("sqlite:///leader.db"), + "other": create_engine("sqlite:///other.db"), + "follower1": create_engine("sqlite:///follower1.db"), + "follower2": create_engine("sqlite:///follower2.db"), } from sqlalchemy.sql import Update, Delete from sqlalchemy.orm import Session, sessionmaker import random + class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): - return engines['other'] + return engines["other"] elif self._flushing or isinstance(clause, (Update, Delete)): - return engines['leader'] + return engines["leader"] else: - return engines[ - random.choice(['follower1','follower2']) - ] + return engines[random.choice(["follower1", "follower2"])] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: @@ -813,18 +881,27 @@ ORM extension. An example of use is at: :ref:`examples_sharding`. Bulk Operations =============== -.. deepalchemy:: Bulk operations are essentially lower-functionality versions +.. 
tip:: + + Bulk operations are essentially lower-functionality versions of the Unit of Work's facilities for emitting INSERT and UPDATE statements on primary key targeted rows. These routines were added to suit some cases where many rows being inserted or updated could be run into the - database without as much of the usual unit of work overhead, in that - most unit of work features are **disabled**. + database without as much of the usual unit of work overhead, by bypassing + a large portion of the functionality that the unit of work provides. - There is **usually no need to use these routines, and they are not easy - to use as there are many missing behaviors that are usually expected when - using ORM objects**; for efficient - bulk inserts, it's better to use the Core :class:`_sql.Insert` construct - directly. Please read all caveats at :ref:`bulk_operations_caveats`. + SQLAlchemy 2.0 features new and improved bulk techniques with clarified + behavior, better integration with ORM objects as well as INSERT/UPDATE/DELETE + statements, and new capabilities. They additionally repair some long lived + performance issues that plagued both regular unit of work and "bulk" routines, + most notably in the area of INSERT operations. + + For these reasons, the previous bulk methods move into legacy status, which + is revised from the original plan that "bulk" features were to be deprecated + entirely. + + When using the legacy 1.4 versions of these features, please read all + caveats at :ref:`bulk_operations_caveats`, as they are not always obvious. .. note:: Bulk INSERT and UPDATE should not be confused with the more common feature known as :ref:`orm_expression_update_delete`. This @@ -904,19 +981,15 @@ The methods each work in the context of the :class:`.Session` object's transaction, like any other:: s = Session() - objects = [ - User(name="u1"), - User(name="u2"), - User(name="u3") - ] + objects = [User(name="u1"), User(name="u2"), User(name="u3")] s.bulk_save_objects(objects) + s.commit() For :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`, dictionaries are passed:: - s.bulk_insert_mappings(User, - [dict(name="u1"), dict(name="u2"), dict(name="u3")] - ) + s.bulk_insert_mappings(User, [dict(name="u1"), dict(name="u2"), dict(name="u3")]) + s.commit() .. seealso:: @@ -933,7 +1006,7 @@ Comparison to Core Insert / Update Constructs The bulk methods offer performance that under particular circumstances can be close to that of using the core :class:`_expression.Insert` and :class:`_expression.Update` constructs in an "executemany" context (for a description -of "executemany", see :ref:`execute_multiple` in the Core tutorial). +of "executemany", see :ref:`tutorial_multiple_parameters` in the Core tutorial). In order to achieve this, the :paramref:`.Session.bulk_insert_mappings.return_defaults` flag should be disabled so that rows can be batched together. The example diff --git a/doc/build/orm/query.rst b/doc/build/orm/query.rst index d7711671cf1..29df9f31d12 100644 --- a/doc/build/orm/query.rst +++ b/doc/build/orm/query.rst @@ -20,14 +20,7 @@ Following is the full interface for the :class:`_query.Query` object. .. autoclass:: sqlalchemy.orm.Query :members: - - .. automethod:: sqlalchemy.orm.Query.prefix_with - - .. automethod:: sqlalchemy.orm.Query.suffix_with - - .. automethod:: sqlalchemy.orm.Query.with_hint - - .. 
automethod:: sqlalchemy.orm.Query.with_statement_hint + :inherited-members: ORM-Specific Query Constructs ============================= @@ -43,6 +36,7 @@ ORM-Specific Query Constructs .. autoclass:: sqlalchemy.orm.Load :members: + :noindex: .. autofunction:: sqlalchemy.orm.with_loader_criteria diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index a10af53ba14..3da22ebd264 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -23,37 +23,37 @@ upon the content at :ref:`tutorial_selecting_data`. >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> orders_table = Table( ... "user_order", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> order_items_table = Table( ... "order_items", ... metadata_obj, ... Column("order_id", ForeignKey("user_order.id"), primary_key=True), - ... Column("item_id", ForeignKey("item.id"), primary_key=True) + ... Column("item_id", ForeignKey("item.id"), primary_key=True), ... ) >>> items_table = Table( ... "item", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('description', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("description", String), ... ) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -68,7 +68,7 @@ upon the content at :ref:`tutorial_selecting_data`. ... orders = relationship("Order") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __table__ = address_table @@ -88,22 +88,34 @@ upon the content at :ref:`tutorial_selecting_data`. >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... 
name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... ) >>> session.commit() BEGIN ... >>> conn.begin() @@ -117,7 +129,7 @@ SELECT statements are produced by the :func:`_sql.select` function which returns a :class:`_sql.Select` object:: >>> from sqlalchemy import select - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") To invoke a :class:`_sql.Select` with the ORM, it is passed to :meth:`_orm.Session.execute`:: @@ -184,7 +196,7 @@ same time:: >>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id) {sql}>>> for row in session.execute(stmt): - ... print(f"{row.User.name} {row.Address.email_address}") + ... print(f"{row.User.name} {row.Address.email_address}") SELECT user_account.id, user_account.name, user_account.fullname, address.id AS id_1, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -207,9 +219,9 @@ when passed to :func:`_sql.select`. They may be used in the same way as table columns are used:: {sql}>>> result = session.execute( - ... select(User.name, Address.email_address). - ... join(User.addresses). - ... order_by(User.id, Address.id) + ... select(User.name, Address.email_address) + ... .join(User.addresses) + ... .order_by(User.id, Address.id) ... ) SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -238,20 +250,18 @@ allows sets of column expressions to be grouped in result rows:: >>> from sqlalchemy.orm import Bundle >>> stmt = select( - ... Bundle("user", User.name, User.fullname), - ... Bundle("email", Address.email_address) + ... Bundle("user", User.name, User.fullname), Bundle("email", Address.email_address) ... ).join_from(User, Address) {sql}>>> for row in session.execute(stmt): - ... print(f"{row.user.name} {row.email.email_address}") + ... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}") SELECT user_account.name, user_account.fullname, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id [...] (){stop} - spongebob spongebob@sqlalchemy.org - sandy sandy@sqlalchemy.org - sandy squirrel@squirrelpower.org - patrick pat999@aol.com - squidward stentcl@sqlalchemy.org - + spongebob Spongebob Squarepants spongebob@sqlalchemy.org + sandy Sandy Cheeks sandy@sqlalchemy.org + sandy Sandy Cheeks squirrel@squirrelpower.org + patrick Patrick Star pat999@aol.com + squidward Squidward Tentacles stentcl@sqlalchemy.org The :class:`_orm.Bundle` is potentially useful for creating lightweight views as well as custom column groupings such as mappings. @@ -295,6 +305,7 @@ The :class:`_orm.aliased` construct is also central to making use of subqueries with the ORM; the sections :ref:`orm_queryguide_subqueries` and :ref:`orm_queryguide_join_subqueries` discusses this further. + .. 
_orm_queryguide_selecting_text: Getting ORM Results from Textual and Core Statements @@ -381,7 +392,7 @@ Selecting Entities from Subqueries ----------------------------------- The :func:`_orm.aliased` construct discussed in the previous section -can be used with any :class:`_sql.Subuqery` construct that comes from a +can be used with any :class:`_sql.Subquery` construct that comes from a method such as :meth:`_sql.Select.subquery` to link ORM entities to the columns returned by that subquery; there must be a **column correspondence** relationship between the columns delivered by the subquery and the columns @@ -429,8 +440,7 @@ is used:: >>> from sqlalchemy import union_all >>> u = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).order_by(User.id) >>> stmt = select(User).from_statement(u) >>> for user_obj in session.execute(stmt).scalars(): @@ -455,8 +465,7 @@ entity in a :func:`_sql.select` construct, including that we can add filtering and order by criteria based on its exported columns:: >>> subq = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).subquery() >>> user_alias = aliased(User, subq) >>> stmt = select(user_alias).order_by(user_alias.id) @@ -477,7 +486,6 @@ and order by criteria based on its exported columns:: :ref:`tutorial_orm_union` - in the :ref:`unified_tutorial` - .. _orm_queryguide_joins: Joins @@ -532,11 +540,7 @@ a JOIN first from ``User`` to ``Order``, and a second from ``Order`` to relationship, it results in two separate JOIN elements, for a total of three JOIN elements in the resulting SQL:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -560,12 +564,7 @@ as potential points to continue joining FROM. We can continue to add other elements to join FROM the ``User`` entity above, for example adding on the ``User.addresses`` relationship to our chain of joins:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items). - ... join(User.addresses) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -611,7 +610,7 @@ The third calling form allows both the target entity as well as the ON clause to be passed explicitly. A example that includes a SQL expression as the ON clause is as follows:: - >>> stmt = select(User).join(Address, User.id==Address.user_id) + >>> stmt = select(User).join(Address, User.id == Address.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address ON user_account.id = address.user_id @@ -634,11 +633,11 @@ below:: >>> a1 = aliased(Address) >>> a2 = aliased(Address) >>> stmt = ( - ... select(User). - ... join(a1, User.addresses). - ... join(a2, User.addresses). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(a1, User.addresses) + ... .join(a2, User.addresses) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... 
) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -654,11 +653,11 @@ substituted with an aliased entity by using the this method would be:: >>> stmt = ( - ... select(User). - ... join(User.addresses.of_type(a1)). - ... join(User.addresses.of_type(a2)). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(User.addresses.of_type(a1)) + ... .join(User.addresses.of_type(a2)) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -682,10 +681,7 @@ with the default criteria using AND. Below, the ON criteria between by ``AND``, the first one being the natural join along the foreign key, and the second being a custom limiting criteria:: - >>> stmt = ( - ... select(User). - ... join(User.addresses.and_(Address.email_address != 'foo@bar.com')) - ... ) + >>> stmt = select(User).join(User.addresses.and_(Address.email_address != "foo@bar.com")) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -703,7 +699,7 @@ Joining to Subqueries ^^^^^^^^^^^^^^^^^^^^^^^ The target of a join may be any "selectable" entity which usefully includes -subuqeries. When using the ORM, it is typical +subqueries. When using the ORM, it is typical that these targets are stated in terms of an :func:`_orm.aliased` construct, but this is not strictly required particularly if the joined entity is not being returned in the results. For example, to join from the @@ -712,11 +708,7 @@ is represented as a row limited subquery, we first construct a :class:`_sql.Subq object using :meth:`_sql.Select.subquery`, which may then be used as the target of the :meth:`_sql.Select.join` method:: - >>> subq = ( - ... select(Address). - ... where(Address.email_address == 'pat999@aol.com'). - ... subquery() - ... ) + >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery() >>> stmt = select(User).join(subq, User.id == subq.c.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -755,14 +747,14 @@ will remain unique within the statement, while the entities that are linked to it using :class:`_orm.aliased` refer to distinct sets of columns:: >>> user_address_subq = ( - ... select(User.id, User.name, Address.id, Address.email_address). - ... join_from(User, Address). - ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])). - ... subquery() + ... select(User.id, User.name, Address.id, Address.email_address) + ... .join_from(User, Address) + ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"])) + ... .subquery() ... ) >>> user_alias = aliased(User, user_address_subq, name="user") >>> address_alias = aliased(Address, user_address_subq, name="address") - >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy') + >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy") >>> for row in session.execute(stmt): ... 
print(f"{row.user} {row.address}") {opensql}SELECT anon_1.id, anon_1.name, anon_1.id_1, anon_1.email_address @@ -783,7 +775,7 @@ In cases where the left side of the current state of :class:`_sql.Select` is not in line with what we want to join from, the :meth:`_sql.Select.join_from` method may be used:: - >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -793,7 +785,7 @@ The :meth:`_sql.Select.join_from` method accepts two or three arguments, either in the form ``, ``, or ``, , []``:: - >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -804,7 +796,7 @@ can be used subsequent, the :meth:`_sql.Select.select_from` method may also be used:: - >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -821,7 +813,7 @@ be used:: such a :class:`_sql.Join` object. Therefore we can see the contents of :meth:`_sql.Select.select_from` being overridden in a case like this:: - >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM address JOIN user_account ON user_account.id = address.user_id @@ -838,8 +830,10 @@ be used:: >>> >>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id) >>> stmt = ( - ... select(address_table).select_from(user_table).select_from(j). - ... where(user_table.c.name == 'sandy') + ... select(address_table) + ... .select_from(user_table) + ... .select_from(j) + ... .where(user_table.c.name == "sandy") ... ) >>> print(stmt) SELECT address.id, address.user_id, address.email_address @@ -999,16 +993,97 @@ The ``autoflush`` execution option is equvialent to the .. _orm_queryguide_yield_per: -Yield Per -^^^^^^^^^ +Fetching Large Result Sets with Yield Per +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``yield_per`` execution option is an integer value which will cause the -:class:`_engine.Result` to yield only a fixed count of rows at a time. It is -often useful to use with a result partitioning method such as -:meth:`_engine.Result.partitions`, e.g.:: +:class:`_engine.Result` to buffer only limited number of rows and/or ORM +objects at a time, before making data available to the client. + +Normally, the ORM will construct ORM objects for **all** rows up front, +assembling them into a single buffer, before passing this buffer to +the :class:`_engine.Result` object as a source of rows to be returned. 
+The rationale for this behavior is to allow correct behavior +for features such as joined eager loading, uniquifying of results, and the +general case of result handling logic that relies upon the identity map +maintaining a consistent state for every object in a result set as it is +fetched. + +The purpose of the ``yield_per`` option is to change this behavior so that the +ORM result set is optimized for iteration through very large result sets (> 10K +rows), where the user has determined that the above patterns don't apply. When +``yield_per`` is used, the ORM will instead batch ORM results into +sub-collections and yield rows from each sub-collection individually as the +:class:`_engine.Result` object is iterated, so that the Python interpreter +doesn't need to declare very large areas of memory which is both time consuming +and leads to excessive memory use. The option affects both the way the database +cursor is used as well as how the ORM constructs rows and objects to be +passed to the :class:`_engine.Result`. + +.. tip:: + + From the above, it follows that the :class:`_engine.Result` must be + consumed in an iterable fashion, that is, using iteration such as + ``for row in result`` or using partial row methods such as + :meth:`_engine.Result.fetchmany` or :meth:`_engine.Result.partitions`. + Calling :meth:`_engine.Result.all` will defeat the purpose of using + ``yield_per``. + +Using ``yield_per`` is equivalent to making use +of both the :paramref:`_engine.Connection.execution_options.stream_results` +execution option, which selects for server side cursors to be used +by the backend if supported, and the :meth:`_engine.Result.yield_per` method +on the returned :class:`_engine.Result` object, +which establishes a fixed size of rows to be fetched as well as a +corresponding limit to how many ORM objects will be constructed at once. + +.. tip:: + + ``yield_per`` is now available as a Core execution option as well, + described in detail at :ref:`engine_stream_results`. This section details + the use of ``yield_per`` as an execution option with an ORM + :class:`_orm.Session`. The option behaves as similarly as possible + in both contexts. + +``yield_per`` when used with the ORM is typically established either +via the :meth:`.Executable.execution_options` method on the given statement +or by passing it to the :paramref:`_orm.Session.execute.execution_options` +parameter of :meth:`_orm.Session.execute` or other similar :class:`_orm.Session` +method. In the example below its invoked upon a statement:: >>> stmt = select(User).execution_options(yield_per=10) - {sql}>>> for partition in session.execute(stmt).partitions(10): + {sql}>>> for row in session.execute(stmt): + ... print(row) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] (){stop} + (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) + ... + +The above code is mostly equivalent as making use of the +:paramref:`_engine.Connection.execution_options.stream_results` execution +option, setting the :paramref:`_engine.Connection.execution_options.max_row_buffer` +to the given integer size, and then using the :meth:`_engine.Result.yield_per` +method on the :class:`_engine.Result` returned by the +:class:`_orm.Session`, as in the following example:: + + # equivalent code + >>> stmt = select(User).execution_options(stream_results=True, max_row_buffer=10) + {sql}>>> for row in session.execute(stmt).yield_per(10): + ... 
print(row) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] (){stop} + (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) + ... + +``yield_per`` is also commonly used in combination with the +:meth:`_engine.Result.partitions` method, that will iterate rows in grouped +partitions. The size of each partition defaults to the integer value passed to +``yield_per``, as in the below example:: + + >>> stmt = select(User).execution_options(yield_per=10) + {sql}>>> for partition in session.execute(stmt).partitions(): ... for row in partition: ... print(row) SELECT user_account.id, user_account.name, user_account.fullname @@ -1017,24 +1092,12 @@ often useful to use with a result partitioning method such as (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) ... -The purpose of this method is when fetching very large result sets -(> 10K rows), to batch results in sub-collections and yield them -out partially, so that the Python interpreter doesn't need to declare -very large areas of memory which is both time consuming and leads -to excessive memory use. The performance from fetching hundreds of -thousands of rows can often double when a suitable yield-per setting -(e.g. approximately 1000) is used, even with DBAPIs that buffer -rows (which are most). - -When ``yield_per`` is used, the -:paramref:`_engine.Connection.execution_options.stream_results` option is also -set for the Core execution, so that a streaming / server side cursor will be -used if the backend supports it [1]_ - -The ``yield_per`` execution option **is not compatible with subqueryload eager -loading or joinedload eager loading when using collections**. It is -potentially compatible with selectinload eager loading, **provided the database -driver supports multiple, independent cursors** [2]_ . +The ``yield_per`` execution option **is not compatible** with +:ref:`"subquery" eager loading ` loading or +:ref:`"joined" eager loading ` when using collections. It +is potentially compatible with :ref:`"select in" eager loading +` , provided the database driver supports multiple, +independent cursors. Additionally, the ``yield_per`` execution option is not compatible with the :meth:`_engine.Result.unique` method; as this method relies upon @@ -1047,20 +1110,10 @@ large number of rows. :meth:`_engine.Result.unique` filter, at the same time as the ``yield_per`` execution option is used. -The ``yield_per`` execution option is equvialent to the -:meth:`_orm.Query.yield_per` method in :term:`1.x style` ORM queries. - -.. [1] currently known are - :mod:`_postgresql.psycopg2`, - :mod:`_mysql.mysqldb` and - :mod:`_mysql.pymysql`. Other backends will pre buffer - all rows. The memory use of raw database rows is much less than that of an - ORM-mapped object, but should still be taken into consideration when - benchmarking. +When using the legacy :class:`_orm.Query` object with +:term:`1.x style` ORM use, the :meth:`_orm.Query.yield_per` method +will have the same result as that of the ``yield_per`` execution option. -.. [2] the :mod:`_postgresql.psycopg2` - and :mod:`_sqlite.pysqlite` drivers are - known to work, drivers for MySQL and SQL Server ODBC drivers do not. .. seealso:: @@ -1081,4 +1134,103 @@ matching objects locally present in the :class:`_orm.Session`. See the section .. Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK + +.. 
_queryguide_inspection: + +Inspecting entities and columns from ORM-enabled SELECT and DML statements +========================================================================== + +The :func:`_sql.select` construct, as well as the :func:`_sql.insert`, :func:`_sql.update` +and :func:`_sql.delete` constructs (for the latter DML constructs, as of SQLAlchemy +1.4.33), all support the ability to inspect the entities in which these +statements are created against, as well as the columns and datatypes that would +be returned in a result set. + +For a :class:`.Select` object, this information is available from the +:attr:`.Select.column_descriptions` attribute. This attribute operates in the +same way as the legacy :attr:`.Query.column_descriptions` attribute. The format +returned is a list of dictionaries:: + + >>> from pprint import pprint + >>> user_alias = aliased(User, name="user2") + >>> stmt = select(User, User.id, user_alias) + >>> pprint(stmt.column_descriptions) + [{'aliased': False, + 'entity': , + 'expr': , + 'name': 'User', + 'type': }, + {'aliased': False, + 'entity': , + 'expr': <....InstrumentedAttribute object at ...>, + 'name': 'id', + 'type': Integer()}, + {'aliased': True, + 'entity': , + 'expr': , + 'name': 'user2', + 'type': }] + + +When :attr:`.Select.column_descriptions` is used with non-ORM objects +such as plain :class:`.Table` or :class:`.Column` objects, the entries +will contain basic information about individual columns returned in all +cases:: + + >>> stmt = select(user_table, address_table.c.id) + >>> pprint(stmt.column_descriptions) + [{'expr': Column('id', Integer(), table=, primary_key=True, nullable=False), + 'name': 'id', + 'type': Integer()}, + {'expr': Column('name', String(length=30), table=), + 'name': 'name', + 'type': String(length=30)}, + {'expr': Column('fullname', String(), table=), + 'name': 'fullname', + 'type': String()}, + {'expr': Column('id', Integer(), table=
, primary_key=True, nullable=False), + 'name': 'id_1', + 'type': Integer()}] + +.. versionchanged:: 1.4.33 The :attr:`.Select.column_descriptions` attribute now returns + a value when used against a :class:`.Select` that is not ORM-enabled. Previously, + this would raise ``NotImplementedError``. + + +For :func:`_sql.insert`, :func:`.update` and :func:`.delete` constructs, there are +two separate attributes. One is :attr:`.UpdateBase.entity_description` which +returns information about the primary ORM entity and database table which the +DML construct would be affecting:: + + >>> from sqlalchemy import update + >>> stmt = update(User).values(name="somename").returning(User.id) + >>> pprint(stmt.entity_description) + {'entity': , + 'expr': , + 'name': 'User', + 'table': Table('user_account', ...), + 'type': } + +.. tip:: The :attr:`.UpdateBase.entity_description` includes an entry + ``"table"`` which is actually the **table to be inserted, updated or + deleted** by the statement, which is **not** always the same as the SQL + "selectable" to which the class may be mapped. For example, in a + joined-table inheritance scenario, ``"table"`` will refer to the local table + for the given entity. + +The other is :attr:`.UpdateBase.returning_column_descriptions` which +delivers information about the columns present in the RETURNING collection +in a manner roughly similar to that of :attr:`.Select.column_descriptions`:: + + >>> pprint(stmt.returning_column_descriptions) + [{'aliased': False, + 'entity': , + 'expr': , + 'name': 'id', + 'type': Integer()}] + +.. versionadded:: 1.4.33 Added the :attr:`.UpdateBase.entity_description` + and :attr:`.UpdateBase.returning_column_descriptions` attributes. + + diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst new file mode 100644 index 00000000000..d766ef1d77b --- /dev/null +++ b/doc/build/orm/quickstart.rst @@ -0,0 +1,444 @@ +.. _orm_quickstart: + + +ORM Quick Start +=============== + +For new users who want to quickly see what basic ORM use looks like, here's an +abbreviated form of the mappings and examples used in the +:ref:`unified_tutorial`. The code here is fully runnable from a clean command +line. + +As the descriptions in this section are intentionally **very short**, please +proceed to the full :ref:`unified_tutorial` for a much more in-depth +description of each of the concepts being illustrated here. + + +Declare Models +--------------- + +Here, we define module-level constructs that will form the structures +which we will be querying from the database. This structure, known as a +:ref:`Declarative Mapping `, defines at once both a +Python object model, as well as +:term:`database metadata` that describes +real SQL tables that exist, or will exist, in a particular database:: + + >>> from sqlalchemy import Column + >>> from sqlalchemy import ForeignKey + >>> from sqlalchemy import Integer + >>> from sqlalchemy import String + >>> from sqlalchemy.orm import declarative_base + >>> from sqlalchemy.orm import relationship + + >>> Base = declarative_base() + + >>> class User(Base): + ... __tablename__ = "user_account" + ... + ... id = Column(Integer, primary_key=True) + ... name = Column(String(30)) + ... fullname = Column(String) + ... + ... addresses = relationship( + ... "Address", back_populates="user", cascade="all, delete-orphan" + ... ) + ... + ... def __repr__(self): + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + + >>> class Address(Base): + ... __tablename__ = "address" + ... + ... 
id = Column(Integer, primary_key=True) + ... email_address = Column(String, nullable=False) + ... user_id = Column(Integer, ForeignKey("user_account.id"), nullable=False) + ... + ... user = relationship("User", back_populates="addresses") + ... + ... def __repr__(self): + ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" + + +The mapping starts with a base class, which above is called ``Base``, and is +created by calling upon the :func:`_orm.declarative_base` function, which +produces a new base class. + +Individual mapped classes are then created by making subclasses of ``Base``. +A mapped class typically refers to a single particular database table, +the name of which is indicated by using the ``__tablename__`` class-level +attribute. + +Next, columns that are part of the table are declared, by adding attributes +linked to the :class:`_schema.Column` construct. :class:`_schema.Column` +describes all aspects of a database column, including typing +information with type objects such as :class:`.Integer` and :class:`.String` +as well as server defaults and +constraint information, such as membership within the primary key and foreign +keys. + +All ORM mapped classes require at least one column be declared as part of the +primary key, typically by using the :paramref:`_schema.Column.primary_key` +parameter on those :class:`_schema.Column` objects that should be part +of the key. In the above example, the ``User.id`` and ``Address.id`` +columns are marked as primary key. + +Taken together, the combination of a string table name as well as a list +of column declarations is referred towards in SQLAlchemy as :term:`table metadata`. +Setting up table metadata using both Core and ORM approaches is introduced +in the :ref:`unified_tutorial` at :ref:`tutorial_working_with_metadata`. +The above mapping is an example of what's referred towards as +:ref:`Declarative Table ` +configuration. + +Other Declarative directives are available, most commonly +the :func:`_orm.relationship` construct indicated above. In contrast +to the column-based attributes, :func:`_orm.relationship` denotes a linkage +between two ORM classes. In the above example, ``User.addresses`` links +``User`` to ``Address``, and ``Address.user`` links ``Address`` to ``User``. +The :func:`_orm.relationship` construct is introduced in the +:ref:`unified_tutorial` at :ref:`tutorial_orm_related_objects`. + +Finally, the above example classes include a ``__repr__()`` method, which is +not required but is useful for debugging. + +Create an Engine +------------------ + + +The :class:`_engine.Engine` is a **factory** that can create new +database connections for us, which also holds onto connections inside +of a :ref:`Connection Pool ` for fast reuse. For learning +purposes, we normally use a :ref:`SQLite ` memory-only database +for convenience:: + + >>> from sqlalchemy import create_engine + >>> engine = create_engine("sqlite://", echo=True, future=True) + +.. tip:: + + The ``echo=True`` parameter indicates that SQL emitted by connections will + be logged to standard out. ``future=True`` is to ensure we are using + the latest SQLAlchemy :term:`2.0-style` APIs. + +A full intro to the :class:`_engine.Engine` starts at :ref:`tutorial_engine`. + +Emit CREATE TABLE DDL +---------------------- + + +Using our table metadata and our engine, we can generate our schema at once +in our target SQLite database, using a method called :meth:`_schema.MetaData.create_all`: + +.. 
sourcecode:: pycon+sql + + >>> Base.metadata.create_all(engine) + {opensql}BEGIN (implicit) + PRAGMA main.table_...info("user_account") + ... + PRAGMA main.table_...info("address") + ... + CREATE TABLE user_account ( + id INTEGER NOT NULL, + name VARCHAR(30), + fullname VARCHAR, + PRIMARY KEY (id) + ) + ... + CREATE TABLE address ( + id INTEGER NOT NULL, + email_address VARCHAR NOT NULL, + user_id INTEGER NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(user_id) REFERENCES user_account (id) + ) + ... + COMMIT + +A lot just happened from that bit of Python code we wrote. For a complete +overview of what's going on on with Table metadata, proceed in the +Tutorial at :ref:`tutorial_working_with_metadata`. + +Create Objects and Persist +--------------------------- + +We are now ready to insert data in the database. We accomplish this by +creating instances of ``User`` and ``Address`` classes, which have +an ``__init__()`` method already as established automatically by the +declarative mapping process. We then pass them +to the database using an object called a :ref:`Session `, +which makes use of the :class:`_engine.Engine` to interact with the +database. The :meth:`_orm.Session.add_all` method is used here to add +multiple objects at once, and the :meth:`_orm.Session.commit` method +will be used to :ref:`flush ` any pending changes to the +database and then :ref:`commit ` the current database +transaction, which is always in progress whenever the :class:`_orm.Session` +is used: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.orm import Session + + >>> with Session(engine) as session: + ... + ... spongebob = User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ) + ... sandy = User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="sandy@squirrelpower.org"), + ... ], + ... ) + ... patrick = User(name="patrick", fullname="Patrick Star") + ... + ... session.add_all([spongebob, sandy, patrick]) + ... + ... session.commit() + {opensql}BEGIN (implicit) + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] ('spongebob', 'Spongebob Squarepants') + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] ('sandy', 'Sandy Cheeks') + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] ('patrick', 'Patrick Star') + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('spongebob@sqlalchemy.org', 1) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('sandy@sqlalchemy.org', 2) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('sandy@squirrelpower.org', 2) + COMMIT + + +.. tip:: + + It's recommended that the :class:`_orm.Session` be used in context + manager style as above, that is, using the Python ``with:`` statement. + The :class:`_orm.Session` object represents active database resources + so it's good to make sure it's closed out when a series of operations + are completed. In the next section, we'll keep a :class:`_orm.Session` + opened just for illustration purposes. + +Basics on creating a :class:`_orm.Session` are at +:ref:`tutorial_executing_orm_session` and more at :ref:`session_basics`. + +Then, some varieties of basic persistence operations are introduced +at :ref:`tutorial_inserting_orm`. 
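As a brief aside to the quickstart flow above, the same persistence steps are often written against a :class:`_orm.sessionmaker` factory rather than constructing :class:`_orm.Session` directly each time. An illustrative sketch, assuming the ``engine`` and ``User`` class defined earlier and a hypothetical extra row (the ``SessionFactory`` name is only an example)::

    from sqlalchemy.orm import sessionmaker

    # A factory configured once, typically at module level; calling it
    # produces new Session objects bound to the same engine.
    SessionFactory = sessionmaker(engine, future=True)

    with SessionFactory() as session:
        session.add(User(name="plankton", fullname="Sheldon Plankton"))
        session.commit()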
+ +Simple SELECT +-------------- + +With some rows in the database, here's the simplest form of emitting a SELECT +statement to load some objects. To create SELECT statements, we use the +:func:`_sql.select` function to create a new :class:`_sql.Select` object, which +we then invoke using a :class:`_orm.Session`. The method that is often useful +when querying for ORM objects is the :meth:`_orm.Session.scalars` method, which +will return a :class:`_result.ScalarResult` object that will iterate through +the ORM objects we've selected: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + + >>> session = Session(engine) + + >>> stmt = select(User).where(User.name.in_(["spongebob", "sandy"])) + + >>> for user in session.scalars(stmt): + ... print(user) + {opensql}BEGIN (implicit) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + WHERE user_account.name IN (?, ?) + [...] ('spongebob', 'sandy'){stop} + User(id=1, name='spongebob', fullname='Spongebob Squarepants') + User(id=2, name='sandy', fullname='Sandy Cheeks') + + +The above query also made use of the :meth:`_sql.Select.where` method +to add WHERE criteria, and also used the :meth:`_sql.ColumnOperators.in_` +method that's part of all SQLAlchemy column-like constructs to use the +SQL IN operator. + +More detail on how to select objects and individual columns is at +:ref:`tutorial_selecting_orm_entities`. + +SELECT with JOIN +----------------- + +It's very common to query amongst multiple tables at once, and in SQL +the JOIN keyword is the primary way this happens. The :class:`_sql.Select` +construct creates joins using the :meth:`_sql.Select.join` method: + +.. sourcecode:: pycon+sql + + >>> stmt = ( + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "sandy") + ... .where(Address.email_address == "sandy@sqlalchemy.org") + ... ) + >>> sandy_address = session.scalars(stmt).one() + {opensql}SELECT address.id, address.email_address, address.user_id + FROM address JOIN user_account ON user_account.id = address.user_id + WHERE user_account.name = ? AND address.email_address = ? + [...] ('sandy', 'sandy@sqlalchemy.org') + {stop} + >>> sandy_address + Address(id=2, email_address='sandy@sqlalchemy.org') + +The above query illustrates multiple WHERE criteria which are automatically +chained together using AND, as well as how to use SQLAlchemy column-like +objects to create "equality" comparisons, which uses the overridden Python +method :meth:`_sql.ColumnOperators.__eq__` to produce a SQL criteria object. + +Some more background on the concepts above are at +:ref:`tutorial_select_where_clause` and :ref:`tutorial_select_join`. + +Make Changes +------------ + +The :class:`_orm.Session` object, in conjunction with our ORM-mapped classes +``User`` and ``Address``, automatically track changes to the objects as they +are made, which result in SQL statements that will be emitted the next +time the :class:`_orm.Session` flushes. Below, we change one email +address associated with "sandy", and also add a new email address to +"patrick", after emitting a SELECT to retrieve the row for "patrick": + +.. sourcecode:: pycon+sql + + >>> stmt = select(User).where(User.name == "patrick") + >>> patrick = session.scalars(stmt).one() + {opensql}SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + WHERE user_account.name = ? + [...] 
('patrick',) + {stop} + + >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org")) + {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] (3,){stop} + + >>> sandy_address.email_address = "sandy_cheeks@sqlalchemy.org" + + >>> session.commit() + {opensql}UPDATE address SET email_address=? WHERE address.id = ? + [...] ('sandy_cheeks@sqlalchemy.org', 2) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('patrickstar@sqlalchemy.org', 3) + COMMIT + {stop} + +Notice when we accessed ``patrick.addresses``, a SELECT was emitted. This is +called a :term:`lazy load`. Background on different ways to access related +items using more or less SQL is introduced at :ref:`tutorial_orm_loader_strategies`. + +A detailed walkthrough on ORM data manipulation starts at +:ref:`tutorial_orm_data_manipulation`. + +Some Deletes +------------ + +All things must come to an end, as is the case for some of our database +rows - here's a quick demonstration of two different forms of deletion, both +of which are important based on the specific use case. + +First we will remove one of the ``Address`` objects from the "sandy" user. +When the :class:`_orm.Session` next flushes, this will result in the +row being deleted. This behavior is something that we configured in our +mapping called the :ref:`delete cascade `. We can get a handle to the ``sandy`` +object by primary key using :meth:`_orm.Session.get`, then work with the object: + +.. sourcecode:: pycon+sql + + >>> sandy = session.get(User, 2) + {opensql}BEGIN (implicit) + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name, user_account.fullname AS user_account_fullname + FROM user_account + WHERE user_account.id = ? + [...] (2,){stop} + + >>> sandy.addresses.remove(sandy_address) + {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] (2,) + +The last SELECT above was the :term:`lazy load` operation proceeding so that +the ``sandy.addresses`` collection could be loaded, so that we could remove the +``sandy_address`` member. There are other ways to go about this series +of operations that won't emit as much SQL. + +We can choose to emit the DELETE SQL for what's set to be changed so far, without +committing the transaction, using the +:meth:`_orm.Session.flush` method: + +.. sourcecode:: pycon+sql + + >>> session.flush() + {opensql}DELETE FROM address WHERE address.id = ? + [...] (2,) + +Next, we will delete the "patrick" user entirely. For a top-level delete of +an object by itself, we use the :meth:`_orm.Session.delete` method; this +method doesn't actually perform the deletion, but sets up the object +to be deleted on the next flush. The +operation will also :term:`cascade` to related objects based on the cascade +options that we configured, in this case, onto the related ``Address`` objects: + +.. sourcecode:: pycon+sql + + >>> session.delete(patrick) + {opensql}SELECT user_account.id AS user_account_id, user_account.name AS user_account_name, user_account.fullname AS user_account_fullname + FROM user_account + WHERE user_account.id = ? + [...] (3,) + SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] 
(3,) + +The :meth:`_orm.Session.delete` method in this particular case emitted two +SELECT statements, even though it didn't emit a DELETE, which might seem surprising. +This is because when the method went to inspect the object, it turns out the +``patrick`` object was :term:`expired`, which happened when we last called upon +:meth:`_orm.Session.commit`, and the SQL emitted was to re-load the rows +from the new transaction. This expiration is optional, and in normal +use we will often be turning it off for situations where it doesn't apply well. + +To illustrate the rows being deleted, here's the commit: + +.. sourcecode:: pycon+sql + + >>> session.commit() + {opensql}DELETE FROM address WHERE address.id = ? + [...] (4,) + DELETE FROM user_account WHERE user_account.id = ? + [...] (3,) + COMMIT + {stop} + +The Tutorial discusses ORM deletion at :ref:`tutorial_orm_deleting`. +Background on object expiration is at :ref:`session_expiring`; cascades +are discussed in depth at :ref:`unitofwork_cascades`. + +Learn the above concepts in depth +--------------------------------- + +For a new user, the above sections were likely a whirlwind tour. There's a +lot of important concepts in each step above that weren't covered. With a +quick overview of what things look like, it's recommended to work through +the :ref:`unified_tutorial` to gain a solid working knowledge of what's +really going on above. Good luck! + + + + + diff --git a/doc/build/orm/relationship_persistence.rst b/doc/build/orm/relationship_persistence.rst index f843764741d..77396639abe 100644 --- a/doc/build/orm/relationship_persistence.rst +++ b/doc/build/orm/relationship_persistence.rst @@ -64,27 +64,27 @@ a complete example, including two :class:`_schema.ForeignKey` constructs:: Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" widget_id = Column(Integer, primary_key=True) - favorite_entry_id = Column(Integer, - ForeignKey('entry.entry_id', - name="fk_favorite_entry")) + favorite_entry_id = Column( + Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry") + ) name = Column(String(50)) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - post_update=True) + entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id) + favorite_entry = relationship( + Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True + ) When a structure against the above configuration is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will @@ -94,8 +94,8 @@ row at a time for the time being): .. sourcecode:: pycon+sql - >>> w1 = Widget(name='somewidget') - >>> e1 = Entry(name='someentry') + >>> w1 = Widget(name="somewidget") + >>> e1 = Entry(name="someentry") >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) @@ -115,26 +115,32 @@ it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``. 
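As an aside to the earlier ``post_update`` example: if ``post_update=True`` were left off of the ``favorite_entry`` relationship, the unit of work would have no valid ordering for the two INSERTs. A hypothetical sketch of that failure mode, reusing the ``w1`` / ``e1`` objects from the example above::

    from sqlalchemy.exc import CircularDependencyError

    try:
        session.add_all([w1, e1])
        session.commit()
    except CircularDependencyError:
        # each row waits on the other's primary key,
        # so no legal INSERT ordering exists
        session.rollback()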
We can use a composite foreign key, as illustrated below:: - from sqlalchemy import Integer, ForeignKey, String, \ - Column, UniqueConstraint, ForeignKeyConstraint + from sqlalchemy import ( + Integer, + ForeignKey, + String, + Column, + UniqueConstraint, + ForeignKeyConstraint, + ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) - __table_args__ = ( - UniqueConstraint("entry_id", "widget_id"), - ) + __table_args__ = (UniqueConstraint("entry_id", "widget_id"),) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" - widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True) + widget_id = Column(Integer, autoincrement="ignore_fk", primary_key=True) favorite_entry_id = Column(Integer) name = Column(String(50)) @@ -143,18 +149,19 @@ as illustrated below:: ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], - name="fk_favorite_entry" + name="fk_favorite_entry", ), ) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id, - foreign_keys=Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - foreign_keys=favorite_entry_id, - post_update=True) + entries = relationship( + Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id + ) + favorite_entry = relationship( + Entry, + primaryjoin=favorite_entry_id == Entry.entry_id, + foreign_keys=favorite_entry_id, + post_update=True, + ) The above mapping features a composite :class:`_schema.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure @@ -184,8 +191,8 @@ capabilities of the database. An example mapping which illustrates this is:: class User(Base): - __tablename__ = 'user' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "user" + __table_args__ = {"mysql_engine": "InnoDB"} username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -194,13 +201,11 @@ illustrates this is:: class Address(Base): - __tablename__ = 'address' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "address" + __table_args__ = {"mysql_engine": "InnoDB"} email = Column(String(50), primary_key=True) - username = Column(String(50), - ForeignKey('user.username', onupdate="cascade") - ) + username = Column(String(50), ForeignKey("user.username", onupdate="cascade")) Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey` object, and we also illustrate the ``mysql_engine='InnoDB'`` setting @@ -245,7 +250,7 @@ will be fully loaded into memory if not already locally present. 
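To make the cascading case concrete before turning to the non-cascading one below, here is a brief illustrative sketch, assuming the InnoDB-backed ``User`` / ``Address`` mapping just shown, an existing ``session``, and a hypothetical row named "jack"::

    from sqlalchemy import select

    jack = session.scalars(select(User).where(User.username == "jack")).one()
    jack.username = "jack2020"

    # With passive_updates left at its default of True, only the parent row
    # is UPDATEd; the database's ON UPDATE CASCADE propagates the new value
    # to the referencing address.username rows.
    session.commit()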
Our previous mapping using ``passive_updates=False`` looks like:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -254,11 +259,12 @@ Our previous mapping using ``passive_updates=False`` looks like:: # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" email = Column(String(50), primary_key=True) - username = Column(String(50), ForeignKey('user.username')) + username = Column(String(50), ForeignKey("user.username")) Key limitations of ``passive_updates=False`` include: diff --git a/doc/build/orm/relationships.rst b/doc/build/orm/relationships.rst index 8a4fe36a1d8..0c12ba1a4b3 100644 --- a/doc/build/orm/relationships.rst +++ b/doc/build/orm/relationships.rst @@ -7,16 +7,16 @@ Relationship Configuration This section describes the :func:`relationship` function and in depth discussion of its usage. For an introduction to relationships, start with the -:ref:`ormtutorial_toplevel` and head into :ref:`orm_tutorial_relationship`. +:ref:`ormtutorial_toplevel` and head into :ref:`tutorial_orm_related_objects`. .. toctree:: :maxdepth: 3 basic_relationships self_referential - backref join_conditions collections relationship_persistence + backref relationship_api diff --git a/doc/build/orm/self_referential.rst b/doc/build/orm/self_referential.rst index 2f1c021020b..6db51520e88 100644 --- a/doc/build/orm/self_referential.rst +++ b/doc/build/orm/self_referential.rst @@ -26,9 +26,9 @@ In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) children = relationship("Node") @@ -60,9 +60,9 @@ is a :class:`_schema.Column` or collection of :class:`_schema.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) @@ -75,13 +75,11 @@ As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - backref=backref('parent', remote_side=[id]) - ) + children = relationship("Node", backref=backref("parent", remote_side=[id])) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and @@ -99,11 +97,11 @@ the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) 
account_id = Column(Integer, primary_key=True) @@ -111,10 +109,9 @@ to a specific folder within that account:: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list. :func:`_orm.relationship` recognizes that the ``account_id`` column here @@ -130,14 +127,14 @@ Self-Referential Query Strategies Querying of self-referential structures works like any other query:: # get all nodes named 'child2' - session.query(Node).filter(Node.data=='child2') + session.query(Node).filter(Node.data == "child2") However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be "aliased" so that it can be unambiguously referred to. -Recall from :ref:`ormtutorial_aliases` in the ORM tutorial that the +Recall from :ref:`orm_queryguide_orm_aliases` in the ORM tutorial that the :func:`_orm.aliased` construct is normally used to provide an "alias" of an ORM entity. Joining from ``Node`` to itself using this technique looks like: @@ -147,10 +144,9 @@ looks like: from sqlalchemy.orm import aliased nodealias = aliased(Node) - session.query(Node).filter(Node.data=='subchild1').\ - join(Node.parent.of_type(nodealias)).\ - filter(nodealias.data=="child2").\ - all() + session.query(Node).filter(Node.data == "subchild1").join( + Node.parent.of_type(nodealias) + ).filter(nodealias.data == "child2").all() {opensql}SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data @@ -182,13 +178,12 @@ configured via :paramref:`~.relationships.join_depth`: .. sourcecode:: python+sql class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - lazy="joined", - join_depth=2) + children = relationship("Node", lazy="joined", join_depth=2) + session.query(Node).all() {opensql}SELECT node_1.id AS node_1_id, diff --git a/doc/build/orm/session_api.rst b/doc/build/orm/session_api.rst index ada035e957f..635223aa100 100644 --- a/doc/build/orm/session_api.rst +++ b/doc/build/orm/session_api.rst @@ -46,8 +46,8 @@ Session and sessionmaker() :attr:`_orm.ORMExecuteState.execution_options` .. attribute:: execution_options - The complete dictionary of current execution options. + The complete dictionary of current execution options. This is a merge of the statement level options with the locally passed execution options. diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 529f786c1af..41390ff0bc4 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -40,6 +40,7 @@ caveats. It's intended that usually, you'd re-associate detached objects with another :class:`.Session` when you want to work with them again, so that they can resume their normal task of representing database state. +.. 
_session_basics: Basics of Using a Session ========================= @@ -61,7 +62,7 @@ may look like:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # create session and add objects with Session(engine) as session: @@ -81,6 +82,18 @@ persisted to the database. If we were only issuing SELECT calls and did not need to write any changes, then the call to :meth:`_orm.Session.commit` would be unnecessary. +.. note:: + + Note that after :meth:`_orm.Session.commit` is called, either explicitly or + when using a context manager, all objects associated with the + :class:`.Session` are :term:`expired`, meaning their contents are erased to + be re-loaded within the next transaction. If these objects are instead + :term:`detached`, they will be non-functional until re-associated with a + new :class:`.Session`, unless the :paramref:`.Session.expire_on_commit` + parameter is used to disable this behavior. See the + section :ref:`session_committing` for more detail. + + .. _session_begin_commit_rollback_block: Framing out a begin / commit / rollback block @@ -116,8 +129,8 @@ operations:: # create session and add objects with Session(engine) as session: with session.begin(): - session.add(some_object) - session.add(some_other_object) + session.add(some_object) + session.add(some_other_object) # inner context calls session.commit(), if there were no exceptions # outer context calls session.close() @@ -144,7 +157,7 @@ scope, the :class:`_orm.sessionmaker` can provide a factory for # an Engine, which the Session will use for connection # resources, typically in module scope - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -169,7 +182,7 @@ and also maintains a begin/commit/rollback block:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -210,10 +223,10 @@ will issue mapper queries within the context of this Session. By other ORM constructs such as an :func:`_orm.aliased` construct:: # query from a class - results = session.query(User).filter_by(name='ed').all() + results = session.query(User).filter_by(name="ed").all() # query with multiple classes, returns tuples - results = session.query(User, Address).join('addresses').filter_by(name='ed').all() + results = session.query(User, Address).join("addresses").filter_by(name="ed").all() # query using orm-columns, also returns tuples results = session.query(User.name, User.fullname).all() @@ -270,7 +283,7 @@ statements that use ORM entities:: result = session.execute(statement).scalars().all() # query with multiple classes - statement = select(User, Address).join('addresses').filter_by(name='ed') + statement = select(User, Address).join("addresses").filter_by(name="ed") # list of tuples result = session.execute(statement).all() @@ -301,6 +314,7 @@ via standard methods such as :meth:`_engine.Result.all`, :ref:`migration_20_toplevel` +.. _session_adding: Adding New or Existing Items @@ -314,12 +328,12 @@ already present and do not need to be added. Instances which are :term:`detached (i.e. 
have been removed from a session) may be re-associated with a session using this method:: - user1 = User(name='user1') - user2 = User(name='user2') + user1 = User(name="user1") + user2 = User(name="user2") session.add(user1) session.add(user2) - session.commit() # write changes to the database + session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: @@ -330,6 +344,7 @@ The :meth:`~.Session.add` operation **cascades** along the ``save-update`` cascade. For more details see the section :ref:`unitofwork_cascades`. +.. _session_deleting: Deleting -------- @@ -423,40 +438,96 @@ a :term:`2.0-style` :meth:`_orm.Session.execute` call, as well as within the committed. It also occurs before a SAVEPOINT is issued when :meth:`~.Session.begin_nested` is used. -Regardless of the autoflush setting, a flush can always be forced by issuing -:meth:`~.Session.flush`:: +A :class:`.Session` flush can be forced at any time by calling the +:meth:`~.Session.flush` method:: session.flush() -The "flush-on-Query" aspect of the behavior can be disabled by constructing -:class:`.sessionmaker` with the flag ``autoflush=False``:: +The flush which occurs automatically within the scope of certain methods +is known as **autoflush**. Autoflush is defined as a configurable, +automatic flush call which occurs at the beginning of methods including: - Session = sessionmaker(autoflush=False) +* :meth:`_orm.Session.execute` and other SQL-executing methods, when used + against ORM-enabled SQL constructs, such as :func:`_sql.select` objects + that refer to ORM entities and/or ORM-mapped attributes +* When a :class:`_query.Query` is invoked to send SQL to the database +* Within the :meth:`.Session.merge` method before querying the database +* When objects are :ref:`refreshed ` +* When ORM :term:`lazy load` operations occur against unloaded object + attributes. -Additionally, autoflush can be temporarily disabled by setting the -``autoflush`` flag at any time:: +There are also points at which flushes occur **unconditionally**; these +points are within key transactional boundaries which include: - mysession = Session() - mysession.autoflush = False +* Within the process of the :meth:`.Session.commit` method +* When :meth:`.Session.begin_nested` is called +* When the :meth:`.Session.prepare` 2PC method is used. -More conveniently, it can be turned off within a context managed block using :attr:`.Session.no_autoflush`:: +The **autoflush** behavior, as applied to the previous list of items, +can be disabled by constructing a :class:`.Session` or +:class:`.sessionmaker` passing the :paramref:`.Session.autoflush` parameter as +``False``:: + + Session = sessionmaker(autoflush=False) + +Additionally, autoflush can be temporarily disabled within the flow +of using a :class:`.Session` using the +:attr:`.Session.no_autoflush` context manager:: with mysession.no_autoflush: mysession.add(some_object) mysession.flush() -The flush process *always* occurs within a transaction, even if the -:class:`~sqlalchemy.orm.session.Session` has been configured with -``autocommit=True``, a setting that disables the session's persistent -transactional state. If no transaction is present, -:meth:`~.Session.flush` creates its own transaction and -commits it. Any failures during flush will always result in a rollback of -whatever transaction is present. 
If the Session is not in ``autocommit=True`` -mode, an explicit call to :meth:`~.Session.rollback` is -required after a flush fails, even though the underlying transaction will have -been rolled back already - this is so that the overall nesting pattern of -so-called "subtransactions" is consistently maintained. +**To reiterate:** The flush process **always occurs** when transactional +methods such as :meth:`.Session.commit` and :meth:`.Session.begin_nested` are +called, regardless of any "autoflush" settings, when the :class:`.Session` has +remaining pending changes to process. + +The flush process *always* occurs within a transaction, (subject to the +:ref:`isolation level ` of the database +transaction), provided that the DBAPI is not in +:ref:`driver level autocommit ` mode. This includes even if +the :class:`~sqlalchemy.orm.session.Session` has been configured with the +deprecated :paramref:`_orm.Session.autocommit` setting, which disables the +session's persistent transactional state. If no transaction is present, +:meth:`~.Session.flush` creates its own transaction and commits it. This means +that assuming the database connection is providing for :term:`atomicity` within +its transactional settings, if any individual DML statement inside the flush +fails, the entire operation will be rolled back. + +Outside of using :paramref:`_orm.Session.autocommit`, when a failure occurs +within a flush, in order to continue using that same :class:`_orm.Session`, an +explicit call to :meth:`~.Session.rollback` is required after a flush fails, +even though the underlying transaction will have been rolled back already (even +if the database driver is technically in driver-level autocommit mode). This is +so that the overall nesting pattern of so-called "subtransactions" is +consistently maintained. The FAQ section :ref:`faq_session_rollback` contains a +more detailed description of this behavior. + +.. _session_get: + +Get by Primary Key +------------------ + +As the :class:`_orm.Session` makes use of an :term:`identity map` which refers +to current in-memory objects by primary key, the :meth:`_orm.Session.get` +method is provided as a means of locating objects by primary key, first +looking within the current identity map and then querying the database +for non present values. Such as, to locate a ``User`` entity with primary key +identity ``(5, )``:: + + my_user = session.get(User, 5) + +The :meth:`_orm.Session.get` also includes calling forms for composite primary +key values, which may be passed as tuples or dictionaries, as well as +additional parameters which allow for specific loader and execution options. +See :meth:`_orm.Session.get` for the complete parameter list. + +.. seealso:: + + :meth:`_orm.Session.get` +.. _session_expiring: Expiring / Refreshing --------------------- @@ -508,9 +579,11 @@ ways to refresh its contents with new data from the current transaction: .. -* **the populate_existing() method** - this method is actually on the - :class:`_orm.Query` object as :meth:`_orm.Query.populate_existing` - and indicates that it should return objects that are unconditionally +* **the populate_existing() method or execution option** - This is now + an execution option documented at :ref:`orm_queryguide_populate_existing`; in + legacy form it's found on the :class:`_orm.Query` object as the + :meth:`_orm.Query.populate_existing` method. 
This operation in either form + indicates that objects being returned from a query should be unconditionally re-populated from their contents in the database:: u2 = session.query(User).populate_existing().filter(id=5).first() @@ -543,8 +616,9 @@ time refresh locally present objects which match those rows. To emit an ORM-enabled UPDATE in :term:`1.x style`, the :meth:`_query.Query.update` method may be used:: - session.query(User).filter(User.name == "squidward").\ - update({"name": "spongebob"}, synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").update( + {"name": "spongebob"}, synchronize_session="fetch" + ) Above, an UPDATE will be emitted against all rows that match the name "squidward" and be updated to the name "spongebob". The @@ -559,8 +633,12 @@ Core :class:`_sql.Update` construct:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - execution_options(synchronize_session="fetch") + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .execution_options(synchronize_session="fetch") + ) result = session.execute(stmt) @@ -579,14 +657,17 @@ within the :class:`_orm.Session` will be marked as deleted and expunged. ORM-enabled delete, :term:`1.x style`:: - session.query(User).filter(User.name == "squidward").\ - delete(synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").delete(synchronize_session="fetch") ORM-enabled delete, :term:`2.0 style`:: from sqlalchemy import delete - stmt = delete(User).where(User.name == "squidward").execution_options(synchronize_session="fetch") + stmt = ( + delete(User) + .where(User.name == "squidward") + .execution_options(synchronize_session="fetch") + ) session.execute(stmt) @@ -664,7 +745,7 @@ values for ``synchronize_session`` are supported: automatically. If the operation is against multiple tables, typically individual UPDATE / DELETE statements against the individual tables should be used. Some databases support multiple table UPDATEs. - Similar guidelines as those detailed at :ref:`multi_table_updates` + Similar guidelines as those detailed at :ref:`tutorial_update_from` may be applied. * The WHERE criteria needed in order to limit the polymorphic identity to @@ -739,13 +820,23 @@ Committing ---------- :meth:`~.Session.commit` is used to commit the current -transaction, if any. When there is no transaction in place, the method -passes silently. - -When :meth:`_orm.Session.commit` operates upon the current open transaction, -it first always issues :meth:`~.Session.flush` -beforehand to flush any remaining state to the database; this is independent -of the "autoflush" setting. +transaction. At its core this indicates that it emits ``COMMIT`` on +all current database connections that have a transaction in progress; +from a :term:`DBAPI` perspective this means the ``connection.commit()`` +DBAPI method is invoked on each DBAPI connection. + +When there is no transaction in place for the :class:`.Session`, indicating +that no operations were invoked on this :class:`.Session` since the previous +call to :meth:`.Session.commit`, the method will begin and commit an +internal-only "logical" transaction, that does not normally affect the database +unless pending flush changes were detected, but will still invoke event +handlers and object expiration rules. 
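The cycle described above can be sketched end to end. The following is a minimal, self-contained illustration only; the ``Thing`` mapping, the in-memory SQLite engine, and the variable names are placeholders rather than part of the surrounding examples::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()


    class Thing(Base):
        __tablename__ = "thing"
        id = Column(Integer, primary_key=True)
        data = Column(String(50))


    engine = create_engine("sqlite://", future=True)
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        # nothing is pending yet; commit() begins and commits only a
        # "logical" transaction, while still running commit-time event
        # handlers and expiration rules
        session.commit()

        thing = Thing(data="some data")
        session.add(thing)

        # pending changes exist this time; commit() flushes the INSERT and
        # then emits COMMIT on the participating database connection
        session.commit()

        # with the default expire_on_commit=True, attribute access after the
        # commit re-loads the row within a new transaction
        print(thing.data)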
+ +The :meth:`_orm.Session.commit` operation unconditionally issues +:meth:`~.Session.flush` before emitting COMMIT on relevant database +connections. If no pending changes are detected, then no SQL is emitted to the +database. This behavior is not configurable and is not affected by the +:paramref:`.Session.autoflush` parameter. Subsequent to that, :meth:`_orm.Session.commit` will then COMMIT the actual database transaction or transactions, if any, that are in place. @@ -757,15 +848,6 @@ result of a SELECT, they receive the most recent state. This behavior may be controlled by the :paramref:`_orm.Session.expire_on_commit` flag, which may be set to ``False`` when this behavior is undesirable. -.. versionchanged:: 1.4 - - The :class:`_orm.Session` object now features deferred "begin" behavior, as - described in :ref:`autobegin `. If no transaction is - begun, methods like :meth:`_orm.Session.commit` and - :meth:`_orm.Session.rollback` have no effect. This behavior would not - have been observed prior to 1.4 as under non-autocommit mode, a - transaction would always be implicitly present. - .. seealso:: :ref:`session_autobegin` @@ -963,6 +1045,7 @@ E.g. **don't do this**:: ### this is the **wrong way to do it** ### + class ThingOne(object): def go(self): session = Session() @@ -973,6 +1056,7 @@ E.g. **don't do this**:: session.rollback() raise + class ThingTwo(object): def go(self): session = Session() @@ -983,6 +1067,7 @@ E.g. **don't do this**:: session.rollback() raise + def run_my_program(): ThingOne().go() ThingTwo().go() @@ -995,21 +1080,23 @@ transaction automatically:: ### this is a **better** (but not the only) way to do it ### + class ThingOne(object): def go(self, session): session.query(FooBar).update({"x": 5}) + class ThingTwo(object): def go(self, session): session.query(Widget).update({"q": 18}) + def run_my_program(): with Session() as session: with session.begin(): ThingOne().go(session) ThingTwo().go(session) - .. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context manager without the use of external helper functions. @@ -1047,6 +1134,7 @@ available on :class:`~sqlalchemy.orm.session.Session`:: The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect + session = inspect(someobject).session .. _session_faq_threadsafe: diff --git a/doc/build/orm/session_events.rst b/doc/build/orm/session_events.rst index 544a6c5773d..c24bb9fa962 100644 --- a/doc/build/orm/session_events.rst +++ b/doc/build/orm/session_events.rst @@ -47,6 +47,7 @@ options:: Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if orm_execute_state.is_select: @@ -58,7 +59,7 @@ options:: # ORDER BY if so col_descriptions = orm_execute_state.statement.column_descriptions - if col_descriptions[0]['entity'] is MyEntity: + if col_descriptions[0]["entity"] is MyEntity: orm_execute_state.statement = statement.order_by(MyEntity.name) The above example illustrates some simple modifications to SELECT statements. 
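A fuller, self-contained sketch of the listener illustrated above may look like the following; the ``MyEntity`` mapping, the in-memory engine, and the listener name are stand-ins for whatever mappings an application actually uses::

    from sqlalchemy import Column, Integer, String, create_engine, event, select
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()


    class MyEntity(Base):
        __tablename__ = "my_entity"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))


    engine = create_engine("sqlite://", future=True)
    Base.metadata.create_all(engine)

    Session = sessionmaker(engine, future=True)


    @event.listens_for(Session, "do_orm_execute")
    def _default_order_by(orm_execute_state):
        # only rewrite top-level ORM SELECT statements, skipping column
        # loads and relationship lazy loads
        if (
            orm_execute_state.is_select
            and not orm_execute_state.is_column_load
            and not orm_execute_state.is_relationship_load
        ):
            col_descriptions = orm_execute_state.statement.column_descriptions
            if col_descriptions and col_descriptions[0]["entity"] is MyEntity:
                orm_execute_state.statement = orm_execute_state.statement.order_by(
                    MyEntity.name
                )


    with Session() as session:
        # the listener rewrites this SELECT to include ORDER BY my_entity.name
        rows = session.execute(select(MyEntity)).scalars().all()

The guard against ``is_column_load`` and ``is_relationship_load`` keeps the ORDER BY from being applied to attribute refresh and lazy load operations.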
@@ -85,13 +86,14 @@ may be used on its own, or is ideally suited to be used within the Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select and - not orm_execute_state.is_column_load and - not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): orm_execute_state.statement = orm_execute_state.statement.options( with_loader_criteria(MyEntity.public == True) @@ -114,6 +116,7 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: import datetime + class HasTimestamp(object): timestamp = Column(DateTime, default=datetime.datetime.now) @@ -122,11 +125,11 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: __tablename__ = "some_entity" id = Column(Integer, primary_key=True) + class SomeOtherEntity(HasTimestamp, Base): __tablename__ = "some_entity" id = Column(Integer, primary_key=True) - The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column ``timestamp`` that defaults to the current date and time. An event may be used to intercept all objects that extend from ``HasTimestamp`` and filter their @@ -135,9 +138,9 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select - and not orm_execute_state.is_column_load - and not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): one_month_ago = datetime.datetime.today() - datetime.timedelta(months=1) @@ -145,7 +148,7 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their with_loader_criteria( HasTimestamp, lambda cls: cls.timestamp >= one_month_ago, - include_aliases=True + include_aliases=True, ) ) @@ -202,6 +205,7 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: cache = {} + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if "my_cache_key" in orm_execute_state.execution_options: @@ -222,7 +226,9 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: With the above hook in place, an example of using the cache would look like:: - stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy") + stmt = ( + select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy") + ) result = session.execute(stmt) @@ -413,7 +419,8 @@ with a specific :class:`.Session` object:: session = Session() - @event.listens_for(session, 'transient_to_pending') + + @event.listens_for(session, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -425,7 +432,8 @@ Or with the :class:`.Session` class itself, as well as with a specific maker = sessionmaker() - @event.listens_for(maker, 'transient_to_pending') + + @event.listens_for(maker, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -457,11 +465,11 @@ intercept all new objects for a particular declarative base:: Base = declarative_base() + @event.listens_for(Base, "init", propagate=True) def intercept_init(instance, args, kwargs): print("new transient: %s" % instance) - Transient to Pending ^^^^^^^^^^^^^^^^^^^^ @@ -476,7 +484,6 @@ the 
:meth:`.SessionEvents.transient_to_pending` event:: def intercept_transient_to_pending(session, object_): print("transient to pending: %s" % object_) - Pending to Persistent ^^^^^^^^^^^^^^^^^^^^^ @@ -517,7 +524,6 @@ state via this particular avenue:: def intercept_loaded_as_persistent(session, object_): print("object loaded into persistent state: %s" % object_) - Persistent to Transient ^^^^^^^^^^^^^^^^^^^^^^^ @@ -561,7 +567,6 @@ Track the persistent to deleted transition with def intercept_persistent_to_deleted(session, object_): print("object was DELETEd, is now in deleted state: %s" % object_) - Deleted to Detached ^^^^^^^^^^^^^^^^^^^ @@ -575,7 +580,6 @@ the deleted to detached transition using :meth:`.SessionEvents.deleted_to_detach def intercept_deleted_to_detached(session, object_): print("deleted to detached: %s" % object_) - .. note:: While the object is in the deleted state, the :attr:`.InstanceState.deleted` @@ -618,7 +622,6 @@ objects moving back to persistent from detached using the def intercept_detached_to_persistent(session, object_): print("object became persistent again: %s" % object_) - Deleted to Persistent ^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 64efffd7614..c1d7230686c 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -50,7 +50,19 @@ Getting the Current State of an Object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The actual state of any mapped object can be viewed at any time using -the :func:`_sa.inspect` system:: +the :func:`_sa.inspect` function on a mapped instance; this function will +return the corresponding :class:`.InstanceState` object which manages the +internal ORM state for the object. :class:`.InstanceState` provides, among +other accessors, boolean attributes indicating the persistence state +of the object, including: + +* :attr:`.InstanceState.transient` +* :attr:`.InstanceState.pending` +* :attr:`.InstanceState.persistent` +* :attr:`.InstanceState.deleted` +* :attr:`.InstanceState.detached` + +E.g.:: >>> from sqlalchemy import inspect >>> insp = inspect(my_object) @@ -59,15 +71,8 @@ the :func:`_sa.inspect` system:: .. seealso:: - :attr:`.InstanceState.transient` - - :attr:`.InstanceState.pending` - - :attr:`.InstanceState.persistent` - - :attr:`.InstanceState.deleted` - - :attr:`.InstanceState.detached` + :ref:`orm_mapper_inspection_instancestate` - further examples of + :class:`.InstanceState` .. 
_session_attributes: @@ -137,25 +142,25 @@ the :term:`persistent` state is as follows:: from sqlalchemy import event + def strong_reference_session(session): @event.listens_for(session, "pending_to_persistent") @event.listens_for(session, "deleted_to_persistent") @event.listens_for(session, "detached_to_persistent") @event.listens_for(session, "loaded_as_persistent") def strong_ref_object(sess, instance): - if 'refs' not in sess.info: - sess.info['refs'] = refs = set() + if "refs" not in sess.info: + sess.info["refs"] = refs = set() else: - refs = sess.info['refs'] + refs = sess.info["refs"] refs.add(instance) - @event.listens_for(session, "persistent_to_detached") @event.listens_for(session, "persistent_to_deleted") @event.listens_for(session, "persistent_to_transient") def deref_object(sess, instance): - sess.info['refs'].discard(instance) + sess.info["refs"].discard(instance) Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`, :meth:`.SessionEvents.detached_to_persistent`, @@ -181,7 +186,6 @@ It may also be called for any :class:`.sessionmaker`:: maker = sessionmaker() strong_reference_session(maker) - .. _unitofwork_merging: Merging @@ -204,11 +208,14 @@ When given an instance, it follows these steps: key if not located locally. * If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created. -* The state of the given instance is then copied onto the located/newly - created instance. For attributes which are present on the source - instance, the value is transferred to the target instance. For mapped - attributes which aren't present on the source, the attribute is - expired on the target instance, discarding its existing value. +* The state of the given instance is then copied onto the located/newly created + instance. For attribute values which are present on the source instance, the + value is transferred to the target instance. For attribute values that aren't + present on the source instance, the corresponding attribute on the target + instance is :term:`expired` from memory, which discards any locally + present value from the target instance for that attribute, but no + direct modification is made to the database-persisted value for that + attribute. If the ``load=True`` flag is left at its default, this copy process emits events and will load the target object's @@ -282,22 +289,23 @@ some unexpected state regarding the object being passed to :meth:`~.Session.merg Lets use the canonical example of the User and Address objects:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email_address = Column(String(50), nullable=False) - user_id = Column(Integer, ForeignKey('user.id'), nullable=False) + user_id = Column(Integer, ForeignKey("user.id"), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: - >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) + >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")]) >>> session.add(u1) >>> session.commit() @@ -411,7 +419,7 @@ When we talk about expiration of data we are usually talking about an object that is in the :term:`persistent` state. 
For example, if we load an object as follows:: - user = session.query(User).filter_by(name='user1').first() + user = session.query(User).filter_by(name="user1").first() The above ``User`` object is persistent, and has a series of attributes present; if we were to look inside its ``__dict__``, we'd see that state @@ -473,7 +481,7 @@ Another key behavior of both :meth:`~.Session.expire` and :meth:`~.Session.refre is that all un-flushed changes on an object are discarded. That is, if we were to modify an attribute on our ``User``:: - >>> user.name = 'user2' + >>> user.name = "user2" but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`, our pending value of ``'user2'`` is discarded:: @@ -492,7 +500,7 @@ it can also be passed a list of string attribute names, referring to specific attributes to be marked as expired:: # expire only attributes obj1.attr1, obj1.attr2 - session.expire(obj1, ['attr1', 'attr2']) + session.expire(obj1, ["attr1", "attr2"]) The :meth:`.Session.expire_all` method allows us to essentially call :meth:`.Session.expire` on all objects contained within the :class:`.Session` @@ -511,7 +519,7 @@ but unlike :meth:`~.Session.expire`, expects at least one name to be that of a column-mapped attribute:: # reload obj1.attr1, obj1.attr2 - session.refresh(obj1, ['attr1', 'attr2']) + session.refresh(obj1, ["attr1", "attr2"]) .. tip:: diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index 6d7c4dd18ed..1d246b79ab9 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -28,6 +28,7 @@ the scope of the :class:`_orm.SessionTransaction`. Below, assume we start with a :class:`_orm.Session`:: from sqlalchemy.orm import Session + session = Session(engine) We can now run operations within a demarcated transaction using a context @@ -106,7 +107,7 @@ first:: Similarly, the :class:`_orm.sessionmaker` can be used in the same way:: - Session = sesssionmaker(engine) + Session = sessionmaker(engine) with Session() as session: with session.begin(): @@ -139,21 +140,26 @@ method:: session.add(u1) session.add(u2) - nested = session.begin_nested() # establish a savepoint + nested = session.begin_nested() # establish a savepoint session.add(u3) nested.rollback() # rolls back u3, keeps u1 and u2 # commits u1 and u2 Each time :meth:`_orm.Session.begin_nested` is called, a new "BEGIN SAVEPOINT" -command is emitted to the database with a unique identifier. When -:meth:`_orm.SessionTransaction.commit` is called, "RELEASE SAVEPOINT" -is emitted on the database, and if instead -:meth:`_orm.SessionTransaction.rollback` is called, "ROLLBACK TO SAVEPOINT" -is emitted. - -:meth:`_orm.Session.begin_nested` may also be used as a context manager in the -same manner as that of the :meth:`_orm.Session.begin` method:: +command is emitted to the database within the scope of the current +database transaction (starting one if not already in progress), and +an object of type :class:`_orm.SessionTransaction` is returned, which +represents a handle to this SAVEPOINT. When +the ``.commit()`` method on this object is called, "RELEASE SAVEPOINT" +is emitted to the database, and if instead the ``.rollback()`` +method is called, "ROLLBACK TO SAVEPOINT" is emitted. The enclosing +database transaction remains in progress. 
+
+:meth:`_orm.Session.begin_nested` is typically used as a context manager
+where specific per-instance errors may be caught, in conjunction with
+a rollback emitted for that portion of the transaction's state, without
+rolling back the whole transaction, as in the example below::

     for record in records:
         try:
@@ -163,19 +169,54 @@ same manner as that of the :meth:`_orm.Session.begin` method::
             print("Skipped record %s" % record)
     session.commit()

-When :meth:`~.Session.begin_nested` is called, a
-:meth:`~.Session.flush` is unconditionally issued
-(regardless of the ``autoflush`` setting). This is so that when a
-rollback on this nested transaction occurs, the full state of the
-session is expired, thus causing all subsequent attribute/instance access to
-reference the full state of the :class:`~sqlalchemy.orm.session.Session` right
-before :meth:`~.Session.begin_nested` was called.
+When the context manager yielded by :meth:`_orm.Session.begin_nested`
+completes, it "commits" the savepoint,
+which includes the usual behavior of flushing all pending state. When
+an error is raised, the savepoint is rolled back and the state of the
+:class:`_orm.Session` local to the objects that were changed is expired.
+
+This pattern is ideal for situations such as using PostgreSQL and
+catching :class:`.IntegrityError` to detect duplicate rows; PostgreSQL normally
+aborts the entire transaction when such an error is raised; however, when using
+SAVEPOINT, the outer transaction is maintained. In the example below,
+a list of data is persisted into the database, with the occasional
+"duplicate primary key" record skipped, without rolling back the entire
+operation::
+
+    from sqlalchemy import exc
+
+    with session.begin():
+        for record in records:
+            try:
+                with session.begin_nested():
+                    obj = SomeRecord(id=record["identifier"], name=record["name"])
+                    session.add(obj)
+            except exc.IntegrityError:
+                print(f"Skipped record {record} - row already exists")
+
+When :meth:`~.Session.begin_nested` is called, the :class:`_orm.Session` first
+flushes all currently pending state to the database; this occurs unconditionally,
+regardless of the value of the :paramref:`_orm.Session.autoflush` parameter
+which normally may be used to disable automatic flush. The rationale
+for this behavior is so that
+when a rollback on this nested transaction occurs, the :class:`_orm.Session`
+may expire any in-memory state that was created within the scope of the
+SAVEPOINT, while
+ensuring that when those expired objects are refreshed, the state of the
+object graph prior to the beginning of the SAVEPOINT will be available
+to re-load from the database.
+
+In modern versions of SQLAlchemy, when a SAVEPOINT initiated by
+:meth:`_orm.Session.begin_nested` is rolled back, in-memory object state that
+was modified since the SAVEPOINT was created
+is expired; however, other object state that was not altered since the SAVEPOINT
+began is maintained. This is so that subsequent operations can continue to make use of the
+otherwise unaffected data
+without the need for refreshing it from the database.

 .. seealso::

-    :class:`_engine.NestedTransaction` - the :class:`.NestedTransaction` class is the
-    Core-level construct that is used by the :class:`_orm.Session` internally
-    to produce SAVEPOINT blocks.
+    :meth:`_engine.Connection.begin_nested` - Core SAVEPOINT API

 ..
_orm_session_vs_engine: @@ -224,8 +265,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) conn.commit() @@ -234,11 +275,13 @@ Session:: Session = sessionmaker(engine, future=True) with Session() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) session.commit() Begin Once @@ -260,8 +303,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) # commits and closes automatically @@ -270,14 +313,15 @@ Session:: Session = sessionmaker(engine, future=True) with Session.begin() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) # commits and closes automatically - Nested Transaction ~~~~~~~~~~~~~~~~~~~~ @@ -299,8 +343,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) savepoint.commit() # or rollback @@ -312,17 +356,16 @@ Session:: with Session.begin() as session: savepoint = session.begin_nested() - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) savepoint.commit() # or rollback # commits automatically - - - .. _session_autocommit: .. _session_explicit_begin: @@ -359,8 +402,8 @@ point at which the "begin" operation occurs. To suit this, the try: item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" session.commit() except: session.rollback() @@ -373,8 +416,8 @@ The above pattern is more idiomatically invoked using a context manager:: with session.begin(): item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" The :meth:`_orm.Session.begin` method and the session's "autobegin" process use the same sequence of steps to begin the transaction. This includes @@ -413,6 +456,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -421,7 +465,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -431,12 +474,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. 
def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -451,8 +496,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -478,13 +525,13 @@ also :meth:`_orm.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy. To use two phase transactions set the flag ``twophase=True`` on the session:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -494,7 +541,6 @@ transactions set the flag ``twophase=True`` on the session:: # before committing both transactions session.commit() - .. _session_transaction_isolation: Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT @@ -543,13 +589,11 @@ in all cases, which is then used as the source of connectivity for a from sqlalchemy.orm import sessionmaker eng = create_engine( - "postgresql://scott:tiger@localhost/test", - isolation_level='REPEATABLE READ' + "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ" ) Session = sessionmaker(eng) - Another option, useful if there are to be two engines with different isolation levels at once, is to use the :meth:`_engine.Engine.execution_options` method, which will produce a shallow copy of the original :class:`_engine.Engine` which @@ -567,7 +611,6 @@ operations:: transactional_session = sessionmaker(eng) autocommit_session = sessionmaker(autocommit_engine) - Above, both "``eng``" and ``"autocommit_engine"`` share the same dialect and connection pool. However the "AUTOCOMMIT" mode will be set upon connections when they are acquired from the ``autocommit_engine``. The two @@ -620,7 +663,6 @@ methods:: with Session() as session: session.bind_mapper(User, autocommit_engine) - Setting Isolation for Individual Transactions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -643,7 +685,7 @@ level on a per-connection basis can be affected by using the # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a real # database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... @@ -675,15 +717,13 @@ the per-connection-transaction isolation level:: # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a # real database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... # outside the block, the transaction has been committed. the connection is # released and reverted to its previous isolation level. 
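As a quick sanity check, the isolation level actually in effect on the connection procured above can be reported using the Core :meth:`_engine.Connection.get_isolation_level` method; the sketch below assumes the same ``Session`` factory as the preceding examples::

    with Session() as sess:
        conn = sess.connection(
            execution_options={"isolation_level": "SERIALIZABLE"}
        )

        # queries the database for the isolation level now in effect
        print(conn.get_isolation_level())

        # ... work with sess under SERIALIZABLE isolation ...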
- - Tracking Transaction State with Events -------------------------------------- @@ -725,7 +765,8 @@ are reverted:: # global application scope. create Session class, engine Session = sessionmaker() - engine = create_engine('postgresql://...') + engine = create_engine("postgresql://...") + class SomeTest(TestCase): def setUp(self): @@ -735,11 +776,9 @@ are reverted:: # begin a non-ORM transaction self.trans = self.connection.begin() - # bind an individual Session to the connection self.session = Session(bind=self.connection) - ### optional ### # if the database supports SAVEPOINT (SQLite needs special diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst index fb52023420c..327957e9f60 100644 --- a/doc/build/orm/tutorial.rst +++ b/doc/build/orm/tutorial.rst @@ -72,7 +72,7 @@ Version Check A quick check to verify that we are on at least **version 1.4** of SQLAlchemy:: >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest:+SKIP + >>> sqlalchemy.__version__ # doctest:+SKIP 1.4.0 Connecting @@ -82,7 +82,7 @@ For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -146,7 +146,7 @@ the table name, and names and datatypes of columns:: >>> from sqlalchemy import Column, Integer, String >>> class User(Base): - ... __tablename__ = 'users' + ... __tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) @@ -154,8 +154,11 @@ the table name, and names and datatypes of columns:: ... nickname = Column(String) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) .. sidebar:: Tip @@ -196,7 +199,7 @@ our table, known as :term:`table metadata`. The object used by SQLAlchemy to r this information for a specific table is called the :class:`_schema.Table` object, and here Declarative has made one for us. We can see this object by inspecting the ``__table__`` attribute:: - >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE + >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE Table('users', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('name', String(), table=), @@ -269,21 +272,25 @@ the actual ``CREATE TABLE`` statement: without being instructed. 
For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column(Integer, Sequence('user_id_seq'), primary_key=True) + + Column(Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative mapping is therefore:: class User(Base): - __tablename__ = 'users' - id = Column(Integer, Sequence('user_id_seq'), primary_key=True) + __tablename__ = "users" + id = Column(Integer, Sequence("user_id_seq"), primary_key=True) name = Column(String(50)) fullname = Column(String(50)) nickname = Column(String(50)) def __repr__(self): return "" % ( - self.name, self.fullname, self.nickname) + self.name, + self.fullname, + self.nickname, + ) We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily @@ -296,7 +303,7 @@ Create an Instance of the Mapped Class With mappings complete, let's now create and inspect a ``User`` object:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> ed_user.name 'ed' >>> ed_user.nickname @@ -383,7 +390,7 @@ Adding and Updating Objects To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued @@ -401,7 +408,9 @@ added: .. sourcecode:: python+sql - {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE + {sql}>>> our_user = ( + ... session.query(User).filter_by(name="ed").first() + ... ) # doctest:+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) [...] ('ed', 'Ed Jones', 'edsnickname') @@ -440,16 +449,19 @@ We can add more ``User`` objects at once using .. sourcecode:: python+sql - >>> session.add_all([ - ... User(name='wendy', fullname='Wendy Williams', nickname='windy'), - ... User(name='mary', fullname='Mary Contrary', nickname='mary'), - ... User(name='fred', fullname='Fred Flintstone', nickname='freddy')]) + >>> session.add_all( + ... [ + ... User(name="wendy", fullname="Wendy Williams", nickname="windy"), + ... User(name="mary", fullname="Mary Contrary", nickname="mary"), + ... User(name="fred", fullname="Fred Flintstone", nickname="freddy"), + ... ] + ... ) Also, we've decided Ed's nickname isn't that great, so lets change it: .. sourcecode:: python+sql - >>> ed_user.nickname = 'eddie' + >>> ed_user.nickname = "eddie" The :class:`~sqlalchemy.orm.session.Session` is paying attention. It knows, for example, that ``Ed Jones`` has been modified: @@ -498,7 +510,7 @@ If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a va .. sourcecode:: python+sql - {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE + {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -535,20 +547,20 @@ we can roll back changes made too. Let's make two changes that we'll revert; .. sourcecode:: python+sql - >>> ed_user.name = 'Edwardo' + >>> ed_user.name = "Edwardo" and we'll add another erroneous user, ``fake_user``: .. 
sourcecode:: python+sql - >>> fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345') + >>> fake_user = User(name="fakeuser", fullname="Invalid", nickname="12345") >>> session.add(fake_user) Querying the session, we can see that they're flushed into the current transaction: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["Edwardo", "fakeuser"])).all() UPDATE users SET name=? WHERE users.id = ? [...] ('Edwardo', 1) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) @@ -588,7 +600,7 @@ issuing a SELECT illustrates the changes made to the database: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["ed", "fakeuser"])).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -655,7 +667,7 @@ class: .. sourcecode:: python+sql {sql}>>> for row in session.query(User, User.name).all(): - ... print(row.User, row.name) + ... print(row.User, row.name) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -675,8 +687,8 @@ is mapped to one (such as ``User.name``): .. sourcecode:: python+sql - {sql}>>> for row in session.query(User.name.label('name_label')).all(): - ... print(row.name_label) + {sql}>>> for row in session.query(User.name.label("name_label")).all(): + ... print(row.name_label) SELECT users.name AS name_label FROM users [...] (){stop} @@ -692,10 +704,10 @@ entities are present in the call to :meth:`~.Session.query`, can be controlled u .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased - >>> user_alias = aliased(User, name='user_alias') + >>> user_alias = aliased(User, name="user_alias") {sql}>>> for row in session.query(user_alias, user_alias.name).all(): - ... print(row.user_alias) + ... print(row.user_alias) SELECT user_alias.id AS user_alias_id, user_alias.name AS user_alias_name, user_alias.fullname AS user_alias_fullname, @@ -715,7 +727,7 @@ conjunction with ORDER BY: .. sourcecode:: python+sql {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: - ... print(u) + ... print(u) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -731,9 +743,8 @@ and filtering results, which is accomplished either with .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter_by(fullname='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter_by(fullname="Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -745,9 +756,8 @@ operators with the class-level attributes on your mapped class: .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.fullname=='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter(User.fullname == "Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -762,10 +772,10 @@ users named "ed" with a full name of "Ed Jones", you can call .. sourcecode:: python+sql - {sql}>>> for user in session.query(User).\ - ... filter(User.name=='ed').\ - ... filter(User.fullname=='Ed Jones'): - ... print(user) + {sql}>>> for user in ( + ... session.query(User).filter(User.name == "ed").filter(User.fullname == "Ed Jones") + ... ): + ... 
print(user) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -783,11 +793,11 @@ Here's a rundown of some of the most common operators used in * :meth:`equals <.ColumnOperators.__eq__>`:: - query.filter(User.name == 'ed') + query.filter(User.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - query.filter(User.name != 'ed') + query.filter(User.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -808,23 +818,21 @@ Here's a rundown of some of the most common operators used in * :meth:`IN <.ColumnOperators.in_>`:: - query.filter(User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(User.name.in_(["ed", "wendy", "jack"])) # works with query objects too: - query.filter(User.name.in_( - session.query(User.name).filter(User.name.like('%ed%')) - )) + query.filter(User.name.in_(session.query(User.name).filter(User.name.like("%ed%")))) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + query.filter( - tuple_(User.name, User.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(User.name, User.nickname).in_([("ed", "edsnickname"), ("wendy", "windy")]) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - query.filter(~User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(~User.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: @@ -886,7 +894,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id) + >>> query = session.query(User).filter(User.name.like("%ed")).order_by(User.id) {sql}>>> query.all() SELECT users.id AS users_id, users.name AS users_name, @@ -964,8 +972,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User.id).filter(User.name == 'ed').\ - ... order_by(User.id) + >>> query = session.query(User.id).filter(User.name == "ed").order_by(User.id) {sql}>>> query.scalar() SELECT users.id AS users_id FROM users @@ -988,9 +995,7 @@ by most applicable methods. For example, .. sourcecode:: python+sql >>> from sqlalchemy import text - {sql}>>> for user in session.query(User).\ - ... filter(text("id<224")).\ - ... order_by(text("id")).all(): + {sql}>>> for user in session.query(User).filter(text("id<224")).order_by(text("id")).all(): ... print(user.name) SELECT users.id AS users_id, users.name AS users_name, @@ -1010,8 +1015,9 @@ method: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(text("id<:value and name=:name")).\ - ... params(value=224, name='fred').order_by(User.id).one() + {sql}>>> session.query(User).filter(text("id<:value and name=:name")).params( + ... value=224, name="fred" + ... ).order_by(User.id).one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1029,8 +1035,9 @@ returned by the SQL statement based on column name: .. sourcecode:: python+sql - {sql}>>> session.query(User).from_statement( - ... text("SELECT * FROM users where name=:name")).params(name='ed').all() + {sql}>>> session.query(User).from_statement(text("SELECT * FROM users where name=:name")).params( + ... name="ed" + ... ).all() SELECT * FROM users where name=? [...] ('ed',) {stop}[] @@ -1041,10 +1048,9 @@ columns are passed in the desired order to :meth:`_expression.TextClause.columns .. sourcecode:: python+sql - >>> stmt = text("SELECT name, id, fullname, nickname " - ... 
"FROM users where name=:name") + >>> stmt = text("SELECT name, id, fullname, nickname " "FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id, User.fullname, User.nickname) - {sql}>>> session.query(User).from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User).from_statement(stmt).params(name="ed").all() SELECT name, id, fullname, nickname FROM users where name=? [...] ('ed',) {stop}[] @@ -1058,8 +1064,7 @@ any other case: >>> stmt = text("SELECT name, id FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id) - {sql}>>> session.query(User.id, User.name).\ - ... from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User.id, User.name).from_statement(stmt).params(name="ed").all() SELECT name, id FROM users where name=? [...] ('ed',) {stop}[(1, u'ed')] @@ -1077,7 +1082,7 @@ counting called :meth:`_query.Query.count`: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.like('%ed')).count() + {sql}>>> session.query(User).filter(User.name.like("%ed")).count() SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, @@ -1125,7 +1130,7 @@ To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. sourcecode:: python+sql - {sql}>>> session.query(func.count('*')).select_from(User).scalar() + {sql}>>> session.query(func.count("*")).select_from(User).scalar() SELECT count(?) AS count_1 FROM users [...] ('*',) @@ -1160,18 +1165,17 @@ declarative, we define this table along with its mapped class, ``Address``: >>> from sqlalchemy.orm import relationship >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): ... return "" % self.email_address - >>> User.addresses = relationship( - ... "Address", order_by=Address.id, back_populates="user") + >>> User.addresses = relationship("Address", order_by=Address.id, back_populates="user") The above class introduces the :class:`_schema.ForeignKey` construct, which is a directive applied to :class:`_schema.Column` that indicates that values in this @@ -1269,7 +1273,7 @@ default, the collection is a Python list. .. sourcecode:: python+sql - >>> jack = User(name='jack', fullname='Jack Bean', nickname='gjffdd') + >>> jack = User(name="jack", fullname="Jack Bean", nickname="gjffdd") >>> jack.addresses [] @@ -1279,8 +1283,9 @@ just assign a full list directly: .. sourcecode:: python+sql >>> jack.addresses = [ - ... Address(email_address='jack@google.com'), - ... Address(email_address='j25@yahoo.com')] + ... Address(email_address="jack@google.com"), + ... Address(email_address="j25@yahoo.com"), + ... ] When using a bidirectional relationship, elements added in one direction automatically become visible in the other direction. This behavior occurs @@ -1316,8 +1321,7 @@ Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addre .. sourcecode:: python+sql - {sql}>>> jack = session.query(User).\ - ... filter_by(name='jack').one() + {sql}>>> jack = session.query(User).filter_by(name="jack").one() BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -1366,10 +1370,12 @@ Below we load the ``User`` and ``Address`` entities at once using this method: .. 
sourcecode:: python+sql - {sql}>>> for u, a in session.query(User, Address).\ - ... filter(User.id==Address.user_id).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all(): + {sql}>>> for u, a in ( + ... session.query(User, Address) + ... .filter(User.id == Address.user_id) + ... .filter(Address.email_address == "jack@google.com") + ... .all() + ... ): ... print(u) ... print(a) SELECT users.id AS users_id, @@ -1391,9 +1397,9 @@ using the :meth:`_query.Query.join` method: .. sourcecode:: python+sql - {sql}>>> session.query(User).join(Address).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all() + {sql}>>> session.query(User).join(Address).filter( + ... Address.email_address == "jack@google.com" + ... ).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1408,15 +1414,17 @@ and ``Address`` because there's only one foreign key between them. If there were no foreign keys, or several, :meth:`_query.Query.join` works better when one of the following forms are used:: - query.join(Address, User.id==Address.user_id) # explicit condition - query.join(User.addresses) # specify relationship from left to right - query.join(Address, User.addresses) # same, with explicit target - query.join(User.addresses.and_(Address.name != 'foo')) # use relationship + additional ON criteria + query.join(Address, User.id == Address.user_id) # explicit condition + query.join(User.addresses) # specify relationship from left to right + query.join(Address, User.addresses) # same, with explicit target + query.join( + User.addresses.and_(Address.name != "foo") + ) # use relationship + additional ON criteria As you would expect, the same idea is used for "outer" joins, using the :meth:`_query.Query.outerjoin` function:: - query.outerjoin(User.addresses) # LEFT OUTER JOIN + query.outerjoin(User.addresses) # LEFT OUTER JOIN The reference documentation for :meth:`_query.Query.join` contains detailed information and examples of the calling styles accepted by this method; :meth:`_query.Query.join` @@ -1431,7 +1439,6 @@ is an important method at the center of usage for any SQL-fluent application. query = session.query(User, Address).select_from(Address).join(User) - .. _ormtutorial_aliases: Using Aliases @@ -1453,12 +1460,13 @@ distinct email addresses at the same time: >>> from sqlalchemy.orm import aliased >>> adalias1 = aliased(Address) >>> adalias2 = aliased(Address) - {sql}>>> for username, email1, email2 in \ - ... session.query(User.name, adalias1.email_address, adalias2.email_address).\ - ... join(User.addresses.of_type(adalias1)).\ - ... join(User.addresses.of_type(adalias2)).\ - ... filter(adalias1.email_address=='jack@google.com').\ - ... filter(adalias2.email_address=='j25@yahoo.com'): + {sql}>>> for username, email1, email2 in ( + ... session.query(User.name, adalias1.email_address, adalias2.email_address) + ... .join(User.addresses.of_type(adalias1)) + ... .join(User.addresses.of_type(adalias2)) + ... .filter(adalias1.email_address == "jack@google.com") + ... .filter(adalias2.email_address == "j25@yahoo.com") + ... ): ... print(username, email1, email2) SELECT users.name AS users_name, addresses_1.email_address AS addresses_1_email_address, @@ -1501,9 +1509,11 @@ representing the statement generated by a particular construct, which are described in :ref:`sqlexpression_toplevel`:: >>> from sqlalchemy.sql import func - >>> stmt = session.query(Address.user_id, func.count('*').\ - ... label('address_count')).\ - ... 
group_by(Address.user_id).subquery() + >>> stmt = ( + ... session.query(Address.user_id, func.count("*").label("address_count")) + ... .group_by(Address.user_id) + ... .subquery() + ... ) The ``func`` keyword generates SQL functions, and the ``subquery()`` method on :class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct @@ -1517,8 +1527,11 @@ accessible through an attribute called ``c``: .. sourcecode:: python+sql - {sql}>>> for u, count in session.query(User, stmt.c.address_count).\ - ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): + {sql}>>> for u, count in ( + ... session.query(User, stmt.c.address_count) + ... .outerjoin(stmt, User.id == stmt.c.user_id) + ... .order_by(User.id) + ... ): ... print(u, count) SELECT users.id AS users_id, users.name AS users_name, @@ -1546,12 +1559,11 @@ to associate an "alias" of a mapped class to a subquery: .. sourcecode:: python+sql - {sql}>>> stmt = session.query(Address).\ - ... filter(Address.email_address != 'j25@yahoo.com').\ - ... subquery() + {sql}>>> stmt = ( + ... session.query(Address).filter(Address.email_address != "j25@yahoo.com").subquery() + ... ) >>> addr_alias = aliased(Address, stmt) - >>> for user, address in session.query(User, addr_alias).\ - ... join(addr_alias, User.addresses): + >>> for user, address in session.query(User, addr_alias).join(addr_alias, User.addresses): ... print(user) ... print(address) SELECT users.id AS users_id, @@ -1585,8 +1597,8 @@ There is an explicit EXISTS construct, which looks like this: .. sourcecode:: python+sql >>> from sqlalchemy.sql import exists - >>> stmt = exists().where(Address.user_id==User.id) - {sql}>>> for name, in session.query(User.name).filter(stmt): + >>> stmt = exists().where(Address.user_id == User.id) + {sql}>>> for (name,) in session.query(User.name).filter(stmt): ... print(name) SELECT users.name AS users_name FROM users @@ -1602,8 +1614,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any()): + {sql}>>> for (name,) in session.query(User.name).filter(User.addresses.any()): ... print(name) SELECT users.name AS users_name FROM users @@ -1617,8 +1628,9 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any(Address.email_address.like('%google%'))): + {sql}>>> for (name,) in session.query(User.name).filter( + ... User.addresses.any(Address.email_address.like("%google%")) + ... ): ... print(name) SELECT users.name AS users_name FROM users @@ -1634,8 +1646,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> session.query(Address).\ - ... 
filter(~Address.user.has(User.name=='jack')).all() + {sql}>>> session.query(Address).filter(~Address.user.has(User.name == "jack")).all() SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id @@ -1671,18 +1682,18 @@ and behavior: * :meth:`~.RelationshipProperty.Comparator.any` (used for collections):: - query.filter(User.addresses.any(Address.email_address == 'bar')) + query.filter(User.addresses.any(Address.email_address == "bar")) # also takes keyword arguments: - query.filter(User.addresses.any(email_address='bar')) + query.filter(User.addresses.any(email_address="bar")) * :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references):: - query.filter(Address.user.has(name='ed')) + query.filter(Address.user.has(name="ed")) * :meth:`_query.Query.with_parent` (used for any relationship):: - session.query(Address).with_parent(someuser, 'addresses') + session.query(Address).with_parent(someuser, "addresses") Eager Loading ============= @@ -1710,9 +1721,12 @@ at once: .. sourcecode:: python+sql >>> from sqlalchemy.orm import selectinload - {sql}>>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter_by(name="jack") + ... .one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1749,9 +1763,9 @@ will emit the extra join regardless: >>> from sqlalchemy.orm import joinedload - {sql}>>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1812,11 +1826,13 @@ attribute: .. sourcecode:: python+sql >>> from sqlalchemy.orm import contains_eager - {sql}>>> jacks_addresses = session.query(Address).\ - ... join(Address.user).\ - ... filter(User.name=='jack').\ - ... options(contains_eager(Address.user)).\ - ... all() + {sql}>>> jacks_addresses = ( + ... session.query(Address) + ... .join(Address.user) + ... .filter(User.name == "jack") + ... .options(contains_eager(Address.user)) + ... .all() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1846,7 +1862,7 @@ in the session, then we'll issue a ``count`` query to see that no rows remain: .. sourcecode:: python+sql >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() UPDATE addresses SET user_id=? WHERE addresses.id = ? [...] ((None, 1), (None, 2)) DELETE FROM users WHERE users.id = ? @@ -1866,8 +1882,8 @@ So far, so good. How about Jack's ``Address`` objects ? .. sourcecode:: python+sql {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) - ... ).count() + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) + ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1905,28 +1921,32 @@ Next we'll declare the ``User`` class, adding in the ``addresses`` relationship including the cascade configuration (we'll leave the constructor out too):: >>> class User(Base): - ... __tablename__ = 'users' + ... 
__tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... nickname = Column(String) ... - ... addresses = relationship("Address", back_populates='user', - ... cascade="all, delete, delete-orphan") + ... addresses = relationship( + ... "Address", back_populates="user", cascade="all, delete, delete-orphan" + ... ) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) Then we recreate ``Address``, noting that in this case we've created the ``Address.user`` relationship via the ``User`` class already:: >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): @@ -1963,7 +1983,7 @@ being deleted: # only one address remains {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() DELETE FROM addresses WHERE addresses.id = ? [...] (2,) @@ -1983,7 +2003,7 @@ with the user: >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() DELETE FROM addresses WHERE addresses.id = ? [...] (1,) DELETE FROM users WHERE users.id = ? @@ -1999,7 +2019,7 @@ with the user: {stop}0 {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, @@ -2032,9 +2052,11 @@ to serve as the association table. This looks like the following:: >>> from sqlalchemy import Table, Text >>> # association table - >>> post_keywords = Table('post_keywords', Base.metadata, - ... Column('post_id', ForeignKey('posts.id'), primary_key=True), - ... Column('keyword_id', ForeignKey('keywords.id'), primary_key=True) + >>> post_keywords = Table( + ... "post_keywords", + ... Base.metadata, + ... Column("post_id", ForeignKey("posts.id"), primary_key=True), + ... Column("keyword_id", ForeignKey("keywords.id"), primary_key=True), ... ) Above, we can see declaring a :class:`_schema.Table` directly is a little different @@ -2048,17 +2070,15 @@ Next we define ``BlogPost`` and ``Keyword``, using complementary table as an association table:: >>> class BlogPost(Base): - ... __tablename__ = 'posts' + ... __tablename__ = "posts" ... ... id = Column(Integer, primary_key=True) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword - ... keywords = relationship('Keyword', - ... secondary=post_keywords, - ... back_populates='posts') + ... keywords = relationship("Keyword", secondary=post_keywords, back_populates="posts") ... ... def __init__(self, headline, body, author): ... self.author = author @@ -2070,13 +2090,11 @@ table as an association table:: >>> class Keyword(Base): - ... __tablename__ = 'keywords' + ... __tablename__ = "keywords" ... 
... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) - ... posts = relationship('BlogPost', - ... secondary=post_keywords, - ... back_populates='keywords') + ... posts = relationship("BlogPost", secondary=post_keywords, back_populates="keywords") ... ... def __init__(self, keyword): ... self.keyword = keyword @@ -2144,9 +2162,7 @@ Usage is not too different from what we've been doing. Let's give Wendy some bl .. sourcecode:: python+sql - {sql}>>> wendy = session.query(User).\ - ... filter_by(name='wendy').\ - ... one() + {sql}>>> wendy = session.query(User).filter_by(name="wendy").one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -2163,8 +2179,8 @@ have any yet, so we can just create them: .. sourcecode:: python+sql - >>> post.keywords.append(Keyword('wendy')) - >>> post.keywords.append(Keyword('firstpost')) + >>> post.keywords.append(Keyword("wendy")) + >>> post.keywords.append(Keyword("firstpost")) We can now look up all blog posts with the keyword 'firstpost'. We'll use the ``any`` operator to locate "blog posts where any of its keywords has the @@ -2172,9 +2188,7 @@ keyword string 'firstpost'": .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword="firstpost")).all() INSERT INTO keywords (keyword) VALUES (?) [...] ('wendy',) INSERT INTO keywords (keyword) VALUES (?) @@ -2201,10 +2215,9 @@ the query to narrow down to that ``User`` object as a parent: .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.author==wendy).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.author == wendy).filter( + ... BlogPost.keywords.any(keyword="firstpost") + ... ).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, @@ -2223,9 +2236,7 @@ relationship, to query straight from there: .. sourcecode:: python+sql - {sql}>>> wendy.posts.\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword="firstpost")).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index a141df6a0cd..8cb827f8105 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -45,7 +45,7 @@ transaction). .. seealso:: - `Repeatable Read Isolation Level `_ - PostgreSQL's implementation of repeatable read, including a description of the error condition. + `Repeatable Read Isolation Level `_ - PostgreSQL's implementation of repeatable read, including a description of the error condition. Simple Version Counting ----------------------- @@ -55,15 +55,13 @@ to the mapped table, then establish it as the ``version_id_col`` within the mapper options:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_id = Column(Integer, nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - "version_id_col": version_id - } + __mapper_args__ = {"version_id_col": version_id} .. note:: It is **strongly recommended** that the ``version_id`` column be made NOT NULL. 
The versioning feature **does not support** a NULL @@ -105,16 +103,17 @@ support a native GUID type, but we illustrate here using a simple string):: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator':lambda version: uuid.uuid4().hex + "version_id_col": version_uuid, + "version_id_generator": lambda version: uuid.uuid4().hex, } The persistence engine will call upon ``uuid.uuid4()`` each time a @@ -141,24 +140,22 @@ some means of generating new identifiers when a row is subject to an INSERT as well as with an UPDATE. For the UPDATE case, typically an update trigger is needed, unless the database in question supports some other native version identifier. The PostgreSQL database in particular supports a system -column called `xmin `_ +column called `xmin `_ which provides UPDATE versioning. We can make use of the PostgreSQL ``xmin`` column to version our ``User`` class as follows:: from sqlalchemy import FetchedValue + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) xmin = Column("xmin", String, system=True, server_default=FetchedValue()) - __mapper_args__ = { - 'version_id_col': xmin, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False} With the above mapping, the ORM will rely upon the ``xmin`` column for automatically providing the new value of the version id counter. @@ -222,26 +219,25 @@ at our choosing:: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False} + - u1 = User(name='u1', version_uuid=uuid.uuid4()) + u1 = User(name="u1", version_uuid=uuid.uuid4().hex) session.add(u1) session.commit() - u1.name = 'u2' - u1.version_uuid = uuid.uuid4() + u1.name = "u2" + u1.version_uuid = uuid.uuid4().hex session.commit() @@ -252,7 +248,7 @@ for schemes where only certain classes of UPDATE are sensitive to concurrency issues:: # will leave version_uuid unchanged - u1.name = 'u3' + u1.name = "u3" session.commit() .. versionadded:: 0.9.0 diff --git a/doc/build/requirements.txt b/doc/build/requirements.txt index f3e40e01fd9..6588bf3665d 100644 --- a/doc/build/requirements.txt +++ b/doc/build/requirements.txt @@ -1,3 +1,4 @@ git+https://github.com/sqlalchemyorg/changelog.git#egg=changelog git+https://github.com/sqlalchemyorg/sphinx-paramlinks.git#egg=sphinx-paramlinks git+https://github.com/sqlalchemyorg/zzzeeksphinx.git#egg=zzzeeksphinx +sphinx-copybutton==0.5.1 diff --git a/doc/build/tutorial/data.rst b/doc/build/tutorial/data.rst index 1d5dde7b847..3242710a928 100644 --- a/doc/build/tutorial/data.rst +++ b/doc/build/tutorial/data.rst @@ -5,6 +5,8 @@ .. include:: tutorial_nav_include.rst +.. rst-class:: core-header, orm-addin + .. 
_tutorial_working_with_data: Working with Data diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 90180154b7d..765b6890b6e 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -5,8 +5,7 @@ .. include:: tutorial_nav_include.rst - -.. rst-class:: core-header +.. rst-class:: core-header, orm-addin .. _tutorial_core_insert: @@ -34,7 +33,7 @@ A simple example of :class:`_sql.Insert` illustrating the target table and the VALUES clause at once:: >>> from sqlalchemy import insert - >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants") + >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants") The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most SQL expressions can be stringified in place as a means to see the general @@ -121,8 +120,8 @@ illustrate this: ... insert(user_table), ... [ ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"} - ... ] + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -166,19 +165,19 @@ construct automatically. >>> from sqlalchemy import select, bindparam >>> scalar_subq = ( - ... select(user_table.c.id). - ... where(user_table.c.name==bindparam('username')). - ... scalar_subquery() + ... select(user_table.c.id) + ... .where(user_table.c.name == bindparam("username")) + ... .scalar_subquery() ... ) >>> with engine.connect() as conn: ... result = conn.execute( ... insert(address_table).values(user_id=scalar_subq), ... [ - ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"}, - ... ] + ... {"username": "spongebob", "email_address": "spongebob@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -220,7 +219,9 @@ method; in this case, the :class:`_engine.Result` object that's returned when the statement is executed has rows which can be fetched:: - >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address) + >>> insert_stmt = insert(address_table).returning( + ... address_table.c.id, address_table.c.email_address + ... ) >>> print(insert_stmt) {opensql}INSERT INTO address (id, user_id, email_address) VALUES (:id, :user_id, :email_address) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index 9f7aafc1b2e..a47b0ca4edc 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -36,7 +36,7 @@ each method builds more state onto the object. Like the other SQL constructs, it can be stringified in place:: >>> from sqlalchemy import select - >>> stmt = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt = select(user_table).where(user_table.c.name == "spongebob") >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -71,7 +71,7 @@ elements within each row: .. sourcecode:: pycon+sql - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") >>> with Session(engine) as session: ... for row in session.execute(stmt): ... 
print(row) @@ -156,6 +156,20 @@ The above :class:`_engine.Row` has just one element, representing the ``User`` e >>> row[0] User(id=1, name='spongebob', fullname='Spongebob Squarepants') +A highly recommended convenience method of achieving the same result as above +is to use the :meth:`_orm.Session.scalars` method to execute the statement +directly; this method will return a :class:`_result.ScalarResult` object +that delivers the first "column" of each row at once, in this case, +instances of the ``User`` class:: + + >>> user = session.scalars(select(User)).first() + {opensql}SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] (){stop} + >>> user + User(id=1, name='spongebob', fullname='Spongebob Squarepants') + + Alternatively, we can select individual columns of an ORM entity as distinct elements within result rows, by using the class-bound attributes; when these are passed to a construct such as :func:`_sql.select`, they are resolved into @@ -182,9 +196,7 @@ attribute of the ``User`` entity as the first element of the row, and combine it with full ``Address`` entities in the second element:: >>> session.execute( - ... select(User.name, Address). - ... where(User.id==Address.user_id). - ... order_by(Address.id) + ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id) ... ).all() {opensql}SELECT user_account.name, address.id, address.email_address, address.user_id FROM user_account, address @@ -212,11 +224,9 @@ when referring to arbitrary SQL expressions in a result row by name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, cast - >>> stmt = ( - ... select( - ... ("Username: " + user_table.c.name).label("username"), - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select( + ... ("Username: " + user_table.c.name).label("username"), + ... ).order_by(user_table.c.name) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): ... print(f"{row.username}") @@ -234,7 +244,7 @@ when referring to arbitrary SQL expressions in a result row by name: :ref:`tutorial_order_by_label` - the label names we create may also be referred towards in the ORDER BY or GROUP BY clause of the :class:`_sql.Select`. -.. _tutorial_select_arbtrary_text: +.. _tutorial_select_arbitrary_text: Selecting with Textual Column Expressions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -255,11 +265,7 @@ a hardcoded string literal ``'some label'`` and embed it within the SELECT statement:: >>> from sqlalchemy import text - >>> stmt = ( - ... select( - ... text("'some phrase'"), user_table.c.name - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name) >>> with engine.connect() as conn: ... print(conn.execute(stmt).all()) {opensql}BEGIN (implicit) @@ -281,10 +287,8 @@ towards in subqueries and other expressions:: >>> from sqlalchemy import literal_column - >>> stmt = ( - ... select( - ... literal_column("'some phrase'").label("p"), user_table.c.name - ... ).order_by(user_table.c.name) + >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by( + ... user_table.c.name ... ) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): @@ -316,7 +320,7 @@ conjunction with Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. 
generate new SQL Expression objects, rather than plain boolean ``True``/``False`` values:: - >>> print(user_table.c.name == 'squidward') + >>> print(user_table.c.name == "squidward") user_account.name = :name_1 >>> print(address_table.c.user_id > 10) @@ -326,7 +330,7 @@ SQL Expression objects, rather than plain boolean ``True``/``False`` values:: We can use expressions like these to generate the WHERE clause by passing the resulting objects to the :meth:`_sql.Select.where` method:: - >>> print(select(user_table).where(user_table.c.name == 'squidward')) + >>> print(select(user_table).where(user_table.c.name == "squidward")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 @@ -336,9 +340,9 @@ To produce multiple expressions joined by AND, the :meth:`_sql.Select.where` method may be invoked any number of times:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) ... ) {opensql}SELECT address.email_address FROM address, user_account @@ -348,10 +352,8 @@ A single call to :meth:`_sql.Select.where` also accepts multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id ... ) ... ) {opensql}SELECT address.email_address @@ -364,11 +366,10 @@ of ORM entities:: >>> from sqlalchemy import and_, or_ >>> print( - ... select(Address.email_address). - ... where( + ... select(Address.email_address).where( ... and_( - ... or_(User.name == 'squidward', User.name == 'sandy'), - ... Address.user_id == User.id + ... or_(User.name == "squidward", User.name == "sandy"), + ... Address.user_id == User.id, ... ) ... ) ... ) @@ -382,9 +383,7 @@ popular method known as :meth:`_sql.Select.filter_by` which accepts keyword arguments that match to column keys or ORM attribute names. It will filter against the leftmost FROM clause or the last entity joined:: - >>> print( - ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants') - ... ) + >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1 @@ -426,8 +425,9 @@ method, which allows us to indicate the left and right side of the JOIN explicitly:: >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join_from(user_table, address_table) + ... select(user_table.c.name, address_table.c.email_address).join_from( + ... user_table, address_table + ... ) ... ) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -436,10 +436,7 @@ explicitly:: The other is the the :meth:`_sql.Select.join` method, which indicates only the right side of the JOIN, the left hand-side is inferred:: - >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join(address_table) - ... 
) + >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table)) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -456,10 +453,7 @@ where we establish ``user_table`` as the first element in the FROM clause and :meth:`_sql.Select.join` to establish ``address_table`` as the second:: - >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table).join(address_table) - ... ) + >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -470,9 +464,7 @@ FROM clause. For example, to SELECT from the common SQL expression produce the SQL ``count()`` function:: >>> from sqlalchemy import func - >>> print ( - ... select(func.count('*')).select_from(user_table) - ... ) + >>> print(select(func.count("*")).select_from(user_table)) {opensql}SELECT count(:count_2) AS count_1 FROM user_account @@ -501,9 +493,9 @@ accept an additional argument for the ON clause, which is stated using the same SQL Expression mechanics as we saw about in :ref:`tutorial_select_where_clause`:: >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table). - ... join(address_table, user_table.c.id == address_table.c.user_id) + ... select(address_table.c.email_address) + ... .select_from(user_table) + ... .join(address_table, user_table.c.id == address_table.c.user_id) ... ) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -525,15 +517,11 @@ accept keyword arguments :paramref:`_sql.Select.join.isouter` and :paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN and FULL OUTER JOIN, respectively:: - >>> print( - ... select(user_table).join(address_table, isouter=True) - ... ) + >>> print(select(user_table).join(address_table, isouter=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop} - >>> print( - ... select(user_table).join(address_table, full=True) - ... ) + >>> print(select(user_table).join(address_table, full=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop} @@ -630,10 +618,10 @@ than one address: >>> with engine.connect() as conn: ... result = conn.execute( - ... select(User.name, func.count(Address.id).label("count")). - ... join(Address). - ... group_by(User.name). - ... having(func.count(Address.id) > 1) + ... select(User.name, func.count(Address.id).label("count")) + ... .join(Address) + ... .group_by(User.name) + ... .having(func.count(Address.id) > 1) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -663,10 +651,11 @@ error if no match is found. The unary modifiers .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... Address.user_id, - ... func.count(Address.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(Address.user_id, func.count(Address.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) >>> print(stmt) {opensql}SELECT address.user_id, count(address.id) AS num_addresses FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC @@ -693,8 +682,9 @@ below for example returns all unique pairs of user names:: >>> user_alias_1 = user_table.alias() >>> user_alias_2 = user_table.alias() >>> print( - ... select(user_alias_1.c.name, user_alias_2.c.name). - ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id) + ... select(user_alias_1.c.name, user_alias_2.c.name).join_from( + ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id + ... ) ... ) {opensql}SELECT user_account_1.name, user_account_2.name AS name_1 FROM user_account AS user_account_1 @@ -716,11 +706,11 @@ while maintaining ORM functionality. The SELECT below selects from the >>> address_alias_1 = aliased(Address) >>> address_alias_2 = aliased(Address) >>> print( - ... select(User). - ... join_from(User, address_alias_1). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join_from(User, address_alias_2). - ... where(address_alias_2.email_address == 'patrick@gmail.com') + ... select(User) + ... .join_from(User, address_alias_1) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join_from(User, address_alias_2) + ... .where(address_alias_2.email_address == "patrick@gmail.com") ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -761,10 +751,11 @@ We can construct a :class:`_sql.Subquery` that will select an aggregate count of rows from the ``address`` table (aggregate functions and GROUP BY were introduced previously at :ref:`tutorial_group_by_w_aggregates`): - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).subquery() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .subquery() + ... ) Stringifying the subquery by itself without it being embedded inside of another :class:`_sql.Select` or other statement produces the plain SELECT statement @@ -790,11 +781,9 @@ With a selection of rows contained within the ``subq`` object, we can apply the object to a larger :class:`_sql.Select` that will join the data to the ``user_account`` table:: - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}SELECT user_account.name, user_account.fullname, anon_1.count @@ -820,16 +809,15 @@ the invocation of the :meth:`_sql.Select.subquery` method to use element in the same way, but the SQL rendered is the very different common table expression syntax:: - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).cte() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .cte() + ... ) - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... 
) >>> print(stmt) {opensql}WITH anon_1 AS @@ -880,9 +868,13 @@ each ``Address`` object ultimately came from a subquery against the .. sourcecode:: python+sql - >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery() + >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery() >>> address_subq = aliased(Address, subq) - >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id) + >>> stmt = ( + ... select(User, address_subq) + ... .join_from(User, address_subq) + ... .order_by(User.id, address_subq.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -905,9 +897,13 @@ Another example follows, which is exactly the same except it makes use of the .. sourcecode:: python+sql - >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte() + >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte() >>> address_cte = aliased(Address, cte_obj) - >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id) + >>> stmt = ( + ... select(User, address_cte) + ... .join_from(User, address_cte) + ... .order_by(User.id, address_cte.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -954,9 +950,11 @@ subquery is indicated explicitly by making use of the :meth:`_sql.Select.scalar_ method as below. It's default string form when stringified by itself renders as an ordinary SELECT statement that is selecting from two tables:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... ) >>> print(subq) {opensql}(SELECT count(address.id) AS count_1 FROM address, user_account @@ -989,13 +987,13 @@ Simple correlated subqueries will usually do the right thing that's desired. However, in the case where the correlation is ambiguous, SQLAlchemy will let us know that more clarity is needed:: - >>> stmt = select( - ... user_table.c.name, - ... address_table.c.email_address, - ... subq.label("address_count") - ... ).\ - ... join_from(user_table, address_table).\ - ... order_by(user_table.c.id, address_table.c.id) + >>> stmt = ( + ... select( + ... user_table.c.name, address_table.c.email_address, subq.label("address_count") + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) + ... ) >>> print(stmt) Traceback (most recent call last): ... @@ -1007,9 +1005,12 @@ To specify that the ``user_table`` is the one we seek to correlate we specify this using the :meth:`_sql.ScalarSelect.correlate` or :meth:`_sql.ScalarSelect.correlate_except` methods:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery().correlate(user_table) + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... .correlate(user_table) + ... ) The statement then can return the data for this column like any other: @@ -1020,10 +1021,10 @@ The statement then can return the data for this column like any other: ... select( ... user_table.c.name, ... address_table.c.email_address, - ... 
subq.label("address_count") - ... ). - ... join_from(user_table, address_table). - ... order_by(user_table.c.id, address_table.c.id) + ... subq.label("address_count"), + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -1036,6 +1037,73 @@ The statement then can return the data for this column like any other: ('sandy', 'sandy@squirrelpower.org', 2)] {opensql}ROLLBACK{stop} + +.. _tutorial_lateral_correlation: + +LATERAL correlation +~~~~~~~~~~~~~~~~~~~ + +LATERAL correlation is a special sub-category of SQL correlation which +allows a selectable unit to refer to another selectable unit within a +single FROM clause. This is an extremely special use case which, while +part of the SQL standard, is only known to be supported by recent +versions of PostgreSQL. + +Normally, if a SELECT statement refers to +``table1 JOIN (SELECT ...) AS subquery`` in its FROM clause, the subquery +on the right side may not refer to the "table1" expression from the left side; +correlation may only refer to a table that is part of another SELECT that +entirely encloses this SELECT. The LATERAL keyword allows us to turn this +behavior around and allow correlation from the right side JOIN. + +SQLAlchemy supports this feature using the :meth:`_expression.Select.lateral` +method, which creates an object known as :class:`.Lateral`. :class:`.Lateral` +is in the same family as :class:`.Subquery` and :class:`.Alias`, but also +includes correlation behavior when the construct is added to the FROM clause of +an enclosing SELECT. The following example illustrates a SQL query that makes +use of LATERAL, selecting the "user account / count of email address" data as +was discussed in the previous section:: + + >>> subq = ( + ... select( + ... func.count(address_table.c.id).label("address_count"), + ... address_table.c.email_address, + ... address_table.c.user_id, + ... ) + ... .where(user_table.c.id == address_table.c.user_id) + ... .lateral() + ... ) + >>> stmt = ( + ... select(user_table.c.name, subq.c.address_count, subq.c.email_address) + ... .join_from(user_table, subq) + ... .order_by(user_table.c.id, subq.c.email_address) + ... ) + >>> print(stmt) + {opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address + FROM user_account + JOIN LATERAL (SELECT count(address.id) AS address_count, + address.email_address AS email_address, address.user_id AS user_id + FROM address + WHERE user_account.id = address.user_id) AS anon_1 + ON user_account.id = anon_1.user_id + ORDER BY user_account.id, anon_1.email_address + +Above, the right side of the JOIN is a subquery that correlates to the +``user_account`` table that's on the left side of the join. + +When using :meth:`_expression.Select.lateral`, the behavior of +:meth:`_expression.Select.correlate` and +:meth:`_expression.Select.correlate_except` methods is applied to the +:class:`.Lateral` construct as well. + +.. seealso:: + + :class:`_expression.Lateral` + + :meth:`_expression.Select.lateral` + + + .. _tutorial_union: UNION, UNION ALL and other set operations @@ -1060,8 +1128,8 @@ that it has fewer methods. 
The :class:`_sql.CompoundSelect` produced by :meth:`_engine.Connection.execute`:: >>> from sqlalchemy import union_all - >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy') - >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt1 = select(user_table).where(user_table.c.name == "sandy") + >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob") >>> u = union_all(stmt1, stmt2) >>> with engine.connect() as conn: ... result = conn.execute(u) @@ -1084,9 +1152,9 @@ collection that may be referred towards in an enclosing :func:`_sql.select`:: >>> u_subq = u.subquery() >>> stmt = ( - ... select(u_subq.c.name, address_table.c.email_address). - ... join_from(address_table, u_subq). - ... order_by(u_subq.c.name, address_table.c.email_address) + ... select(u_subq.c.name, address_table.c.email_address) + ... .join_from(address_table, u_subq) + ... .order_by(u_subq.c.name, address_table.c.email_address) ... ) >>> with engine.connect() as conn: ... result = conn.execute(stmt) @@ -1121,8 +1189,8 @@ object that represents the SELECT / UNION / etc statement we want to execute; this statement should be composed against the target ORM entities or their underlying mapped :class:`_schema.Table` objects:: - >>> stmt1 = select(User).where(User.name == 'sandy') - >>> stmt2 = select(User).where(User.name == 'spongebob') + >>> stmt1 = select(User).where(User.name == "sandy") + >>> stmt2 = select(User).where(User.name == "spongebob") >>> u = union_all(stmt1, stmt2) For a simple SELECT with UNION that is not already nested inside of a @@ -1196,15 +1264,13 @@ can return ``user_account`` rows that have more than one related row in .. sourcecode:: pycon+sql >>> subq = ( - ... select(func.count(address_table.c.id)). - ... where(user_table.c.id == address_table.c.user_id). - ... group_by(address_table.c.user_id). - ... having(func.count(address_table.c.id) > 1) + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .having(func.count(address_table.c.id) > 1) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1226,13 +1292,10 @@ clause: .. sourcecode:: pycon+sql >>> subq = ( - ... select(address_table.c.id). - ... where(user_table.c.id == address_table.c.user_id) + ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(~subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(~subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1244,6 +1307,7 @@ clause: [('patrick',)] {opensql}ROLLBACK{stop} + .. _tutorial_functions: Working with SQL Functions @@ -1332,11 +1396,18 @@ as opposed to the "return type" of a Python function. 
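For example, a minimal sketch using the ``user_table`` :class:`_schema.Table` from earlier in this tutorial illustrates that invoking an element of ``func`` only builds a SQL expression construct; the function itself is evaluated by the database when the statement is executed::

    >>> from sqlalchemy import func, select
    >>> # count() is rendered as SQL, not called in Python
    >>> count_expr = func.count(user_table.c.id)
    >>> print(select(count_expr))
    {opensql}SELECT count(user_account.id) AS count_1
    FROM user_account

When such a statement is executed, the database computes the count and returns it as an ordinary Python value in the result row.
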
The SQL return type of any SQL function may be accessed, typically for debugging purposes, by referring to the :attr:`_functions.Function.type` -attribute:: +attribute; this will be pre-configured for a **select few** of extremely +common SQL functions, but for most SQL functions is the "null" datatype +if not otherwise specified:: + >>> # pre-configured SQL function (only a few dozen of these) >>> func.now().type DateTime() + >>> # arbitrary SQL function (all other SQL functions) + >>> func.run_some_calculation().type + NullType() + These SQL return types are significant when making use of the function expression in the context of a larger expression; that is, math operators will work better when the datatype of the expression is @@ -1450,10 +1521,19 @@ Overall, the scenario where the or again special datatypes such as :class:`_types.JSON`, :class:`_types.ARRAY`. +Advanced SQL Function Techniques +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following subsections illustrate more things that can be done with +SQL functions. While these techniques are less common and more advanced than +basic SQL function use, they nonetheless are extremely popular, largely +as a result of PostgreSQL's emphasis on more complex function forms, including +table- and column-valued forms that are popular with JSON data. + .. _tutorial_window_functions: Using Window Functions -~~~~~~~~~~~~~~~~~~~~~~ +###################### A window function is a special use of a SQL aggregate function which calculates the aggregate value over the rows being returned in a group as the individual @@ -1478,11 +1558,15 @@ number the email addresses of individual users: .. sourcecode:: pycon+sql - >>> stmt = select( - ... func.row_number().over(partition_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.row_number().over(partition_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1500,10 +1584,15 @@ We also may make use of the ``ORDER BY`` clause using :paramref:`_functions.Func .. sourcecode:: pycon+sql - >>> stmt = select( - ... func.count().over(order_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.count().over(order_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1529,7 +1618,7 @@ Further options for window functions include usage of ranges; see .. _tutorial_functions_within_group: Special Modifiers WITHIN GROUP, FILTER -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +###################################### The "WITHIN GROUP" SQL syntax is used in conjunction with an "ordered set" or a "hypothetical set" aggregate @@ -1542,7 +1631,7 @@ method:: >>> print( ... func.unnest( - ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name) + ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name) ... ) ... 
) unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name)) @@ -1551,10 +1640,16 @@ method:: particular subset of rows compared to the total range of rows returned, available using the :meth:`_functions.FunctionElement.filter` method:: - >>> stmt = select( - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'), - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob') - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"), + ... func.count(address_table.c.email_address).filter( + ... user_table.c.name == "spongebob" + ... ), + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1563,13 +1658,13 @@ using the :meth:`_functions.FunctionElement.filter` method:: count(address.email_address) FILTER (WHERE user_account.name = ?) AS anon_2 FROM user_account JOIN address ON user_account.id = address.user_id [...] ('sandy', 'spongebob') - [(2, 1)] - ROLLBACK + {stop}[(2, 1)] + {opensql}ROLLBACK .. _tutorial_functions_table_valued: Table-Valued Functions -~~~~~~~~~~~~~~~~~~~~~~~~~ +####################### Table-valued SQL functions support a scalar representation that contains named sub-elements. Often used for JSON and ARRAY-oriented functions as well as @@ -1600,16 +1695,16 @@ modern versions of SQLite:: >>> onetwothree = func.json_each('["one", "two", "three"]').table_valued("value") >>> stmt = select(onetwothree).where(onetwothree.c.value.in_(["two", "three"])) - >>> with engine.connect() as conn: # doctest:+SKIP + >>> with engine.connect() as conn: ... result = conn.execute(stmt) - ... print(result.all()) + ... result.all() {opensql}BEGIN (implicit) SELECT anon_1.value FROM json_each(?) AS anon_1 WHERE anon_1.value IN (?, ?) [...] ('["one", "two", "three"]', 'two', 'three') - [('two',), ('three',)] - ROLLBACK + {stop}[('two',), ('three',)] + {opensql}ROLLBACK{stop} Above, we used the ``json_each()`` JSON function supported by SQLite and PostgreSQL to generate a table valued expression with a single column referred @@ -1624,7 +1719,7 @@ towards as ``value``, and then selected two of its three rows. .. _tutorial_functions_column_valued: Column Valued Functions - Table Valued Function as a Scalar Column -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +################################################################## A special syntax supported by PostgreSQL and Oracle is that of referring towards a function in the FROM clause, which then delivers itself as a @@ -1649,7 +1744,7 @@ it is usable for custom SQL functions:: >>> from sqlalchemy.dialects import oracle >>> stmt = select(func.scalar_strings(5).column_valued("s")) >>> print(stmt.compile(dialect=oracle.dialect())) - SELECT COLUMN_VALUE s + SELECT s.COLUMN_VALUE FROM TABLE (scalar_strings(:scalar_strings_1)) s @@ -1657,4 +1752,74 @@ it is usable for custom SQL functions:: :ref:`postgresql_column_valued` - in the :ref:`postgresql_toplevel` documentation. +.. 
_tutorial_casts: +Data Casts and Type Coercion +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In SQL, we often need to indicate the datatype of an expression explicitly, +either to tell the database what type is expected in an otherwise ambiguous +expression, or in some cases when we want to convert the implied datatype +of a SQL expression into something else. The SQL CAST keyword is used for +this task, which in SQLAlchemy is provided by the :func:`.cast` function. +This function accepts a column expression and a data type +object as arguments, as demonstrated below where we produce a SQL expression +``CAST(user_account.id AS VARCHAR)`` from the ``user_table.c.id`` column +object:: + + >>> from sqlalchemy import cast + >>> stmt = select(cast(user_table.c.id, String)) + >>> with engine.connect() as conn: + ... result = conn.execute(stmt) + ... result.all() + {opensql}BEGIN (implicit) + SELECT CAST(user_account.id AS VARCHAR) AS id + FROM user_account + [...] () + {stop}[('1',), ('2',), ('3',)] + {opensql}ROLLBACK{stop} + +The :func:`.cast` function not only renders the SQL CAST syntax, it also +produces a SQLAlchemy column expression that will act as the given datatype on +the Python side as well. A string expression that is :func:`.cast` to +:class:`_sqltypes.JSON` will gain JSON subscript and comparison operators, +for example:: + + >>> from sqlalchemy import JSON + >>> print(cast("{'a': 'b'}", JSON)["a"]) + CAST(:param_1 AS JSON)[:param_2] + + +type_coerce() - a Python-only "cast" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes there is the need to have SQLAlchemy know the datatype of an +expression, for all the reasons mentioned above, but to not render the CAST +expression itself on the SQL side, where it may interfere with a SQL operation +that already works without it. For this fairly common use case there is +another function :func:`.type_coerce` which is closely related to +:func:`.cast`, in that it sets up a Python expression as having a specific SQL +database type, but does not render the ``CAST`` keyword or datatype on the +database side. :func:`.type_coerce` is particularly important when dealing +with the :class:`_types.JSON` datatype, which typically has an intricate +relationship with string-oriented datatypes on different platforms and +may not even be an explicit datatype, such as on SQLite and MariaDB. +Below, we use :func:`.type_coerce` to deliver a Python structure as a JSON +string into one of MySQL's JSON functions: + +.. sourcecode:: pycon+sql + + >>> import json + >>> from sqlalchemy import JSON + >>> from sqlalchemy import type_coerce + >>> from sqlalchemy.dialects import mysql + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) + >>> print(s.compile(dialect=mysql.dialect())) + SELECT JSON_EXTRACT(%s, %s) AS anon_1 + +Above, MySQL's ``JSON_EXTRACT`` SQL function was invoked +because we used :func:`.type_coerce` to indicate that our Python dictionary +should be treated as :class:`_types.JSON`. The Python ``__getitem__`` +operator, ``['some_key']`` in this case, became available as a result and +allowed a ``JSON_EXTRACT`` path expression (not shown, however in this +case it would ultimately be ``'$."some_key"'``) to be rendered. diff --git a/doc/build/tutorial/data_update.rst b/doc/build/tutorial/data_update.rst index 8813dda9889..78c2e60f63d 100644 --- a/doc/build/tutorial/data_update.rst +++ b/doc/build/tutorial/data_update.rst @@ -56,8 +56,9 @@ A basic UPDATE looks like:: >>> from sqlalchemy import update >>> stmt = ( - ... 
update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star') + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") ... ) >>> print(stmt) {opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1 @@ -70,10 +71,7 @@ keyword arguments. UPDATE supports all the major SQL forms of UPDATE, including updates against expressions, where we can make use of :class:`_schema.Column` expressions:: - >>> stmt = ( - ... update(user_table). - ... values(fullname="Username: " + user_table.c.name) - ... ) + >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name) >>> print(stmt) {opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name) @@ -86,19 +84,19 @@ that literal values would normally go: >>> from sqlalchemy import bindparam >>> stmt = ( - ... update(user_table). - ... where(user_table.c.name == bindparam('oldname')). - ... values(name=bindparam('newname')) + ... update(user_table) + ... .where(user_table.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) ... ) >>> with engine.begin() as conn: - ... conn.execute( - ... stmt, - ... [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ] - ... ) + ... conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}BEGIN (implicit) UPDATE user_account SET name=? WHERE user_account.name = ? [...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) @@ -118,11 +116,11 @@ An UPDATE statement can make use of rows in other tables by using a anywhere a column expression might be placed:: >>> scalar_subq = ( - ... select(address_table.c.email_address). - ... where(address_table.c.user_id == user_table.c.id). - ... order_by(address_table.c.id). - ... limit(1). - ... scalar_subquery() + ... select(address_table.c.email_address) + ... .where(address_table.c.user_id == user_table.c.id) + ... .order_by(address_table.c.id) + ... .limit(1) + ... .scalar_subquery() ... ) >>> update_stmt = update(user_table).values(fullname=scalar_subq) >>> print(update_stmt) @@ -143,11 +141,11 @@ syntax will be generated implicitly when additional tables are located in the WHERE clause of the statement:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). - ... values(fullname='Pat') - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values(fullname="Pat") + ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname FROM address WHERE user_account.id = address.user_id AND address.email_address = :email_address_1 @@ -158,16 +156,13 @@ requires we refer to :class:`_schema.Table` objects in the VALUES clause in order to refer to additional tables:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). - ... values( - ... { - ... user_table.c.fullname: "Pat", - ... address_table.c.email_address: "pat@aol.com" - ... } - ... ) - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... 
.where(address_table.c.email_address == "patrick@aol.com") + ... .values( + ... {user_table.c.fullname: "Pat", address_table.c.email_address: "pat@aol.com"} + ... ) + ... ) >>> from sqlalchemy.dialects import mysql >>> print(update_stmt.compile(dialect=mysql.dialect())) {opensql}UPDATE user_account, address @@ -175,6 +170,8 @@ order to refer to additional tables:: WHERE user_account.id = address.user_id AND address.email_address = %s +.. _tutorial_parameter_ordered_updates: + Parameter Ordered Updates ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -183,12 +180,8 @@ of an UPDATE actually impacts the evaluation of each expression. For this use case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of tuples so that this order may be controlled [2]_:: - >>> update_stmt = ( - ... update(some_table). - ... ordered_values( - ... (some_table.c.y, 20), - ... (some_table.c.x, some_table.c.y + 10) - ... ) + >>> update_stmt = update(some_table).ordered_values( + ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) ... ) >>> print(update_stmt) {opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1) @@ -218,7 +211,7 @@ allowing for a RETURNING variant on some database backends. :: >>> from sqlalchemy import delete - >>> stmt = delete(user_table).where(user_table.c.name == 'patrick') + >>> stmt = delete(user_table).where(user_table.c.name == "patrick") >>> print(stmt) {opensql}DELETE FROM user_account WHERE user_account.name = :name_1 @@ -233,10 +226,10 @@ subqueries in the WHERE clause as well as backend-specific multiple table syntaxes, such as ``DELETE FROM..USING`` on MySQL:: >>> delete_stmt = ( - ... delete(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com') - ... ) + ... delete(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... ) >>> from sqlalchemy.dialects import mysql >>> print(delete_stmt.compile(dialect=mysql.dialect())) {opensql}DELETE FROM user_account USING user_account, address @@ -257,9 +250,9 @@ is available from the :attr:`_engine.CursorResult.rowcount` attribute: >>> with engine.begin() as conn: ... result = conn.execute( - ... update(user_table). - ... values(fullname="Patrick McStar"). - ... where(user_table.c.name == 'patrick') + ... update(user_table) + ... .values(fullname="Patrick McStar") + ... .where(user_table.c.name == "patrick") ... ) ... print(result.rowcount) {opensql}BEGIN (implicit) @@ -314,9 +307,10 @@ be iterated:: >>> update_stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star'). - ... returning(user_table.c.id, user_table.c.name) + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname @@ -324,8 +318,9 @@ be iterated:: RETURNING user_account.id, user_account.name{stop} >>> delete_stmt = ( - ... delete(user_table).where(user_table.c.name == 'patrick'). - ... returning(user_table.c.id, user_table.c.name) + ... delete(user_table) + ... .where(user_table.c.name == "patrick") + ... .returning(user_table.c.id, user_table.c.name) ... 
) >>> print(delete_stmt) {opensql}DELETE FROM user_account diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 16768da2b9f..c1e815a4c8a 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -11,32 +11,32 @@ Working with Transactions and the DBAPI -With the :class:`_future.Engine` object ready to go, we may now proceed -to dive into the basic operation of an :class:`_future.Engine` and -its primary interactive endpoints, the :class:`_future.Connection` and -:class:`_engine.Result`. We will additionally introduce the ORM's -:term:`facade` for these objects, known as the :class:`_orm.Session`. +With the :class:`_future.Engine` object ready to go, we can +dive into the basic operation of an :class:`_future.Engine` and +its primary endpoints, the :class:`_future.Connection` and +:class:`_engine.Result`. We'll also introduce the ORM's :term:`facade` +for these objects, known as the :class:`_orm.Session`. .. container:: orm-header **Note to ORM readers** - When using the ORM, the :class:`_future.Engine` is managed by another - object called the :class:`_orm.Session`. The :class:`_orm.Session` in - modern SQLAlchemy emphasizes a transactional and SQL execution pattern that - is largely identical to that of the :class:`_future.Connection` discussed - below, so while this subsection is Core-centric, all of the concepts here - are essentially relevant to ORM use as well and is recommended for all ORM + When using the ORM, the :class:`_future.Engine` is managed by the + :class:`_orm.Session`. The :class:`_orm.Session` in modern SQLAlchemy + emphasizes a transactional and SQL execution pattern that is largely + identical to that of the :class:`_future.Connection` discussed below, + so while this subsection is Core-centric, all of the concepts here + are relevant to ORM use as well and is recommended for all ORM learners. The execution pattern used by the :class:`_future.Connection` - will be contrasted with that of the :class:`_orm.Session` at the end + will be compared to the :class:`_orm.Session` at the end of this section. As we have yet to introduce the SQLAlchemy Expression Language that is the -primary feature of SQLAlchemy, we will make use of one simple construct within -this package called the :func:`_sql.text` construct, which allows us to write -SQL statements as **textual SQL**. Rest assured that textual SQL in -day-to-day SQLAlchemy use is by far the exception rather than the rule for most -tasks, even though it always remains fully available. +primary feature of SQLAlchemy, we'll use a simple construct within +this package called the :func:`_sql.text` construct, to write +SQL statements as **textual SQL**. Rest assured that textual SQL is the +exception rather than the rule in day-to-day SQLAlchemy use, but it's +always available. .. rst-class:: core-header @@ -45,17 +45,15 @@ tasks, even though it always remains fully available. Getting a Connection --------------------- -The sole purpose of the :class:`_future.Engine` object from a user-facing -perspective is to provide a unit of -connectivity to the database called the :class:`_future.Connection`. When -working with the Core directly, the :class:`_future.Connection` object -is how all interaction with the database is done. 
As the :class:`_future.Connection` -represents an open resource against the database, we want to always limit -the scope of our use of this object to a specific context, and the best -way to do that is by using Python context manager form, also known as -`the with statement `_. -Below we illustrate "Hello World", using a textual SQL statement. Textual -SQL is emitted using a construct called :func:`_sql.text` that will be discussed +The purpose of the :class:`_future.Engine` is to connect to the database by +providing a :class:`_future.Connection` object. When working with the Core +directly, the :class:`_future.Connection` object is how all interaction with the +database is done. Because the :class:`_future.Connection` creates an open +resource against the database, we want to limit our use of this object to a +specific context. The best way to do that is with a Python context manager, also +known as `the with statement `_. +Below we use a textual SQL statement to show "Hello World". Textual SQL is +created with a construct called :func:`_sql.text` which we'll discuss in more detail later: .. sourcecode:: pycon+sql @@ -71,21 +69,21 @@ in more detail later: {stop}[('hello world',)] {opensql}ROLLBACK{stop} -In the above example, the context manager provided for a database connection -and also framed the operation inside of a transaction. The default behavior of -the Python DBAPI includes that a transaction is always in progress; when the -scope of the connection is :term:`released`, a ROLLBACK is emitted to end the -transaction. The transaction is **not committed automatically**; when we want -to commit data we normally need to call :meth:`_future.Connection.commit` +In the example above, the context manager creates a database connection +and executes the operation in a transaction. The default behavior of +the Python DBAPI is that a transaction is always in progress; when the +connection is :term:`released`, a ROLLBACK is emitted to end the +transaction. The transaction is **not committed automatically**; if we want +to commit data we need to call :meth:`_future.Connection.commit` as we'll see in the next section. .. tip:: "autocommit" mode is available for special cases. The section :ref:`dbapi_autocommit` discusses this. -The result of our SELECT was also returned in an object called -:class:`_engine.Result` that will be discussed later, however for the moment -we'll add that it's best to ensure this object is consumed within the -"connect" block, and is not passed along outside of the scope of our connection. +The result of our SELECT was returned in an object called +:class:`_engine.Result` that will be discussed later. For the moment +we'll add that it's best to use this object within the "connect" block, +and to not use it outside of the scope of our connection. .. rst-class:: core-header @@ -94,11 +92,11 @@ we'll add that it's best to ensure this object is consumed within the Committing Changes ------------------ -We just learned that the DBAPI connection is non-autocommitting. What if -we want to commit some data? We can alter our above example to create a -table and insert some data, and the transaction is then committed using -the :meth:`_future.Connection.commit` method, invoked **inside** the block -where we acquired the :class:`_future.Connection` object: +We just learned that the DBAPI connection doesn't commit automatically. +What if we want to commit some data? 
We can change our example above to create a +table, insert some data and then commit the transaction using +the :meth:`_future.Connection.commit` method, **inside** the block +where we have the :class:`_future.Connection` object: .. sourcecode:: pycon+sql @@ -107,7 +105,7 @@ where we acquired the :class:`_future.Connection` object: ... conn.execute(text("CREATE TABLE some_table (x int, y int)")) ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}] + ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -119,25 +117,23 @@ where we acquired the :class:`_future.Connection` object: COMMIT -Above, we emitted two SQL statements that are generally transactional, a -"CREATE TABLE" statement [1]_ and an "INSERT" statement that's parameterized -(the parameterization syntax above is discussed a few sections below in -:ref:`tutorial_multiple_parameters`). As we want the work we've done to be -committed within our block, we invoke the +Above, we execute two SQL statements, a "CREATE TABLE" statement [1]_ +and an "INSERT" statement that's parameterized (we discuss the parameterization syntax +later in :ref:`tutorial_multiple_parameters`). +To commit the work we've done in our block, we call the :meth:`_future.Connection.commit` method which commits the transaction. After -we call this method inside the block, we can continue to run more SQL -statements and if we choose we may call :meth:`_future.Connection.commit` -again for subsequent statements. SQLAlchemy refers to this style as **commit as +this, we can continue to run more SQL statements and call :meth:`_future.Connection.commit` +again for those statements. SQLAlchemy refers to this style as **commit as you go**. -There is also another style of committing data, which is that we can declare -our "connect" block to be a transaction block up front. For this mode of -operation, we use the :meth:`_future.Engine.begin` method to acquire the -connection, rather than the :meth:`_future.Engine.connect` method. This method -will both manage the scope of the :class:`_future.Connection` and also -enclose everything inside of a transaction with COMMIT at the end, assuming -a successful block, or ROLLBACK in case of exception raise. This style -may be referred towards as **begin once**: +There's also another style to commit data. We can declare +our "connect" block to be a transaction block up front. To do this, we use the +:meth:`_future.Engine.begin` method to get the connection, rather than the +:meth:`_future.Engine.connect` method. This method +will manage the scope of the :class:`_future.Connection` and also +enclose everything inside of a transaction with either a COMMIT at the end +if the block was successful, or a ROLLBACK if an exception was raised. This style +is known as **begin once**: .. sourcecode:: pycon+sql @@ -145,7 +141,7 @@ may be referred towards as **begin once**: >>> with engine.begin() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}] + ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}], ... ) {opensql}BEGIN (implicit) INSERT INTO some_table (x, y) VALUES (?, ?) @@ -153,9 +149,9 @@ may be referred towards as **begin once**: COMMIT -"Begin once" style is often preferred as it is more succinct and indicates the -intention of the entire block up front. 
However, within this tutorial we will -normally use "commit as you go" style as it is more flexible for demonstration +You should mostly prefer the "begin once" style because it's shorter and shows the +intention of the entire block up front. However, in this tutorial we'll +use "commit as you go" style as it's more flexible for demonstration purposes. .. topic:: What's "BEGIN (implicit)"? @@ -169,8 +165,8 @@ purposes. .. [1] :term:`DDL` refers to the subset of SQL that instructs the database to create, modify, or remove schema-level constructs such as tables. DDL - such as "CREATE TABLE" is recommended to be within a transaction block that - ends with COMMIT, as many databases uses transactional DDL such that the + such as "CREATE TABLE" should be in a transaction block that + ends with COMMIT, as many databases use transactional DDL such that the schema changes don't take place until the transaction is committed. However, as we'll see later, we usually let SQLAlchemy run DDL sequences for us as part of a higher level operation where we don't generally need to worry @@ -179,6 +175,7 @@ purposes. .. rst-class:: core-header +.. _tutorial_statement_execution: Basics of Statement Execution ----------------------------- @@ -270,7 +267,7 @@ Below we illustrate a variety of ways to access rows. y = row.y # illustrate use with Python f-strings - print(f"Row: {row.x} {row.y}") + print(f"Row: {row.x} {y}") .. @@ -285,8 +282,8 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) for dict_row in result.mappings(): - x = dict_row['x'] - y = dict_row['y'] + x = dict_row["x"] + y = dict_row["y"] .. @@ -315,12 +312,9 @@ construct accepts these using a colon format "``:y``". The actual value for .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... result = conn.execute( - ... text("SELECT x, y FROM some_table WHERE y > :y"), - ... {"y": 2} - ... ) + ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? [...] (2,) @@ -369,7 +363,7 @@ be invoked against each parameter set individually: >>> with engine.connect() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}] + ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -397,50 +391,8 @@ for this use case. however again when using the ORM, there is a different technique generally used for updating or deleting many individual rows separately. -.. rst-class:: orm-addin - -.. _tutorial_bundling_parameters: - -Bundling Parameters with a Statement -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The two previous cases illustrate a series of parameters being passed to -accompany a SQL statement. For single-parameter statement executions, -SQLAlchemy's use of parameters is in fact more often than not done by -**bundling** the parameters with the statement itself, which is a primary -feature of the SQL Expression Language and makes for queries that can be -composed naturally while still making use of parameterization in all cases. 
-This concept will be discussed in much more detail in the sections that follow; -for a brief preview, the :func:`_sql.text` construct itself being part of the -SQL Expression Language supports this feature by using the -:meth:`_sql.TextClause.bindparams` method; this is a :term:`generative` method that -returns a new copy of the SQL construct with additional state added, in this -case the parameter values we want to pass along: - - -.. sourcecode:: pycon+sql - - >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y").bindparams(y=6) - >>> with engine.connect() as conn: - ... result = conn.execute(stmt) - ... for row in result: - ... print(f"x: {row.x} y: {row.y}") - {opensql}BEGIN (implicit) - SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y - [...] (6,) - {stop}x: 6 y: 8 - x: 9 y: 10 - x: 11 y: 12 - x: 13 y: 14 - {opensql}ROLLBACK{stop} - - -The interesting thing to note above is that even though we passed only a single -argument, ``stmt``, to the :meth:`_future.Connection.execute` method, the -execution of the statement illustrated both the SQL string as well as the -separate parameter tuple. -.. rst-class:: orm-addin +.. rst-class:: orm-header .. _tutorial_executing_orm_session: @@ -473,11 +425,11 @@ a context manager: >>> from sqlalchemy.orm import Session - >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y").bindparams(y=6) + >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y") >>> with Session(engine) as session: - ... result = session.execute(stmt) + ... result = session.execute(stmt, {"y": 6}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y [...] (6,){stop} @@ -488,7 +440,7 @@ a context manager: {opensql}ROLLBACK{stop} The example above can be compared to the example in the preceding section -in :ref:`tutorial_bundling_parameters` - we directly replace the call to +in :ref:`tutorial_sending_parameters` - we directly replace the call to ``with engine.connect() as conn`` with ``with Session(engine) as session``, and then make use of the :meth:`_orm.Session.execute` method just like we do with the :meth:`_future.Connection.execute` method. @@ -503,7 +455,7 @@ our data: >>> with Session(engine) as session: ... result = session.execute( ... text("UPDATE some_table SET y=:y WHERE x=:x"), - ... [{"x": 9, "y":11}, {"x": 13, "y": 15}] + ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}], ... ) ... session.commit() {opensql}BEGIN (implicit) @@ -518,13 +470,18 @@ the block with a "commit as you go" commit. .. tip:: The :class:`_orm.Session` doesn't actually hold onto the :class:`_future.Connection` object after it ends the transaction. It gets a new :class:`_future.Connection` from the :class:`_future.Engine` - when executing SQL against the database is next needed. + the next time it needs to execute SQL against the database. The :class:`_orm.Session` obviously has a lot more tricks up its sleeve than that, however understanding that it has a :meth:`_orm.Session.execute` method that's used the same way as :meth:`_future.Connection.execute` will get us started with the examples that follow later. +.. seealso:: + + :ref:`session_basics` - presents basic creational and usage patterns with + the :class:`_orm.Session` object. 
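As an aside to the revised wording above, here is a minimal self-contained sketch of the calling convention it describes, with ``Session.execute()`` used the same way as ``Connection.execute()``; the in-memory SQLite URL and the ``some_table`` statements mirror the tutorial and are illustrative only, not part of this changeset::

    # a minimal sketch of Session.execute() used the same way as
    # Connection.execute(); the SQLite URL and some_table setup are
    # illustrative assumptions
    from sqlalchemy import create_engine, text
    from sqlalchemy.orm import Session

    engine = create_engine("sqlite+pysqlite:///:memory:", future=True)

    with engine.begin() as conn:
        conn.execute(text("CREATE TABLE some_table (x int, y int)"))
        conn.execute(
            text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
            [{"x": 1, "y": 1}, {"x": 2, "y": 4}],
        )

    with Session(engine) as session:
        # same statement / parameter calling convention as Connection.execute()
        result = session.execute(
            text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y"),
            {"y": 2},
        )
        for row in result:
            print(f"x: {row.x} y: {row.y}")

        # DML works the same way; nothing is persisted until commit is called
        session.execute(
            text("UPDATE some_table SET y=:y WHERE x=:x"),
            {"x": 1, "y": 3},
        )
        session.commit()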
+ diff --git a/doc/build/tutorial/engine.rst b/doc/build/tutorial/engine.rst index fc8973c4659..4e53ae6bf9c 100644 --- a/doc/build/tutorial/engine.rst +++ b/doc/build/tutorial/engine.rst @@ -3,11 +3,19 @@ .. include:: tutorial_nav_include.rst +.. rst-class:: core-header, orm-addin + .. _tutorial_engine: Establishing Connectivity - the Engine ========================================== +.. container:: orm-header + + **Welcome ORM and Core readers alike!** + + Every SQLAlchemy application that connects to a database needs to use + an :class:`_engine.Engine`. This short section is for everyone. The start of any SQLAlchemy application is an object called the :class:`_future.Engine`. This object acts as a central source of connections diff --git a/doc/build/tutorial/index.rst b/doc/build/tutorial/index.rst index cb6c2feae3a..2440a33ed08 100644 --- a/doc/build/tutorial/index.rst +++ b/doc/build/tutorial/index.rst @@ -42,9 +42,14 @@ These APIs are known as **Core** and **ORM**. to a database, interacting with database queries and results, and programmatic construction of SQL statements. - Sections that have a **dark blue border on the right** will discuss - concepts that are **primarily Core-only**; when using the ORM, these - concepts are still in play but are less often explicit in user code. + Sections that are **primarily Core-only** will not refer to the ORM. + SQLAlchemy constructs used in these sections will be imported from the + ``sqlalchemy`` namespace. As an additional indicator of subject + classification, they will also include a **dark blue border on the right**. + When using the ORM, these concepts are still in play but are less often + explicit in user code. ORM users should read these sections, but not expect + to be using these APIs directly for ORM-centric code. + .. container:: orm-header @@ -56,14 +61,28 @@ These APIs are known as **Core** and **ORM**. SQL Expression Language to allow SQL queries to be composed and invoked in terms of user-defined objects. - Sections that have a **light blue border on the left** will discuss - concepts that are **primarily ORM-only**. Core-only users - can skip these. + Sections that are **primarily ORM-only** should be **titled to + include the phrase "ORM"**, so that it's clear this is an ORM related topic. + SQLAlchemy constructs used in these sections will be imported from the + ``sqlalchemy.orm`` namespace. Finally, as an additional indicator of + subject classification, they will also include a **light blue border on the + left**. Core-only users can skip these. .. container:: core-header, orm-dependency - A section that has **both light and dark borders on both sides** will - discuss a **Core concept that is also used explicitly with the ORM**. + **Most** sections in this tutorial discuss **Core concepts that + are also used explicitly with the ORM**. SQLAlchemy 2.0 in particular + features a much greater level of integration of Core API use within the + ORM. + + For each of these sections, there will be **introductory text** discussing the + degree to which ORM users should expect to be using these programming + patterns. SQLAlchemy constructs in these sections will be imported from the + ``sqlalchemy`` namespace with some potential use of ``sqlalchemy.orm`` + constructs at the same time. As an additional indicator of subject + classification, these sections will also include **both a thinner light + border on the left, and a thicker dark border on the right**. 
Core and ORM + users should familiarize with concepts in these sections equally. Tutorial Overview diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 24284c4aaca..df3b336f454 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -76,9 +76,9 @@ that will be how we will refer to the table in application code:: >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) We can observe that the above :class:`_schema.Table` construct looks a lot like @@ -151,9 +151,9 @@ table:: >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', ForeignKey('user_account.id'), nullable=False), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", ForeignKey("user_account.id"), nullable=False), + ... Column("email_address", String, nullable=False), ... ) The table above also features a third kind of constraint, which in SQL is the @@ -297,6 +297,7 @@ known as the **declarative base**. We get a new declarative base from the :func:`_orm.declarative_base` function:: from sqlalchemy.orm import declarative_base + Base = declarative_base() .. @@ -313,7 +314,7 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -322,14 +323,14 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... 
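For readers following the reformatted metadata examples above, a small illustrative sketch (not part of the changeset) renders the DDL that such ``Table`` / ``Column`` / ``ForeignKey`` declarations correspond to, without connecting to a database; the ``MetaData`` and table definitions simply mirror the tutorial's::

    # an illustrative sketch rendering the DDL that the Table / Column /
    # ForeignKey declarations shown above correspond to; no database
    # connection is needed to compile the statement
    from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table
    from sqlalchemy.schema import CreateTable

    metadata_obj = MetaData()

    user_table = Table(
        "user_account",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        Column("name", String(30)),
        Column("fullname", String),
    )

    address_table = Table(
        "address",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        Column("user_id", ForeignKey("user_account.id"), nullable=False),
        Column("email_address", String, nullable=False),
    )

    # CreateTable compiles a Table into its CREATE TABLE statement; the
    # ForeignKey above is rendered as a FOREIGN KEY ... REFERENCES clause
    print(CreateTable(address_table))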
@@ -428,7 +429,6 @@ using :meth:`_schema.MetaData.create_all`:: # declarative base Base.metadata.create_all(engine) - Combining Core Table Declarations with ORM Declarative ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -443,21 +443,35 @@ This form is called :ref:`hybrid table `, and it consists of assigning to the ``.__table__`` attribute directly, rather than having the declarative process generate it:: + mapper_registry = registry() + Base = mapper_registry.generate_base() + + class User(Base): __table__ = user_table - addresses = relationship("Address", back_populates="user") + addresses = relationship("Address", back_populates="user") - def __repr__(self): + def __repr__(self): return f"User({self.name!r}, {self.fullname!r})" + class Address(Base): __table__ = address_table - user = relationship("User", back_populates="addresses") + user = relationship("User", back_populates="addresses") + + def __repr__(self): + return f"Address({self.email_address!r})" - def __repr__(self): - return f"Address({self.email_address!r})" +.. note:: The above example is an **alternative form** to the mapping that's + first illustrated previously at :ref:`tutorial_declaring_mapped_classes`. + This example is for illustrative purposes only, and is not part of this + tutorial's "doctest" steps, and as such does not need to be run for readers + who are executing code examples. The mapping here and the one at + :ref:`tutorial_declaring_mapped_classes` produce equivalent mappings, but in + general one would use only **one** of these two forms for particular mapped + class. The above two classes are equivalent to those which we declared in the previous mapping example. @@ -484,8 +498,19 @@ another operation that was mentioned at the beginning of the section, that of **table reflection**. Table reflection refers to the process of generating :class:`_schema.Table` and related objects by reading the current state of a database. Whereas in the previous sections we've been declaring -:class:`_schema.Table` objects in Python and then emitting DDL to the database, -the reflection process does it in reverse. +:class:`_schema.Table` objects in Python, where we then have the option +to emit DDL to the database to generate such a schema, the reflection process +does these two steps in reverse, starting from an existing database +and generating in-Python data structures to represent the schemas within +that database. + +.. tip:: There is no requirement that reflection must be used in order to + use SQLAlchemy with a pre-existing database. It is entirely typical that + the SQLAlchemy application declares all metadata explicitly in Python, + such that its structure corresponds to that the existing database. + The metadata structure also need not include tables, columns, or other + constraints and constructs in the pre-existing database that are not needed + for the local application to function. As an example of reflection, we will create a new :class:`_schema.Table` object which represents the ``some_table`` object we created manually in diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index 740880567f4..e8bdb3d4c43 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -173,8 +173,8 @@ the ``id`` attribute:: INSERT many rows at once while still being able to retrieve the primary key values. 
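To make the flush behavior described above concrete, here is a brief self-contained sketch (not part of the changeset) showing the primary key becoming available after ``flush()`` and the identity map handing back the same in-memory object; the compact mapping and in-memory SQLite engine are stand-ins for the tutorial's own::

    # a self-contained sketch of flush() assigning the primary key and of the
    # identity map returning the same in-memory object
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()


    class User(Base):
        __tablename__ = "user_account"

        id = Column(Integer, primary_key=True)
        name = Column(String(30))
        fullname = Column(String)


    engine = create_engine("sqlite://", future=True)
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        squidward = User(name="squidward", fullname="Squidward Tentacles")
        session.add(squidward)

        # the INSERT is emitted here; the generated primary key is loaded back
        session.flush()
        print(squidward.id)  # 1

        # the object is now in the identity map under that primary key, so
        # Session.get() returns the same object without emitting a SELECT
        assert session.get(User, squidward.id) is squidward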
-Identity Map -^^^^^^^^^^^^ +Getting Objects by Primary Key from the Identity Map +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The primary key identity of the objects are significant to the :class:`_orm.Session`, as the objects are now linked to this identity in memory using a feature @@ -214,6 +214,28 @@ behaviors and features: >>> session.commit() COMMIT +The above operation will commit the transaction that was in progress. The +objects which we've dealt with are still :term:`attached` to the :class:`.Session`, +which is a state they stay in until the :class:`.Session` is closed +(which is introduced at :ref:`tutorial_orm_closing`). + + +.. tip:: + + An important thing to note is that attributes on the objects that we just + worked with have been :term:`expired`, meaning, when we next access any + attributes on them, the :class:`.Session` will start a new transaction and + re-load their state. This option is sometimes problematic for both + performance reasons, or if one wishes to use the objects after closing the + :class:`.Session` (which is known as the :term:`detached` state), as they + will not have any state and will have no :class:`.Session` with which to load + that state, leading to "detached instance" errors. The behavior is + controllable using a parameter called :paramref:`.Session.expire_on_commit`. + More on this is at :ref:`tutorial_orm_closing`. + + + + .. _tutorial_orm_updating: Updating ORM Objects @@ -268,9 +290,7 @@ from this row and we will get our updated value back: .. sourcecode:: pycon+sql - >>> sandy_fullname = session.execute( - ... select(User.fullname).where(User.id == 2) - ... ).scalar_one() + >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one() {opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ? [...] ('Sandy Squirrel', 2) SELECT user_account.fullname @@ -292,7 +312,7 @@ dirty:: However note we are **still in a transaction** and our changes have not been pushed to the database's permanent storage. Since Sandy's last name is in fact "Cheeks" not "Squirrel", we will repair this mistake later when -we roll back the transction. But first we'll make some more data changes. +we roll back the transaction. But first we'll make some more data changes. .. seealso:: @@ -314,9 +334,9 @@ a value in the ``User.name`` column: .. sourcecode:: pycon+sql >>> session.execute( - ... update(User). - ... where(User.name == "sandy"). - ... values(fullname="Sandy Squirrel Extraordinaire") + ... update(User) + ... .where(User.name == "sandy") + ... .values(fullname="Sandy Squirrel Extraordinaire") ... ) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? [...] ('Sandy Squirrel Extraordinaire', 'sandy'){stop} @@ -503,13 +523,14 @@ and of course the database data is present again as well: .. sourcecode:: pycon+sql - {sql}>>> session.execute(select(User).where(User.name == 'patrick')).scalar_one() is patrick + {sql}>>> session.execute(select(User).where(User.name == "patrick")).scalar_one() is patrick SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = ? [...] ('patrick',){stop} True +.. _tutorial_orm_closing: Closing a Session ------------------ diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 59691cf818d..61ce5a1bd69 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -5,10 +5,12 @@ .. include:: tutorial_nav_include.rst +.. 
rst-class:: orm-header + .. _tutorial_orm_related_objects: -Working with Related Objects -============================ +Working with ORM Related Objects +================================ In this section, we will cover one more essential ORM concept, which is how the ORM interacts with mapped classes that refer to other objects. In the @@ -24,8 +26,10 @@ and other directives: .. sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -33,13 +37,12 @@ and other directives: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... Column mappings user = relationship("User", back_populates="addresses") - Above, the ``User`` class now has an attribute ``User.addresses`` and the ``Address`` class has an attribute ``Address.user``. The :func:`_orm.relationship` construct will be used to inspect the table @@ -68,7 +71,7 @@ We can start by illustrating what :func:`_orm.relationship` does to instances of objects. If we make a new ``User`` object, we can note that there is a Python list when we access the ``.addresses`` element:: - >>> u1 = User(name='pkrabs', fullname='Pearl Krabs') + >>> u1 = User(name="pkrabs", fullname="Pearl Krabs") >>> u1.addresses [] @@ -129,6 +132,9 @@ of the ``Address.user`` attribute after the fact:: # equivalent effect as a2 = Address(user=u1) >>> a2.user = u1 + +.. _tutorial_orm_cascades: + Cascading Objects into the Session ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -297,11 +303,7 @@ corresponding to the :func:`_orm.relationship` may be passed as the **single argument** to :meth:`_sql.Select.join`, where it serves to indicate both the right side of the join as well as the ON clause at once:: - >>> print( - ... select(Address.email_address). - ... select_from(User). - ... join(User.addresses) - ... ) + >>> print(select(Address.email_address).select_from(User).join(User.addresses)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -313,10 +315,7 @@ ON clause, it works because of the :class:`_schema.ForeignKeyConstraint` between the two mapped :class:`_schema.Table` objects, not because of the :func:`_orm.relationship` objects on the ``User`` and ``Address`` classes:: - >>> print( - ... select(Address.email_address). - ... join_from(User, Address) - ... ) + >>> print(select(Address.email_address).join_from(User, Address)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -334,12 +333,12 @@ demonstrate we will construct the same join illustrated at :ref:`tutorial_orm_en using the :func:`_orm.relationship` attributes to join instead:: >>> print( - ... select(User). - ... join(User.addresses.of_type(address_alias_1)). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join(User.addresses.of_type(address_alias_2)). - ... where(address_alias_2.email_address == 'patrick@gmail.com') - ... ) + ... select(User) + ... .join(User.addresses.of_type(address_alias_1)) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join(User.addresses.of_type(address_alias_2)) + ... .where(address_alias_2.email_address == "patrick@gmail.com") + ... 
) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address AS address_1 ON user_account.id = address_1.user_id @@ -352,10 +351,7 @@ aliased entity, the attribute is available from the :func:`_orm.aliased` construct directly:: >>> user_alias_1 = aliased(User) - >>> print( - ... select(user_alias_1.name). - ... join(user_alias_1.addresses) - ... ) + >>> print(select(user_alias_1.name).join(user_alias_1.addresses)) {opensql}SELECT user_account_1.name FROM user_account AS user_account_1 JOIN address ON user_account_1.id = address.user_id @@ -377,9 +373,8 @@ email addresses: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... join(User.addresses.and_(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).join( + ... User.addresses.and_(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -407,9 +402,8 @@ an optional WHERE criteria to limit the rows matched by the subquery: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(User.addresses.any(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).where( + ... User.addresses.any(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -427,10 +421,7 @@ for ``User`` entities that have no related ``Address`` rows: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(~User.addresses.any()) - ... ) + >>> stmt = select(User.fullname).where(~User.addresses.any()) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname FROM user_account @@ -447,10 +438,7 @@ which belonged to "pearl": .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(Address.email_address). - ... where(Address.user.has(User.name=="pkrabs")) - ... ) + >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "pkrabs")) >>> session.execute(stmt).all() {opensql}SELECT address.email_address FROM address @@ -564,8 +552,10 @@ the :paramref:`_orm.relationship.lazy` option, e.g.: .. sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" addresses = relationship("Address", back_populates="user", lazy="selectin") @@ -607,11 +597,11 @@ related ``Address`` objects: .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import selectinload - >>> stmt = ( - ... select(User).options(selectinload(User.addresses)).order_by(User.id) - ... ) + >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -651,7 +641,9 @@ as below where we know that all ``Address`` objects have an associated >>> from sqlalchemy.orm import joinedload >>> stmt = ( - ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id) + ... select(Address) + ... .options(joinedload(Address.user, innerjoin=True)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... 
print(f"{row.Address.email_address} {row.Address.user.name}") @@ -727,10 +719,11 @@ example: >>> from sqlalchemy.orm import contains_eager >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(contains_eager(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(contains_eager(Address.user)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -748,10 +741,11 @@ rows. If we had applied :func:`_orm.joinedload` separately, we would get a SQL query that unnecessarily joins twice:: >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(joinedload(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(joinedload(Address.user)) + ... .order_by(Address.id) ... ) >>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily {opensql}SELECT address.id, address.email_address, address.user_id, @@ -787,19 +781,19 @@ the email addresses with the ``sqlalchemy.org`` domain, we can apply >>> from sqlalchemy.orm import selectinload >>> stmt = ( - ... select(User). - ... options( - ... selectinload( - ... User.addresses.and_( - ... ~Address.email_address.endswith("sqlalchemy.org") - ... ) - ... ) - ... ). - ... order_by(User.id). - ... execution_options(populate_existing=True) + ... select(User) + ... .options( + ... selectinload( + ... User.addresses.and_(~Address.email_address.endswith("sqlalchemy.org")) + ... ) + ... ) + ... .order_by(User.id) + ... .execution_options(populate_existing=True) ... ) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -853,7 +847,7 @@ relationship will never try to emit SQL: .. sourcecode:: python class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -861,13 +855,12 @@ relationship will never try to emit SQL: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... Column mappings user = relationship("User", back_populates="addresses", lazy="raise_on_sql") - Using such a mapping, the application is blocked from lazy loading, indicating that a particular query would need to specify a loader strategy: diff --git a/examples/dogpile_caching/caching_query.py b/examples/dogpile_caching/caching_query.py index 68f72e5f06e..f5065f8df54 100644 --- a/examples/dogpile_caching/caching_query.py +++ b/examples/dogpile_caching/caching_query.py @@ -130,10 +130,19 @@ def __init__( self.expiration_time = expiration_time self.ignore_expiration = ignore_expiration + # this is not needed as of SQLAlchemy 1.4.28; + # UserDefinedOption classes no longer participate in the SQL + # compilation cache key def _gen_cache_key(self, anon_map, bindparams): return None def _generate_cache_key(self, statement, parameters, orm_cache): + """generate a cache key with which to key the results of a statement. + + This leverages the use of the SQL compilation cache key which is + repurposed as a SQL results key. 
+ + """ statement_cache_key = statement._generate_cache_key() key = statement_cache_key.to_offline_string( diff --git a/examples/performance/bulk_updates.py b/examples/performance/bulk_updates.py index 0657c96f326..c15d0f16726 100644 --- a/examples/performance/bulk_updates.py +++ b/examples/performance/bulk_updates.py @@ -1,5 +1,5 @@ -"""This series of tests illustrates different ways to UPDATE a large number -of rows in bulk. +"""This series of tests will illustrate different ways to UPDATE a large number +of rows in bulk (under construction! there's just one test at the moment) """ diff --git a/examples/sharding/separate_databases.py b/examples/sharding/separate_databases.py index 95f12fa722d..9818656c3c5 100644 --- a/examples/sharding/separate_databases.py +++ b/examples/sharding/separate_databases.py @@ -56,9 +56,9 @@ def id_generator(ctx): # in reality, might want to use a separate transaction for this. - with db1.connect() as conn: + with db1.begin() as conn: nextid = conn.scalar(ids.select().with_for_update()) - conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1})) + conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid @@ -106,7 +106,7 @@ def __init__(self, temperature): # establish initial "id" in db1 with db1.begin() as conn: - conn.execute(ids.insert(), nextid=1) + conn.execute(ids.insert(), {"nextid": 1}) # step 5. define sharding functions. @@ -155,19 +155,19 @@ def id_chooser(query, ident): return ["north_america", "asia", "europe", "south_america"] -def query_chooser(query): - """query chooser. +def execute_chooser(context): + """statement execution chooser. - this also returns a list of shard ids, which can - just be all of them. but here we'll search into the Query in order - to try to narrow down the list of shards to query. + this also returns a list of shard ids, which can just be all of them. but + here we'll search into the execution context in order to try to narrow down + the list of shards to SELECT. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids - for column, operator, value in _get_query_comparisons(query): + for column, operator, value in _get_select_comparisons(context.statement): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object @@ -186,8 +186,8 @@ def query_chooser(query): return ids -def _get_query_comparisons(query): - """Search an orm.Query object for binary expressions. +def _get_select_comparisons(statement): + """Search a Select or Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form @@ -222,9 +222,9 @@ def visit_binary(binary): # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. - if query.whereclause is not None: + if statement.whereclause is not None: visitors.traverse( - query.whereclause, + statement.whereclause, {}, { "bindparam": visit_bindparam, @@ -239,7 +239,7 @@ def visit_binary(binary): Session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, - query_chooser=query_chooser, + execute_chooser=execute_chooser, ) # save and load objects! 
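For readers comparing the old and new example code above, a standalone sketch of the 1.4-style calls the sharding examples were updated to use, namely the generative ``.values()`` on the ``update()`` construct and parameters passed as a dictionary; the in-memory SQLite engine and the ``ids`` table definition are illustrative assumptions::

    # a standalone sketch of the modernized id_generator pattern: generative
    # .values() on update() and dict-style parameters, inside engine.begin()
    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    engine = create_engine("sqlite://", future=True)
    metadata_obj = MetaData()
    ids = Table("ids", metadata_obj, Column("nextid", Integer, nullable=False))
    metadata_obj.create_all(engine)

    with engine.begin() as conn:
        # parameters are passed as a dictionary rather than keyword arguments
        conn.execute(ids.insert(), {"nextid": 1})

    with engine.begin() as conn:
        nextid = conn.scalar(ids.select().with_for_update())
        # .values() is called generatively on update(), rather than passing
        # a values= keyword argument to update() itself
        conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1}))
        print(nextid)  # 1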
diff --git a/examples/sharding/separate_schema_translates.py b/examples/sharding/separate_schema_translates.py new file mode 100644 index 00000000000..c4f2b9e25ce --- /dev/null +++ b/examples/sharding/separate_schema_translates.py @@ -0,0 +1,243 @@ +"""Illustrates sharding using a single database with multiple schemas, +where a different "schema_translates_map" can be used for each shard. + +In this example we will set a "shard id" at all times. + +""" +import datetime +import os + +from sqlalchemy import Column +from sqlalchemy import create_engine +from sqlalchemy import DateTime +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import inspect +from sqlalchemy import Integer +from sqlalchemy import select +from sqlalchemy import String +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.ext.horizontal_shard import ShardedSession +from sqlalchemy.orm import relationship +from sqlalchemy.orm import sessionmaker + + +echo = True +engine = create_engine("sqlite://", echo=echo) + + +with engine.connect() as conn: + # use attached databases on sqlite to get "schemas" + for i in range(1, 5): + if os.path.exists("schema_%s.db" % i): + os.remove("schema_%s.db" % i) + conn.exec_driver_sql( + 'ATTACH DATABASE "schema_%s.db" AS schema_%s' % (i, i) + ) + +db1 = engine.execution_options(schema_translate_map={None: "schema_1"}) +db2 = engine.execution_options(schema_translate_map={None: "schema_2"}) +db3 = engine.execution_options(schema_translate_map={None: "schema_3"}) +db4 = engine.execution_options(schema_translate_map={None: "schema_4"}) + + +# create session function. this binds the shard ids +# to databases within a ShardedSession and returns it. +Session = sessionmaker( + class_=ShardedSession, + future=True, + shards={ + "north_america": db1, + "asia": db2, + "europe": db3, + "south_america": db4, + }, +) + + +# mappings and tables +Base = declarative_base() + + +# table setup. we'll store a lead table of continents/cities, and a secondary +# table storing locations. a particular row will be placed in the database +# whose shard id corresponds to the 'continent'. in this setup, secondary rows +# in 'weather_reports' will be placed in the same DB as that of the parent, but +# this can be changed if you're willing to write more complex sharding +# functions. + + +class WeatherLocation(Base): + __tablename__ = "weather_locations" + + id = Column(Integer, primary_key=True) + continent = Column(String(30), nullable=False) + city = Column(String(50), nullable=False) + + reports = relationship("Report", backref="location") + + def __init__(self, continent, city): + self.continent = continent + self.city = city + + +class Report(Base): + __tablename__ = "weather_reports" + + id = Column(Integer, primary_key=True) + location_id = Column( + "location_id", Integer, ForeignKey("weather_locations.id") + ) + temperature = Column("temperature", Float) + report_time = Column( + "report_time", DateTime, default=datetime.datetime.now + ) + + def __init__(self, temperature): + self.temperature = temperature + + +# create tables +for db in (db1, db2, db3, db4): + Base.metadata.create_all(db) + + +# step 5. define sharding functions. + +# we'll use a straight mapping of a particular set of "country" +# attributes to shard id. +shard_lookup = { + "North America": "north_america", + "Asia": "asia", + "Europe": "europe", + "South America": "south_america", +} + + +def shard_chooser(mapper, instance, clause=None): + """shard chooser. 
+ + this is primarily invoked at persistence time. + + looks at the given instance and returns a shard id + note that we need to define conditions for + the WeatherLocation class, as well as our secondary Report class which will + point back to its WeatherLocation via its 'location' attribute. + + """ + if isinstance(instance, WeatherLocation): + return shard_lookup[instance.continent] + else: + return shard_chooser(mapper, instance.location) + + +def id_chooser(query, ident): + """id chooser. + + given a primary key identity and a legacy :class:`_orm.Query`, + return which shard we should look at. + + in this case, we only want to support this for lazy-loaded items; + any primary query should have shard id set up front. + + """ + if query.lazy_loaded_from: + # if we are in a lazy load, we can look at the parent object + # and limit our search to that same shard, assuming that's how we've + # set things up. + return [query.lazy_loaded_from.identity_token] + else: + raise NotImplementedError() + + +def execute_chooser(context): + """statement execution chooser. + + given an :class:`.ORMExecuteState` for a statement, return a list + of shards we should consult. + + As before, we want a "shard_id" execution option to be present. + Otherwise, this would be a lazy load from a parent object where we + will look for the previous token. + + """ + if context.lazy_loaded_from: + return [context.lazy_loaded_from.identity_token] + else: + return [context.execution_options["shard_id"]] + + +# configure shard chooser +Session.configure( + shard_chooser=shard_chooser, + id_chooser=id_chooser, + execute_chooser=execute_chooser, +) + +# save and load objects! + +tokyo = WeatherLocation("Asia", "Tokyo") +newyork = WeatherLocation("North America", "New York") +toronto = WeatherLocation("North America", "Toronto") +london = WeatherLocation("Europe", "London") +dublin = WeatherLocation("Europe", "Dublin") +brasilia = WeatherLocation("South America", "Brasila") +quito = WeatherLocation("South America", "Quito") + +tokyo.reports.append(Report(80.0)) +newyork.reports.append(Report(75)) +quito.reports.append(Report(85)) + +with Session() as sess: + + sess.add_all([tokyo, newyork, toronto, london, dublin, brasilia, quito]) + + sess.commit() + + t = sess.get( + WeatherLocation, + tokyo.id, + # for session.get(), we currently need to use identity_token. + # the horizontal sharding API does not yet pass through the + # execution options + identity_token="asia", + # future version + # execution_options={"shard_id": "asia"} + ) + assert t.city == tokyo.city + assert t.reports[0].temperature == 80.0 + + north_american_cities = sess.execute( + select(WeatherLocation).filter( + WeatherLocation.continent == "North America" + ), + execution_options={"shard_id": "north_america"}, + ).scalars() + + assert {c.city for c in north_american_cities} == {"New York", "Toronto"} + + europe = sess.execute( + select(WeatherLocation).filter(WeatherLocation.continent == "Europe"), + execution_options={"shard_id": "europe"}, + ).scalars() + + assert {c.city for c in europe} == {"London", "Dublin"} + + # the Report class uses a simple integer primary key. So across two + # databases, a primary key will be repeated. The "identity_token" tracks + # in memory that these two identical primary keys are local to different + # databases. 
+ newyork_report = newyork.reports[0] + tokyo_report = tokyo.reports[0] + + assert inspect(newyork_report).identity_key == ( + Report, + (1,), + "north_america", + ) + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") + + # the token representing the originating shard is also available directly + + assert inspect(newyork_report).identity_token == "north_america" + assert inspect(tokyo_report).identity_token == "asia" diff --git a/examples/sharding/separate_tables.py b/examples/sharding/separate_tables.py index f24dde288d0..0f6e2ffd830 100644 --- a/examples/sharding/separate_tables.py +++ b/examples/sharding/separate_tables.py @@ -70,9 +70,9 @@ def before_cursor_execute( def id_generator(ctx): # in reality, might want to use a separate transaction for this. - with engine.connect() as conn: + with engine.begin() as conn: nextid = conn.scalar(ids.select().with_for_update()) - conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1})) + conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid @@ -120,7 +120,7 @@ def __init__(self, temperature): # establish initial "id" in db1 with db1.begin() as conn: - conn.execute(ids.insert(), nextid=1) + conn.execute(ids.insert(), {"nextid": 1}) # step 5. define sharding functions. @@ -169,19 +169,19 @@ def id_chooser(query, ident): return ["north_america", "asia", "europe", "south_america"] -def query_chooser(query): - """query chooser. +def execute_chooser(context): + """statement execution chooser. - this also returns a list of shard ids, which can - just be all of them. but here we'll search into the Query in order - to try to narrow down the list of shards to query. + this also returns a list of shard ids, which can just be all of them. but + here we'll search into the execution context in order to try to narrow down + the list of shards to SELECT. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids - for column, operator, value in _get_query_comparisons(query): + for column, operator, value in _get_select_comparisons(context.statement): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object @@ -200,8 +200,8 @@ def query_chooser(query): return ids -def _get_query_comparisons(query): - """Search an orm.Query object for binary expressions. +def _get_select_comparisons(statement): + """Search a Select or Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form @@ -236,9 +236,9 @@ def visit_binary(binary): # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. - if query.whereclause is not None: + if statement.whereclause is not None: visitors.traverse( - query.whereclause, + statement.whereclause, {}, { "bindparam": visit_bindparam, @@ -253,7 +253,7 @@ def visit_binary(binary): Session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, - query_chooser=query_chooser, + execute_chooser=execute_chooser, ) # save and load objects! 
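Since the new ``separate_schema_translates.py`` example above is built on ``schema_translate_map``, here is a condensed, self-contained sketch of that mechanism; the attached in-memory SQLite "schemas" and the ``cities`` table are illustrative assumptions, not part of the changeset::

    # a condensed sketch of schema_translate_map: one schema-less Table is
    # routed to a different schema for each "shard" engine
    from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

    engine = create_engine("sqlite://", future=True)

    with engine.connect() as conn:
        # attached SQLite databases stand in for real schemas
        conn.exec_driver_sql("ATTACH DATABASE ':memory:' AS schema_1")
        conn.exec_driver_sql("ATTACH DATABASE ':memory:' AS schema_2")

    metadata_obj = MetaData()
    cities = Table(
        "cities",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    # each "shard" is the same Engine with a different schema_translate_map;
    # statements against the schema-less table render schema_1 / schema_2
    db1 = engine.execution_options(schema_translate_map={None: "schema_1"})
    db2 = engine.execution_options(schema_translate_map={None: "schema_2"})

    for shard in (db1, db2):
        metadata_obj.create_all(shard)

    with db1.begin() as conn:
        # emits: INSERT INTO schema_1.cities (name) VALUES (?)
        conn.execute(cities.insert(), {"name": "Tokyo"})

    with db2.begin() as conn:
        # the row went only to schema_1, so schema_2 is still empty
        assert conn.execute(cities.select()).all() == []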
diff --git a/examples/versioned_history/history_meta.py b/examples/versioned_history/history_meta.py index 7d13f2d7456..1f83cf6d4fa 100644 --- a/examples/versioned_history/history_meta.py +++ b/examples/versioned_history/history_meta.py @@ -116,7 +116,7 @@ def _col_copy(col): Column( "changed", DateTime, - default=datetime.datetime.utcnow, + default=lambda: datetime.datetime.now(datetime.timezone.utc), info=version_meta, ) ) diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index ad6d96fdd3e..b8ba94208ae 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -1,5 +1,5 @@ -# sqlalchemy/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# __init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -123,6 +123,7 @@ from .types import TIME from .types import Time from .types import TIMESTAMP +from .types import TupleType from .types import TypeDecorator from .types import Unicode from .types import UnicodeText @@ -130,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.27" +__version__ = "1.4.55" def __go(lcls): diff --git a/lib/sqlalchemy/cextension/immutabledict.c b/lib/sqlalchemy/cextension/immutabledict.c index 1188dcd2baf..2bd9a1e4abc 100644 --- a/lib/sqlalchemy/cextension/immutabledict.c +++ b/lib/sqlalchemy/cextension/immutabledict.c @@ -1,6 +1,6 @@ /* immuatbledict.c -Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +Copyright (C) 2005-2025 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c index f6f203e7499..e5bef9ad824 100644 --- a/lib/sqlalchemy/cextension/processors.c +++ b/lib/sqlalchemy/cextension/processors.c @@ -1,6 +1,6 @@ /* processors.c -Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +Copyright (C) 2010-2025 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 2de672f22b9..88f0ecb5562 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -1,6 +1,6 @@ /* resultproxy.c -Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +Copyright (C) 2010-2025 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under @@ -21,6 +21,12 @@ typedef Py_ssize_t (*lenfunc)(PyObject *); typedef intargfunc ssizeargfunc; #endif +#if PY_VERSION_HEX >= 0x030c0000 +# define PY_RAISE_SLICE_FOR_MAPPING PyExc_KeyError +#else +# define PY_RAISE_SLICE_FOR_MAPPING PyExc_TypeError +#endif + #if PY_MAJOR_VERSION < 3 // new typedef in Python 3 @@ -369,7 +375,7 @@ BaseRow_getitem_by_object(BaseRow *self, PyObject *key, int asmapping) if (record == NULL) { if (PySlice_Check(key)) { - PyErr_Format(PyExc_TypeError, "can't use slices for mapping access"); + PyErr_Format(PY_RAISE_SLICE_FOR_MAPPING, "can't use slices for mapping access"); return NULL; } record = PyObject_CallMethod(self->parent, "_key_fallback", @@ -541,6 +547,7 @@ BaseRow_getattro(BaseRow *self, PyObject *name) "Could not locate column in row for column '%.200s'", PyBytes_AS_STRING(err_bytes) ); + 
Py_DECREF(err_bytes); #else PyErr_Format( PyExc_AttributeError, @@ -814,6 +821,29 @@ typedef struct { static PyTypeObject tuplegetter_type; +static int +PyArg_NoKeywords(const char *funcname, PyObject *kwargs) +{ +#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 13 + /* Based on the one in CPython, removed from the public headers in 3.13 + * (https://github.com/python/cpython/issues/110964) + */ + if (kwargs == NULL) + return 1; + if (!PyDict_CheckExact(kwargs)) { + PyErr_BadInternalCall(); + return 0; + } + if (PyDict_GET_SIZE(kwargs) == 0) + return 1; + + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", funcname); + return 0; +#else + return _PyArg_NoKeywords(funcname, kwargs); +#endif +} + static PyObject * tuplegetter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { @@ -821,7 +851,7 @@ tuplegetter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) PyObject *item; Py_ssize_t nitems; - if (!_PyArg_NoKeywords("tuplegetter", kwds)) + if (!PyArg_NoKeywords("tuplegetter", kwds)) return NULL; nitems = PyTuple_GET_SIZE(args); diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py index fee8b3836f5..f293a4f181e 100644 --- a/lib/sqlalchemy/connectors/__init__.py +++ b/lib/sqlalchemy/connectors/__init__.py @@ -1,5 +1,5 @@ # connectors/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 1c2fb00c043..bfdabf5ed1f 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -1,5 +1,5 @@ # connectors/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index c2bbdf7ce91..8ec998bf6f6 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -60,7 +60,7 @@ def create_connect_args(self, url): else: def check_quote(token): - if ";" in str(token): + if ";" in str(token) or str(token).startswith("{"): token = "{%s}" % token.replace("}", "}}") return token diff --git a/lib/sqlalchemy/databases/__init__.py b/lib/sqlalchemy/databases/__init__.py index 01768042591..8aa089d505d 100644 --- a/lib/sqlalchemy/databases/__init__.py +++ b/lib/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py index e06eb099524..2fff37c65a9 100644 --- a/lib/sqlalchemy/dialects/__init__.py +++ b/lib/sqlalchemy/dialects/__init__.py @@ -1,5 +1,5 @@ # dialects/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py index d4a054c3bf2..609896df20c 100644 --- a/lib/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ -# firebird/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/firebird/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index 91e2c04a7eb..61537242fb5 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ -# firebird/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/firebird/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py index 4687809dfef..874983f09c6 100644 --- a/lib/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ -# firebird/fdb.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/firebird/fdb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py index 102222de0ac..f65aeb41a23 100644 --- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ -# firebird/kinterbasdb.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/firebird/kinterbasdb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py index 3aa1e344a6e..0a2f557b08f 100644 --- a/lib/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ -# mssql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 95ccd2ca892..1607a4d67d0 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ -# mssql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -7,7 +7,6 @@ """ .. dialect:: mssql :name: Microsoft SQL Server - :full_support: 2017 :normal_support: 2012+ :best_effort: 2005+ @@ -433,12 +432,69 @@ class TestTable(Base): * ``SERIALIZABLE`` * ``SNAPSHOT`` - specific to SQL Server -.. versionadded:: 1.2 added AUTOCOMMIT isolation level setting +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. .. 
seealso:: :ref:`dbapi_autocommit` +.. _mssql_reset_on_return: + +Temporary Table / Resource Reset for Connection Pooling +------------------------------------------------------- + +The :class:`.QueuePool` connection pool implementation used +by the SQLAlchemy :class:`_sa.Engine` object includes +:ref:`reset on return ` behavior that will invoke +the DBAPI ``.rollback()`` method when connections are returned to the pool. +While this rollback will clear out the immediate state used by the previous +transaction, it does not cover a wider range of session-level state, including +temporary tables as well as other server state such as prepared statement +handles and statement caches. An undocumented SQL Server procedure known +as ``sp_reset_connection`` is known to be a workaround for this issue which +will reset most of the session state that builds up on a connection, including +temporary tables. + +To install ``sp_reset_connection`` as the means of performing reset-on-return, +the :meth:`.PoolEvents.reset` event hook may be used, as demonstrated in the +example below (**requires SQLAlchemy 1.4.43 or greater**). The +:paramref:`_sa.create_engine.pool_reset_on_return` parameter is set to ``None`` +so that the custom scheme can replace the default behavior completely. The +custom hook implementation calls ``.rollback()`` in any case, as it's usually +important that the DBAPI's own tracking of commit/rollback will remain +consistent with the state of the transaction:: + + from sqlalchemy import create_engine + from sqlalchemy import event + + mssql_engine = create_engine( + "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server", + + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(mssql_engine, "reset") + def _reset_mssql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("{call sys.sp_reset_connection}") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + + :ref:`pool_reset_on_return` - in the :ref:`pooling_toplevel` documentation + Nullability ----------- MSSQL has support for three levels of column nullability. The default @@ -1285,9 +1341,10 @@ class NTEXT(sqltypes.UnicodeText): class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): """The MSSQL VARBINARY type. - This type is present to support "deprecate_large_types" mode where - either ``VARBINARY(max)`` or IMAGE is rendered. Otherwise, this type - object is redundant vs. :class:`_types.VARBINARY`. + This type adds additional features to the core :class:`_types.VARBINARY` + type, including "deprecate_large_types" mode where + either ``VARBINARY(max)`` or IMAGE is rendered, as well as the SQL + Server ``FILESTREAM`` option. .. versionadded:: 1.0.0 @@ -1295,12 +1352,33 @@ class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): :ref:`mssql_large_type_deprecation` - - """ __visit_name__ = "VARBINARY" + def __init__(self, length=None, filestream=False): + """ + Construct a VARBINARY type. + + :param length: optional, a length for the column for use in + DDL statements, for those binary types that accept a length, + such as the MySQL BLOB type. 
+ + :param filestream=False: if True, renders the ``FILESTREAM`` keyword + in the table definition. In this case ``length`` must be ``None`` + or ``'max'``. + + .. versionadded:: 1.4.31 + + """ + + self.filestream = filestream + if self.filestream and length not in (None, "max"): + raise ValueError( + "length must be None or 'max' when setting filestream" + ) + super(VARBINARY, self).__init__(length=length) + class IMAGE(sqltypes.LargeBinary): __visit_name__ = "IMAGE" @@ -1354,6 +1432,7 @@ class TryCast(sql.elements.Cast): __visit_name__ = "try_cast" stringify_dialect = "mssql" + inherit_cache = True def __init__(self, *arg, **kw): """Create a TRY_CAST expression. @@ -1568,7 +1647,10 @@ def visit_XML(self, type_, **kw): return "XML" def visit_VARBINARY(self, type_, **kw): - return self._extend("VARBINARY", type_, length=type_.length or "max") + text = self._extend("VARBINARY", type_, length=type_.length or "max") + if getattr(type_, "filestream", False): + text += " FILESTREAM" + return text def visit_boolean(self, type_, **kw): return self.visit_BIT(type_) @@ -1599,7 +1681,6 @@ class MSExecutionContext(default.DefaultExecutionContext): _select_lastrowid = False _lastrowid = None _rowcount = None - _result_strategy = None def _opt_encode(self, statement): @@ -1626,15 +1707,12 @@ def pre_exec(self): ) if insert_has_identity: - compile_state = self.compiled.compile_state + compile_state = self.compiled.dml_compile_state self._enable_identity_insert = ( id_column.key in self.compiled_parameters[0] ) or ( compile_state._dict_parameters - and ( - id_column.key in compile_state._dict_parameters - or id_column in compile_state._dict_parameters - ) + and (id_column.key in compile_state._insert_col_keys) ) else: @@ -1731,14 +1809,6 @@ def handle_dbapi_exception(self, e): except Exception: pass - def get_result_cursor_strategy(self, result): - if self._result_strategy: - return self._result_strategy - else: - return super(MSExecutionContext, self).get_result_cursor_strategy( - result - ) - def fire_sequence(self, seq, type_): return self._execute_scalar( ( @@ -1776,6 +1846,10 @@ def __init__(self, *args, **kwargs): self.tablealiases = {} super(MSSQLCompiler, self).__init__(*args, **kwargs) + def _format_frame_clause(self, range_, **kw): + kw["literal_execute"] = True + return super()._format_frame_clause(range_, **kw) + def _with_legacy_schema_aliasing(fn): def decorate(self, *arg, **kw): if self.dialect.legacy_schema_aliasing: @@ -2083,6 +2157,7 @@ def returning_clause(self, stmt, returning_cols): stmt, adapter.traverse(c), {"result_map_targets": (c,)}, + fallback_label_name=c._non_anon_label, ) for c in expression._select_iterables(returning_cols) ] @@ -2592,10 +2667,8 @@ def _switch_db(dbname, connection, fn, *arg, **kw): def _owner_plus_db(dialect, schema): if not schema: return None, dialect.default_schema_name - elif "." 
in schema: - return _schema_elements(schema) else: - return None, schema + return _schema_elements(schema) _memoized_schema = util.LRUCache() @@ -2615,6 +2688,9 @@ def _schema_elements(schema): # test/dialect/mssql/test_compiler.py -> test_schema_many_tokens_* # + if schema.startswith("__[SCHEMA_"): + return None, schema + push = [] symbol = "" bracket = False @@ -2732,6 +2808,7 @@ def __init__( json_serializer=None, json_deserializer=None, legacy_schema_aliasing=None, + ignore_no_transaction_on_rollback=False, **opts ): self.query_timeout = int(query_timeout or 0) @@ -2739,6 +2816,9 @@ def __init__( self.use_scope_identity = use_scope_identity self.deprecate_large_types = deprecate_large_types + self.ignore_no_transaction_on_rollback = ( + ignore_no_transaction_on_rollback + ) if legacy_schema_aliasing is not None: util.warn_deprecated( @@ -2763,6 +2843,22 @@ def do_release_savepoint(self, connection, name): # SQL Server does not support RELEASE SAVEPOINT pass + def do_rollback(self, dbapi_connection): + try: + super(MSDialect, self).do_rollback(dbapi_connection) + except self.dbapi.ProgrammingError as e: + if self.ignore_no_transaction_on_rollback and re.match( + r".*\b111214\b", str(e) + ): + util.warn( + "ProgrammingError 111214 " + "'No corresponding transaction found.' " + "has been suppressed via " + "ignore_no_transaction_on_rollback=True" + ) + else: + raise + _isolation_lookup = set( [ "SERIALIZABLE", @@ -2787,48 +2883,54 @@ def set_isolation_level(self, connection, level): if level == "SNAPSHOT": connection.commit() - def get_isolation_level(self, connection): - last_error = None + def get_isolation_level(self, dbapi_connection): + cursor = dbapi_connection.cursor() + view_name = "sys.system_views" + try: + cursor.execute( + ( + "SELECT name FROM {} WHERE name IN " + "('dm_exec_sessions', 'dm_pdw_nodes_exec_sessions')" + ).format(view_name) + ) + row = cursor.fetchone() + if not row: + raise NotImplementedError( + "Can't fetch isolation level on this particular " + "SQL Server version." + ) - views = ("sys.dm_exec_sessions", "sys.dm_pdw_nodes_exec_sessions") - for view in views: - cursor = connection.cursor() - try: - cursor.execute( - """ - SELECT CASE transaction_isolation_level + view_name = "sys.{}".format(row[0]) + + cursor.execute( + """ + SELECT CASE transaction_isolation_level WHEN 0 THEN NULL WHEN 1 THEN 'READ UNCOMMITTED' WHEN 2 THEN 'READ COMMITTED' WHEN 3 THEN 'REPEATABLE READ' WHEN 4 THEN 'SERIALIZABLE' - WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL - FROM %s + WHEN 5 THEN 'SNAPSHOT' END + AS TRANSACTION_ISOLATION_LEVEL + FROM {} where session_id = @@SPID - """ - % view + """.format( + view_name ) - val = cursor.fetchone()[0] - except self.dbapi.Error as err: - # Python3 scoping rules - last_error = err - continue - else: - return val.upper() - finally: - cursor.close() - else: - # note that the NotImplementedError is caught by - # DefaultDialect, so the warning here is all that displays - util.warn( - "Could not fetch transaction isolation level, " - "tried views: %s; final error was: %s" % (views, last_error) ) - raise NotImplementedError( - "Can't fetch isolation level on this particular " - "SQL Server version. 
tried views: %s; final error was: %s" - % (views, last_error) + except self.dbapi.Error as err: + util.raise_( + NotImplementedError( + "Can't fetch isolation level; encountered error {} when " + 'attempting to query the "{}" view.'.format(err, view_name) + ), + from_=err, ) + else: + row = cursor.fetchone() + return row[0].upper() + finally: + cursor.close() def initialize(self, connection): super(MSDialect, self).initialize(connection) @@ -2887,37 +2989,23 @@ def _get_default_schema_name(self, connection): @_db_plus_owner def has_table(self, connection, tablename, dbname, owner, schema): self._ensure_has_table_connection(connection) - if tablename.startswith("#"): # temporary table - tables = ischema.mssql_temp_table_columns - s = sql.select(tables.c.table_name).where( - tables.c.table_name.like( - self._temp_table_name_like_pattern(tablename) + if tablename.startswith("#"): # temporary table + # mssql does not support temporary views + # SQL Error [4103] [S0001]: "#v": Temporary views are not allowed + return bool( + connection.scalar( + # U filters on user tables only. + text("SELECT object_id(:table_name, 'U')"), + {"table_name": "tempdb.dbo.[{}]".format(tablename)}, ) ) - # #7168: fetch all (not just first match) in case some other #temp - # table with the same name happens to appear first - table_names = connection.execute(s).scalars().fetchall() - # #6910: verify it's not a temp table from another session - for table_name in table_names: - if bool( - connection.scalar( - text("SELECT object_id(:table_name)"), - {"table_name": "tempdb.dbo.[{}]".format(table_name)}, - ) - ): - return True - else: - return False else: tables = ischema.tables - s = sql.select(tables.c.table_name).where( - sql.and_( - tables.c.table_type == "BASE TABLE", - tables.c.table_name == tablename, - ) + s = sql.select(tables.c.table_name, tables.c.table_type).where( + tables.c.table_name == tablename, ) if owner: @@ -3066,6 +3154,12 @@ def get_indexes(self, connection, tablename, dbname, owner, schema, **kw): indexes[row["index_id"]]["column_names"].append( row["name"] ) + for index_info in indexes.values(): + # NOTE: "root level" include_columns is legacy, now part of + # dialect_options (issue #7382) + index_info.setdefault("dialect_options", {})[ + "mssql_include" + ] = index_info["include_columns"] return list(indexes.values()) @@ -3163,14 +3257,16 @@ def get_columns(self, connection, tablename, dbname, owner, schema, **kw): computed_cols, onclause=sql.and_( computed_cols.c.object_id == func.object_id(full_name), - computed_cols.c.name == columns.c.column_name, + computed_cols.c.name + == columns.c.column_name.collate("DATABASE_DEFAULT"), ), isouter=True, ).join( identity_cols, onclause=sql.and_( identity_cols.c.object_id == func.object_id(full_name), - identity_cols.c.name == columns.c.column_name, + identity_cols.c.name + == columns.c.column_name.collate("DATABASE_DEFAULT"), ), isouter=True, ) @@ -3424,7 +3520,8 @@ def get_foreign_keys( AND index_info.index_name = fk_info.unique_constraint_name AND index_info.ordinal_position = fk_info.ordinal_position - ORDER BY constraint_schema, constraint_name, ordinal_position + ORDER BY fk_info.constraint_schema, fk_info.constraint_name, + fk_info.ordinal_position """ ) .bindparams( diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index fa0386faad3..13e0a777361 100644 --- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ 
-# mssql/information_schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/information_schema.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -215,7 +215,7 @@ class IdentitySqlVariant(TypeDecorator): cache_ok = True def column_expression(self, colexpr): - return cast(colexpr, Numeric) + return cast(colexpr, Numeric(38, 0)) identity_columns = Table( diff --git a/lib/sqlalchemy/dialects/mssql/json.py b/lib/sqlalchemy/dialects/mssql/json.py index d5157312c72..450bec29e8c 100644 --- a/lib/sqlalchemy/dialects/mssql/json.py +++ b/lib/sqlalchemy/dialects/mssql/json.py @@ -1,3 +1,9 @@ +# dialects/mssql/json.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import types as sqltypes # technically, all the dialect-specific datatypes that don't have any special diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index 3f3fe4ed120..554992a0d61 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ -# mssql/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/mxodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/provision.py b/lib/sqlalchemy/dialects/mssql/provision.py index 56f3305a704..bc4c6cb841f 100644 --- a/lib/sqlalchemy/dialects/mssql/provision.py +++ b/lib/sqlalchemy/dialects/mssql/provision.py @@ -1,3 +1,9 @@ +# dialects/mssql/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from sqlalchemy import inspect from sqlalchemy import Integer from ... import create_engine @@ -12,10 +18,17 @@ from ...testing.provision import drop_db from ...testing.provision import get_temp_table_name from ...testing.provision import log +from ...testing.provision import post_configure_engine from ...testing.provision import run_reap_dbs from ...testing.provision import temp_table_keyword_args +@post_configure_engine.for_db("mssql") +def post_configure_engine(url, engine, follower_ident): + if engine.driver == "pyodbc": + engine.dialect.dbapi.pooling = False + + @create_db.for_db("mssql") def _mssql_create_db(cfg, eng, ident): with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn: diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index b559384ba0a..49588bde167 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -1,5 +1,5 @@ -# mssql/pymssql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/pymssql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -19,24 +19,6 @@ pymssql is currently not included in SQLAlchemy's continuous integration (CI) testing. -Modern versions of this driver worked very well with SQL Server and FreeTDS -from Linux and were highly recommended. 
However, pymssql is currently -unmaintained and has fallen behind the progress of the Microsoft ODBC driver in -its support for newer features of SQL Server. The latest official release of -pymssql at the time of this document is version 2.1.4 (August, 2018) and it -lacks support for: - -1. table-valued parameters (TVPs), -2. ``datetimeoffset`` columns using timezone-aware ``datetime`` objects - (values are sent and retrieved as strings), and -3. encrypted connections (e.g., to Azure SQL), when pymssql is installed from - the pre-built wheels. Support for encrypted connections requires building - pymssql from source, which can be a nuisance, especially under Windows. - -The above features are all supported by mssql+pyodbc when using Microsoft's -ODBC Driver for SQL Server (msodbcsql), which is now available for Windows, -(several flavors of) Linux, and macOS. - """ # noqa import re diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 0a56a03de69..82210d0f8dd 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,5 +1,5 @@ -# mssql/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mssql/pyodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -49,18 +49,18 @@ engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server") -Other keywords interpreted by the Pyodbc dialect to be passed to -``pyodbc.connect()`` in both the DSN and hostname cases include: -``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``, -``authentication``. -Note that in order for the dialect to recognize these keywords -(including the ``driver`` keyword above) they must be all lowercase. -Multiple additional keyword arguments must be separated by an -ampersand (``&``), not a semicolon:: +The ``driver`` keyword is significant to the pyodbc dialect and must be +specified in lowercase. - engine = create_engine( - "mssql+pyodbc://scott:tiger@myhost:49242/databasename" - "?driver=ODBC+Driver+17+for+SQL+Server" +Any other names passed in the query string are passed through in the pyodbc +connect string, such as ``authentication``, ``TrustServerCertificate``, etc. +Multiple keyword arguments must be separated by an ampersand (``&``); these +will be translated to semicolons when the pyodbc connect string is generated +internally:: + + e = create_engine( + "mssql+pyodbc://scott:tiger@mssql2017:1433/test?" + "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes" "&authentication=ActiveDirectoryIntegrated" ) @@ -71,11 +71,12 @@ "mssql+pyodbc", username="scott", password="tiger", - host="myhost", - port=49242, - database="databasename", + host="mssql2017", + port=1433, + database="test", query={ - "driver": "ODBC Driver 17 for SQL Server", + "driver": "ODBC Driver 18 for SQL Server", + "TrustServerCertificate": "yes", "authentication": "ActiveDirectoryIntegrated", }, ) @@ -155,6 +156,34 @@ def provide_token(dialect, conn_rec, cargs, cparams): stating that a connection string when using an access token must not contain ``UID``, ``PWD``, ``Authentication`` or ``Trusted_Connection`` parameters. +.. 
_azure_synapse_ignore_no_transaction_on_rollback: + +Avoiding transaction-related exceptions on Azure Synapse Analytics +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Azure Synapse Analytics has a significant difference in its transaction +handling compared to plain SQL Server; in some cases an error within a Synapse +transaction can cause it to be arbitrarily terminated on the server side, which +then causes the DBAPI ``.rollback()`` method (as well as ``.commit()``) to +fail. The issue prevents the usual DBAPI contract of allowing ``.rollback()`` +to pass silently if no transaction is present as the driver does not expect +this condition. The symptom of this failure is an exception with a message +resembling 'No corresponding transaction found. (111214)' when attempting to +emit a ``.rollback()`` after an operation had a failure of some kind. + +This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to +the SQL Server dialect via the :func:`_sa.create_engine` function as follows:: + + engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True) + +Using the above parameter, the dialect will catch ``ProgrammingError`` +exceptions raised during ``connection.rollback()`` and emit a warning +if the error message contains code ``111214``, however will not raise +an exception. + +.. versionadded:: 1.4.40 Added the + ``ignore_no_transaction_on_rollback=True`` parameter. + Enable autocommit for Azure SQL Data Warehouse (DW) connections ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -179,6 +208,33 @@ def provide_token(dialect, conn_rec, cargs, cparams): isolation_level="AUTOCOMMIT" ) +Avoiding sending large string parameters as TEXT/NTEXT +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, for historical reasons, Microsoft's ODBC drivers for SQL Server +send long string parameters (greater than 4000 SBCS characters or 2000 Unicode +characters) as TEXT/NTEXT values. TEXT and NTEXT have been deprecated for many +years and are starting to cause compatibility issues with newer versions of +SQL_Server/Azure. For example, see `this +issue `_. 
+ +Starting with ODBC Driver 18 for SQL Server we can override the legacy +behavior and pass long strings as varchar(max)/nvarchar(max) using the +``LongAsMax=Yes`` connection string parameter:: + + connection_url = sa.engine.URL.create( + "mssql+pyodbc", + username="scott", + password="tiger", + host="mssqlserver.example.com", + database="mydb", + query={ + "driver": "ODBC Driver 18 for SQL Server", + "LongAsMax": "Yes", + }, + ) + + Pyodbc Pooling / connection close behavior ------------------------------------------ diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index c83fec0c394..0ff338a3030 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ -# mysql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index 93d2360580d..5def0121e5a 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -1,5 +1,5 @@ -# mysql/aiomysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under @@ -11,13 +11,6 @@ :connectstring: mysql+aiomysql://user:password@host:port/dbname[?key=value&key=value...] :url: https://github.com/aio-libs/aiomysql -.. warning:: The aiomysql dialect is not currently tested as part of - SQLAlchemy’s continuous integration. As of September, 2021 the driver - appears to be unmaintained and no longer functions for Python version 3.10, - and additionally depends on a significantly outdated version of PyMySQL. - Please refer to the :ref:`asyncmy` dialect for current MySQL/MariaDB asyncio - functionality. - The aiomysql dialect is SQLAlchemy's second Python asyncio dialect. 
Using a special asyncio mediation layer, the aiomysql dialect is usable @@ -57,7 +50,7 @@ def __init__(self, adapt_connection): self._connection = adapt_connection._connection self.await_ = adapt_connection.await_ - cursor = self._connection.cursor() + cursor = self._connection.cursor(adapt_connection.dbapi.Cursor) # see https://github.com/aio-libs/aiomysql/issues/543 self._cursor = self.await_(cursor.__aenter__()) @@ -103,10 +96,7 @@ def executemany(self, operation, seq_of_parameters): async def _execute_async(self, operation, parameters): async with self._adapt_connection._execute_mutex: - if parameters is None: - result = await self._cursor.execute(operation) - else: - result = await self._cursor.execute(operation, parameters) + result = await self._cursor.execute(operation, parameters) if not self.server_side: # aiomysql has a "fake" async result, so we have to pull it out @@ -156,9 +146,7 @@ def __init__(self, adapt_connection): self._connection = adapt_connection._connection self.await_ = adapt_connection.await_ - cursor = self._connection.cursor( - adapt_connection.dbapi.aiomysql.SSCursor - ) + cursor = self._connection.cursor(adapt_connection.dbapi.SSCursor) self._cursor = self.await_(cursor.__aenter__()) @@ -224,6 +212,7 @@ def __init__(self, aiomysql, pymysql): self.pymysql = pymysql self.paramstyle = "format" self._init_dbapi_attributes() + self.Cursor, self.SSCursor = self._init_cursors_subclasses() def _init_dbapi_attributes(self): for name in ( @@ -265,6 +254,18 @@ def connect(self, *arg, **kw): await_only(self.aiomysql.connect(*arg, **kw)), ) + def _init_cursors_subclasses(self): + # suppress unconditional warning emitted by aiomysql + class Cursor(self.aiomysql.Cursor): + async def _show_warnings(self, conn): + pass + + class SSCursor(self.aiomysql.SSCursor): + async def _show_warnings(self, conn): + pass + + return Cursor, SSCursor + class MySQLDialect_aiomysql(MySQLDialect_pymysql): driver = "aiomysql" diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 0fca338f561..6ea2a5a9bcd 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -1,5 +1,5 @@ -# mysql/asyncmy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under @@ -11,10 +11,6 @@ :connectstring: mysql+asyncmy://user:password@host:port/dbname[?key=value&key=value...] :url: https://github.com/long2ice/asyncmy -.. note:: The asyncmy dialect as of September, 2021 was added to provide - MySQL/MariaDB asyncio compatibility given that the :ref:`aiomysql` database - driver has become unmaintained, however asyncmy is itself very new. - Using a special asyncio mediation layer, the asyncmy dialect is usable as the backend for the :ref:`SQLAlchemy asyncio ` extension package. 
@@ -228,10 +224,14 @@ class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection): await_ = staticmethod(await_fallback) +def _Binary(x): + """Return x as a binary type.""" + return bytes(x) + + class AsyncAdapt_asyncmy_dbapi: - def __init__(self, asyncmy, pymysql): + def __init__(self, asyncmy): self.asyncmy = asyncmy - self.pymysql = pymysql self.paramstyle = "format" self._init_dbapi_attributes() @@ -251,15 +251,12 @@ def _init_dbapi_attributes(self): ): setattr(self, name, getattr(self.asyncmy.errors, name)) - for name in ( - "NUMBER", - "STRING", - "DATETIME", - "BINARY", - "TIMESTAMP", - "Binary", - ): - setattr(self, name, getattr(self.pymysql, name)) + STRING = util.symbol("STRING") + NUMBER = util.symbol("NUMBER") + BINARY = util.symbol("BINARY") + DATETIME = util.symbol("DATETIME") + TIMESTAMP = util.symbol("TIMESTAMP") + Binary = staticmethod(_Binary) def connect(self, *arg, **kw): async_fallback = kw.pop("async_fallback", False) @@ -287,9 +284,7 @@ class MySQLDialect_asyncmy(MySQLDialect_pymysql): @classmethod def dbapi(cls): - return AsyncAdapt_asyncmy_dbapi( - __import__("asyncmy"), __import__("pymysql") - ) + return AsyncAdapt_asyncmy_dbapi(__import__("asyncmy")) @classmethod def get_pool_class(cls, url): @@ -318,7 +313,7 @@ def is_disconnect(self, e, connection, cursor): ) def _found_rows_client_flag(self): - from pymysql.constants import CLIENT + from asyncmy.constants import CLIENT return CLIENT.FOUND_ROWS diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index ad38fee979e..ef69e249796 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ -# mysql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -9,7 +9,6 @@ .. dialect:: mysql :name: MySQL / MariaDB - :full_support: 5.6, 5.7, 8.0 / 10.4, 10.5 :normal_support: 5.6+ / 10+ :best_effort: 5.0.2+ / 5.0.2+ @@ -238,6 +237,11 @@ the database connection will return true for the value of ``SELECT @@autocommit;``. +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. + .. seealso:: :ref:`dbapi_autocommit` @@ -560,7 +564,7 @@ def connect(dbapi_connection, connection_record): as well as a list of 2-tuples, which will automatically provide a parameter-ordered UPDATE statement in a manner similar to that described -at :ref:`updates_order_parameters`. Unlike the :class:`_expression.Update` +at :ref:`tutorial_parameter_ordered_updates`. 
Unlike the :class:`_expression.Update` object, no special flag is needed to specify the intent since the argument form is this context is unambiguous: @@ -1302,7 +1306,7 @@ def replace(obj): and obj.table is on_duplicate.inserted_alias ): obj = literal_column( - "VALUES(" + self.preparer.quote(column.name) + ")" + "VALUES(" + self.preparer.quote(obj.name) + ")" ) return obj else: @@ -1645,7 +1649,7 @@ def visit_is_not_distinct_from_binary(self, binary, operator, **kw): def _mariadb_regexp_flags(self, flags, pattern, **kw): return "CONCAT('(?', %s, ')', %s)" % ( - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), self.process(pattern, **kw), ) @@ -1663,7 +1667,7 @@ def _regexp_match(self, op_string, binary, operator, **kw): text = "REGEXP_LIKE(%s, %s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) if op_string == " NOT REGEXP ": return "NOT %s" % text @@ -1678,25 +1682,22 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): def visit_regexp_replace_op_binary(self, binary, operator, **kw): flags = binary.modifiers["flags"] - replacement = binary.modifiers["replacement"] if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), - self.process(replacement, **kw), ) elif self.dialect.is_mariadb: return "REGEXP_REPLACE(%s, %s, %s)" % ( self.process(binary.left, **kw), - self._mariadb_regexp_flags(flags, binary.right), - self.process(replacement, **kw), + self._mariadb_regexp_flags(flags, binary.right.clauses[0]), + self.process(binary.right.clauses[1], **kw), ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), - self.process(replacement, **kw), - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) @@ -2578,16 +2579,22 @@ def do_recover_twophase(self, connection): def is_disconnect(self, e, connection, cursor): if isinstance( - e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError) + e, + ( + self.dbapi.OperationalError, + self.dbapi.ProgrammingError, + self.dbapi.InterfaceError, + ), + ) and self._extract_error_code(e) in ( + 1927, + 2006, + 2013, + 2014, + 2045, + 2055, + 4031, ): - return self._extract_error_code(e) in ( - 1927, - 2006, - 2013, - 2014, - 2045, - 2055, - ) + return True elif isinstance( e, (self.dbapi.InterfaceError, self.dbapi.InternalError) ): @@ -2699,9 +2706,18 @@ def get_sequence_names(self, connection, schema=None, **kw): ] def initialize(self, connection): + # this is driver-based, does not need server version info + # and is fairly critical for even basic SQL operations self._connection_charset = self._detect_charset(connection) + + # call super().initialize() because we need to have + # server_version_info set up. 
in 1.4 under python 2 only this does the + # "check unicode returns" thing, which is the one area that some + # SQL gets compiled within initialize() currently + default.DefaultDialect.initialize(self, connection) + self._detect_sql_mode(connection) - self._detect_ansiquotes(connection) + self._detect_ansiquotes(connection) # depends on sql mode self._detect_casing(connection) if self._server_ansiquotes: # if ansiquotes == True, build a new IdentifierPreparer @@ -2710,8 +2726,6 @@ def initialize(self, connection): self, server_ansiquotes=self._server_ansiquotes ) - default.DefaultDialect.initialize(self, connection) - self.supports_sequences = ( self.is_mariadb and self.server_version_info >= (10, 3) ) @@ -3015,14 +3029,22 @@ def get_indexes(self, connection, table_name, schema=None, **kw): ] index_d = {} - if dialect_options: - index_d["dialect_options"] = dialect_options index_d["name"] = spec["name"] index_d["column_names"] = [s[0] for s in spec["columns"]] + mysql_length = { + s[0]: s[1] for s in spec["columns"] if s[1] is not None + } + if mysql_length: + dialect_options["%s_length" % self.name] = mysql_length + index_d["unique"] = unique if flavor: index_d["type"] = flavor + + if dialect_options: + index_d["dialect_options"] = dialect_options + indexes.append(index_d) return indexes @@ -3089,7 +3111,7 @@ def _setup_parser(self, connection, table_name, schema=None, **kw): sql = self._show_create_table( connection, None, charset, full_name=full_name ) - if re.match(r"^CREATE (?:ALGORITHM)?.* VIEW", sql): + if parser._check_view(sql): # Adapt views to something table-like. columns = self._describe_table( connection, None, charset, full_name=full_name @@ -3097,6 +3119,23 @@ def _setup_parser(self, connection, table_name, schema=None, **kw): sql = parser._describe_to_create(table_name, columns) return parser.parse(sql, charset) + def _fetch_setting(self, connection, setting_name): + charset = self._connection_charset + + if self.server_version_info and self.server_version_info < (5, 6): + sql = "SHOW VARIABLES LIKE '%s'" % setting_name + fetch_col = 1 + else: + sql = "SELECT @@%s" % setting_name + fetch_col = 0 + + show_var = connection.exec_driver_sql(sql) + row = self._compat_first(show_var, charset=charset) + if not row: + return None + else: + return row[fetch_col] + def _detect_charset(self, connection): raise NotImplementedError() @@ -3109,25 +3148,18 @@ def _detect_casing(self, connection): """ # https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html - charset = self._connection_charset - show_var = connection.execute( - sql.text("SHOW VARIABLES LIKE 'lower_case_table_names'") - ) - row = self._compat_first( - show_var, - charset=charset, - ) - if not row: + setting = self._fetch_setting(connection, "lower_case_table_names") + if setting is None: cs = 0 else: # 4.0.15 returns OFF or ON according to [ticket:489] # 3.23 doesn't, 4.0.27 doesn't.. 
- if row[1] == "OFF": + if setting == "OFF": cs = 0 - elif row[1] == "ON": + elif setting == "ON": cs = 1 else: - cs = int(row[1]) + cs = int(setting) self._casing = cs return cs @@ -3145,19 +3177,16 @@ def _detect_collations(self, connection): return collations def _detect_sql_mode(self, connection): - row = self._compat_first( - connection.exec_driver_sql("SHOW VARIABLES LIKE 'sql_mode'"), - charset=self._connection_charset, - ) + setting = self._fetch_setting(connection, "sql_mode") - if not row: + if setting is None: util.warn( "Could not retrieve SQL_MODE; please ensure the " "MySQL user has permissions to SHOW VARIABLES" ) self._sql_mode = "" else: - self._sql_mode = row[1] or "" + self._sql_mode = setting or "" def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py index f729e4a18c9..cd1ed0d2064 100644 --- a/lib/sqlalchemy/dialects/mysql/cymysql.py +++ b/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -1,5 +1,5 @@ -# mysql/cymysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/cymysql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index e2f78783c22..d1a4e3137f7 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -1,3 +1,9 @@ +# dialects/mysql/dml.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import exc from ... import util from ...sql.base import _exclusive_against @@ -25,10 +31,12 @@ class Insert(StandardInsert): """ stringify_dialect = "mysql" + inherit_cache = False @property def inserted(self): - """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement + """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE + statement MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row that would be inserted, via a special function called ``VALUES()``. 
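A short sketch (not part of the patch) of the ``inserted`` namespace described above, assuming a hypothetical ``my_table`` ``Table`` with ``id`` and ``data`` columns::

    from sqlalchemy.dialects.mysql import insert

    # "my_table" is a hypothetical Table defined elsewhere
    stmt = insert(my_table).values(id=1, data="inserted value")
    # on a duplicate key, re-use the value that would have been inserted,
    # which renders VALUES(data) in the ON DUPLICATE KEY UPDATE clause
    stmt = stmt.on_duplicate_key_update(data=stmt.inserted.data)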
@@ -96,7 +104,7 @@ def on_duplicate_key_update(self, *args, **kw): in the UPDATE clause should be ordered as sent, in a manner similar to that described for the :class:`_expression.Update` construct overall - in :ref:`updates_order_parameters`:: + in :ref:`tutorial_parameter_ordered_updates`:: insert().on_duplicate_key_update( [("name", "some name"), ("value", "some value")]) diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index 9f9a838c5df..adc95e102e0 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -1,5 +1,5 @@ -# mysql/enumerated.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/enumerated.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -252,3 +252,12 @@ def process(value): def adapt(self, impltype, **kw): kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise return util.constructor_copy(self, impltype, *self.values, **kw) + + def __repr__(self): + return util.generic_repr( + self, + to_inspect=[SET, _StringType], + additional_kw=[ + ("retrieve_as_bitwise", False), + ], + ) diff --git a/lib/sqlalchemy/dialects/mysql/expression.py b/lib/sqlalchemy/dialects/mysql/expression.py index 7a66e9b1428..e6a8af928f8 100644 --- a/lib/sqlalchemy/dialects/mysql/expression.py +++ b/lib/sqlalchemy/dialects/mysql/expression.py @@ -1,3 +1,9 @@ +# dialects/mysql/expression.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import exc from ... import util from ...sql import coercions diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py index 8d052cc7c0d..2a0d81468ce 100644 --- a/lib/sqlalchemy/dialects/mysql/json.py +++ b/lib/sqlalchemy/dialects/mysql/json.py @@ -1,5 +1,5 @@ -# mysql/json.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/json.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadb.py b/lib/sqlalchemy/dialects/mysql/mariadb.py index 568c3f0cf58..741e119ce3f 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadb.py +++ b/lib/sqlalchemy/dialects/mysql/mariadb.py @@ -1,3 +1,9 @@ +# dialects/mysql/mariadb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .base import MariaDBIdentifierPreparer from .base import MySQLDialect diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index 14ed11b1999..65c5ca96eb1 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -1,5 +1,5 @@ -# mysql/mariadbconnector.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/mariadbconnector.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -39,12 +39,21 @@ class MySQLExecutionContext_mariadbconnector(MySQLExecutionContext): + _lastrowid = None + def create_server_side_cursor(self): return 
self._dbapi_connection.cursor(buffered=False) def create_default_cursor(self): return self._dbapi_connection.cursor(buffered=True) + def post_exec(self): + if self.isinsert and self.compiled.postfetch_lastrowid: + self._lastrowid = self.cursor.lastrowid + + def get_lastrowid(self): + return self._lastrowid + class MySQLCompiler_mariadbconnector(MySQLCompiler): pass diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index e17da317456..89a11045c6b 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ -# mysql/mysqlconnector.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/mysqlconnector.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index dfe719c28da..4457c6c242e 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,5 @@ -# mysql/mysqldb.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/mysqldb.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -44,9 +44,9 @@ "mysql+mysqldb://scott:tiger@192.168.0.134/test", connect_args={ "ssl": { - "ssl_ca": "/home/gord/client-ssl/ca.pem", - "ssl_cert": "/home/gord/client-ssl/client-cert.pem", - "ssl_key": "/home/gord/client-ssl/client-key.pem" + "ca": "/home/gord/client-ssl/ca.pem", + "cert": "/home/gord/client-ssl/client-cert.pem", + "key": "/home/gord/client-ssl/client-key.pem" } } ) @@ -63,19 +63,6 @@ "&ssl_key=/home/gord/client-ssl/client-key.pem" ) -If the server uses an automatically-generated certificate that is self-signed -or does not match the host name (as seen from the client), it may also be -necessary to indicate ``ssl_check_hostname=false``:: - - connection_uri = ( - "mysql+pymysql://scott:tiger@192.168.0.134/test" - "?ssl_ca=/home/gord/client-ssl/ca.pem" - "&ssl_cert=/home/gord/client-ssl/client-cert.pem" - "&ssl_key=/home/gord/client-ssl/client-key.pem" - "&ssl_check_hostname=false" - ) - - .. 
seealso:: :ref:`pymysql_ssl` in the PyMySQL dialect @@ -178,9 +165,12 @@ def on_connect(conn): return on_connect + def _ping_impl(self, dbapi_connection): + return dbapi_connection.ping() + def do_ping(self, dbapi_connection): try: - dbapi_connection.ping(False) + self._ping_impl(dbapi_connection) except self.dbapi.Error as err: if self.is_disconnect(err, dbapi_connection, None): return False diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index 6ec7ce9b9ec..3ccfbad0ac5 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ -# mysql/oursql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/oursql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/provision.py b/lib/sqlalchemy/dialects/mysql/provision.py index 86aaa94d94f..432bfbc91b6 100644 --- a/lib/sqlalchemy/dialects/mysql/provision.py +++ b/lib/sqlalchemy/dialects/mysql/provision.py @@ -1,3 +1,9 @@ +# dialects/mysql/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import exc from ...testing.provision import configure_follower from ...testing.provision import create_db diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 1d2c3be2d73..7b4830932a5 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -1,5 +1,5 @@ -# mysql/pymysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/pymysql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -25,7 +25,19 @@ ------------------ The PyMySQL DBAPI accepts the same SSL arguments as that of MySQLdb, -described at :ref:`mysqldb_ssl`. See that section for examples. +described at :ref:`mysqldb_ssl`. See that section for additional examples. + +If the server uses an automatically-generated certificate that is self-signed +or does not match the host name (as seen from the client), it may also be +necessary to indicate ``ssl_check_hostname=false`` in PyMySQL:: + + connection_uri = ( + "mysql+pymysql://scott:tiger@192.168.0.134/test" + "?ssl_ca=/home/gord/client-ssl/ca.pem" + "&ssl_cert=/home/gord/client-ssl/client-cert.pem" + "&ssl_key=/home/gord/client-ssl/client-key.pem" + "&ssl_check_hostname=false" + ) MySQL-Python Compatibility @@ -67,6 +79,42 @@ def supports_server_side_cursors(self): def dbapi(cls): return __import__("pymysql") + @langhelpers.memoized_property + def _send_false_to_ping(self): + """determine if pymysql has deprecated, changed the default of, + or removed the 'reconnect' argument of connection.ping(). + + See #10492 and + https://github.com/PyMySQL/mysqlclient/discussions/651#discussioncomment-7308971 + for background. 
+ + """ # noqa: E501 + + try: + Connection = __import__( + "pymysql.connections" + ).connections.Connection + except (ImportError, AttributeError): + return True + else: + insp = langhelpers.get_callable_argspec(Connection.ping) + try: + reconnect_arg = insp.args[1] + except IndexError: + return False + else: + return reconnect_arg == "reconnect" and ( + not insp.defaults or insp.defaults[0] is not False + ) + + def _ping_impl(self, dbapi_connection): + if self._send_false_to_ping: + dbapi_connection.ping(False) + else: + dbapi_connection.ping() + + return True + def create_connect_args(self, url, _translate_args=None): if _translate_args is None: _translate_args = dict(username="user") diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 69cc6487d15..a02d9b29a2f 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,5 @@ -# mysql/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/pyodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -43,11 +43,11 @@ """ # noqa import re -import sys from .base import MySQLDialect from .base import MySQLExecutionContext from .types import TIME +from ... import exc from ... import util from ...connectors.pyodbc import PyODBCConnector from ...sql.sqltypes import Time @@ -88,13 +88,15 @@ def _detect_charset(self, connection): # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. - rs = connection.exec_driver_sql( - "SHOW VARIABLES LIKE 'character_set%%'" - ) - opts = {row[0]: row[1] for row in self._compat_fetchall(rs)} - for key in ("character_set_connection", "character_set"): - if opts.get(key, None): - return opts[key] + + # set this to None as _fetch_setting attempts to use it (None is OK) + self._connection_charset = None + try: + value = self._fetch_setting(connection, "character_set_client") + if value: + return value + except exc.DBAPIError: + pass util.warn( "Could not detect the connection character set. 
" @@ -102,6 +104,9 @@ def _detect_charset(self, connection): ) return "latin1" + def _get_server_version_info(self, connection): + return MySQLDialect._get_server_version_info(self, connection) + def _extract_error_code(self, exception): m = re.compile(r"\((\d+)\)").search(str(exception.args)) c = m.group(1) @@ -121,15 +126,9 @@ def on_connect(conn): # https://github.com/mkleehammer/pyodbc/wiki/Unicode pyodbc_SQL_CHAR = 1 # pyodbc.SQL_CHAR pyodbc_SQL_WCHAR = -8 # pyodbc.SQL_WCHAR - if sys.version_info.major > 2: - conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") - conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") - conn.setencoding(encoding="utf-8") - else: - conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") - conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") - conn.setencoding(str, encoding="utf-8") - conn.setencoding(unicode, encoding="utf-8") # noqa: F821 + conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") + conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") + conn.setencoding(encoding="utf-8") return on_connect diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index 503c9614c0a..078e3d5339b 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -1,5 +1,5 @@ -# mysql/reflection.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/reflection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -70,6 +70,9 @@ def parse(self, show_create, charset): pass return state + def _check_view(self, sql): + return bool(self._re_is_view.match(sql)) + def _parse_constraints(self, line): """Parse a KEY or CONSTRAINT line. @@ -319,7 +322,12 @@ def _describe_to_create(self, table_name, columns): def _parse_keyexprs(self, identifiers): """Unpack '"col"(2),"col" ASC'-ish strings into components.""" - return self._re_keyexprs.findall(identifiers) + return [ + (colname, int(length) if length else None, modifiers) + for colname, length, modifiers in self._re_keyexprs.findall( + identifiers + ) + ] def _prep_regexes(self): """Pre-compile regular expressions.""" @@ -349,6 +357,8 @@ def _prep_regexes(self): self.preparer._unescape_identifier, ) + self._re_is_view = _re_compile(r"^CREATE(?! 
TABLE)(\s.*)?\sVIEW") + # `col`,`col2`(32),`col3`(15) DESC # self._re_keyexprs = _re_compile( diff --git a/lib/sqlalchemy/dialects/mysql/reserved_words.py b/lib/sqlalchemy/dialects/mysql/reserved_words.py index e2c39852d80..7055dd5c67e 100644 --- a/lib/sqlalchemy/dialects/mysql/reserved_words.py +++ b/lib/sqlalchemy/dialects/mysql/reserved_words.py @@ -1,5 +1,5 @@ -# mysql/reserved_words.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/reserved_words.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index dee58b4a531..a304f29b9ef 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -1,5 +1,5 @@ -# mysql/types.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/mysql/types.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -499,7 +499,7 @@ def __init__(self, display_width=None): class TEXT(_StringType, sqltypes.TEXT): - """MySQL TEXT type, for text up to 2^16 characters.""" + """MySQL TEXT type, for character storage encoded up to 2^16 bytes.""" __visit_name__ = "TEXT" @@ -508,7 +508,7 @@ def __init__(self, length=None, **kw): :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store - ``length`` characters. + ``length`` bytes of characters. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. @@ -535,7 +535,7 @@ def __init__(self, length=None, **kw): class TINYTEXT(_StringType): - """MySQL TINYTEXT type, for text up to 2^8 characters.""" + """MySQL TINYTEXT type, for character storage encoded up to 2^8 bytes.""" __visit_name__ = "TINYTEXT" @@ -567,7 +567,8 @@ def __init__(self, **kwargs): class MEDIUMTEXT(_StringType): - """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" + """MySQL MEDIUMTEXT type, for character storage encoded up + to 2^24 bytes.""" __visit_name__ = "MEDIUMTEXT" @@ -599,7 +600,7 @@ def __init__(self, **kwargs): class LONGTEXT(_StringType): - """MySQL LONGTEXT type, for text up to 2^32 characters.""" + """MySQL LONGTEXT type, for character storage encoded up to 2^32 bytes.""" __visit_name__ = "LONGTEXT" @@ -683,7 +684,7 @@ def __init__(self, length=None, **kwargs): super(CHAR, self).__init__(length=length, **kwargs) @classmethod - def _adapt_string_for_cast(self, type_): + def _adapt_string_for_cast(cls, type_): # copy the given string type into a CHAR # for the purposes of rendering a CAST expression type_ = sqltypes.to_instance(type_) diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py index 3d4aca1364a..9dfc3f017c4 100644 --- a/lib/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ -# oracle/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/oracle/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 8b790c70c69..45f0b62893b 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ 
-1,5 +1,5 @@ -# oracle/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/oracle/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -8,7 +8,6 @@ r""" .. dialect:: oracle :name: Oracle - :full_support: 11.2, 18c :normal_support: 11+ :best_effort: 8+ @@ -255,36 +254,48 @@ version of Oracle server (compatibility version < 12.2) is detected. -LIMIT/OFFSET Support --------------------- +LIMIT/OFFSET/FETCH Support +-------------------------- -Oracle has no direct support for LIMIT and OFFSET until version 12c. -To achieve this behavior across all widely used versions of Oracle starting -with the 8 series, SQLAlchemy currently makes use of ROWNUM to achieve -LIMIT/OFFSET; the exact methodology is taken from -https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results . +Methods like :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset` currently +use an emulated approach for LIMIT / OFFSET based on window functions, which +involves creation of a subquery using ``ROW_NUMBER`` that is prone to +performance issues as well as SQL construction issues for complex statements. +However, this approach is supported by all Oracle versions. See notes below. -There is currently a single option to affect its behavior: +When using Oracle 12c and above, use the :meth:`_sql.Select.fetch` method +instead; this will render the more modern +``FETCH FIRST N ROW / OFFSET N ROWS`` syntax. + +Notes on LIMIT / OFFSET emulation (when fetch() method cannot be used) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If using :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset`, +or with the ORM the :meth:`_orm.Query.limit` and :meth:`_orm.Query.offset` methods, +and the :meth:`_sql.Select.fetch` method **cannot** be used instead, the following +notes apply: + +* SQLAlchemy currently makes use of ROWNUM to achieve + LIMIT/OFFSET; the exact methodology is taken from + https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results . * the "FIRST_ROWS()" optimization keyword is not used by default. To enable the usage of this optimization directive, specify ``optimize_limits=True`` to :func:`_sa.create_engine`. -.. versionchanged:: 1.4 - The Oracle dialect renders limit/offset integer values using a "post - compile" scheme which renders the integer directly before passing the - statement to the cursor for execution. The ``use_binds_for_limits`` flag - no longer has an effect. + .. versionchanged:: 1.4 + The Oracle dialect renders limit/offset integer values using a "post + compile" scheme which renders the integer directly before passing the + statement to the cursor for execution. The ``use_binds_for_limits`` flag + no longer has an effect. - .. seealso:: + .. seealso:: - :ref:`change_4808`. - -Support for changing the row number strategy, which would include one that -makes use of the ``row_number()`` window function as well as one that makes -use of the Oracle 12c "FETCH FIRST N ROW / OFFSET N ROWS" keywords may be -added in a future release. + :ref:`change_4808`. +* A future release may use ``FETCH FIRST N ROW / OFFSET N ROWS`` automatically + when :meth:`_sql.Select.limit`, :meth:`_sql.Select.offset`, :meth:`_orm.Query.limit`, + :meth:`_orm.Query.offset` are used. .. 
_oracle_returning: @@ -747,6 +758,7 @@ def get_dbapi_type(self, dbapi): "LONG": LONG, "BINARY_DOUBLE": BINARY_DOUBLE, "BINARY_FLOAT": BINARY_FLOAT, + "ROWID": ROWID, } @@ -929,14 +941,14 @@ def function_argspec(self, fn, **kw): def visit_function(self, func, **kw): text = super(OracleCompiler, self).visit_function(func, **kw) if kw.get("asfrom", False): - text = "TABLE (%s)" % func + text = "TABLE (%s)" % text return text def visit_table_valued_column(self, element, **kw): text = super(OracleCompiler, self).visit_table_valued_column( element, **kw ) - text = "COLUMN_VALUE " + text + text = text + ".COLUMN_VALUE" return text def default_from(self): @@ -1258,20 +1270,18 @@ def visit_is_not_distinct_from_binary(self, binary, operator, **kw): self.process(binary.right), ) - def _get_regexp_args(self, binary, kw): + def visit_regexp_match_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) pattern = self.process(binary.right, **kw) flags = binary.modifiers["flags"] - if flags is not None: - flags = self.process(flags, **kw) - return string, pattern, flags - - def visit_regexp_match_op_binary(self, binary, operator, **kw): - string, pattern, flags = self._get_regexp_args(binary, kw) if flags is None: return "REGEXP_LIKE(%s, %s)" % (string, pattern) else: - return "REGEXP_LIKE(%s, %s, %s)" % (string, pattern, flags) + return "REGEXP_LIKE(%s, %s, %s)" % ( + string, + pattern, + self.render_literal_value(flags, sqltypes.STRINGTYPE), + ) def visit_not_regexp_match_op_binary(self, binary, operator, **kw): return "NOT %s" % self.visit_regexp_match_op_binary( @@ -1279,20 +1289,19 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): ) def visit_regexp_replace_op_binary(self, binary, operator, **kw): - string, pattern, flags = self._get_regexp_args(binary, kw) - replacement = self.process(binary.modifiers["replacement"], **kw) + string = self.process(binary.left, **kw) + pattern_replace = self.process(binary.right, **kw) + flags = binary.modifiers["flags"] if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( string, - pattern, - replacement, + pattern_replace, ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( string, - pattern, - replacement, - flags, + pattern_replace, + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) @@ -1371,8 +1380,9 @@ def get_identity_options(self, identity_options): text = text.replace("NO MINVALUE", "NOMINVALUE") text = text.replace("NO MAXVALUE", "NOMAXVALUE") text = text.replace("NO CYCLE", "NOCYCLE") - text = text.replace("NO ORDER", "NOORDER") - return text + if identity_options.order is not None: + text += " ORDER" if identity_options.order else " NOORDER" + return text.strip() def visit_computed_column(self, generated): text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process( diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 3e705dced33..c334f5042a0 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/oracle/cx_oracle.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -468,9 +469,19 @@ def _remove_clob(inputsizes, cursor, statement, parameters, context): from ... import types as sqltypes from ... 
import util from ...engine import cursor as _cursor +from ...sql import expression from ...util import compat +_ORACLE_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]\.\/\? ]") + +# Oracle bind names can't start with digits or underscores. +# currently we rely upon Oracle-specific quoting of bind names in most cases. +# however for expanding params, the escape chars are used. +# see #8708 +_ORACLE_BIND_TRANSLATE_CHARS = dict(zip("%():[]./? ", "PAZCCCCCCCC")) + + class _OracleInteger(sqltypes.Integer): def get_dbapi_type(self, dbapi): # see https://github.com/oracle/python-cx_Oracle/issues/ @@ -701,6 +712,10 @@ def bindparam_string(self, name, **kw): quote is True or quote is not False and self.preparer._bindparam_requires_quotes(name) + # bind param quoting for Oracle doesn't work with post_compile + # params. For those, the default bindparam_string will escape + # special chars, and the appending of a number "_1" etc. will + # take care of reserved words and not kw.get("post_compile", False) ): # interesting to note about expanding parameters - since the @@ -711,6 +726,29 @@ def bindparam_string(self, name, **kw): quoted_name = '"%s"' % name kw["escaped_from"] = name name = quoted_name + return OracleCompiler.bindparam_string(self, name, **kw) + + # TODO: we could likely do away with quoting altogether for + # Oracle parameters and use the custom escaping here + escaped_from = kw.get("escaped_from", None) + if not escaped_from: + + if _ORACLE_BIND_TRANSLATE_RE.search(name): + # not quite the translate use case as we want to + # also get a quick boolean if we even found + # unusual characters in the name + new_name = _ORACLE_BIND_TRANSLATE_RE.sub( + lambda m: _ORACLE_BIND_TRANSLATE_CHARS[m.group(0)], + name, + ) + if new_name[0].isdigit() or new_name[0] == "_": + new_name = "D" + new_name + kw["escaped_from"] = name + name = new_name + elif name[0].isdigit() or name[0] == "_": + new_name = "D" + name + kw["escaped_from"] = name + name = new_name return OracleCompiler.bindparam_string(self, name, **kw) @@ -851,11 +889,12 @@ def post_exec(self): self.cursor, [ (getattr(col, "name", col._anon_name_label), None) - for col in self.compiled.returning + for col in expression._select_iterables( + self.compiled.returning + ) ], initial_buffer=[tuple(returning_params)], ) - self.cursor_fetch_strategy = fetch_strategy def create_cursor(self): @@ -1104,10 +1143,33 @@ def _detect_decimal_char(self, connection): # NLS_TERRITORY or formatting behavior of the DB, we opt # to just look at it - self._decimal_char = connection.exec_driver_sql( - "select value from nls_session_parameters " - "where parameter = 'NLS_NUMERIC_CHARACTERS'" - ).scalar()[0] + dbapi_connection = connection.connection + + with dbapi_connection.cursor() as cursor: + # issue #8744 + # nls_session_parameters is not available in some Oracle + # modes like "mount mode". But then, v$nls_parameters is not + # available if the connection doesn't have SYSDBA priv. 
+ # + # simplify the whole thing and just use the method that we were + # doing in the test suite already, selecting a number + + def output_type_handler( + cursor, name, defaultType, size, precision, scale + ): + return cursor.var( + self.dbapi.STRING, 255, arraysize=cursor.arraysize + ) + + cursor.outputtypehandler = output_type_handler + cursor.execute("SELECT 1.1 FROM DUAL") + value = cursor.fetchone()[0] + + decimal_char = value.lstrip("0")[1] + assert not decimal_char[0].isdigit() + + self._decimal_char = decimal_char + if self._decimal_char != ".": _detect_decimal = self._detect_decimal _to_decimal = self._to_decimal @@ -1318,7 +1380,14 @@ def is_disconnect(self, e, connection, cursor): ) and "not connected" in str(e): return True - if hasattr(error, "code"): + if hasattr(error, "code") and error.code in { + 28, + 3114, + 3113, + 3135, + 1033, + 2396, + }: # ORA-00028: your session has been killed # ORA-03114: not connected to ORACLE # ORA-03113: end-of-file on communication channel @@ -1326,9 +1395,18 @@ def is_disconnect(self, e, connection, cursor): # ORA-01033: ORACLE initialization or shutdown in progress # ORA-02396: exceeded maximum idle time, please connect again # TODO: Others ? - return error.code in (28, 3114, 3113, 3135, 1033, 2396) - else: - return False + return True + + if re.match(r"^(?:DPI-1010|DPI-1080|DPY-1001|DPY-4011)", str(e)): + # DPI-1010: not connected + # DPI-1080: connection was closed by ORA-3113 + # python-oracledb's DPY-1001: not connected to database + # python-oracledb's DPY-4011: the database or network closed the + # connection + # TODO: others? + return True + + return False def create_xid(self): """create a two-phase transaction ID. diff --git a/lib/sqlalchemy/dialects/oracle/provision.py b/lib/sqlalchemy/dialects/oracle/provision.py index 8ce58782be0..58f5853df35 100644 --- a/lib/sqlalchemy/dialects/oracle/provision.py +++ b/lib/sqlalchemy/dialects/oracle/provision.py @@ -1,3 +1,9 @@ +# dialects/oracle/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import create_engine from ... 
import exc from ...engine import url as sa_url @@ -63,7 +69,7 @@ def stop_test_class_outside_fixtures(config, db, cls): try: with db.begin() as conn: # run magic command to get rid of identity sequences - # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa E501 + # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa: E501 conn.exec_driver_sql("purge recyclebin") except exc.DatabaseError as err: log.warning("purge recyclebin command failed: %s", err) diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 0de84e5797d..2227a8eb5fb 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -1,5 +1,5 @@ -# postgresql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -30,6 +30,7 @@ from .base import INTEGER from .base import INTERVAL from .base import MACADDR +from .base import MACADDR8 from .base import MONEY from .base import NUMERIC from .base import OID @@ -80,6 +81,7 @@ "UUID", "BIT", "MACADDR", + "MACADDR8", "MONEY", "OID", "REGCLASS", diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 0cb574dacf7..fd719dbe739 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -1,5 +1,5 @@ -# postgresql/array.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/array.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -87,6 +87,7 @@ class array(expression.ClauseList, expression.ColumnElement): __visit_name__ = "array" stringify_dialect = "postgresql" + inherit_cache = True def __init__(self, clauses, **kw): clauses = [ @@ -189,6 +190,31 @@ class also conjunction with the :class:`.ENUM` type. For a workaround, see the special type at :ref:`postgresql_array_of_enum`. + .. container:: topic + + **Detecting Changes in ARRAY columns when using the ORM** + + The :class:`_postgresql.ARRAY` type, when used with the SQLAlchemy ORM, + does not detect in-place mutations to the array. In order to detect + these, the :mod:`sqlalchemy.ext.mutable` extension must be used, using + the :class:`.MutableList` class:: + + from sqlalchemy.dialects.postgresql import ARRAY + from sqlalchemy.ext.mutable import MutableList + + class SomeOrmClass(Base): + # ... + + data = Column(MutableList.as_mutable(ARRAY(Integer))) + + This extension will allow "in-place" changes such to the array + such as ``.append()`` to produce events which will be detected by the + unit of work. Note that changes to elements **inside** the array, + including subarrays that are mutated in place, are **not** detected. + + Alternatively, assigning a new array value to an ORM element that + replaces the old one will always trigger a change event. + .. 
seealso:: :class:`_types.ARRAY` - base array type @@ -366,10 +392,11 @@ def process(value): if self._against_native_enum: super_rp = process + pattern = re.compile(r"^{(.*)}$") def handle_raw_string(value): - inner = re.match(r"^{(.*)}$", value).group(1) - return inner.split(",") if inner else [] + inner = pattern.match(value).group(1) + return _split_enum_values(inner) def process(value): if value is None: @@ -384,3 +411,28 @@ def process(value): ) return process + + +def _split_enum_values(array_string): + + if '"' not in array_string: + # no escape char is present so it can just split on the comma + return array_string.split(",") if array_string else [] + + # handles quoted strings from: + # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr' + # returns + # ['abc', 'quoted', 'also\\quoted', 'quoted, comma', 'esc " quot', 'qpr'] + text = array_string.replace(r"\"", "_$ESC_QUOTE$_") + text = text.replace(r"\\", "\\") + result = [] + on_quotes = re.split(r'(")', text) + in_quotes = False + for tok in on_quotes: + if tok == '"': + in_quotes = not in_quotes + elif in_quotes: + result.append(tok.replace("_$ESC_QUOTE$_", '"')) + else: + result.extend(re.findall(r"([^\s,]+),?", tok)) + return result diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 3d195e691ae..5c4f831048e 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1,5 +1,5 @@ -# postgresql/asyncpg.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under @@ -96,6 +96,25 @@ stale, nor can it retry the statement as the PostgreSQL transaction is invalidated when these errors occur. +Disabling the PostgreSQL JIT to improve ENUM datatype handling +--------------------------------------------------------------- + +Asyncpg has an `issue `_ when +using PostgreSQL ENUM datatypes, where upon the creation of new database +connections, an expensive query may be emitted in order to retrieve metadata +regarding custom types which has been shown to negatively affect performance. +To mitigate this issue, the PostgreSQL "jit" setting may be disabled from the +client using this setting passed to :func:`_asyncio.create_async_engine`:: + + engine = create_async_engine( + "postgresql+asyncpg://user:password@localhost/tmp", + connect_args={"server_settings": {"jit": "off"}}, + ) + +.. 
seealso:: + + https://github.com/MagicStack/asyncpg/issues/727 + """ # noqa import collections @@ -136,7 +155,10 @@ class AsyncpgTime(sqltypes.Time): def get_dbapi_type(self, dbapi): - return dbapi.TIME + if self.timezone: + return dbapi.TIME_W_TZ + else: + return dbapi.TIME class AsyncpgDate(sqltypes.Date): @@ -249,6 +271,9 @@ def process(value): class AsyncpgNumeric(sqltypes.Numeric): + def get_dbapi_type(self, dbapi): + return dbapi.NUMBER + def bind_processor(self, dialect): return None @@ -277,6 +302,11 @@ def result_processor(self, dialect, coltype): ) +class AsyncpgFloat(AsyncpgNumeric): + def get_dbapi_type(self, dbapi): + return dbapi.FLOAT + + class AsyncpgREGCLASS(REGCLASS): def get_dbapi_type(self, dbapi): return dbapi.STRING @@ -413,7 +443,7 @@ async def _prepare_and_execute(self, operation, parameters): status = prepared_stmt.get_statusmsg() reg = re.match( - r"(?:UPDATE|DELETE|INSERT \d+) (\d+)", status + r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)", status ) if reg: self.rowcount = int(reg.group(1)) @@ -693,31 +723,54 @@ def cursor(self, server_side=False): else: return AsyncAdapt_asyncpg_cursor(self) + async def _rollback_and_discard(self): + try: + await self._transaction.rollback() + finally: + # if asyncpg .rollback() was actually called, then whether or + # not it raised or succeeded, the transation is done, discard it + self._transaction = None + self._started = False + + async def _commit_and_discard(self): + try: + await self._transaction.commit() + finally: + # if asyncpg .commit() was actually called, then whether or + # not it raised or succeeded, the transation is done, discard it + self._transaction = None + self._started = False + def rollback(self): if self._started: try: - self.await_(self._transaction.rollback()) - except Exception as error: - self._handle_exception(error) - finally: + self.await_(self._rollback_and_discard()) self._transaction = None self._started = False + except Exception as error: + # don't dereference asyncpg transaction if we didn't + # actually try to call rollback() on it + self._handle_exception(error) def commit(self): if self._started: try: - self.await_(self._transaction.commit()) - except Exception as error: - self._handle_exception(error) - finally: + self.await_(self._commit_and_discard()) self._transaction = None self._started = False + except Exception as error: + # don't dereference asyncpg transaction if we didn't + # actually try to call commit() on it + self._handle_exception(error) def close(self): self.rollback() self.await_(self._connection.close()) + def terminate(self): + self._connection.terminate() + class AsyncAdaptFallback_asyncpg_connection(AsyncAdapt_asyncpg_connection): __slots__ = () @@ -810,6 +863,7 @@ def Binary(self, value): TIMESTAMP = util.symbol("TIMESTAMP") TIMESTAMP_W_TZ = util.symbol("TIMESTAMP_W_TZ") TIME = util.symbol("TIME") + TIME_W_TZ = util.symbol("TIME_W_TZ") DATE = util.symbol("DATE") INTERVAL = util.symbol("INTERVAL") NUMBER = util.symbol("NUMBER") @@ -835,6 +889,7 @@ def Binary(self, value): AsyncAdapt_asyncpg_dbapi.TIMESTAMP_W_TZ: "timestamp with time zone", AsyncAdapt_asyncpg_dbapi.DATE: "date", AsyncAdapt_asyncpg_dbapi.TIME: "time", + AsyncAdapt_asyncpg_dbapi.TIME_W_TZ: "time with time zone", AsyncAdapt_asyncpg_dbapi.INTERVAL: "interval", AsyncAdapt_asyncpg_dbapi.NUMBER: "numeric", AsyncAdapt_asyncpg_dbapi.FLOAT: "float", @@ -859,6 +914,7 @@ class PGDialect_asyncpg(PGDialect): supports_server_side_cursors = True supports_unicode_binds = True + has_terminate = True default_paramstyle = 
"format" supports_sane_multi_rowcount = False @@ -883,6 +939,7 @@ class PGDialect_asyncpg(PGDialect): sqltypes.Integer: AsyncpgInteger, sqltypes.BigInteger: AsyncpgBigInteger, sqltypes.Numeric: AsyncpgNumeric, + sqltypes.Float: AsyncpgFloat, sqltypes.JSON: AsyncpgJSON, json.JSONB: AsyncpgJSONB, sqltypes.JSON.JSONPathType: AsyncpgJSONPathType, @@ -954,6 +1011,9 @@ def set_deferrable(self, connection, value): def get_deferrable(self, connection): return connection.deferrable + def do_terminate(self, dbapi_connection) -> None: + dbapi_connection.terminate() + def create_connect_args(self, url): opts = url.translate_connect_args(username="user") @@ -994,8 +1054,42 @@ def do_set_input_sizes(self, cursor, list_of_tuples, context): } ) - def on_connect(self): - super_connect = super(PGDialect_asyncpg, self).on_connect() + async def setup_asyncpg_json_codec(self, conn): + """set up JSON codec for asyncpg. + + This occurs for all new connections and + can be overridden by third party dialects. + + .. versionadded:: 1.4.27 + + """ + + asyncpg_connection = conn._connection + deserializer = self._json_deserializer or _py_json.loads + + def _json_decoder(bin_value): + return deserializer(bin_value.decode()) + + await asyncpg_connection.set_type_codec( + "json", + encoder=str.encode, + decoder=_json_decoder, + schema="pg_catalog", + format="binary", + ) + + async def setup_asyncpg_jsonb_codec(self, conn): + """set up JSONB codec for asyncpg. + + This occurs for all new connections and + can be overridden by third party dialects. + + .. versionadded:: 1.4.27 + + """ + + asyncpg_connection = conn._connection + deserializer = self._json_deserializer or _py_json.loads def _jsonb_encoder(str_value): # \x01 is the prefix for jsonb used by PostgreSQL. @@ -1004,42 +1098,35 @@ def _jsonb_encoder(str_value): deserializer = self._json_deserializer or _py_json.loads - def _json_decoder(bin_value): - return deserializer(bin_value.decode()) - def _jsonb_decoder(bin_value): # the byte is the \x01 prefix for jsonb used by PostgreSQL. # asyncpg returns it when format='binary' return deserializer(bin_value[1:].decode()) - async def _setup_type_codecs(conn): - """set up type decoders at the asyncpg level. - - these are set_type_codec() calls to normalize - There was a tentative decoder for the "char" datatype here - to have it return strings however this type is actually a binary - type that other drivers are likely mis-interpreting. - - See https://github.com/MagicStack/asyncpg/issues/623 for reference - on why it's set up this way. - """ - await conn._connection.set_type_codec( - "json", - encoder=str.encode, - decoder=_json_decoder, - schema="pg_catalog", - format="binary", - ) - await conn._connection.set_type_codec( - "jsonb", - encoder=_jsonb_encoder, - decoder=_jsonb_decoder, - schema="pg_catalog", - format="binary", - ) + await asyncpg_connection.set_type_codec( + "jsonb", + encoder=_jsonb_encoder, + decoder=_jsonb_decoder, + schema="pg_catalog", + format="binary", + ) + + def on_connect(self): + """on_connect for asyncpg + + A major component of this for asyncpg is to set up type decoders at the + asyncpg level. + + See https://github.com/MagicStack/asyncpg/issues/623 for + notes on JSON/JSONB implementation. 
+ + """ + + super_connect = super(PGDialect_asyncpg, self).on_connect() def connect(conn): - conn.await_(_setup_type_codecs(conn)) + conn.await_(self.setup_asyncpg_json_codec(conn)) + conn.await_(self.setup_asyncpg_jsonb_codec(conn)) if super_connect is not None: super_connect(conn) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c1a2cf81dcf..0e98a41bc60 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ -# postgresql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -8,7 +8,6 @@ r""" .. dialect:: postgresql :name: PostgreSQL - :full_support: 9.6, 10, 11, 12, 13 :normal_support: 9.6+ :best_effort: 8+ @@ -148,7 +147,7 @@ def use_identity(element, compiler, **kw): --------------------------- Most SQLAlchemy dialects support setting of transaction isolation level -using the :paramref:`_sa.create_engine.execution_options` parameter +using the :paramref:`_sa.create_engine.isolation_level` parameter at the :func:`_sa.create_engine` level, and at the :class:`_engine.Connection` level via the :paramref:`.Connection.execution_options.isolation_level` parameter. @@ -166,9 +165,7 @@ def use_identity(element, compiler, **kw): engine = create_engine( "postgresql+pg8000://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + isolation_level = "REPEATABLE READ" ) To set using per-connection execution options:: @@ -180,6 +177,11 @@ def use_identity(element, compiler, **kw): with conn.begin(): # ... work with transaction +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. + Valid values for ``isolation_level`` on most PostgreSQL dialects include: * ``READ COMMITTED`` @@ -190,10 +192,10 @@ def use_identity(element, compiler, **kw): .. seealso:: - :ref:`postgresql_readonly_deferrable` - :ref:`dbapi_autocommit` + :ref:`postgresql_readonly_deferrable` + :ref:`psycopg2_isolation_level` :ref:`pg8000_isolation_level` @@ -227,6 +229,68 @@ def use_identity(element, compiler, **kw): .. versionadded:: 1.4 added support for the ``postgresql_readonly`` and ``postgresql_deferrable`` execution options. +.. _postgresql_reset_on_return: + +Temporary Table / Resource Reset for Connection Pooling +------------------------------------------------------- + +The :class:`.QueuePool` connection pool implementation used +by the SQLAlchemy :class:`_sa.Engine` object includes +:ref:`reset on return ` behavior that will invoke +the DBAPI ``.rollback()`` method when connections are returned to the pool. +While this rollback will clear out the immediate state used by the previous +transaction, it does not cover a wider range of session-level state, including +temporary tables as well as other server state such as prepared statement +handles and statement caches. The PostgreSQL database includes a variety +of commands which may be used to reset this state, including +``DISCARD``, ``RESET``, ``DEALLOCATE``, and ``UNLISTEN``. 
+ + +To install +one or more of these commands as the means of performing reset-on-return, +the :meth:`.PoolEvents.reset` event hook may be used, as demonstrated +in the example below (**requires SQLAlchemy 1.4.43 or greater**). The implementation +will end transactions in progress as well as discard temporary tables +using the ``CLOSE``, ``RESET`` and ``DISCARD`` commands; see the PostgreSQL +documentation for background on what each of these statements do. + +The :paramref:`_sa.create_engine.pool_reset_on_return` parameter +is set to ``None`` so that the custom scheme can replace the default behavior +completely. The custom hook implementation calls ``.rollback()`` in any case, +as it's usually important that the DBAPI's own tracking of commit/rollback +will remain consistent with the state of the transaction:: + + + from sqlalchemy import create_engine + from sqlalchemy import event + + postgresql_engine = create_engine( + "postgresql+psycopg2://scott:tiger@hostname/dbname", + + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(postgresql_engine, "reset") + def _reset_postgresql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("CLOSE ALL") + dbapi_connection.execute("RESET ALL") + dbapi_connection.execute("DISCARD TEMP") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + + :ref:`pool_reset_on_return` - in the :ref:`pooling_toplevel` documentation + .. _postgresql_alternate_search_path: Setting Alternate Search Paths on Connect @@ -273,22 +337,77 @@ def set_search_path(dbapi_connection, connection_record): Remote-Schema Table Introspection and PostgreSQL search_path ------------------------------------------------------------ -**TL;DR;**: keep the ``search_path`` variable set to its default of ``public``, -name schemas **other** than ``public`` explicitly within ``Table`` definitions. - -The PostgreSQL dialect can reflect tables from any schema. The -:paramref:`_schema.Table.schema` argument, or alternatively the -:paramref:`.MetaData.reflect.schema` argument determines which schema will -be searched for the table or tables. The reflected :class:`_schema.Table` -objects -will in all cases retain this ``.schema`` attribute as was specified. -However, with regards to tables which these :class:`_schema.Table` -objects refer to -via foreign key constraint, a decision must be made as to how the ``.schema`` -is represented in those remote tables, in the case where that remote -schema name is also a member of the current +.. admonition:: Section Best Practices Summarized + + keep the ``search_path`` variable set to its default of ``public``, without + any other schema names. Ensure the username used to connect **does not** + match remote schemas, or ensure the ``"$user"`` token is **removed** from + ``search_path``. For other schema names, name these explicitly + within :class:`_schema.Table` definitions. Alternatively, the + ``postgresql_ignore_search_path`` option will cause all reflected + :class:`_schema.Table` objects to have a :attr:`_schema.Table.schema` + attribute set up. 
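For illustration, a minimal sketch of the recommendation above to name non-default schemas explicitly within :class:`_schema.Table` definitions (the schema and table names are placeholders)::

    from sqlalchemy import Column, Integer, MetaData, Table

    metadata_obj = MetaData()

    # the schema is spelled out on the Table itself, rather than being
    # left to whatever search_path is in effect on the connection
    referred = Table(
        "referred",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        schema="test_schema",
    )

A table declared this way is rendered schema-qualified in SQL regardless of the ``search_path`` of the connecting role.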
+ +The PostgreSQL dialect can reflect tables from any schema, as outlined in +:ref:`metadata_reflection_schemas`. + +In all cases, the first thing SQLAlchemy does when reflecting tables is +to **determine the default schema for the current database connection**. +It does this using the PostgreSQL ``current_schema()`` +function, illustrated below using a PostgreSQL client session (i.e. using +the ``psql`` tool):: + + test=> select current_schema(); + current_schema + ---------------- + public + (1 row) + +Above we see that on a plain install of PostgreSQL, the default schema name +is the name ``public``. + +However, if your database username **matches the name of a schema**, PostgreSQL's +default is to then **use that name as the default schema**. Below, we log in +using the username ``scott``. When we create a schema named ``scott``, **it +implicitly changes the default schema**:: + + test=> select current_schema(); + current_schema + ---------------- + public + (1 row) + + test=> create schema scott; + CREATE SCHEMA + test=> select current_schema(); + current_schema + ---------------- + scott + (1 row) + +The behavior of ``current_schema()`` is derived from the `PostgreSQL search path -`_. +`_ +variable ``search_path``, which in modern PostgreSQL versions defaults to this:: + + test=> show search_path; + search_path + ----------------- + "$user", public + (1 row) + +Where above, the ``"$user"`` variable will inject the current username as the +default schema, if one exists. Otherwise, ``public`` is used. + +When a :class:`_schema.Table` object is reflected, if it is present in the +schema indicated by the ``current_schema()`` function, **the schema name assigned +to the ".schema" attribute of the Table is the Python "None" value**. Otherwise, the +".schema" attribute will be assigned the string name of that schema. + +With regards to tables which these :class:`_schema.Table` +objects refer to via foreign key constraint, a decision must be made as to how +the ``.schema`` is represented in those remote tables, in the case where that +remote schema name is also a member of the current ``search_path``. By default, the PostgreSQL dialect mimics the behavior encouraged by PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function
@@ -379,7 +498,7 @@ def set_search_path(dbapi_connection, connection_record): We will now have ``test_schema.referred`` stored as schema-qualified:: - >>> meta.tables['test_schema.referred'].schema + >>> metadata_obj.tables['test_schema.referred'].schema 'test_schema' .. sidebar:: Best Practices for PostgreSQL Schema reflection @@ -394,22 +513,13 @@ def set_search_path(dbapi_connection, connection_record): described here are only for those users who can't, or prefer not to, stay within these guidelines. -Note that **in all cases**, the "default" schema is always reflected as -``None``. The "default" schema on PostgreSQL is that which is returned by the -PostgreSQL ``current_schema()`` function. On a typical PostgreSQL -installation, this is the name ``public``. So a table that refers to another -which is in the ``public`` (i.e. default) schema will always have the -``.schema`` attribute set to ``None``. - -.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` - dialect-level option accepted by :class:`_schema.Table` and - :meth:`_schema.MetaData.reflect`. - - .. seealso:: + :ref:`reflection_schema_qualified_interaction` - discussion of the issue + from a backend-agnostic perspective + `The Schema Search Path - `_ + `_ - on the PostgreSQL website. INSERT/UPDATE...RETURNING @@ -735,7 +845,7 @@ def set_search_path(dbapi_connection, connection_record): It's important to remember that text searching in PostgreSQL is powerful but complicated, and SQLAlchemy users are advised to reference the PostgreSQL documentation regarding - `Full Text Search `_. + `Full Text Search `_. There are important differences between ``to_tsquery`` and ``plainto_tsquery``, the most significant of which is that ``to_tsquery`` @@ -789,6 +899,11 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL to ensure that you are generating queries with SQLAlchemy that take full advantage of any indexes you may have created for full text search. +.. seealso:: + + `Full Text Search `_ - in the PostgreSQL documentation + + FROM ONLY ... ------------- @@ -849,7 +964,7 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL allows the specification of an *operator class* for each column of an index (see -https://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). +https://www.postgresql.org/docs/current/interactive/indexes-opclass.html). The :class:`.Index` construct allows these to be specified via the ``postgresql_ops`` keyword argument:: @@ -891,7 +1006,7 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as the ability for users to create their own (see -https://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be +https://www.postgresql.org/docs/current/static/indexes-types.html). These can be specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: Index('my_index', my_table.c.data, postgresql_using='gin') @@ -1058,7 +1173,54 @@ def set_search_path(dbapi_connection, connection_record): .. seealso:: `PostgreSQL CREATE TABLE options - `_ + `_ - + in the PostgreSQL documentation. + +.. 
_postgresql_constraint_options: + +PostgreSQL Constraint Options +----------------------------- + +The following option(s) are supported by the PostgreSQL dialect in conjunction +with selected constraint constructs: + +* ``NOT VALID``: This option applies towards CHECK and FOREIGN KEY constraints + when the constraint is being added to an existing table via ALTER TABLE, + and has the effect that existing rows are not scanned during the ALTER + operation against the constraint being added. + + When using a SQL migration tool such as `Alembic `_ + that renders ALTER TABLE constructs, the ``postgresql_not_valid`` argument + may be specified as an additional keyword argument within the operation + that creates the constraint, as in the following Alembic example:: + + def update(): + op.create_foreign_key( + "fk_user_address", + "address", + "user", + ["user_id"], + ["id"], + postgresql_not_valid=True + ) + + The keyword is ultimately accepted directly by the + :class:`_schema.CheckConstraint`, :class:`_schema.ForeignKeyConstraint` + and :class:`_schema.ForeignKey` constructs; when using a tool like + Alembic, dialect-specific keyword arguments are passed through to + these constructs from the migration operation directives:: + + CheckConstraint("some_field IS NOT NULL", postgresql_not_valid=True) + + ForeignKeyConstraint(["some_id"], ["some_table.some_id"], postgresql_not_valid=True) + + .. versionadded:: 1.4.32 + + .. seealso:: + + `PostgreSQL ALTER TABLE options + `_ - + in the PostgreSQL documentation. .. _postgresql_table_valued_overview: @@ -1135,11 +1297,14 @@ def set_search_path(dbapi_connection, connection_record): >>> from sqlalchemy import select, func >>> stmt = select( - ... func.generate_series(4, 1, -1).table_valued("value", with_ordinality="ordinality") + ... func.generate_series(4, 1, -1). + ... table_valued("value", with_ordinality="ordinality"). + ... render_derived() ... ) >>> print(stmt) SELECT anon_1.value, anon_1.ordinality - FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) WITH ORDINALITY AS anon_1 + FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) + WITH ORDINALITY AS anon_1(value, ordinality) .. versionadded:: 1.4.0b2 @@ -1371,7 +1536,7 @@ def bind_expression(self, bindvalue): ) -""" # noqa E501 +""" # noqa: E501 from collections import defaultdict import datetime as dt @@ -1379,6 +1544,7 @@ def bind_expression(self, bindvalue): from uuid import UUID as _python_UUID from . import array as _array +from . import dml from . import hstore as _hstore from . import json as _json from . import ranges as _ranges @@ -1409,7 +1575,6 @@ def bind_expression(self, bindvalue): from ...types import TEXT from ...types import VARCHAR - IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I) AUTOCOMMIT_REGEXP = re.compile( @@ -1559,6 +1724,13 @@ class MACADDR(sqltypes.TypeEngine): PGMacAddr = MACADDR +class MACADDR8(sqltypes.TypeEngine): + __visit_name__ = "MACADDR8" + + +PGMacAddr8 = MACADDR8 + + class MONEY(sqltypes.TypeEngine): r"""Provide the PostgreSQL MONEY type. @@ -1627,13 +1799,39 @@ class REGCLASS(sqltypes.TypeEngine): class TIMESTAMP(sqltypes.TIMESTAMP): + + """Provide the PostgreSQL TIMESTAMP type.""" + + __visit_name__ = "TIMESTAMP" + def __init__(self, timezone=False, precision=None): + """Construct a TIMESTAMP. + + :param timezone: boolean value if timezone present, default False + :param precision: optional integer precision value + + .. 
versionadded:: 1.4 + + """ super(TIMESTAMP, self).__init__(timezone=timezone) self.precision = precision class TIME(sqltypes.TIME): + + """PostgreSQL TIME type.""" + + __visit_name__ = "TIME" + def __init__(self, timezone=False, precision=None): + """Construct a TIME. + + :param timezone: boolean value if timezone present, default False + :param precision: optional integer precision value + + .. versionadded:: 1.4 + + """ super(TIME, self).__init__(timezone=timezone) self.precision = precision @@ -1756,6 +1954,28 @@ def process(value): else: return None + def literal_processor(self, dialect): + if self.as_uuid: + + def process(value): + if value is not None: + value = "'%s'::UUID" % value + return value + + return process + else: + + def process(value): + if value is not None: + value = "'%s'" % value + return value + + return process + + @property + def python_type(self): + return _python_UUID if self.as_uuid else str + PGUuid = UUID @@ -2064,6 +2284,7 @@ def __init__(self, expression, type_): sqltypes.JSON: _json.JSON, } + ischema_names = { "_array": _array.ARRAY, "hstore": _hstore.HSTORE, @@ -2092,6 +2313,7 @@ def __init__(self, expression, type_): "bit": BIT, "bit varying": BIT, "macaddr": MACADDR, + "macaddr8": MACADDR8, "money": MONEY, "oid": OID, "regclass": REGCLASS, @@ -2214,18 +2436,15 @@ def _regexp_match(self, base_op, binary, operator, kw): return self._generate_generic_binary( binary, " %s " % base_op, **kw ) - if isinstance(flags, elements.BindParameter) and flags.value == "i": + if flags == "i": return self._generate_generic_binary( binary, " %s* " % base_op, **kw ) - flags = self.process(flags, **kw) - string = self.process(binary.left, **kw) - pattern = self.process(binary.right, **kw) return "%s %s CONCAT('(?', %s, ')', %s)" % ( - string, + self.process(binary.left, **kw), base_op, - flags, - pattern, + self.render_literal_value(flags, sqltypes.STRINGTYPE), + self.process(binary.right, **kw), ) def visit_regexp_match_op_binary(self, binary, operator, **kw): @@ -2236,23 +2455,18 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): def visit_regexp_replace_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) - pattern = self.process(binary.right, **kw) + pattern_replace = self.process(binary.right, **kw) flags = binary.modifiers["flags"] - if flags is not None: - flags = self.process(flags, **kw) - replacement = self.process(binary.modifiers["replacement"], **kw) if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( string, - pattern, - replacement, + pattern_replace, ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( string, - pattern, - replacement, - flags, + pattern_replace, + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) def visit_empty_set_expr(self, element_types): @@ -2347,7 +2561,9 @@ def for_update_clause(self, select, **kw): def returning_clause(self, stmt, returning_cols): columns = [ - self._label_returning_column(stmt, c) + self._label_returning_column( + stmt, c, fallback_label_name=c._non_anon_label + ) for c in expression._select_iterables(returning_cols) ] @@ -2396,6 +2612,24 @@ def _on_conflict_target(self, clause, **kw): return target_text + @util.memoized_property + def _is_safe_for_fast_insert_values_helper(self): + # don't allow fast executemany if _post_values_clause is + # present and is not an OnConflictDoNothing. 
what this means + # concretely is that the + # "fast insert executemany helper" won't be used, in other + # words we won't convert "executemany()" of many parameter + # sets into a single INSERT with many elements in VALUES. + # We can't apply that optimization safely if for example the + # statement includes a clause like "ON CONFLICT DO UPDATE" + + return self.insert_single_values_expr is not None and ( + self.statement._post_values_clause is None + or isinstance( + self.statement._post_values_clause, dml.OnConflictDoNothing + ) + ) + def visit_on_conflict_do_nothing(self, on_conflict, **kw): target_text = self._on_conflict_target(on_conflict, **kw) @@ -2440,7 +2674,7 @@ def visit_on_conflict_do_update(self, on_conflict, **kw): value.type = c.type value_text = self.process(value.self_group(), use_schema=False) - key_text = self.preparer.quote(col_key) + key_text = self.preparer.quote(c.name) action_set_ops.append("%s = %s" % (key_text, value_text)) # check for names that don't match columns @@ -2568,6 +2802,10 @@ def get_column_specification(self, column, **kwargs): colspec += " NULL" return colspec + def _define_constraint_validity(self, constraint): + not_valid = constraint.dialect_options["postgresql"]["not_valid"] + return " NOT VALID" if not_valid else "" + def visit_check_constraint(self, constraint): if constraint._type_bound: typ = list(constraint.columns)[0].type @@ -2582,7 +2820,16 @@ def visit_check_constraint(self, constraint): "create_constraint=False on this Enum datatype." ) - return super(PGDDLCompiler, self).visit_check_constraint(constraint) + text = super(PGDDLCompiler, self).visit_check_constraint(constraint) + text += self._define_constraint_validity(constraint) + return text + + def visit_foreign_key_constraint(self, constraint): + text = super(PGDDLCompiler, self).visit_foreign_key_constraint( + constraint + ) + text += self._define_constraint_validity(constraint) + return text def visit_drop_table_comment(self, drop): return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table( @@ -2811,6 +3058,9 @@ def visit_CIDR(self, type_, **kw): def visit_MACADDR(self, type_, **kw): return "MACADDR" + def visit_MACADDR8(self, type_, **kw): + return "MACADDR8" + def visit_MONEY(self, type_, **kw): return "MONEY" @@ -3196,6 +3446,18 @@ class PGDialect(default.DefaultDialect): "inherits": None, }, ), + ( + schema.CheckConstraint, + { + "not_valid": False, + }, + ), + ( + schema.ForeignKeyConstraint, + { + "not_valid": False, + }, + ), ] reflection_options = ("postgresql_ignore_search_path",) @@ -3762,12 +4024,19 @@ def _handle_array_type(attype): attype.endswith("[]"), ) - # strip (*) from character varying(5), timestamp(5) - # with time zone, geometry(POLYGON), etc. - attype = re.sub(r"\(.*\)", "", format_type) + if format_type is None: + no_format_type = True + attype = format_type = "no format_type()" + is_array = False + else: + no_format_type = False + + # strip (*) from character varying(5), timestamp(5) + # with time zone, geometry(POLYGON), etc. + attype = re.sub(r"\(.*\)", "", format_type) - # strip '[]' from integer[], etc. and check if an array - attype, is_array = _handle_array_type(attype) + # strip '[]' from integer[], etc. 
and check if an array + attype, is_array = _handle_array_type(attype) # strip quotes from case sensitive enum or domain names enum_or_domain_key = tuple(util.quoted_token_parser(attype)) @@ -3860,6 +4129,12 @@ def _handle_array_type(attype): coltype = coltype(*args, **kwargs) if is_array: coltype = self.ischema_names["_array"](coltype) + elif no_format_type: + util.warn( + "PostgreSQL format_type() returned NULL for column '%s'" + % (name,) + ) + coltype = sqltypes.NULLTYPE else: util.warn( "Did not recognize type '%s' of column '%s'" % (attype, name) @@ -4277,6 +4552,8 @@ def get_indexes(self, connection, table_name, schema, **kw): "column_names": [idx["cols"][i] for i in idx["key"]], } if self.server_version_info >= (11, 0): + # NOTE: this is legacy, this is part of dialect_options now + # as of #7382 entry["include_columns"] = [idx["cols"][i] for i in idx["inc"]] if "duplicates_constraint" in idx: entry["duplicates_constraint"] = idx["duplicates_constraint"] @@ -4285,6 +4562,10 @@ def get_indexes(self, connection, table_name, schema, **kw): (idx["cols"][idx["key"][i]], value) for i, value in idx["sorting"].items() ) + if "include_columns" in entry: + entry.setdefault("dialect_options", {})[ + "postgresql_include" + ] = entry["include_columns"] if "options" in idx: entry.setdefault("dialect_options", {})[ "postgresql_with" diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index bb6345cf438..dbd9c28b113 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -1,5 +1,5 @@ -# postgresql/on_conflict.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/dml.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -35,6 +35,7 @@ class Insert(StandardInsert): """ stringify_dialect = "postgresql" + inherit_cache = False @util.memoized_property def excluded(self): @@ -187,7 +188,7 @@ def __init__(self, constraint=None, index_elements=None, index_where=None): if constraint is not None: if not isinstance(constraint, util.string_types) and isinstance( constraint, - (schema.Index, schema.Constraint, ext.ExcludeConstraint), + (schema.Constraint, ext.ExcludeConstraint), ): constraint = getattr(constraint, "name") or constraint diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index f9e4c1d6cb9..a0fa2fcb854 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -1,5 +1,5 @@ -# postgresql/ext.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/ext.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,6 +14,7 @@ from ...sql import roles from ...sql import schema from ...sql.schema import ColumnCollectionConstraint +from ...sql.visitors import InternalTraversal class aggregate_order_by(expression.ColumnElement): @@ -54,6 +55,11 @@ class aggregate_order_by(expression.ColumnElement): __visit_name__ = "aggregate_order_by" stringify_dialect = "postgresql" + _traverse_internals = [ + ("target", InternalTraversal.dp_clauseelement), + ("type", InternalTraversal.dp_type), + ("order_by", InternalTraversal.dp_clauseelement), + ] def __init__(self, target, *order_by): self.target = coercions.expect(roles.ExpressionElementRole, target) @@ -92,13 +98,14 @@ class 
ExcludeConstraint(ColumnCollectionConstraint): Defines an EXCLUDE constraint as described in the `PostgreSQL documentation`__. - __ https://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE + __ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE """ # noqa __visit_name__ = "exclude_constraint" where = None + inherit_cache = False create_drop_stringify_dialect = "postgresql" diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index a4090f1ac59..379f54f5554 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ -# postgresql/hstore.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/hstore.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -96,34 +96,38 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine): For a full list of special methods see :class:`.HSTORE.comparator_factory`. - For usage with the SQLAlchemy ORM, it may be desirable to combine - the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary - now part of the :mod:`sqlalchemy.ext.mutable` - extension. This extension will allow "in-place" changes to the - dictionary, e.g. addition of new keys or replacement/removal of existing - keys to/from the current dictionary, to produce events which will be - detected by the unit of work:: + .. container:: topic - from sqlalchemy.ext.mutable import MutableDict + **Detecting Changes in HSTORE columns when using the ORM** - class MyClass(Base): - __tablename__ = 'data_table' + For usage with the SQLAlchemy ORM, it may be desirable to combine the + usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary now + part of the :mod:`sqlalchemy.ext.mutable` extension. This extension + will allow "in-place" changes to the dictionary, e.g. addition of new + keys or replacement/removal of existing keys to/from the current + dictionary, to produce events which will be detected by the unit of + work:: - id = Column(Integer, primary_key=True) - data = Column(MutableDict.as_mutable(HSTORE)) + from sqlalchemy.ext.mutable import MutableDict - my_object = session.query(MyClass).one() + class MyClass(Base): + __tablename__ = 'data_table' - # in-place mutation, requires Mutable extension - # in order for the ORM to detect - my_object.data['some_key'] = 'some value' + id = Column(Integer, primary_key=True) + data = Column(MutableDict.as_mutable(HSTORE)) - session.commit() + my_object = session.query(MyClass).one() - When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM - will not be alerted to any changes to the contents of an existing - dictionary, unless that dictionary value is re-assigned to the - HSTORE-attribute itself, thus generating a change event. + # in-place mutation, requires Mutable extension + # in order for the ORM to detect + my_object.data['some_key'] = 'some value' + + session.commit() + + When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM + will not be alerted to any changes to the contents of an existing + dictionary, unless that dictionary value is re-assigned to the + HSTORE-attribute itself, thus generating a change event. .. 
seealso:: @@ -296,41 +300,49 @@ class hstore(sqlfunc.GenericFunction): type = HSTORE name = "hstore" + inherit_cache = True class _HStoreDefinedFunction(sqlfunc.GenericFunction): type = sqltypes.Boolean name = "defined" + inherit_cache = True class _HStoreDeleteFunction(sqlfunc.GenericFunction): type = HSTORE name = "delete" + inherit_cache = True class _HStoreSliceFunction(sqlfunc.GenericFunction): type = HSTORE name = "slice" + inherit_cache = True class _HStoreKeysFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "akeys" + inherit_cache = True class _HStoreValsFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "avals" + inherit_cache = True class _HStoreArrayFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "hstore_to_array" + inherit_cache = True class _HStoreMatrixFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "hstore_to_matrix" + inherit_cache = True # diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index 2acf177f539..dbe92a4ae93 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ -# postgresql/json.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/json.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -294,22 +294,27 @@ class Comparator(JSON.Comparator): """Define comparison operations for :class:`_types.JSON`.""" def has_key(self, other): - """Boolean expression. Test for presence of a key. Note that the - key may be a SQLA expression. + """Boolean expression. Test for presence of a key (equivalent of + the ``?`` operator). Note that the key may be a SQLA expression. """ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean) def has_all(self, other): - """Boolean expression. Test for presence of all keys in jsonb""" + """Boolean expression. Test for presence of all keys in jsonb + (equivalent of the ``?&`` operator) + """ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean) def has_any(self, other): - """Boolean expression. Test for presence of any key in jsonb""" + """Boolean expression. Test for presence of any key in jsonb + (equivalent of the ``?|`` operator) + """ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean) def contains(self, other, **kwargs): """Boolean expression. Test if keys (or array) are a superset - of/contained the keys of the argument jsonb expression. + of/contained the keys of the argument jsonb expression + (equivalent of the ``@>`` operator). kwargs may be ignored by this operator but are required for API conformance. @@ -318,7 +323,8 @@ def contains(self, other, **kwargs): def contained_by(self, other): """Boolean expression. Test if keys are a proper subset of the - keys of the argument jsonb expression. + keys of the argument jsonb expression + (equivalent of the ``<@`` operator). 
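As a minimal sketch (not part of the patch itself), the comparator methods documented above could be exercised against a hypothetical ``article`` table with a JSONB ``payload`` column; compiling against the PostgreSQL dialect shows the ``?`` and ``@>`` operators being emitted::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.dialects.postgresql import JSONB

    metadata = MetaData()
    article = Table(
        "article",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("payload", JSONB),
    )

    # renders the ? and @> operators described above, joined by AND
    stmt = select(article.c.id).where(
        article.c.payload.has_key("author"),
        article.c.payload.contains({"published": True}),
    )
    print(stmt.compile(dialect=postgresql.dialect()))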
""" return self.operate( CONTAINED_BY, other, result_type=sqltypes.Boolean diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index a94f9dcdbb0..186f0ecf02d 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ -# postgresql/pg8000.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/provision.py b/lib/sqlalchemy/dialects/postgresql/provision.py index 68a01e483c2..0b315469c99 100644 --- a/lib/sqlalchemy/dialects/postgresql/provision.py +++ b/lib/sqlalchemy/dialects/postgresql/provision.py @@ -1,3 +1,9 @@ +# dialects/postgresql/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import time from ... import exc @@ -19,10 +25,7 @@ def _pg_create_db(cfg, eng, ident): template_db = cfg.options.postgresql_templatedb with eng.execution_options(isolation_level="AUTOCOMMIT").begin() as conn: - try: - _pg_drop_db(cfg, conn, ident) - except Exception: - pass + if not template_db: template_db = conn.exec_driver_sql( "select current_database()" diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 4143dd041d6..cd2b217eabb 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -1,5 +1,5 @@ -# postgresql/psycopg2.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/psycopg2.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -110,7 +110,7 @@ .. seealso:: `PQconnectdbParams \ - `_ + `_ .. _psycopg2_multi_host: @@ -120,22 +120,51 @@ psycopg2 supports multiple connection points in the connection string. When the ``host`` parameter is used multiple times in the query section of the URL, SQLAlchemy will create a single string of the host and port -information provided to make the connections:: +information provided to make the connections. Tokens may consist of +``host::port`` or just ``host``; in the latter case, the default port +is selected by libpq. In the example below, three host connections +are specified, for ``HostA::PortA``, ``HostB`` connecting to the default port, +and ``HostC::PortC``:: create_engine( - "postgresql+psycopg2://user:password@/dbname?host=HostA:port1&host=HostB&host=HostC" + "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC" ) -A connection to each host is then attempted until either a connection is successful -or all connections are unsuccessful in which case an error is raised. +As an alternative, libpq query string format also may be used; this specifies +``host`` and ``port`` as single query string arguments with comma-separated +lists - the default port can be chosen by indicating an empty value +in the comma separated list:: + + create_engine( + "postgresql+psycopg2://user:password@/dbname?host=HostA,HostB,HostC&port=PortA,,PortC" + ) + +With either URL style, connections to each host is attempted based on a +configurable strategy, which may be configured using the libpq +``target_session_attrs`` parameter. 
Per libpq this defaults to ``any`` +which indicates a connection to each host is then attempted until a connection is successful. +Other strategies include ``primary``, ``prefer-standby``, etc. The complete +list is documented by PostgreSQL at +`libpq connection strings `_. + +For example, to indicate two hosts using the ``primary`` strategy:: + + create_engine( + "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC&target_session_attrs=primary" + ) + +.. versionchanged:: 1.4.40 Port specification in psycopg2 multiple host format + is repaired, previously ports were not correctly interpreted in this context. + libpq comma-separated format is also now supported. .. versionadded:: 1.3.20 Support for multiple hosts in PostgreSQL connection string. .. seealso:: - `PQConnString \ - `_ + `libpq connection strings `_ - please refer + to this section in the libpq documentation for complete background on multiple host support. + Empty DSN Connections / Environment Variable Connections --------------------------------------------------------- @@ -202,13 +231,13 @@ Modern versions of psycopg2 include a feature known as `Fast Execution Helpers \ -`_, which +`_, which have been shown in benchmarking to improve psycopg2's executemany() performance, primarily with INSERT statements, by multiple orders of magnitude. SQLAlchemy internally makes use of these extensions for ``executemany()`` style calls, which correspond to lists of parameters being passed to :meth:`_engine.Connection.execute` as detailed in :ref:`multiple parameter -sets `. The ORM also uses this mode internally whenever +sets `. The ORM also uses this mode internally whenever possible. The two available extensions on the psycopg2 side are the ``execute_values()`` @@ -284,7 +313,7 @@ .. seealso:: - :ref:`execute_multiple` - General information on using the + :ref:`tutorial_multiple_parameters` - General information on using the :class:`_engine.Connection` object to execute statements in such a way as to make use of the DBAPI ``.executemany()`` method. 
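As a brief, hypothetical sketch (the hosts, credentials and the ``kv`` table are made up), the multiple-host URL format and the executemany() fast path described above could be combined as follows::

    from sqlalchemy import create_engine, text

    # host:port token form combined with a libpq connection strategy
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@/test"
        "?host=db1:5432&host=db2:5432&target_session_attrs=primary"
    )

    with engine.begin() as conn:
        # a list of parameter dictionaries takes the DBAPI executemany()
        # path, which the psycopg2 fast execution helpers accelerate
        conn.execute(
            text("INSERT INTO kv (k, v) VALUES (:k, :v)"),
            [{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": 3}],
        )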
@@ -835,15 +864,17 @@ def get_deferrable(self, connection): def do_ping(self, dbapi_connection): cursor = None + before_autocommit = dbapi_connection.autocommit try: - dbapi_connection.autocommit = True + if not before_autocommit: + dbapi_connection.autocommit = True cursor = dbapi_connection.cursor() try: cursor.execute(self._dialect_specific_select_one) finally: cursor.close() - if not dbapi_connection.closed: - dbapi_connection.autocommit = False + if not before_autocommit and not dbapi_connection.closed: + dbapi_connection.autocommit = before_autocommit except self.dbapi.Error as err: if self.is_disconnect(err, dbapi_connection, cursor): return False @@ -927,7 +958,7 @@ def do_executemany(self, cursor, statement, parameters, context=None): self.executemany_mode & EXECUTEMANY_VALUES and context and context.isinsert - and context.compiled.insert_single_values_expr + and context.compiled._is_safe_for_fast_insert_values_helper ): executemany_values = ( "(%s)" % context.compiled.insert_single_values_expr @@ -986,20 +1017,27 @@ def create_connect_args(self, url): if "host" in url.query: is_multihost = isinstance(url.query["host"], (list, tuple)) - if opts: + if opts or url.query: + if not opts: + opts = {} if "port" in opts: opts["port"] = int(opts["port"]) opts.update(url.query) if is_multihost: - opts["host"] = ",".join(url.query["host"]) - # send individual dbname, user, password, host, port - # parameters to psycopg2.connect() - return ([], opts) - elif url.query: - # any other connection arguments, pass directly - opts.update(url.query) - if is_multihost: - opts["host"] = ",".join(url.query["host"]) + hosts, ports = zip( + *[ + token.split(":") if ":" in token else (token, "") + for token in url.query["host"] + ] + ) + opts["host"] = ",".join(hosts) + if "port" in opts: + raise exc.ArgumentError( + "Can't mix 'multihost' formats together; use " + '"host=h1,h2,h3&port=p1,p2,p3" or ' + '"host=h1:p1&host=h2:p2&host=h3:p3" separately' + ) + opts["port"] = ",".join(ports) return ([], opts) else: # no connection arguments whatsoever; psycopg2.connect() diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py index 5be52a8707b..4bdb924cc07 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -1,5 +1,5 @@ -# testing/engines.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/psycopg2cffi.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py index 42ef3c31e02..73eb8d0225e 100644 --- a/lib/sqlalchemy/dialects/postgresql/pygresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -1,5 +1,5 @@ -# postgresql/pygresql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/pygresql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index 1d646df44a4..d5e35695941 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ -# postgresql/pypostgresql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# 
dialects/postgresql/pypostgresql.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index c54179c8182..cfd01790772 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -1,4 +1,5 @@ -# Copyright (C) 2013-2021 the SQLAlchemy authors and contributors +# dialects/postgresql/ranges.py +# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,15 +14,15 @@ class RangeOperators(object): """ This mixin provides functionality for the Range Operators - listed in Table 9-44 of the `PostgreSQL documentation`__ for Range - Functions and Operators. It is used by all the range types + listed in the Range Operators table of the `PostgreSQL documentation`__ + for Range Functions and Operators. It is used by all the range types provided in the ``postgres`` dialect and can likely be used for any range types you create yourself. - __ https://www.postgresql.org/docs/devel/static/functions-range.html + __ https://www.postgresql.org/docs/current/static/functions-range.html - No extra support is provided for the Range Functions listed in - Table 9-45 of the PostgreSQL documentation. For these, the normal + No extra support is provided for the Range Functions listed in the Range + Functions table of the PostgreSQL documentation. For these, the normal :func:`~sqlalchemy.sql.expression.func` object should be used. """ diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py index 6e3ad0e668b..83dd3378129 100644 --- a/lib/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -1,5 +1,5 @@ -# sqlite/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index 4319e26611d..78304ed8195 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -1,5 +1,5 @@ -# sqlite/aiosqlite.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/aiosqlite.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -34,6 +34,14 @@ The URL passes through all arguments to the ``pysqlite`` driver, so all connection arguments are the same as they are for that of :ref:`pysqlite`. +.. _aiosqlite_udfs: + +User-Defined Functions +---------------------- + +aiosqlite extends pysqlite to support async, so we can create our own user-defined functions (UDFs) +in Python and use them directly in SQLite queries as described here: :ref:`pysqlite_udfs`. 
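As an illustrative sketch only (not part of the patch, and assuming the adapted aiosqlite connection proxies ``create_function()`` in the same way the pysqlite connection does), the event-listener recipe referenced above can be applied to an async engine by listening on its ``sync_engine``::

    import asyncio

    from sqlalchemy import event, text
    from sqlalchemy.ext.asyncio import create_async_engine


    def udf():
        return "udf-ok"


    engine = create_async_engine("sqlite+aiosqlite:///./db_file")


    @event.listens_for(engine.sync_engine, "connect")
    def register_udf(dbapi_connection, connection_record):
        # assumed to be available on the adapted aiosqlite connection
        dbapi_connection.create_function("udf", 0, udf)


    async def main():
        async with engine.connect() as conn:
            print(await conn.scalar(text("SELECT UDF()")))


    asyncio.run(main())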
+ """ # noqa @@ -210,7 +218,6 @@ def commit(self): self._handle_exception(error) def close(self): - # print(">close", self) try: self.await_(self._connection.close()) except Exception as error: diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index e936c9080a0..347fa2095a8 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ -# sqlite/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -8,7 +8,6 @@ r""" .. dialect:: sqlite :name: SQLite - :full_support: 3.21, 3.28+ :normal_support: 3.12+ :best_effort: 3.7.16+ @@ -277,10 +276,18 @@ def bi_c(element, compiler, **kw): @event.listens_for(Engine, "connect") def set_sqlite_pragma(dbapi_connection, connection_record): + # the sqlite3 driver will not set PRAGMA foreign_keys + # if autocommit=False; set to True temporarily + ac = dbapi_connection.autocommit + dbapi_connection.autocommit = True + cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() + # restore previous autocommit setting + dbapi_connection.autocommit = ac + .. warning:: When SQLite foreign keys are enabled, it is **not possible** @@ -821,6 +828,7 @@ def set_sqlite_pragma(dbapi_connection, connection_record): from ... import processors from ... import schema as sa_schema from ... import sql +from ... import text from ... import types as sqltypes from ... import util from ...engine import default @@ -937,6 +945,10 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime): regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" ) + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the datetime. Can't be specified together with ``storage_format`` + or ``regexp``. + :param storage_format: format string which will be applied to the dict with keys year, month, day, hour, minute, second, and microsecond. @@ -1103,6 +1115,10 @@ class TIME(_DateTimeMixin, sqltypes.Time): regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") ) + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the time. Can't be specified together with ``storage_format`` + or ``regexp``. + :param storage_format: format string which will be applied to the dict with keys hour, minute, second, and microsecond. @@ -1385,7 +1401,7 @@ def visit_on_conflict_do_update(self, on_conflict, **kw): value.type = c.type value_text = self.process(value.self_group(), use_schema=False) - key_text = self.preparer.quote(col_key) + key_text = self.preparer.quote(c.name) action_set_ops.append("%s = %s" % (key_text, value_text)) # check for names that don't match columns @@ -2099,6 +2115,14 @@ def get_columns(self, connection, table_name, schema=None, **kw): tablesql = self._get_table_sql( connection, table_name, schema, **kw ) + # remove create table + match = re.match( + r"create table .*?\((.*)\)$", + tablesql.strip(), + re.DOTALL | re.IGNORECASE, + ) + assert match, "create table not found in %s" % tablesql + tablesql = match.group(1).strip() columns.append( self._get_column_info( @@ -2148,7 +2172,10 @@ def _get_column_info( if generated: sqltext = "" if tablesql: - pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?" + pattern = ( + r"[^,]*\s+GENERATED\s+ALWAYS\s+AS" + r"\s+\((.*)\)\s*(?:virtual|stored)?" 
+ ) match = re.search( re.escape(name) + pattern, tablesql, re.IGNORECASE ) @@ -2315,6 +2342,8 @@ def parse_fks(): r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *' r"((?:ON (?:DELETE|UPDATE) " r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)" + r"((?:NOT +)?DEFERRABLE)?" + r"(?: +INITIALLY +(DEFERRED|IMMEDIATE))?" ) for match in re.finditer(FK_PATTERN, table_data, re.I): ( @@ -2324,7 +2353,9 @@ def parse_fks(): referred_name, referred_columns, onupdatedelete, - ) = match.group(1, 2, 3, 4, 5, 6) + deferrable, + initially, + ) = match.group(1, 2, 3, 4, 5, 6, 7, 8) constrained_columns = list( self._find_cols_in_sig(constrained_columns) ) @@ -2346,6 +2377,12 @@ def parse_fks(): onupdate = token[6:].strip() if onupdate and onupdate != "NO ACTION": options["onupdate"] = onupdate + + if deferrable: + options["deferrable"] = "NOT" not in deferrable.upper() + if initially: + options["initially"] = initially.upper() + yield ( constraint_name, constrained_columns, @@ -2414,7 +2451,8 @@ def get_unique_constraints( def parse_uqs(): UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' INLINE_UNIQUE_PATTERN = ( - r'(?:(".+?")|([a-z0-9]+)) ' r"+[a-z0-9_ ]+? +UNIQUE" + r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) ' + r"+[a-z0-9_ ]+? +UNIQUE" ) for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): @@ -2448,17 +2486,21 @@ def get_check_constraints(self, connection, table_name, schema=None, **kw): if not table_data: return [] - CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *" + CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?" r"CHECK *\( *(.+) *\),? *" check_constraints = [] # NOTE: we aren't using re.S here because we actually are # taking advantage of each CHECK constraint being all on one # line in the table definition in order to delineate. This # necessarily makes assumptions as to how the CREATE TABLE # was emitted. + for match in re.finditer(CHECK_PATTERN, table_data, re.I): - check_constraints.append( - {"sqltext": match.group(2), "name": match.group(1)} - ) + name = match.group(1) + + if name: + name = re.sub(r'^"|"$', "", name) + + check_constraints.append({"sqltext": match.group(2), "name": name}) return check_constraints @@ -2469,6 +2511,21 @@ def get_indexes(self, connection, table_name, schema=None, **kw): ) indexes = [] + # regular expression to extract the filter predicate of a partial + # index. this could fail to extract the predicate correctly on + # indexes created like + # CREATE INDEX i ON t (col || ') where') WHERE col <> '' + # but as this function does not support expression-based indexes + # this case does not occur. + partial_pred_re = re.compile(r"\)\s+where\s+(.+)", re.IGNORECASE) + + if schema: + schema_expr = "%s." % self.identifier_preparer.quote_identifier( + schema + ) + else: + schema_expr = "" + include_auto_indexes = kw.pop("include_auto_indexes", False) for row in pragma_indexes: # ignore implicit primary key index. @@ -2477,12 +2534,43 @@ def get_indexes(self, connection, table_name, schema=None, **kw): "sqlite_autoindex" ): continue - indexes.append(dict(name=row[1], column_names=[], unique=row[2])) + indexes.append( + dict( + name=row[1], + column_names=[], + unique=row[2], + dialect_options={}, + ) + ) + + # check partial indexes + if len(row) >= 5 and row[4]: + s = ( + "SELECT sql FROM %(schema)ssqlite_master " + "WHERE name = ? 
" + "AND type = 'index'" % {"schema": schema_expr} + ) + rs = connection.exec_driver_sql(s, (row[1],)) + index_sql = rs.scalar() + predicate_match = partial_pred_re.search(index_sql) + if predicate_match is None: + # unless the regex is broken this case shouldn't happen + # because we know this is a partial index, so the + # definition sql should match the regex + util.warn( + "Failed to look up filter predicate of " + "partial index %s" % row[1] + ) + else: + predicate = predicate_match.group(1) + indexes[-1]["dialect_options"]["sqlite_where"] = text( + predicate + ) # loop thru unique indexes to get the column names. for idx in list(indexes): pragma_index = self._get_table_pragma( - connection, "index_info", idx["name"] + connection, "index_info", idx["name"], schema=schema ) for row in pragma_index: @@ -2495,6 +2583,8 @@ def get_indexes(self, connection, table_name, schema=None, **kw): break else: idx["column_names"].append(row[2]) + + indexes.sort(key=lambda d: d["name"] or "~") # sort None as last return indexes @reflection.cache diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index a93e31beba2..f3fe7c19e38 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/dml.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -36,6 +37,7 @@ class Insert(StandardInsert): """ stringify_dialect = "sqlite" + inherit_cache = False @util.memoized_property def excluded(self): diff --git a/lib/sqlalchemy/dialects/sqlite/json.py b/lib/sqlalchemy/dialects/sqlite/json.py index 614f95405ff..1dda17f63f8 100644 --- a/lib/sqlalchemy/dialects/sqlite/json.py +++ b/lib/sqlalchemy/dialects/sqlite/json.py @@ -1,3 +1,9 @@ +# dialects/sqlite/json.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... 
import types as sqltypes diff --git a/lib/sqlalchemy/dialects/sqlite/provision.py b/lib/sqlalchemy/dialects/sqlite/provision.py index e5b17e8294f..3faa2564460 100644 --- a/lib/sqlalchemy/dialects/sqlite/provision.py +++ b/lib/sqlalchemy/dialects/sqlite/provision.py @@ -1,3 +1,9 @@ +# dialects/sqlite/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import os import re diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py index 3765191c1bc..333502b4353 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -1,5 +1,5 @@ -# sqlite/pysqlcipher.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/pysqlcipher.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index e9d5d96827f..9ca735dcef1 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ -# sqlite/pysqlite.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sqlite/pysqlite.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -399,6 +399,39 @@ def do_begin(conn): `sqlite3 module breaks transactions and potentially corrupts data `_ - on the Python bug tracker +.. _pysqlite_udfs: + +User-Defined Functions +---------------------- + +pysqlite supports a `create_function() `_ +method that allows us to create our own user-defined functions (UDFs) in Python and use them directly in SQLite queries. +These functions are registered with a specific DBAPI Connection. + +SQLAlchemy uses connection pooling with file-based SQLite databases, so we need to ensure that the UDF is attached to the +connection when it is created. 
That is accomplished with an event listener:: + + from sqlalchemy import create_engine + from sqlalchemy import event + from sqlalchemy import text + + + def udf(): + return "udf-ok" + + + engine = create_engine("sqlite:///./db_file") + + + @event.listens_for(engine, "connect") + def connect(conn, rec): + conn.create_function("udf", 0, udf) + + + for i in range(5): + with engine.connect() as conn: + print(conn.scalar(text("SELECT UDF()"))) + """ # noqa @@ -540,7 +573,7 @@ def iso_level(conn): fns.append(iso_level) - def connect(conn): + def connect(conn): # noqa: F811 for fn in fns: fn(conn) diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py index 87a90fb0623..98627d48e56 100644 --- a/lib/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ -# sybase/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sybase/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index 120093015c5..bab2f407d64 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ -# sybase/base.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# dialects/sybase/base.py +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py index 4e8c8aeab19..5dcf5c87f44 100644 --- a/lib/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ -# sybase/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sybase/mxodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index afc315f264e..4e1d2774b3e 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ -# sybase/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# dialects/sybase/pyodbc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py index 0f408e80159..ddcd2363176 100644 --- a/lib/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 @@ -# sybase/pysybase.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# dialects/sybase/pysybase.py +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 6306e201d0c..26750cd31de 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -44,6 +44,7 @@ 
from .mock import create_mock_engine from .reflection import Inspector from .result import ChunkedIteratorResult +from .result import FilterResult from .result import FrozenResult from .result import IteratorResult from .result import MappingResult diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 2444b5c7fe1..26dfa6d6fa0 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -133,6 +133,9 @@ def _log_info(self, message, *arg, **kw): if fmt: message = fmt(message) + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET + self.engine.logger.info(message, *arg, **kw) def _log_debug(self, message, *arg, **kw): @@ -141,6 +144,9 @@ def _log_debug(self, message, *arg, **kw): if fmt: message = fmt(message) + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET + self.engine.logger.debug(message, *arg, **kw) @property @@ -332,7 +338,7 @@ def execution_options(self, **opt): - set per :class:`_engine.Engine` isolation level :meth:`_engine.Connection.get_isolation_level` - - view current level + - view current actual level :ref:`SQLite Transaction Isolation ` @@ -358,15 +364,89 @@ def execution_options(self, **opt): :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be - "streamed" and not pre-buffered, if possible. This is a limitation - of many DBAPIs. The flag is currently understood within a subset - of dialects within the PostgreSQL and MySQL categories, and - may be supported by other third party dialects as well. + "streamed" and not pre-buffered, if possible. For backends + such as PostgreSQL, MySQL and MariaDB, this indicates the use of + a "server side cursor" as opposed to a client side cursor. + Other backends such as that of Oracle may already use server + side cursors by default. + + The usage of + :paramref:`_engine.Connection.execution_options.stream_results` is + usually combined with setting a fixed number of rows to to be fetched + in batches, to allow for efficient iteration of database rows while + at the same time not loading all result rows into memory at once; + this can be configured on a :class:`_engine.Result` object using the + :meth:`_engine.Result.yield_per` method, after execution has + returned a new :class:`_engine.Result`. If + :meth:`_engine.Result.yield_per` is not used, + the :paramref:`_engine.Connection.execution_options.stream_results` + mode of operation will instead use a dynamically sized buffer + which buffers sets of rows at a time, growing on each batch + based on a fixed growth size up until a limit which may + be configured using the + :paramref:`_engine.Connection.execution_options.max_row_buffer` + parameter. + + When using the ORM to fetch ORM mapped objects from a result, + :meth:`_engine.Result.yield_per` should always be used with + :paramref:`_engine.Connection.execution_options.stream_results`, + so that the ORM does not fetch all rows into new ORM objects at once. + + For typical use, the + :paramref:`_engine.Connection.execution_options.yield_per` execution + option should be preferred, which sets up both + :paramref:`_engine.Connection.execution_options.stream_results` and + :meth:`_engine.Result.yield_per` at once. 
This option is supported + both at a core level by :class:`_engine.Connection` as well as by the + ORM :class:`_engine.Session`; the latter is described at + :ref:`orm_queryguide_yield_per`. + + .. seealso:: + + :ref:`engine_stream_results` - background on + :paramref:`_engine.Connection.execution_options.stream_results` + + :paramref:`_engine.Connection.execution_options.max_row_buffer` + + :paramref:`_engine.Connection.execution_options.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param max_row_buffer: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. Sets a maximum + buffer size to use when the + :paramref:`_engine.Connection.execution_options.stream_results` + execution option is used on a backend that supports server side + cursors. The default value if not specified is 1000. .. seealso:: + :paramref:`_engine.Connection.execution_options.stream_results` + :ref:`engine_stream_results` + + :param yield_per: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. Integer value applied which will + set the :paramref:`_engine.Connection.execution_options.stream_results` + execution option and invoke :meth:`_engine.Result.yield_per` + automatically at once. Allows equivalent functionality as + is present when using this parameter with the ORM. + + .. versionadded:: 1.4.40 + + .. seealso:: + + :ref:`engine_stream_results` - background and examples + on using server side cursors with Core. + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param schema_translate_map: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`, :class:`_sql.Executable`. + :param schema_translate_map: Available on: Connection, Engine. A dictionary mapping schema names to schema names, that will be applied to the :paramref:`_schema.Table.schema` element of each @@ -463,22 +543,29 @@ def connection(self): return self._dbapi_connection def get_isolation_level(self): - """Return the current isolation level assigned to this - :class:`_engine.Connection`. - - This will typically be the default isolation level as determined - by the dialect, unless if the - :paramref:`.Connection.execution_options.isolation_level` - feature has been used to alter the isolation level on a - per-:class:`_engine.Connection` basis. - - This attribute will typically perform a live SQL operation in order - to procure the current isolation level, so the value returned is the - actual level on the underlying DBAPI connection regardless of how - this state was set. Compare to the - :attr:`_engine.Connection.default_isolation_level` accessor - which returns the dialect-level setting without performing a SQL - query. + """Return the current **actual** isolation level that's present on + the database within the scope of this connection. + + This attribute will perform a live SQL operation against the database + in order to procure the current isolation level, so the value returned + is the actual level on the underlying DBAPI connection regardless of + how this state was set. This will be one of the four actual isolation + modes ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. It will **not** include the ``AUTOCOMMIT`` isolation + level setting. Third party dialects may also feature additional + isolation level settings. + + .. 
note:: This method **will not report** on the ``AUTOCOMMIT`` + isolation level, which is a separate :term:`dbapi` setting that's + independent of **actual** isolation level. When ``AUTOCOMMIT`` is + in use, the database connection still has a "traditional" isolation + mode in effect, that is typically one of the four values + ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. + + Compare to the :attr:`_engine.Connection.default_isolation_level` + accessor which returns the isolation level that is present on the + database at initial connection time. .. versionadded:: 0.9.9 @@ -501,27 +588,25 @@ def get_isolation_level(self): @property def default_isolation_level(self): - """The default isolation level assigned to this - :class:`_engine.Connection`. + """The initial-connection time isolation level associated with the + :class:`_engine.Dialect` in use. - This is the isolation level setting that the - :class:`_engine.Connection` - has when first procured via the :meth:`_engine.Engine.connect` method. - This level stays in place until the - :paramref:`.Connection.execution_options.isolation_level` is used - to change the setting on a per-:class:`_engine.Connection` basis. + This value is independent of the + :paramref:`.Connection.execution_options.isolation_level` and + :paramref:`.Engine.execution_options.isolation_level` execution + options, and is determined by the :class:`_engine.Dialect` when the + first connection is created, by performing a SQL query against the + database for the current isolation level before any additional commands + have been emitted. - Unlike :meth:`_engine.Connection.get_isolation_level`, - this attribute is set - ahead of time from the first connection procured by the dialect, - so SQL query is not invoked when this accessor is called. + Calling this accessor does not invoke any new SQL queries. .. versionadded:: 0.9.9 .. seealso:: :meth:`_engine.Connection.get_isolation_level` - - view current level + - view current actual isolation level :paramref:`_sa.create_engine.isolation_level` - set per :class:`_engine.Engine` isolation level @@ -764,7 +849,10 @@ def begin(self): else: if self._is_future: raise exc.InvalidRequestError( - "a transaction is already begun for this connection" + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; can't " + "call begin() here unless rollback() or commit() " + "is called first." 
) else: return MarkerTransaction(self) @@ -814,7 +902,7 @@ def begin_nested(self): :meth:`_engine.Connection.begin` - :meth:`_engine.Connection.begin_twophase` + :ref:`session_begin_nested` - ORM support for SAVEPOINT """ if self._is_future: @@ -896,10 +984,15 @@ def in_nested_transaction(self): and self._nested_transaction.is_active ) - def _is_autocommit(self): - return ( - self._execution_options.get("isolation_level", None) - == "AUTOCOMMIT" + def _is_autocommit_isolation(self): + opt_iso = self._execution_options.get("isolation_level", None) + return bool( + opt_iso == "AUTOCOMMIT" + or ( + opt_iso is None + and getattr(self.engine.dialect, "isolation_level", None) + == "AUTOCOMMIT" + ) ) def get_transaction(self): @@ -930,7 +1023,13 @@ def _begin_impl(self, transaction): assert not self.__branch_from if self._echo: - self._log_info("BEGIN (implicit)") + if self._is_autocommit_isolation(): + self._log_info( + "BEGIN (implicit; DBAPI should not BEGIN due to " + "autocommit mode)" + ) + else: + self._log_info("BEGIN (implicit)") self.__in_begin = True @@ -952,7 +1051,7 @@ def _rollback_impl(self): if self._still_open_and_dbapi_connection_is_valid: if self._echo: - if self._is_autocommit(): + if self._is_autocommit_isolation(): self._log_info( "ROLLBACK using DBAPI connection.rollback(), " "DBAPI should ignore due to autocommit mode" @@ -971,7 +1070,7 @@ def _commit_impl(self, autocommit=False): # if a connection has this set as the isolation level, we can skip # the "autocommit" warning as the operation will do "autocommit" # in any case - if autocommit and not self._is_autocommit(): + if autocommit and not self._is_autocommit_isolation(): util.warn_deprecated_20( "The current statement is being autocommitted using " "implicit autocommit, which will be removed in " @@ -984,7 +1083,7 @@ def _commit_impl(self, autocommit=False): self.dispatch.commit(self) if self._echo: - if self._is_autocommit(): + if self._is_autocommit_isolation(): self._log_info( "COMMIT using DBAPI connection.commit(), " "DBAPI should ignore due to autocommit mode" @@ -1147,7 +1246,7 @@ def close(self): # as we just closed the transaction, close the connection # pool connection without doing an additional reset if skip_reset: - conn._close_no_reset() + conn._close_special(transaction_reset=True) else: conn.close() @@ -1613,8 +1712,11 @@ def _execute_20( def exec_driver_sql( self, statement, parameters=None, execution_options=None ): - r"""Executes a SQL statement construct and returns a - :class:`_engine.CursorResult`. + r"""Executes a string SQL statement on the DBAPI cursor directly, + without any SQL compilation steps. + + This can be used to pass any string directly to the + ``cursor.execute()`` method of the DBAPI in use. :param statement: The statement str to be executed. Bound parameters must use the underlying DBAPI's paramstyle, such as "qmark", @@ -1625,6 +1727,8 @@ def exec_driver_sql( a tuple of positional parameters, or a list containing either dictionaries or tuples for multiple-execute support. + :return: a :class:`_engine.CursorResult`. + E.g. 
multiple dictionaries:: @@ -1691,6 +1795,13 @@ def _execute_context( # the only feature that branching provides self = self.__branch_from + if execution_options: + yp = execution_options.get("yield_per", None) + if yp: + execution_options = execution_options.union( + {"stream_results": True, "max_row_buffer": yp} + ) + try: conn = self._dbapi_connection if conn is None: @@ -2368,6 +2479,13 @@ def _transaction_is_active(self): def _transaction_is_closed(self): return not self._deactivated_from_connection + def _rollback_can_be_called(self): + # for RootTransaction / NestedTransaction, it's safe to call + # rollback() even if the transaction is deactive and no warnings + # will be emitted. tested in + # test_transaction.py -> test_no_rollback_in_deactive(?:_savepoint)? + return True + class MarkerTransaction(Transaction): """A 'marker' transaction that is used for nested begin() calls. @@ -2921,32 +3039,45 @@ def driver(self): def __repr__(self): return "Engine(%r)" % (self.url,) - def dispose(self): + def dispose(self, close=True): """Dispose of the connection pool used by this :class:`_engine.Engine`. - This has the effect of fully closing all **currently checked in** - database connections. Connections that are still checked out - will **not** be closed, however they will no longer be associated - with this :class:`_engine.Engine`, - so when they are closed individually, - eventually the :class:`_pool.Pool` which they are associated with will - be garbage collected and they will be closed out fully, if - not already closed on checkin. - - A new connection pool is created immediately after the old one has - been disposed. This new pool, like all SQLAlchemy connection pools, - does not make any actual connections to the database until one is - first requested, so as long as the :class:`_engine.Engine` - isn't used again, - no new connections will be made. + A new connection pool is created immediately after the old one has been + disposed. The previous connection pool is disposed either actively, by + closing out all currently checked-in connections in that pool, or + passively, by losing references to it but otherwise not closing any + connections. The latter strategy is more appropriate for an initializer + in a forked Python process. + + :param close: if left at its default of ``True``, has the + effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`_engine.Engine`, + so when they are closed individually, eventually the + :class:`_pool.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + + If set to ``False``, the previous connection pool is de-referenced, + and otherwise not touched in any way. + + .. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` + parameter to allow the replacement of a connection pool in a child + process without interfering with the connections used by the parent + process. + .. 
seealso:: :ref:`engine_disposal` + :ref:`pooling_multiprocessing` + """ - self.pool.dispose() + if close: + self.pool.dispose() self.pool = self.pool.recreate() self.dispatch.engine_disposed(self) diff --git a/lib/sqlalchemy/engine/characteristics.py b/lib/sqlalchemy/engine/characteristics.py index c00bff40d03..232cf3b5d41 100644 --- a/lib/sqlalchemy/engine/characteristics.py +++ b/lib/sqlalchemy/engine/characteristics.py @@ -1,3 +1,9 @@ +# engine/characteristics.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import abc from ..util import ABC diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 5e56ecdd9f8..0acbb57eff5 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -1,5 +1,5 @@ # engine/create.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -289,19 +289,7 @@ def create_engine(url, **kwargs): .. seealso:: - :attr:`_engine.Connection.default_isolation_level` - - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`_engine.Connection` isolation level - - :ref:`SQLite Transaction Isolation ` - - :ref:`PostgreSQL Transaction Isolation ` - - :ref:`MySQL Transaction Isolation ` - - :ref:`session_transaction_isolation` - for the ORM + :ref:`dbapi_autocommit` :param json_deserializer: for dialects that support the :class:`_types.JSON` @@ -335,10 +323,6 @@ def create_engine(url, **kwargs): :paramref:`_sa.create_engine.max_identifier_length` - :param listeners: A list of one or more - :class:`~sqlalchemy.interfaces.PoolListener` objects which will - receive connection pool events. - :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.engine" logger. Defaults to a hexstring of the @@ -457,7 +441,7 @@ def create_engine(url, **kwargs): .. seealso:: - :paramref:`_pool.Pool.reset_on_return` + :ref:`pool_reset_on_return` :param pool_timeout=30: number of seconds to wait before giving up on getting a connection from the pool. 
This is only used diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 5e6078f8662..970dbb39bfe 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1,5 +1,5 @@ # engine/cursor.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -165,6 +165,7 @@ def __init__(self, parent, cursor_description): result_columns, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ) = context.result_column_struct num_ctx_cols = len(result_columns) @@ -173,6 +174,8 @@ def __init__(self, parent, cursor_description): cols_are_ordered ) = ( num_ctx_cols + ) = ( + ad_hoc_textual ) = loose_column_name_matching = textual_ordered = False # merge cursor.description with the column info @@ -184,6 +187,7 @@ def __init__(self, parent, cursor_description): num_ctx_cols, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ) @@ -214,11 +218,18 @@ def __init__(self, parent, cursor_description): # column keys and other names if num_ctx_cols: - # if by-primary-string dictionary smaller (or bigger?!) than - # number of columns, assume we have dupes, rewrite - # dupe records with "None" for index which results in - # ambiguous column exception when accessed. if len(by_key) != num_ctx_cols: + # if by-primary-string dictionary smaller than + # number of columns, assume we have dupes; (this check + # is also in place if string dictionary is bigger, as + # can occur when '*' was used as one of the compiled columns, + # which may or may not be suggestive of dupes), rewrite + # dupe records with "None" for index which results in + # ambiguous column exception when accessed. + # + # this is considered to be the less common case as it is not + # common to have dupe column keys in a SELECT statement. + # # new in 1.4: get the complete set of all possible keys, # strings, objects, whatever, that are dupes across two # different records, first. @@ -291,6 +302,7 @@ def _merge_cursor_description( num_ctx_cols, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ): """Merge a cursor.description with compiled result column information. 
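As a small usage sketch for the ``yield_per`` execution option documented above (the DSN and the ``big_table`` table are hypothetical, and a backend with server side cursor support such as psycopg2 is assumed), ``yield_per`` enables ``stream_results`` and applies :meth:`_engine.Result.yield_per` in one step, so rows arrive in fixed-size batches rather than being fully pre-buffered::

    from sqlalchemy import create_engine, text

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    with engine.connect() as conn:
        result = conn.execution_options(yield_per=500).execute(
            text("SELECT id, payload FROM big_table")
        )
        for partition in result.partitions():
            # each partition is a list of at most 500 Row objects
            print(len(partition))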
@@ -386,7 +398,9 @@ def _merge_cursor_description( # name-based or text-positional cases, where we need # to read cursor.description names - if textual_ordered: + if textual_ordered or ( + ad_hoc_textual and len(cursor_description) == num_ctx_cols + ): self._safe_for_cache = True # textual positional case raw_iterator = self._merge_textual_cols_by_position( @@ -1021,7 +1035,6 @@ def __init__( growth_factor=5, initial_buffer=None, ): - self._max_row_buffer = execution_options.get("max_row_buffer", 1000) if initial_buffer is not None: @@ -1043,6 +1056,8 @@ def create(cls, result): ) def _buffer_rows(self, result, dbapi_cursor): + """this is currently used only by fetchone().""" + size = self._bufsize try: if size < 1: @@ -1095,9 +1110,14 @@ def fetchmany(self, result, dbapi_cursor, size=None): lb = len(buf) if size > lb: try: - buf.extend(dbapi_cursor.fetchmany(size - lb)) + new = dbapi_cursor.fetchmany(size - lb) except BaseException as e: self.handle_exception(result, dbapi_cursor, e) + else: + if not new: + result._soft_close() + else: + buf.extend(new) result = buf[0:size] self._rowbuffer = collections.deque(buf[size:]) @@ -1348,7 +1368,6 @@ def _soft_close(self, hard=False): """ - if (not hard and self._soft_closed) or (hard and self.closed): return @@ -1366,7 +1385,8 @@ def _soft_close(self, hard=False): @property def inserted_primary_key_rows(self): - """Return the value of :attr:`_engine.CursorResult.inserted_primary_key` + """Return the value of + :attr:`_engine.CursorResult.inserted_primary_key` as a row contained within a list; some dialects may support a multiple row form as well. @@ -1677,7 +1697,7 @@ def rowcount(self): :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 try: return self.context.rowcount @@ -1707,7 +1727,8 @@ def lastrowid(self): @property def returns_rows(self): - """True if this :class:`_engine.CursorResult` returns zero or more rows. + """True if this :class:`_engine.CursorResult` returns zero or more + rows. I.e. if it is legal to call the methods :meth:`_engine.CursorResult.fetchone`, @@ -1780,6 +1801,7 @@ class CursorResult(BaseCursorResult, Result): _cursor_metadata = CursorResultMetaData _cursor_strategy_cls = CursorFetchStrategy _no_result_metadata = _NO_RESULT_METADATA + _is_cursor = True def _fetchiter_impl(self): fetchone = self.cursor_strategy.fetchone diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 75bca190502..90ca4c49566 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -231,6 +231,7 @@ class DefaultDialect(interfaces.Dialect): CACHING_DISABLED = CACHING_DISABLED NO_CACHE_KEY = NO_CACHE_KEY NO_DIALECT_SUPPORT = NO_DIALECT_SUPPORT + has_terminate = False @util.deprecated_params( convert_unicode=( @@ -350,10 +351,23 @@ def _ensure_has_table_connection(self, arg): @util.memoized_property def _supports_statement_cache(self): - return ( - self.__class__.__dict__.get("supports_statement_cache", False) - is True - ) + ssc = self.__class__.__dict__.get("supports_statement_cache", None) + if ssc is None: + util.warn( + "Dialect %s:%s will not make use of SQL compilation caching " + "as it does not set the 'supports_statement_cache' attribute " + "to ``True``. 
This can have " + "significant performance implications including some " + "performance degradations in comparison to prior SQLAlchemy " + "versions. Dialect maintainers should seek to set this " + "attribute to True after appropriate development and testing " + "for SQLAlchemy 1.4 caching support. Alternatively, this " + "attribute may be set to False which will disable this " + "warning." % (self.name, self.driver), + code="cprf", + ) + + return bool(ssc) @util.memoized_property def _type_memos(self): @@ -631,8 +645,10 @@ def _set_connection_characteristics(self, connection, characteristics): if trans_objs: if connection._is_future: raise exc.InvalidRequestError( - "This connection has already begun a transaction; " - "%s may not be altered until transaction end" + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; " + "%s may not be altered unless rollback() or commit() " + "is called first." % (", ".join(name for name, obj in trans_objs)) ) else: @@ -669,6 +685,9 @@ def do_rollback(self, dbapi_connection): def do_commit(self, dbapi_connection): dbapi_connection.commit() + def do_terminate(self, dbapi_connection): + self.do_close(dbapi_connection) + def do_close(self, dbapi_connection): dbapi_connection.close() @@ -956,6 +975,7 @@ def _init_compiled( compiled._result_columns, compiled._ordered_columns, compiled._textual_ordered_columns, + compiled._ad_hoc_textual, compiled._loose_column_name_matching, ) self.isinsert = compiled.isinsert @@ -973,13 +993,15 @@ def _init_compiled( if not parameters: self.compiled_parameters = [ compiled.construct_params( - extracted_parameters=extracted_parameters + extracted_parameters=extracted_parameters, + escape_names=False, ) ] else: self.compiled_parameters = [ compiled.construct_params( m, + escape_names=False, _group_number=grp, extracted_parameters=extracted_parameters, ) @@ -1064,21 +1086,44 @@ def _init_compiled( if encode: encoder = dialect._encoder for compiled_params in self.compiled_parameters: + escaped_bind_names = compiled.escaped_bind_names if encode: - param = { - encoder(key)[0]: processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - for key in compiled_params - } + if escaped_bind_names: + param = { + encoder(escaped_bind_names.get(key, key))[ + 0 + ]: processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + for key in compiled_params + } + else: + param = { + encoder(key)[0]: processors[key]( + compiled_params[key] + ) + if key in processors + else compiled_params[key] + for key in compiled_params + } else: - param = { - key: processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - for key in compiled_params - } + if escaped_bind_names: + param = { + escaped_bind_names.get(key, key): processors[key]( + compiled_params[key] + ) + if key in processors + else compiled_params[key] + for key in compiled_params + } + else: + param = { + key: processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + for key in compiled_params + } parameters.append(param) @@ -1405,11 +1450,16 @@ def supports_sane_multi_rowcount(self): return self.dialect.supports_sane_multi_rowcount def _setup_result_proxy(self): + exec_opt = self.execution_options + if self.is_crud or self.is_text: result = self._setup_dml_or_text_result() + yp = sr = False else: + yp = exec_opt.get("yield_per", None) + sr = self._is_server_side or exec_opt.get("stream_results", False) strategy = 
self.cursor_fetch_strategy - if self._is_server_side and strategy is _cursor._DEFAULT_FETCH: + if sr and strategy is _cursor._DEFAULT_FETCH: strategy = _cursor.BufferedRowCursorFetchStrategy( self.cursor, self.execution_options ) @@ -1442,6 +1492,9 @@ def _setup_result_proxy(self): self._soft_closed = result._soft_closed + if yp: + result = result.yield_per(yp) + return result def _setup_out_parameters(self, result): @@ -1550,7 +1603,6 @@ def inserted_primary_key_rows(self): return self._setup_ins_pk_from_empty() def _setup_ins_pk_from_lastrowid(self): - getter = self.compiled._inserted_primary_key_from_lastrowid_getter lastrowid = self.get_lastrowid() @@ -1558,7 +1610,6 @@ def _setup_ins_pk_from_lastrowid(self): def _setup_ins_pk_from_empty(self): getter = self.compiled._inserted_primary_key_from_lastrowid_getter - return [getter(None, param) for param in self.compiled_parameters] def _setup_ins_pk_from_implicit_returning(self, result, rows): @@ -1827,7 +1878,7 @@ def get_update_default(self, column): return self._exec_default(column, column.onupdate, column.type) def _process_executemany_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] + key_getter = self.compiled._within_exec_param_key_getter scalar_defaults = {} @@ -1865,7 +1916,7 @@ def _process_executemany_defaults(self): del self.current_parameters def _process_executesingle_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] + key_getter = self.compiled._within_exec_param_key_getter self.current_parameters = ( compiled_parameters ) = self.compiled_parameters[0] diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index f091c7733a8..45f8e950339 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/engine/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# engine/events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -716,8 +716,14 @@ def _accept_with(cls, target): return target elif isinstance(target, Engine): return target.dialect - else: + elif isinstance(target, Dialect): return target + elif hasattr(target, "dispatch") and hasattr( + target.dispatch._events, "_no_async_engine_events" + ): + target.dispatch._events._no_async_engine_events() + else: + return None def do_connect(self, dialect, conn_rec, cargs, cparams): """Receive connection arguments before a connection is made. diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index d1484718eb6..350e952097c 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,6 +10,7 @@ from .. import util from ..sql.compiler import Compiled # noqa from ..sql.compiler import TypeCompiler # noqa +from ..util.concurrency import await_only class Dialect(object): @@ -582,6 +583,23 @@ def do_commit(self, dbapi_connection): raise NotImplementedError() + def do_terminate(self, dbapi_connection): + """Provide an implementation of ``connection.close()`` that tries as + much as possible to not block, given a DBAPI + connection. 
+ + In the vast majority of cases this just calls .close(), however + for some asyncio dialects may call upon different API features. + + This hook is called by the :class:`_pool.Pool` + when a connection is being recycled or has been invalidated. + + .. versionadded:: 1.4.41 + + """ + + raise NotImplementedError() + def do_close(self, dbapi_connection): """Provide an implementation of ``connection.close()``, given a DBAPI connection. @@ -1417,10 +1435,6 @@ def get_out_parameter_values(self, out_param_names): set. This replaces the practice of setting out parameters within the now-removed ``get_result_proxy()`` method. - .. seealso:: - - :meth:`.ExecutionContext.get_result_cursor_strategy` - """ raise NotImplementedError() @@ -1434,69 +1448,6 @@ def post_exec(self): raise NotImplementedError() - def get_result_cursor_strategy(self, result): - """Return a result cursor strategy for a given result object. - - This method is implemented by the :class:`.DefaultDialect` and is - only needed by implementing dialects in the case where some special - steps regarding the cursor must be taken, such as manufacturing - fake results from some other element of the cursor, or pre-buffering - the cursor's results. - - A simplified version of the default implementation is:: - - from sqlalchemy.engine.result import DefaultCursorFetchStrategy - - class MyExecutionContext(DefaultExecutionContext): - def get_result_cursor_strategy(self, result): - return DefaultCursorFetchStrategy.create(result) - - Above, the :class:`.DefaultCursorFetchStrategy` will be applied - to the result object. For results that are pre-buffered from a - cursor that might be closed, an implementation might be:: - - - from sqlalchemy.engine.result import ( - FullyBufferedCursorFetchStrategy - ) - - class MyExecutionContext(DefaultExecutionContext): - _pre_buffered_result = None - - def pre_exec(self): - if self.special_condition_prebuffer_cursor(): - self._pre_buffered_result = ( - self.cursor.description, - self.cursor.fetchall() - ) - - def get_result_cursor_strategy(self, result): - if self._pre_buffered_result: - description, cursor_buffer = self._pre_buffered_result - return ( - FullyBufferedCursorFetchStrategy. - create_from_buffer( - result, description, cursor_buffer - ) - ) - else: - return DefaultCursorFetchStrategy.create(result) - - This method replaces the previous not-quite-documented - ``get_result_proxy()`` method. - - .. versionadded:: 1.4 - result objects now interpret cursor results - based on a pluggable "strategy" object, which is delivered - by the :class:`.ExecutionContext` via the - :meth:`.ExecutionContext.get_result_cursor_strategy` method. - - .. seealso:: - - :meth:`.ExecutionContext.get_out_parameter_values` - - """ - raise NotImplementedError() - def handle_dbapi_exception(self, e): """Receive a DBAPI exception which occurred upon execute, result fetch, etc.""" @@ -1752,5 +1703,34 @@ def driver_connection(self): """The connection object as returned by the driver after a connect.""" return self._connection + def run_async(self, fn): + """Run the awaitable returned by the given function, which is passed + the raw asyncio driver connection. + + This is used to invoke awaitable-only methods on the driver connection + within the context of a "synchronous" method, like a connection + pool event handler. + + E.g.:: + + engine = create_async_engine(...) 
+ + @event.listens_for(engine.sync_engine, "connect") + def register_custom_types(dbapi_connection, ...): + dbapi_connection.run_async( + lambda connection: connection.set_type_codec( + 'MyCustomType', encoder, decoder, ... + ) + ) + + .. versionadded:: 1.4.30 + + .. seealso:: + + :ref:`asyncio_events_run_async` + + """ + return await_only(fn(self._connection)) + def __repr__(self): return "<AdaptedConnection %s>" % self._connection diff --git a/lib/sqlalchemy/engine/mock.py b/lib/sqlalchemy/engine/mock.py index 803fe30a285..00818f22a84 100644 --- a/lib/sqlalchemy/engine/mock.py +++ b/lib/sqlalchemy/engine/mock.py @@ -1,5 +1,5 @@ # engine/mock.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index 113aa8ea069..0367320cc37 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -797,6 +797,7 @@ def reflect_table( schema, table, cols_by_orig_name, + include_columns, exclude_columns, resolve_fks, _extend_on, @@ -940,6 +941,7 @@ def _reflect_fk( schema, table, cols_by_orig_name, + include_columns, exclude_columns, resolve_fks, _extend_on, @@ -956,10 +958,17 @@ def _reflect_fk( cols_by_orig_name[c].key if c in cols_by_orig_name else c for c in fkey_d["constrained_columns"] ] - if exclude_columns and set(constrained_columns).intersection( + + if ( exclude_columns + and set(constrained_columns).intersection(exclude_columns) + or ( + include_columns + and set(constrained_columns).difference(include_columns) + ) ): continue + referred_schema = fkey_d["referred_schema"] referred_table = fkey_d["referred_table"] referred_columns = fkey_d["referred_columns"] @@ -994,6 +1003,7 @@ def _reflect_fk( options = fkey_d["options"] else: options = {} + table.append_constraint( sa_schema.ForeignKeyConstraint( constrained_columns, diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 3c2e682be65..7cdeb81942d 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -167,7 +167,7 @@ def __init__( if extra: recs_names = [ ( - (name,) + extras, + (name,) + (extras if extras else ()), (index, name, extras), ) for index, (name, extras) in enumerate(zip(self._keys, extra)) @@ -292,6 +292,7 @@ class ResultInternal(InPlaceGenerative): _generate_rows = True _unique_filter_state = None _post_creational_filter = None + _is_cursor = False @HasMemoized.memoized_attribute def _row_getter(self): @@ -647,6 +648,15 @@ def _column_slices(self, indexes): real_result = self._real_result if self._real_result else self if real_result._source_supports_scalars and len(indexes) == 1: + util.warn_deprecated( + "The Result.columns() method has a bug in SQLAlchemy 1.4 that " + "is causing it to yield scalar values, rather than Row " + "objects, in the case where a single index is passed and the " + "result is against ORM mapped objects. 
In SQLAlchemy 2.0, " + "Result will continue to yield Row objects in this scenario. " + "Use the Result.scalars() method to yield scalar values.", + "2.0", + ) self._generate_rows = False else: self._generate_rows = True @@ -680,7 +690,7 @@ class _WithKeys(object): # py2k does not allow overriding the __doc__ attribute. def keys(self): """Return an iterable view which yields the string keys that would - be represented by each :class:`.Row`. + be represented by each :class:`_engine.Row`. The keys can represent the labels of the columns returned by a core statement or the names of the orm classes returned by an orm @@ -701,20 +711,21 @@ def keys(self): class Result(_WithKeys, ResultInternal): """Represent a set of database results. - .. versionadded:: 1.4 The :class:`.Result` object provides a completely - updated usage model and calling facade for SQLAlchemy Core and - SQLAlchemy ORM. In Core, it forms the basis of the - :class:`.CursorResult` object which replaces the previous - :class:`.ResultProxy` interface. When using the ORM, a higher level - object called :class:`.ChunkedIteratorResult` is normally used. + .. versionadded:: 1.4 The :class:`_engine.Result` object provides a + completely updated usage model and calling facade for SQLAlchemy + Core and SQLAlchemy ORM. In Core, it forms the basis of the + :class:`_engine.CursorResult` object which replaces the previous + :class:`_engine.ResultProxy` interface. When using the ORM, a + higher level object called :class:`_engine.ChunkedIteratorResult` + is normally used. .. note:: In SQLAlchemy 1.4 and above, this object is used for ORM results returned by :meth:`_orm.Session.execute`, which can yield instances of ORM mapped objects either individually or within - tuple-like rows. Note that the :class:`_result.Result` object does not + tuple-like rows. Note that the :class:`_engine.Result` object does not deduplicate instances or rows automatically as is the case with the legacy :class:`_orm.Query` object. For in-Python de-duplication of - instances or rows, use the :meth:`_result.Result.unique` modifier + instances or rows, use the :meth:`_engine.Result.unique` modifier method. .. seealso:: @@ -739,9 +750,44 @@ def __init__(self, cursor_metadata): def _soft_close(self, hard=False): raise NotImplementedError() + def close(self): + """close this :class:`_engine.Result`. + + The behavior of this method is implementation specific, and is + not implemented by default. The method should generally end + the resources in use by the result object and also cause any + subsequent iteration or row fetching to raise + :class:`.ResourceClosedError`. + + .. versionadded:: 1.4.27 - ``.close()`` was previously not generally + available for all :class:`_engine.Result` classes, instead only + being available on the :class:`_engine.CursorResult` returned for + Core statement executions. As most other result objects, namely the + ones used by the ORM, are proxying a :class:`_engine.CursorResult` + in any case, this allows the underlying cursor result to be closed + from the outside facade for the case when the ORM query is using + the ``yield_per`` execution option where it does not immediately + exhaust and autoclose the database cursor. + + """ + self._soft_close(hard=True) + + @property + def _soft_closed(self): + raise NotImplementedError() + + @property + def closed(self): + """return ``True`` if this :class:`_engine.Result` reports .closed + + .. 
versionadded:: 1.4.43 + + """ + raise NotImplementedError() + @_generative def yield_per(self, num): - """Configure the row-fetching strategy to fetch num rows at a time. + """Configure the row-fetching strategy to fetch ``num`` rows at a time. This impacts the underlying behavior of the result when iterating over the result object, or otherwise making use of methods such as @@ -756,22 +802,37 @@ def yield_per(self, num): conjunction with the :paramref:`_engine.Connection.execution_options.stream_results` execution option, which will allow the database dialect in use to make - use of a server side cursor, if the DBAPI supports it. + use of a server side cursor, if the DBAPI supports a specific "server + side cursor" mode separate from its default mode of operation. - Most DBAPIs do not use server side cursors by default, which means all - rows will be fetched upfront from the database regardless of the - :meth:`_engine.Result.yield_per` setting. However, - :meth:`_engine.Result.yield_per` may still be useful in that it batches - the SQLAlchemy-side processing of the raw data from the database, and - additionally when used for ORM scenarios will batch the conversion of - database rows into ORM entity rows. + .. tip:: + Consider using the + :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which will simultaneously set + :paramref:`_engine.Connection.execution_options.stream_results` + to ensure the use of server side cursors, as well as automatically + invoke the :meth:`_engine.Result.yield_per` method to establish + a fixed row buffer size at once. + + The :paramref:`_engine.Connection.execution_options.yield_per` + execution option is available for ORM operations, with + :class:`_orm.Session`-oriented use described at + :ref:`orm_queryguide_yield_per`. The Core-only version which works + with :class:`_engine.Connection` is new as of SQLAlchemy 1.4.40. .. versionadded:: 1.4 :param num: number of rows to fetch each time the buffer is refilled. If set to a value below 1, fetches all rows for the next buffer. + .. seealso:: + + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + """ self._yield_per = num @@ -854,7 +915,7 @@ def columns(self, *col_expressions): return self._column_slices(col_expressions) def scalars(self, index=0): - """Return a :class:`_result.ScalarResult` filtering object which + """Return a :class:`_engine.ScalarResult` filtering object which will return single elements rather than :class:`_row.Row` objects. E.g.:: @@ -863,24 +924,24 @@ def scalars(self, index=0): >>> result.scalars().all() [1, 2, 3] - When results are fetched from the :class:`_result.ScalarResult` + When results are fetched from the :class:`_engine.ScalarResult` filtering object, the single column-row that would be returned by the - :class:`_result.Result` is instead returned as the column's value. + :class:`_engine.Result` is instead returned as the column's value. .. versionadded:: 1.4 :param index: integer or row key indicating the column to be fetched from each row, defaults to ``0`` indicating the first column. - :return: a new :class:`_result.ScalarResult` filtering object referring - to this :class:`_result.Result` object. + :return: a new :class:`_engine.ScalarResult` filtering object referring + to this :class:`_engine.Result` object. 
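A minimal sketch of the ``yield_per`` execution option that the docstring above describes, assuming a Core engine; the connection URL and table name are placeholders and are not part of this changeset::

    from sqlalchemy import create_engine, text

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    with engine.connect() as conn:
        # yield_per also sets stream_results, so a server side cursor is
        # requested where the DBAPI supports one; rows are then buffered
        # 100 at a time on the SQLAlchemy side
        result = conn.execution_options(yield_per=100).execute(
            text("SELECT id, data FROM some_large_table")
        )
        for partition in result.partitions():
            # each partition is a list of at most 100 Row objects
            for row in partition:
                print(row.id, row.data)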
""" return ScalarResult(self, index) def _getter(self, key, raiseerr=True): """return a callable that will retrieve the given key from a - :class:`.Row`. + :class:`_engine.Row`. """ if self._source_supports_scalars: @@ -891,7 +952,7 @@ def _getter(self, key, raiseerr=True): def _tuple_getter(self, keys): """return a callable that will retrieve the given keys from a - :class:`.Row`. + :class:`_engine.Row`. """ if self._source_supports_scalars: @@ -902,15 +963,16 @@ def _tuple_getter(self, keys): def mappings(self): """Apply a mappings filter to returned rows, returning an instance of - :class:`_result.MappingResult`. + :class:`_engine.MappingResult`. When this filter is applied, fetching rows will return - :class:`.RowMapping` objects instead of :class:`.Row` objects. + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. .. versionadded:: 1.4 - :return: a new :class:`_result.MappingResult` filtering object - referring to this :class:`_result.Result` object. + :return: a new :class:`_engine.MappingResult` filtering object + referring to this :class:`_engine.Result` object. """ @@ -919,7 +981,7 @@ def mappings(self): def _raw_row_iterator(self): """Return a safe iterator that yields raw row data. - This is used by the :meth:`._engine.Result.merge` method + This is used by the :meth:`_engine.Result.merge` method to merge multiple compatible results together. """ @@ -965,16 +1027,34 @@ def partitions(self, size=None): results, if possible. Not all drivers support this option and the option is silently ignored for those who do not. + When using the ORM, the :meth:`_engine.Result.partitions` method + is typically more effective from a memory perspective when it is + combined with use of the + :ref:`yield_per execution option `, + which instructs both the DBAPI driver to use server side cursors, + if available, as well as instructs the ORM loading internals to only + build a certain amount of ORM objects from a result at a time before + yielding them out. + .. versionadded:: 1.4 :param size: indicate the maximum number of rows to be present in each list yielded. If None, makes use of the value set by - :meth:`_engine.Result.yield_per`, if present, otherwise uses the - :meth:`_engine.Result.fetchmany` default which may be backend - specific. + the :meth:`_engine.Result.yield_per`, method, if it were called, + or the :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which is equivalent in this regard. If + yield_per weren't set, it makes use of the + :meth:`_engine.Result.fetchmany` default, which may be backend + specific and not well defined. :return: iterator of lists + .. seealso:: + + :ref:`engine_stream_results` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + """ getter = self._manyrow_getter @@ -1003,8 +1083,8 @@ def fetchone(self): :meth:`_engine.Result.first` method. To iterate through all rows, iterate the :class:`_engine.Result` object directly. - :return: a :class:`.Row` object if no filters are applied, or None - if no rows remain. + :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. """ row = self._onerow_getter(self) @@ -1021,10 +1101,14 @@ def fetchmany(self, size=None): This method is provided for backwards compatibility with SQLAlchemy 1.x.x. - To fetch rows in groups, use the :meth:`._result.Result.partitions` + To fetch rows in groups, use the :meth:`_engine.Result.partitions` method. - :return: a list of :class:`.Row` objects. 
+ :return: a list of :class:`_engine.Row` objects. + + .. seealso:: + + :meth:`_engine.Result.partitions` """ @@ -1038,25 +1122,28 @@ def all(self): .. versionadded:: 1.4 - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. """ return self._allrows() def first(self): - """Fetch the first row or None if no row is present. + """Fetch the first row or ``None`` if no row is present. Closes the result set and discards remaining rows. .. note:: This method returns one **row**, e.g. tuple, by default. To return exactly one single scalar value, that is, the first - column of the first row, use the :meth:`.Result.scalar` method, - or combine :meth:`.Result.scalars` and :meth:`.Result.first`. + column of the first row, use the + :meth:`_engine.Result.scalar` method, + or combine :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.first`. Additionally, in contrast to the behavior of the legacy ORM :meth:`_orm.Query.first` method, **no limit is applied** to the - SQL query which was invoked to produce this :class:`_engine.Result`; + SQL query which was invoked to produce this + :class:`_engine.Result`; for a DBAPI driver that buffers results in memory before yielding rows, all rows will be sent to the Python process and all but the first row will be discarded. @@ -1065,14 +1152,14 @@ def first(self): :ref:`migration_20_unify_select` - :return: a :class:`.Row` object, or None + :return: a :class:`_engine.Row` object, or None if no rows remain. .. seealso:: - :meth:`_result.Result.scalar` + :meth:`_engine.Result.scalar` - :meth:`_result.Result.one` + :meth:`_engine.Result.one` """ @@ -1089,15 +1176,16 @@ def one_or_none(self): .. versionadded:: 1.4 - :return: The first :class:`.Row` or None if no row is available. + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. :raises: :class:`.MultipleResultsFound` .. seealso:: - :meth:`_result.Result.first` + :meth:`_engine.Result.first` - :meth:`_result.Result.one` + :meth:`_engine.Result.one` """ return self._only_one_row( @@ -1107,14 +1195,14 @@ def one_or_none(self): def scalar_one(self): """Return exactly one scalar result or raise an exception. - This is equivalent to calling :meth:`.Result.scalars` and then - :meth:`.Result.one`. + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.Result.one`. .. seealso:: - :meth:`.Result.one` + :meth:`_engine.Result.one` - :meth:`.Result.scalars` + :meth:`_engine.Result.scalars` """ return self._only_one_row( @@ -1122,16 +1210,16 @@ def scalar_one(self): ) def scalar_one_or_none(self): - """Return exactly one or no scalar result. + """Return exactly one scalar result or ``None``. - This is equivalent to calling :meth:`.Result.scalars` and then - :meth:`.Result.one_or_none`. + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.Result.one_or_none`. .. seealso:: - :meth:`.Result.one_or_none` + :meth:`_engine.Result.one_or_none` - :meth:`.Result.scalars` + :meth:`_engine.Result.scalars` """ return self._only_one_row( @@ -1147,22 +1235,24 @@ def one(self): .. note:: This method returns one **row**, e.g. tuple, by default. To return exactly one single scalar value, that is, the first - column of the first row, use the :meth:`.Result.scalar_one` method, - or combine :meth:`.Result.scalars` and :meth:`.Result.one`. + column of the first row, use the + :meth:`_engine.Result.scalar_one` method, or combine + :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.one`. .. 
versionadded:: 1.4 - :return: The first :class:`.Row`. + :return: The first :class:`_engine.Row`. :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` .. seealso:: - :meth:`_result.Result.first` + :meth:`_engine.Result.first` - :meth:`_result.Result.one_or_none` + :meth:`_engine.Result.one_or_none` - :meth:`_result.Result.scalar_one` + :meth:`_engine.Result.scalar_one` """ return self._only_one_row( @@ -1172,7 +1262,7 @@ def one(self): def scalar(self): """Fetch the first column of the first row, and close the result set. - Returns None if there are no rows to fetch. + Returns ``None`` if there are no rows to fetch. No validation is performed to test if additional rows remain. @@ -1180,7 +1270,7 @@ def scalar(self): e.g. the :meth:`_engine.CursorResult.close` method will have been called. - :return: a Python scalar value , or None if no rows remain. + :return: a Python scalar value, or ``None`` if no rows remain. """ return self._only_one_row( @@ -1189,7 +1279,7 @@ def scalar(self): def freeze(self): """Return a callable object that will produce copies of this - :class:`.Result` when invoked. + :class:`_engine.Result` when invoked. The callable object returned is an instance of :class:`_engine.FrozenResult`. @@ -1211,7 +1301,7 @@ def freeze(self): return FrozenResult(self) def merge(self, *others): - """Merge this :class:`.Result` with other compatible result + """Merge this :class:`_engine.Result` with other compatible result objects. The object returned is an instance of :class:`_engine.MergedResult`, @@ -1229,15 +1319,62 @@ def merge(self, *others): class FilterResult(ResultInternal): """A wrapper for a :class:`_engine.Result` that returns objects other than - :class:`_result.Row` objects, such as dictionaries or scalar objects. + :class:`_engine.Row` objects, such as dictionaries or scalar objects. + + :class:`_engine.FilterResult` is the common base for additional result + APIs including :class:`_engine.MappingResult`, + :class:`_engine.ScalarResult` and :class:`_engine.AsyncResult`. """ _post_creational_filter = None + @_generative + def yield_per(self, num): + """Configure the row-fetching strategy to fetch ``num`` rows at a time. + + The :meth:`_engine.FilterResult.yield_per` method is a pass through + to the :meth:`_engine.Result.yield_per` method. See that method's + documentation for usage notes. + + .. versionadded:: 1.4.40 - added :meth:`_engine.FilterResult.yield_per` + so that the method is available on all result set implementations + + .. seealso:: + + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ + self._real_result = self._real_result.yield_per(num) + def _soft_close(self, hard=False): self._real_result._soft_close(hard=hard) + @property + def _soft_closed(self): + return self._real_result._soft_closed + + @property + def closed(self): + """Return ``True`` if the underlying :class:`_engine.Result` reports + closed + + .. versionadded:: 1.4.43 + + """ + return self._real_result.closed # type: ignore + + def close(self): + """Close this :class:`_engine.FilterResult`. + + .. 
versionadded:: 1.4.43 + + """ + self._real_result.close() + @property def _attributes(self): return self._real_result._attributes @@ -1256,16 +1393,16 @@ def _fetchmany_impl(self, size=None): class ScalarResult(FilterResult): - """A wrapper for a :class:`_result.Result` that returns scalar values + """A wrapper for a :class:`_engine.Result` that returns scalar values rather than :class:`_row.Row` values. - The :class:`_result.ScalarResult` object is acquired by calling the - :meth:`_result.Result.scalars` method. + The :class:`_engine.ScalarResult` object is acquired by calling the + :meth:`_engine.Result.scalars` method. - A special limitation of :class:`_result.ScalarResult` is that it has + A special limitation of :class:`_engine.ScalarResult` is that it has no ``fetchone()`` method; since the semantics of ``fetchone()`` are that the ``None`` value indicates no more results, this is not compatible - with :class:`_result.ScalarResult` since there is no way to distinguish + with :class:`_engine.ScalarResult` since there is no way to distinguish between ``None`` as a row value versus ``None`` as an indicator. Use ``next(result)`` to receive values individually. @@ -1298,8 +1435,8 @@ def unique(self, strategy=None): def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. - Equivalent to :meth:`_result.Result.partitions` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.partitions` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1321,8 +1458,8 @@ def fetchall(self): def fetchmany(self, size=None): """Fetch many objects. - Equivalent to :meth:`_result.Result.fetchmany` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.fetchmany` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1331,8 +1468,8 @@ def fetchmany(self, size=None): def all(self): """Return all scalar values in a list. - Equivalent to :meth:`_result.Result.all` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.all` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1350,10 +1487,10 @@ def next(self): # noqa return self._next_impl() def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. - Equivalent to :meth:`_result.Result.first` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.first` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. @@ -1365,8 +1502,8 @@ def first(self): def one_or_none(self): """Return at most one object or raise an exception. - Equivalent to :meth:`_result.Result.one_or_none` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.one_or_none` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1377,8 +1514,8 @@ def one_or_none(self): def one(self): """Return exactly one object or raise an exception. - Equivalent to :meth:`_result.Result.one` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.one` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. 
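A brief sketch of the ``ScalarResult`` usage pattern documented above; the table and values are illustrative only::

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")

    with engine.connect() as conn:
        conn.execute(text("CREATE TABLE t (id INTEGER)"))
        conn.execute(text("INSERT INTO t VALUES (1), (2), (3)"))

        # each fetch yields the column value itself, not a Row object
        ids = conn.execute(text("SELECT id FROM t ORDER BY id")).scalars().all()
        assert ids == [1, 2, 3]

        # ScalarResult has no fetchone(); use next() to get a single value
        first_id = next(conn.execute(text("SELECT id FROM t ORDER BY id")).scalars())
        assert first_id == 1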
""" @@ -1424,9 +1561,9 @@ def columns(self, *col_expressions): def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. - Equivalent to :meth:`_result.Result.partitions` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.partitions` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1447,9 +1584,9 @@ def fetchall(self): def fetchone(self): """Fetch one object. - Equivalent to :meth:`_result.Result.fetchone` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.fetchone` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1462,9 +1599,9 @@ def fetchone(self): def fetchmany(self, size=None): """Fetch many objects. - Equivalent to :meth:`_result.Result.fetchmany` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.fetchmany` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1473,9 +1610,9 @@ def fetchmany(self, size=None): def all(self): """Return all scalar values in a list. - Equivalent to :meth:`_result.Result.all` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.all` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1493,11 +1630,11 @@ def next(self): # noqa return self._next_impl() def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. - Equivalent to :meth:`_result.Result.first` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.first` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1508,9 +1645,9 @@ def first(self): def one_or_none(self): """Return at most one object or raise an exception. - Equivalent to :meth:`_result.Result.one_or_none` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.one_or_none` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return self._only_one_row( @@ -1520,9 +1657,9 @@ def one_or_none(self): def one(self): """Return exactly one object or raise an exception. - Equivalent to :meth:`_result.Result.one` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.one` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return self._only_one_row( @@ -1531,15 +1668,15 @@ def one(self): class FrozenResult(object): - """Represents a :class:`.Result` object in a "frozen" state suitable + """Represents a :class:`_engine.Result` object in a "frozen" state suitable for caching. The :class:`_engine.FrozenResult` object is returned from the :meth:`_engine.Result.freeze` method of any :class:`_engine.Result` object. 
- A new iterable :class:`.Result` object is generated from a fixed - set of data each time the :class:`.FrozenResult` is invoked as + A new iterable :class:`_engine.Result` object is generated from a fixed + set of data each time the :class:`_engine.FrozenResult` is invoked as a callable:: @@ -1605,13 +1742,16 @@ def __call__(self): class IteratorResult(Result): - """A :class:`.Result` that gets data from a Python iterator of - :class:`.Row` objects. + """A :class:`_engine.Result` that gets data from a Python iterator of + :class:`_engine.Row` objects or similar row-like data. .. versionadded:: 1.4 """ + _hard_closed = False + _soft_closed = False + def __init__( self, cursor_metadata, @@ -1624,16 +1764,40 @@ def __init__( self.raw = raw self._source_supports_scalars = _source_supports_scalars - def _soft_close(self, **kw): + @property + def closed(self): + """Return ``True`` if this :class:`_engine.IteratorResult` has + been closed + + .. versionadded:: 1.4.43 + + """ + return self._hard_closed + + def _soft_close(self, hard=False, **kw): + if hard: + self._hard_closed = True + if self.raw is not None: + self.raw._soft_close(hard=hard, **kw) self.iterator = iter([]) + self._reset_memoizations() + self._soft_closed = True + + def _raise_hard_closed(self): + raise exc.ResourceClosedError("This result object is closed.") def _raw_row_iterator(self): return self.iterator def _fetchiter_impl(self): + if self._hard_closed: + self._raise_hard_closed() return self.iterator def _fetchone_impl(self, hard_close=False): + if self._hard_closed: + self._raise_hard_closed() + row = next(self.iterator, _NO_ROW) if row is _NO_ROW: self._soft_close(hard=hard_close) @@ -1642,12 +1806,18 @@ def _fetchone_impl(self, hard_close=False): return row def _fetchall_impl(self): + if self._hard_closed: + self._raise_hard_closed() + try: return list(self.iterator) finally: self._soft_close() def _fetchmany_impl(self, size=None): + if self._hard_closed: + self._raise_hard_closed() + return list(itertools.islice(self.iterator, 0, size)) @@ -1656,7 +1826,8 @@ def null_result(): class ChunkedIteratorResult(IteratorResult): - """An :class:`.IteratorResult` that works from an iterator-producing callable. + """An :class:`_engine.IteratorResult` that works from an + iterator-producing callable. The given ``chunks`` argument is a function that is given a number of rows to return in each chunk, or ``None`` for all rows. 
The function should @@ -1696,6 +1867,10 @@ def yield_per(self, num): self._yield_per = num self.iterator = itertools.chain.from_iterable(self.chunks(num)) + def _soft_close(self, **kw): + super(ChunkedIteratorResult, self)._soft_close(**kw) + self.chunks = lambda size: [] + def _fetchmany_impl(self, size=None): if self.dynamic_yield_per: self.iterator = itertools.chain.from_iterable(self.chunks(size)) @@ -1733,11 +1908,8 @@ def __init__(self, cursor_metadata, results): *[r._attributes for r in results] ) - def close(self): - self._soft_close(hard=True) - - def _soft_close(self, hard=False): + def _soft_close(self, hard=False, **kw): for r in self._results: - r._soft_close(hard=hard) + r._soft_close(hard=hard, **kw) if hard: self.closed = True diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index dc11e354862..fb24a463498 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -1,5 +1,5 @@ # engine/row.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -66,21 +66,25 @@ class BaseRow(object): def __init__(self, parent, processors, keymap, key_style, data): """Row objects are constructed by CursorResult objects.""" - self._parent = parent + object.__setattr__(self, "_parent", parent) if processors: - self._data = tuple( - [ - proc(value) if proc else value - for proc, value in zip(processors, data) - ] + object.__setattr__( + self, + "_data", + tuple( + [ + proc(value) if proc else value + for proc, value in zip(processors, data) + ] + ), ) else: - self._data = tuple(data) + object.__setattr__(self, "_data", tuple(data)) - self._keymap = keymap + object.__setattr__(self, "_keymap", keymap) - self._key_style = key_style + object.__setattr__(self, "_key_style", key_style) def __reduce__(self): return ( @@ -126,7 +130,10 @@ def _get_by_key_impl(self, key): try: rec = self._keymap[key] except KeyError as ke: - rec = self._parent._key_fallback(key, ke) + if isinstance(key, slice): + return tuple(self._data[key]) + else: + rec = self._parent._key_fallback(key, ke) except TypeError: if isinstance(key, slice): return tuple(self._data[key]) @@ -187,7 +194,7 @@ class Row(BaseRow, collections_abc.Sequence): .. seealso:: - :ref:`coretutorial_selecting` - includes examples of selecting + :ref:`tutorial_selecting_data` - includes examples of selecting rows from SELECT statements. :class:`.LegacyRow` - Compatibility interface introduced in SQLAlchemy @@ -211,6 +218,12 @@ class Row(BaseRow, collections_abc.Sequence): # in 2.0, this should be KEY_INTEGER_ONLY _default_key_style = KEY_OBJECTS_BUT_WARN + def __setattr__(self, name, value): + raise AttributeError("can't set attribute") + + def __delattr__(self, name): + raise AttributeError("can't delete attribute") + @property def _mapping(self): """Return a :class:`.RowMapping` for this :class:`.Row`. 
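A short sketch of the behavior the row.py changes above establish: :class:`.Row` no longer accepts attribute assignment, and dictionary-style access goes through the ``._mapping`` view; the SELECT shown is illustrative only::

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")

    with engine.connect() as conn:
        row = conn.execute(text("SELECT 1 AS x, 'a' AS y")).first()

        assert row.x == 1 and row[1] == "a"  # attribute / positional access
        assert row._mapping["y"] == "a"      # mapping-style access

        try:
            row.x = 10                       # __setattr__ now raises
        except AttributeError:
            pass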
@@ -269,10 +282,11 @@ def __getstate__(self): } def __setstate__(self, state): - self._parent = parent = state["_parent"] - self._data = state["_data"] - self._keymap = parent._keymap - self._key_style = state["_key_style"] + parent = state["_parent"] + object.__setattr__(self, "_parent", parent) + object.__setattr__(self, "_data", state["_data"]) + object.__setattr__(self, "_keymap", parent._keymap) + object.__setattr__(self, "_key_style", state["_key_style"]) def _op(self, other, op): return ( @@ -538,7 +552,8 @@ def __ne__(self, other): class RowMapping(BaseRow, collections_abc.Mapping): - """A ``Mapping`` that maps column names and objects to :class:`.Row` values. + """A ``Mapping`` that maps column names and objects to :class:`.Row` + values. The :class:`.RowMapping` is available from a :class:`.Row` via the :attr:`.Row._mapping` attribute, as well as from the iterable interface diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index bda1c7fae9e..728eb83da21 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 488f7395270..0582d22b0b8 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -42,14 +42,21 @@ class URL( """ Represent the components of a URL used to connect to a database. - This object is suitable to be passed directly to a - :func:`_sa.create_engine` call. The fields of the URL are parsed - from a string by the :func:`.make_url` function. The string - format of the URL is an RFC-1738-style string. - - To create a new :class:`_engine.URL` object, use the - :func:`_engine.url.make_url` function. To construct a :class:`_engine.URL` - programmatically, use the :meth:`_engine.URL.create` constructor. + URLs are typically constructed from a fully formatted URL string, where the + :func:`.make_url` function is used internally by the + :func:`_sa.create_engine` function in order to parse the URL string into + its individual components, which are then used to construct a new + :class:`.URL` object. When parsing from a formatted URL string, the parsing + format generally follows + `RFC-1738 `_, with some exceptions. + + A :class:`_engine.URL` object may also be produced directly, either by + using the :func:`.make_url` function with a fully formed URL string, or + by using the :meth:`_engine.URL.create` constructor in order + to construct a :class:`_engine.URL` programmatically given individual + fields. The resulting :class:`.URL` object may be passed directly to + :func:`_sa.create_engine` in place of a string argument, which will bypass + the usage of :func:`.make_url` within the engine's creation process. .. versionchanged:: 1.4 @@ -67,13 +74,7 @@ class URL( * :attr:`_engine.URL.drivername`: database backend and driver name, such as ``postgresql+psycopg2`` * :attr:`_engine.URL.username`: username string - * :attr:`_engine.URL.password`: password string, or object that includes - a ``__str__()`` method that produces a password. - - .. 
note:: A password-producing object will be stringified only - **once** per :class:`_engine.Engine` object. For dynamic password - generation per connect, see :ref:`engines_dynamic_tokens`. - + * :attr:`_engine.URL.password`: password string * :attr:`_engine.URL.host`: string hostname * :attr:`_engine.URL.port`: integer port number * :attr:`_engine.URL.database`: string database name @@ -182,7 +183,7 @@ def _str_dict(cls, dict_): return util.EMPTY_DICT def _assert_value(val): - if isinstance(val, str): + if isinstance(val, compat.string_types): return val elif isinstance(val, collections_abc.Sequence): return tuple(_assert_value(elem) for elem in val) @@ -527,12 +528,12 @@ def render_as_string(self, hide_password=True): """ s = self.drivername + "://" if self.username is not None: - s += _rfc_1738_quote(self.username) + s += _sqla_url_quote(self.username) if self.password is not None: s += ":" + ( "***" if hide_password - else _rfc_1738_quote(str(self.password)) + else _sqla_url_quote(str(self.password)) ) s += "@" if self.host is not None: @@ -560,6 +561,22 @@ def __str__(self): def __repr__(self): return self.render_as_string() + def __copy__(self): + return self.__class__.create( + self.drivername, + self.username, + self.password, + self.host, + self.port, + self.database, + # note this is an immutabledict of str-> str / tuple of str, + # also fully immutable. does not require deepcopy + self.query, + ) + + def __deepcopy__(self, memo): + return self.__copy__() + def __hash__(self): return hash(str(self)) @@ -707,17 +724,23 @@ def translate_connect_args(self, names=None, **kw): def make_url(name_or_url): """Given a string or unicode instance, produce a new URL instance. - The given string is parsed according to the RFC 1738 spec. If an - existing URL object is passed, just returns the object. + + The format of the URL generally follows `RFC-1738 + `_, with some exceptions, including + that underscores, and not dashes or periods, are accepted within the + "scheme" portion. + + If a :class:`.URL` object is passed, it is returned as is. 
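For reference, a short sketch of the two construction paths the docstrings above describe; the driver, credentials, and database names are placeholders::

    from sqlalchemy import create_engine
    from sqlalchemy.engine import make_url, URL

    # parse a fully formed URL string
    url = make_url("postgresql+psycopg2://scott:tiger@localhost:5432/test")

    # or build the equivalent URL programmatically from individual fields
    url = URL.create(
        drivername="postgresql+psycopg2",
        username="scott",
        password="tiger",
        host="localhost",
        port=5432,
        database="test",
    )

    # either form may be passed directly to create_engine()
    engine = create_engine(url)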
+ """ if isinstance(name_or_url, util.string_types): - return _parse_rfc1738_args(name_or_url) + return _parse_url(name_or_url) else: return name_or_url -def _parse_rfc1738_args(name): +def _parse_url(name): pattern = re.compile( r""" (?P[\w\+]+):// @@ -757,10 +780,10 @@ def _parse_rfc1738_args(name): components["query"] = query if components["username"] is not None: - components["username"] = _rfc_1738_unquote(components["username"]) + components["username"] = _sqla_url_unquote(components["username"]) if components["password"] is not None: - components["password"] = _rfc_1738_unquote(components["password"]) + components["password"] = _sqla_url_unquote(components["password"]) ipv4host = components.pop("ipv4host") ipv6host = components.pop("ipv6host") @@ -774,15 +797,15 @@ def _parse_rfc1738_args(name): else: raise exc.ArgumentError( - "Could not parse rfc1738 URL from string '%s'" % name + "Could not parse SQLAlchemy URL from string '%s'" % name ) -def _rfc_1738_quote(text): +def _sqla_url_quote(text): return re.sub(r"[:@/]", lambda m: "%%%X" % ord(m.group(0)), text) -def _rfc_1738_unquote(text): +def _sqla_url_unquote(text): return util.unquote(text) diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index 4f2e031ab74..1e4e24613c3 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -147,9 +147,9 @@ def _distill_params_20(params): elif isinstance( params, (tuple, dict, immutabledict), - # avoid abc.__instancecheck__ - # (collections_abc.Sequence, collections_abc.Mapping), - ): + # only do abc.__instancecheck__ for Mapping after we've checked + # for plain dictionaries and would otherwise raise + ) or isinstance(params, collections_abc.Mapping): return (params,), _no_kw else: raise exc.ArgumentError("mapping or sequence expected for parameters") @@ -171,6 +171,23 @@ def _transaction_is_active(self): def _transaction_is_closed(self): raise NotImplementedError() + def _rollback_can_be_called(self): + """indicates the object is in a state that is known to be acceptable + for rollback() to be called. + + This does not necessarily mean rollback() will succeed or not raise + an error, just that there is currently no state detected that indicates + rollback() would fail or emit warnings. + + It also does not mean that there's a transaction in progress, as + it is usually safe to call rollback() even if no transaction is + present. + + .. 
versionadded:: 1.4.28 + + """ + raise NotImplementedError() + def _get_subject(self): raise NotImplementedError() @@ -216,7 +233,8 @@ def __exit__(self, type_, value, traceback): self.commit() except: with util.safe_reraise(): - self.rollback() + if self._rollback_can_be_called(): + self.rollback() finally: if not out_of_band_exit: subject._trans_context_manager = self._outer_trans_ctx @@ -227,7 +245,8 @@ def __exit__(self, type_, value, traceback): if not self._transaction_is_closed(): self.close() else: - self.rollback() + if self._rollback_can_be_called(): + self.rollback() finally: if not out_of_band_exit: subject._trans_context_manager = self._outer_trans_ctx diff --git a/lib/sqlalchemy/event/__init__.py b/lib/sqlalchemy/event/__init__.py index 15aae8d6d76..3d06738db99 100644 --- a/lib/sqlalchemy/event/__init__.py +++ b/lib/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py index 5487c9f1afe..167e9e66da4 100644 --- a/lib/sqlalchemy/event/api.py +++ b/lib/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index a0c2992213c..fcbd5283786 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -118,14 +118,14 @@ def wrap_kw(*args, **kw): return wrap_kw - def insert(self, event_key, propagate): + def _do_insert_or_append(self, event_key, is_append): target = event_key.dispatch_target assert isinstance( target, type ), "Class-level Event targets must be classes." if not getattr(target, "_sa_propagate_class_events", True): raise exc.InvalidRequestError( - "Can't assign an event directly to the %s class" % target + "Can't assign an event directly to the %s class" % (target,) ) for cls in util.walk_subclasses(target): @@ -133,38 +133,28 @@ def insert(self, event_key, propagate): self.update_subclass(cls) else: if cls not in self._clslevel: - self._assign_cls_collection(cls) - self._clslevel[cls].appendleft(event_key._listen_fn) + self.update_subclass(cls) + if is_append: + self._clslevel[cls].append(event_key._listen_fn) + else: + self._clslevel[cls].appendleft(event_key._listen_fn) registry._stored_in_collection(event_key, self) - def append(self, event_key, propagate): - target = event_key.dispatch_target - assert isinstance( - target, type - ), "Class-level Event targets must be classes." 
- if not getattr(target, "_sa_propagate_class_events", True): - raise exc.InvalidRequestError( - "Can't assign an event directly to the %s class" % target - ) - for cls in util.walk_subclasses(target): - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._assign_cls_collection(cls) - self._clslevel[cls].append(event_key._listen_fn) - registry._stored_in_collection(event_key, self) + def insert(self, event_key, propagate): + self._do_insert_or_append(event_key, is_append=False) - def _assign_cls_collection(self, target): - if getattr(target, "_sa_propagate_class_events", True): - self._clslevel[target] = collections.deque() - else: - self._clslevel[target] = _empty_collection() + def append(self, event_key, propagate): + self._do_insert_or_append(event_key, is_append=True) def update_subclass(self, target): if target not in self._clslevel: - self._assign_cls_collection(target) + if getattr(target, "_sa_propagate_class_events", True): + self._clslevel[target] = collections.deque() + else: + self._clslevel[target] = _empty_collection() + clslevel = self._clslevel[target] + for cls in target.__mro__[1:]: if cls in self._clslevel: clslevel.extend( @@ -173,6 +163,7 @@ def update_subclass(self, target): def remove(self, event_key): target = event_key.dispatch_target + for cls in util.walk_subclasses(target): if cls in self._clslevel: self._clslevel[cls].remove(event_key._listen_fn) @@ -268,13 +259,25 @@ def __bool__(self): class _CompoundListener(_InstanceLevelDispatch): - __slots__ = "_exec_once_mutex", "_exec_once", "_exec_w_sync_once" + __slots__ = ( + "_exec_once_mutex", + "_exec_once", + "_exec_w_sync_once", + "_is_asyncio", + ) + + def __init__(self, *arg, **kw): + super(_CompoundListener, self).__init__(*arg, **kw) + self._is_asyncio = False def _set_asyncio(self): - self._exec_once_mutex = AsyncAdaptedLock() + self._is_asyncio = True def _memoized_attr__exec_once_mutex(self): - return threading.Lock() + if self._is_asyncio: + return AsyncAdaptedLock() + else: + return threading.Lock() def _exec_once_impl(self, retry_on_exception, *args, **kw): with self._exec_once_mutex: @@ -374,6 +377,7 @@ class _ListenerCollection(_CompoundListener): ) def __init__(self, parent, target_cls): + super(_ListenerCollection, self).__init__() if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self._exec_once = False @@ -410,6 +414,9 @@ def _update(self, other, only_propagate=True): existing_listeners.extend(other_listeners) + if other._is_asyncio: + self._set_asyncio() + to_associate = other.propagate.union(other_listeners) registry._stored_in_collection_multi(self, other, to_associate) diff --git a/lib/sqlalchemy/event/base.py b/lib/sqlalchemy/event/base.py index f8cbfbd7f62..76bb046827e 100644 --- a/lib/sqlalchemy/event/base.py +++ b/lib/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index 0dbf695048f..06a0ad77e86 100644 --- a/lib/sqlalchemy/event/legacy.py +++ b/lib/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -144,9 
+144,9 @@ def _legacy_listen_examples(dispatch_collection, sample_target, fn): def _version_signature_changes(parent_dispatch_cls, dispatch_collection): since, args, conv = dispatch_collection.legacy_signatures[0] return ( - "\n.. deprecated:: %(since)s\n" - " The :class:`.%(clsname)s.%(event_name)s` event now accepts the \n" - " arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n" + "\n.. versionchanged:: %(since)s\n" + " The :meth:`.%(clsname)s.%(event_name)s` event now accepts the \n" + " arguments %(named_event_arguments)s%(has_kw_arguments)s.\n" " Support for listener functions which accept the previous \n" ' argument signature(s) listed above as "deprecated" will be \n' " removed in a future release." @@ -154,7 +154,15 @@ def _version_signature_changes(parent_dispatch_cls, dispatch_collection): "since": since, "clsname": parent_dispatch_cls.__name__, "event_name": dispatch_collection.name, - "named_event_arguments": ", ".join(dispatch_collection.arg_names), + "named_event_arguments": ", ".join( + ":paramref:`.%(clsname)s.%(event_name)s.%(param_name)s`" + % { + "clsname": parent_dispatch_cls.__name__, + "event_name": dispatch_collection.name, + "param_name": param_name, + } + for param_name in dispatch_collection.arg_names + ), "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", } ) diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py index ca85f33684e..a27e345205d 100644 --- a/lib/sqlalchemy/event/registry.py +++ b/lib/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index 8c0c5ff8d59..8ecd54e0918 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py index 7fa77120c65..e3789afad76 100644 --- a/lib/sqlalchemy/exc.py +++ b/lib/sqlalchemy/exc.py @@ -1,5 +1,5 @@ -# sqlalchemy/exc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# exc.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/__init__.py b/lib/sqlalchemy/ext/__init__.py index a4a9b34ab0c..2751bcf938a 100644 --- a/lib/sqlalchemy/ext/__init__.py +++ b/lib/sqlalchemy/ext/__init__.py @@ -1,5 +1,5 @@ # ext/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index dd5c10ac956..d4ebf5250d7 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -383,6 +383,22 @@ def for_proxy(cls, parent, owning_class, parent_instance): return 
AmbiguousAssociationProxyInstance( parent, owning_class, target_class, value_attr ) + except Exception as err: + util.raise_( + exc.InvalidRequestError( + "Association proxy received an unexpected error when " + "trying to retrieve attribute " + '"%s.%s" from ' + 'class "%s": %s' + % ( + target_class.__name__, + parent.value_attr, + target_class.__name__, + err, + ) + ), + from_=err, + ) else: return cls._construct_for_assoc( target_assoc, parent, owning_class, target_class, value_attr diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index ac3b905c615..08132be17e5 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -1,10 +1,11 @@ # ext/asyncio/__init__.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +from .engine import async_engine_from_config from .engine import AsyncConnection from .engine import AsyncEngine from .engine import AsyncTransaction diff --git a/lib/sqlalchemy/ext/asyncio/base.py b/lib/sqlalchemy/ext/asyncio/base.py index 3f77f55007e..2b9798de487 100644 --- a/lib/sqlalchemy/ext/asyncio/base.py +++ b/lib/sqlalchemy/ext/asyncio/base.py @@ -1,3 +1,9 @@ +# ext/asyncio/base.py +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import abc import functools import weakref diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index bfaaea4d92e..0f3f299e5aa 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -1,12 +1,15 @@ # ext/asyncio/engine.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +import asyncio + from . import exc as async_exc from .base import ProxyComparable from .base import StartableContext +from .result import _ensure_sync_result from .result import AsyncResult from ... import exc from ... import inspection @@ -41,6 +44,29 @@ def create_async_engine(*arg, **kw): return AsyncEngine(sync_engine) +def async_engine_from_config(configuration, prefix="sqlalchemy.", **kwargs): + """Create a new AsyncEngine instance using a configuration dictionary. + + This function is analogous to the :func:`_sa.engine_from_config` function + in SQLAlchemy Core, except that the requested dialect must be an + asyncio-compatible dialect such as :ref:`dialect-postgresql-asyncpg`. + The argument signature of the function is identical to that + of :func:`_sa.engine_from_config`. + + ..
versionadded:: 1.4.29 + + """ + options = { + key[len(prefix) :]: value + for key, value in configuration.items() + if key.startswith(prefix) + } + options["_coerce_config"] = True + options.update(kwargs) + url = options.pop("url") + return create_async_engine(url, **options) + + class AsyncConnectable: __slots__ = "_slots_dispatch", "__weakref__" @@ -357,15 +383,8 @@ async def exec_driver_sql( execution_options, _require_await=True, ) - if result.context._is_server_side: - raise async_exc.AsyncMethodRequired( - "Can't use the connection.exec_driver_sql() method with a " - "server-side cursor." - "Use the connection.stream() method for an async " - "streaming result set." - ) - return result + return await _ensure_sync_result(result, self.exec_driver_sql) async def stream( self, @@ -438,14 +457,7 @@ async def execute( execution_options, _require_await=True, ) - if result.context._is_server_side: - raise async_exc.AsyncMethodRequired( - "Can't use the connection.execute() method with a " - "server-side cursor." - "Use the connection.stream() method for an async " - "streaming result set." - ) - return result + return await _ensure_sync_result(result, self.execute) async def scalar( self, @@ -539,7 +551,8 @@ def __await__(self): return self.start().__await__() async def __aexit__(self, type_, value, traceback): - await self.close() + task = asyncio.get_event_loop().create_task(self.close()) + await asyncio.shield(task) @util.create_proxy_methods( @@ -590,8 +603,12 @@ async def start(self, is_ctxmanager=False): return self.conn async def __aexit__(self, type_, value, traceback): - await self.transaction.__aexit__(type_, value, traceback) - await self.conn.close() + async def go(): + await self.transaction.__aexit__(type_, value, traceback) + await self.conn.close() + + task = asyncio.get_event_loop().create_task(go()) + await asyncio.shield(task) def __init__(self, sync_engine): if not sync_engine.dialect.is_async: @@ -688,7 +705,7 @@ async def dispose(self): """ - return await greenlet_spawn(self.sync_engine.dispose) + await greenlet_spawn(self.sync_engine.dispose) class AsyncTransaction(ProxyComparable, StartableContext): diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index e3d8456908c..dcd3ee513ab 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -1,5 +1,5 @@ # ext/asyncio/events.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -16,21 +16,29 @@ class AsyncConnectionEvents(engine_event.ConnectionEvents): _dispatch_target = AsyncConnectable @classmethod - def _listen(cls, event_key, retval=False): + def _no_async_engine_events(cls): raise NotImplementedError( "asynchronous events are not implemented at this time. Apply " "synchronous listeners to the AsyncEngine.sync_engine or " "AsyncConnection.sync_connection attributes." ) + @classmethod + def _listen(cls, event_key, retval=False): + cls._no_async_engine_events() + class AsyncSessionEvents(orm_event.SessionEvents): _target_class_doc = "SomeSession" _dispatch_target = AsyncSession @classmethod - def _listen(cls, event_key, retval=False): + def _no_async_engine_events(cls): raise NotImplementedError( "asynchronous events are not implemented at this time. Apply " "synchronous listeners to the AsyncSession.sync_session." 
) + + @classmethod + def _listen(cls, event_key, retval=False): + cls._no_async_engine_events() diff --git a/lib/sqlalchemy/ext/asyncio/exc.py b/lib/sqlalchemy/ext/asyncio/exc.py index fc53f5c4b8b..558187c0b41 100644 --- a/lib/sqlalchemy/ext/asyncio/exc.py +++ b/lib/sqlalchemy/ext/asyncio/exc.py @@ -1,5 +1,5 @@ # ext/asyncio/exc.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index dff87a569dd..70d027f2502 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -1,5 +1,5 @@ # ext/asyncio/result.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -7,10 +7,12 @@ import operator +from . import exc as async_exc from ...engine.result import _NO_ROW +from ...engine.result import _WithKeys from ...engine.result import FilterResult from ...engine.result import FrozenResult -from ...engine.result import MergedResult +from ...sql.base import _generative from ...util.concurrency import greenlet_spawn @@ -21,7 +23,7 @@ async def close(self): await greenlet_spawn(self._real_result.close) -class AsyncResult(AsyncCommon): +class AsyncResult(_WithKeys, AsyncCommon): """An asyncio wrapper around a :class:`_result.Result` object. The :class:`_asyncio.AsyncResult` only applies to statement executions that @@ -55,13 +57,7 @@ def __init__(self, real_result): "_row_getter", real_result.__dict__["_row_getter"] ) - def keys(self): - """Return the :meth:`_engine.Result.keys` collection from the - underlying :class:`_engine.Result`. - - """ - return self._metadata.keys - + @_generative def unique(self, strategy=None): """Apply unique filtering to the objects returned by this :class:`_asyncio.AsyncResult`. @@ -69,10 +65,8 @@ def unique(self, strategy=None): Refer to :meth:`_engine.Result.unique` in the synchronous SQLAlchemy API for a complete behavioral description. - """ self._unique_filter_state = (set(), strategy) - return self def columns(self, *col_expressions): r"""Establish the columns that should be returned in each row. @@ -80,7 +74,6 @@ def columns(self, *col_expressions): Refer to :meth:`_engine.Result.columns` in the synchronous SQLAlchemy API for a complete behavioral description. - """ return self._column_slices(col_expressions) @@ -95,9 +88,8 @@ async def scroll_results(connection): async for partition in result.partitions(100): print("list of rows: %s" % partition) - .. seealso:: - - :meth:`_engine.Result.partitions` + Refer to :meth:`_engine.Result.partitions` in the synchronous + SQLAlchemy API for a complete behavioral description. """ @@ -119,11 +111,11 @@ async def fetchone(self): SQLAlchemy 1.x.x. To fetch the first row of a result only, use the - :meth:`_engine.Result.first` method. To iterate through all - rows, iterate the :class:`_engine.Result` object directly. + :meth:`_asyncio.AsyncResult.first` method. To iterate through all + rows, iterate the :class:`_asyncio.AsyncResult` object directly. - :return: a :class:`.Row` object if no filters are applied, or None - if no rows remain. + :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. 
""" row = await greenlet_spawn(self._onerow_getter, self) @@ -143,7 +135,7 @@ async def fetchmany(self, size=None): To fetch rows in groups, use the :meth:`._asyncio.AsyncResult.partitions` method. - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. .. seealso:: @@ -159,7 +151,7 @@ async def all(self): Closes the result set after invocation. Subsequent invocations will return an empty list. - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. """ @@ -176,17 +168,30 @@ async def __anext__(self): return row async def first(self): - """Fetch the first row or None if no row is present. + """Fetch the first row or ``None`` if no row is present. Closes the result set and discards remaining rows. - .. note:: This method returns one **row**, e.g. tuple, by default. To - return exactly one single scalar value, that is, the first column of - the first row, use the :meth:`_asyncio.AsyncResult.scalar` method, + .. note:: This method returns one **row**, e.g. tuple, by default. + To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_asyncio.AsyncResult.scalar` method, or combine :meth:`_asyncio.AsyncResult.scalars` and :meth:`_asyncio.AsyncResult.first`. - :return: a :class:`.Row` object, or None + Additionally, in contrast to the behavior of the legacy ORM + :meth:`_orm.Query.first` method, **no limit is applied** to the + SQL query which was invoked to produce this + :class:`_asyncio.AsyncResult`; + for a DBAPI driver that buffers results in memory before yielding + rows, all rows will be sent to the Python process and all but + the first row will be discarded. + + .. seealso:: + + :ref:`migration_20_unify_select` + + :return: a :class:`_engine.Row` object, or None if no rows remain. .. seealso:: @@ -207,7 +212,8 @@ async def one_or_none(self): .. versionadded:: 1.4 - :return: The first :class:`.Row` or None if no row is available. + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. :raises: :class:`.MultipleResultsFound` @@ -236,7 +242,7 @@ async def scalar_one(self): return await greenlet_spawn(self._only_one_row, True, True, True) async def scalar_one_or_none(self): - """Return exactly one or no scalar result. + """Return exactly one scalar result or ``None``. This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and then :meth:`_asyncio.AsyncResult.one_or_none`. @@ -266,7 +272,7 @@ async def one(self): .. versionadded:: 1.4 - :return: The first :class:`.Row`. + :return: The first :class:`_engine.Row`. :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` @@ -284,7 +290,7 @@ async def one(self): async def scalar(self): """Fetch the first column of the first row, and close the result set. - Returns None if there are no rows to fetch. + Returns ``None`` if there are no rows to fetch. No validation is performed to test if additional rows remain. @@ -292,7 +298,7 @@ async def scalar(self): e.g. the :meth:`_engine.CursorResult.close` method will have been called. - :return: a Python scalar value , or None if no rows remain. + :return: a Python scalar value, or ``None`` if no rows remain. """ return await greenlet_spawn(self._only_one_row, False, False, True) @@ -320,22 +326,6 @@ async def freeze(self): return await greenlet_spawn(FrozenResult, self) - def merge(self, *others): - """Merge this :class:`_asyncio.AsyncResult` with other compatible result - objects. 
- - The object returned is an instance of :class:`_engine.MergedResult`, - which will be composed of iterators from the given result - objects. - - The new result will use the metadata from this result object. - The subsequent result objects must be against an identical - set of result / cursor metadata, otherwise the behavior is - undefined. - - """ - return MergedResult(self._metadata, (self,) + others) - def scalars(self, index=0): """Return an :class:`_asyncio.AsyncScalarResult` filtering object which will return single elements rather than :class:`_row.Row` objects. @@ -357,10 +347,8 @@ def mappings(self): :class:`_asyncio.AsyncMappingResult`. When this filter is applied, fetching rows will return - :class:`.RowMapping` objects instead of :class:`.Row` objects. - - Refer to :meth:`_result.Result.mappings` in the synchronous - SQLAlchemy API for a complete behavioral description. + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. :return: a new :class:`_asyncio.AsyncMappingResult` filtering object referring to the underlying :class:`_result.Result` object. @@ -412,7 +400,7 @@ async def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -435,7 +423,7 @@ async def fetchmany(self, size=None): """Fetch many objects. Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -445,7 +433,7 @@ async def all(self): """Return all scalar values in a list. Equivalent to :meth:`_asyncio.AsyncResult.all` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -462,10 +450,10 @@ async def __anext__(self): return row async def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. Equivalent to :meth:`_asyncio.AsyncResult.first` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -475,7 +463,7 @@ async def one_or_none(self): """Return at most one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -485,16 +473,16 @@ async def one(self): """Return exactly one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ return await greenlet_spawn(self._only_one_row, True, True, False) -class AsyncMappingResult(AsyncCommon): - """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary values - rather than :class:`_engine.Row` values. +class AsyncMappingResult(_WithKeys, AsyncCommon): + """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary + values rather than :class:`_engine.Row` values. The :class:`_asyncio.AsyncMappingResult` object is acquired by calling the :meth:`_asyncio.AsyncResult.mappings` method. 
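A rough usage sketch of the ``async_engine_from_config()`` helper added to ``ext/asyncio/engine.py`` above; the ``aiosqlite`` URL and the extra option keys are assumptions made only for illustration::

    from sqlalchemy.ext.asyncio import async_engine_from_config

    configuration = {
        "sqlalchemy.url": "sqlite+aiosqlite:///example.db",
        "sqlalchemy.echo": "true",   # string values are coerced as in engine_from_config
        "unrelated.key": "ignored",  # keys without the prefix are skipped
    }

    # keys starting with "sqlalchemy." have the prefix stripped and are
    # passed to create_async_engine(); a "url" key must be present
    engine = async_engine_from_config(configuration, prefix="sqlalchemy.")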
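The ``_no_async_engine_events()`` refactor in ``ext/asyncio/events.py`` above keeps the existing behavior of rejecting listeners registered on the async facades; as the error text suggests, listeners go on the wrapped synchronous objects instead. A minimal sketch, assuming an ``asyncpg`` URL::

    from sqlalchemy import event
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

    # event.listen() on the AsyncEngine itself raises NotImplementedError;
    # the listener is registered on the underlying sync Engine instead
    @event.listens_for(engine.sync_engine, "connect")
    def on_connect(dbapi_connection, connection_record):
        print("new DBAPI connection")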
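The revised docstrings for :class:`_asyncio.AsyncResult`, :class:`_asyncio.AsyncScalarResult` and :class:`_asyncio.AsyncMappingResult` in ``ext/asyncio/result.py`` describe the same filtering chain as the synchronous API; a short sketch, where the ``asyncpg`` URL and the ``user_account`` table are assumptions::

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

    async def iterate_results():
        async with engine.connect() as conn:
            result = await conn.stream(text("SELECT id, name FROM user_account"))

            # .mappings() yields RowMapping objects instead of Row objects
            async for row in result.mappings():
                print(row["name"])

        async with engine.connect() as conn:
            result = await conn.stream(text("SELECT id FROM user_account"))

            # .scalars() yields plain values; .partitions() groups them
            async for chunk in result.scalars().partitions(100):
                print(chunk)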
@@ -517,21 +505,6 @@ def __init__(self, result): if result._source_supports_scalars: self._metadata = self._metadata._reduce([0]) - def keys(self): - """Return an iterable view which yields the string keys that would - be represented by each :class:`.Row`. - - The view also can be tested for key containment using the Python - ``in`` operator, which will test both for the string keys represented - in the view, as well as for alternate keys such as column objects. - - .. versionchanged:: 1.4 a key view object is returned rather than a - plain list. - - - """ - return self._metadata.keys - def unique(self, strategy=None): """Apply unique filtering to the objects returned by this :class:`_asyncio.AsyncMappingResult`. @@ -550,8 +523,8 @@ async def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -573,8 +546,8 @@ async def fetchone(self): """Fetch one object. Equivalent to :meth:`_asyncio.AsyncResult.fetchone` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -588,8 +561,8 @@ async def fetchmany(self, size=None): """Fetch many objects. Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -599,8 +572,8 @@ async def all(self): """Return all scalar values in a list. Equivalent to :meth:`_asyncio.AsyncResult.all` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -617,12 +590,11 @@ async def __anext__(self): return row async def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. Equivalent to :meth:`_asyncio.AsyncResult.first` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. - + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return await greenlet_spawn(self._only_one_row, False, False, False) @@ -631,8 +603,8 @@ async def one_or_none(self): """Return at most one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return await greenlet_spawn(self._only_one_row, True, False, False) @@ -641,8 +613,29 @@ async def one(self): """Return exactly one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. 
""" return await greenlet_spawn(self._only_one_row, True, True, False) + + +async def _ensure_sync_result(result, calling_method): + if not result._is_cursor: + cursor_result = getattr(result, "raw", None) + else: + cursor_result = result + if cursor_result and cursor_result.context._is_server_side: + await greenlet_spawn(cursor_result.close) + raise async_exc.AsyncMethodRequired( + "Can't use the %s.%s() method with a " + "server-side cursor. " + "Use the %s.stream() method for an async " + "streaming result set." + % ( + calling_method.__self__.__class__.__name__, + calling_method.__name__, + calling_method.__self__.__class__.__name__, + ) + ) + return result diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 4e7f15c1fda..d2df303868a 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -1,5 +1,5 @@ # ext/asyncio/scoping.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -36,6 +36,7 @@ "get", "get_bind", "is_modified", + "invalidate", "merge", "refresh", "rollback", @@ -84,7 +85,7 @@ def __init__(self, session_factory, scopefunc): the current scope. A function such as ``asyncio.current_task`` may be useful here. - """ # noqa E501 + """ # noqa: E501 self.session_factory = session_factory self.registry = ScopedRegistry(session_factory, scopefunc) diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index d2c96905617..b1b63ca8d1c 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -1,13 +1,16 @@ # ext/asyncio/session.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +import asyncio + from . import engine from . import result as _result from .base import ReversibleProxy from .base import StartableContext +from .result import _ensure_sync_result from ... import util from ...orm import object_session from ...orm import Session @@ -32,7 +35,6 @@ "expire_all", "expunge", "expunge_all", - "get_bind", "is_modified", "in_transaction", "in_nested_transaction", @@ -209,7 +211,7 @@ async def execute( else: execution_options = _EXECUTE_OPTIONS - return await greenlet_spawn( + result = await greenlet_spawn( self.sync_session.execute, statement, params=params, @@ -217,6 +219,7 @@ async def execute( bind_arguments=bind_arguments, **kw ) + return await _ensure_sync_result(result, self.execute) async def scalar( self, @@ -255,7 +258,10 @@ async def scalars( :return: a :class:`_result.ScalarResult` object - .. versionadded:: 1.4.24 + .. versionadded:: 1.4.24 Added :meth:`_asyncio.AsyncSession.scalars` + + .. versionadded:: 1.4.26 Added + :meth:`_asyncio.async_scoped_session.scalars` .. seealso:: @@ -311,7 +317,9 @@ async def stream( **kw ): """Execute a statement and return a streaming - :class:`_asyncio.AsyncResult` object.""" + :class:`_asyncio.AsyncResult` object. 
+ + """ if execution_options: execution_options = util.immutabledict(execution_options).union( @@ -430,6 +438,84 @@ def get_nested_transaction(self): else: return None + def get_bind(self, mapper=None, clause=None, bind=None, **kw): + """Return a "bind" to which the synchronous proxied :class:`_orm.Session` + is bound. + + Unlike the :meth:`_orm.Session.get_bind` method, this method is + currently **not** used by this :class:`.AsyncSession` in any way + in order to resolve engines for requests. + + .. note:: + + This method proxies directly to the :meth:`_orm.Session.get_bind` + method, however is currently **not** useful as an override target, + in contrast to that of the :meth:`_orm.Session.get_bind` method. + The example below illustrates how to implement custom + :meth:`_orm.Session.get_bind` schemes that work with + :class:`.AsyncSession` and :class:`.AsyncEngine`. + + The pattern introduced at :ref:`session_custom_partitioning` + illustrates how to apply a custom bind-lookup scheme to a + :class:`_orm.Session` given a set of :class:`_engine.Engine` objects. + To apply a corresponding :meth:`_orm.Session.get_bind` implementation + for use with a :class:`.AsyncSession` and :class:`.AsyncEngine` + objects, continue to subclass :class:`_orm.Session` and apply it to + :class:`.AsyncSession` using + :paramref:`.AsyncSession.sync_session_class`. The inner method must + continue to return :class:`_engine.Engine` instances, which can be + acquired from a :class:`_asyncio.AsyncEngine` using the + :attr:`_asyncio.AsyncEngine.sync_engine` attribute:: + + # using example from "Custom Vertical Partitioning" + + + import random + + from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.orm import Session, sessionmaker + + # construct async engines w/ async drivers + engines = { + 'leader':create_async_engine("sqlite+aiosqlite:///leader.db"), + 'other':create_async_engine("sqlite+aiosqlite:///other.db"), + 'follower1':create_async_engine("sqlite+aiosqlite:///follower1.db"), + 'follower2':create_async_engine("sqlite+aiosqlite:///follower2.db"), + } + + class RoutingSession(Session): + def get_bind(self, mapper=None, clause=None, **kw): + # within get_bind(), return sync engines + if mapper and issubclass(mapper.class_, MyOtherClass): + return engines['other'].sync_engine + elif self._flushing or isinstance(clause, (Update, Delete)): + return engines['leader'].sync_engine + else: + return engines[ + random.choice(['follower1','follower2']) + ].sync_engine + + # apply to AsyncSession using sync_session_class + AsyncSessionMaker = sessionmaker( + class_=AsyncSession, + sync_session_class=RoutingSession + ) + + The :meth:`_orm.Session.get_bind` method is called in a non-asyncio, + implicitly non-blocking context in the same manner as ORM event hooks + and functions that are invoked via :meth:`.AsyncSession.run_sync`, so + routines that wish to run SQL commands inside of + :meth:`_orm.Session.get_bind` can continue to do so using + blocking-style code, which will be translated to implicitly async calls + at the point of invoking IO on the database drivers. + + """ # noqa: E501 + + return self.sync_session.get_bind( + mapper=mapper, clause=clause, bind=bind, **kw + ) + async def connection(self, **kw): r"""Return a :class:`_asyncio.AsyncConnection` object corresponding to this :class:`.Session` object's transactional state. 
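The ``_ensure_sync_result()`` helper used above is what raises ``AsyncMethodRequired`` when a buffered ``execute()`` call meets a server-side cursor. A sketch of the two calling styles on :class:`_asyncio.AsyncSession`, assuming an ``asyncpg`` URL and the small mapped class defined inline::

    from sqlalchemy import Column, Integer, String, select
    from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user_account"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

    async def read_users():
        async with AsyncSession(engine) as session:
            # execute() buffers rows up front; combining it with
            # stream_results / server-side cursors now raises
            # AsyncMethodRequired rather than returning an unusable result
            result = await session.execute(select(User).limit(10))
            print(len(result.scalars().all()))

            # stream() returns an AsyncResult that is iterated incrementally
            result = await session.stream(select(User))
            async for user in result.scalars():
                print(user.name)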
@@ -437,8 +523,8 @@ async def connection(self, **kw): This method may also be used to establish execution options for the database connection used by the current transaction. - .. versionadded:: 1.4.24 Added **kw arguments which are passed through - to the underlying :meth:`_orm.Session.connection` method. + .. versionadded:: 1.4.24 Added \**kw arguments which are passed + through to the underlying :meth:`_orm.Session.connection` method. .. seealso:: @@ -526,7 +612,14 @@ async def close(self): :meth:`_asyncio.AsyncSession.close` """ - return await greenlet_spawn(self.sync_session.close) + await greenlet_spawn(self.sync_session.close) + + async def invalidate(self): + """Close this Session, using connection invalidation. + + For a complete description, see :meth:`_orm.Session.invalidate`. + """ + return await greenlet_spawn(self.sync_session.invalidate) @classmethod async def close_all(self): @@ -537,7 +630,8 @@ async def __aenter__(self): return self async def __aexit__(self, type_, value, traceback): - await self.close() + task = asyncio.get_event_loop().create_task(self.close()) + await asyncio.shield(task) def _maker_context_manager(self): # no @contextlib.asynccontextmanager until python3.7, gr @@ -554,8 +648,12 @@ async def __aenter__(self): return self.async_session async def __aexit__(self, type_, value, traceback): - await self.trans.__aexit__(type_, value, traceback) - await self.async_session.__aexit__(type_, value, traceback) + async def go(): + await self.trans.__aexit__(type_, value, traceback) + await self.async_session.__aexit__(type_, value, traceback) + + task = asyncio.get_event_loop().create_task(go()) + await asyncio.shield(task) class AsyncSessionTransaction(ReversibleProxy, StartableContext): diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index 7cb2c4400b4..aae28eb5896 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -9,8 +9,6 @@ which automatically generates mapped classes and relationships from a database schema, typically though not necessarily one which is reflected. -.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. - It is hoped that the :class:`.AutomapBase` system provides a quick and modernized solution to the problem that the very famous `SQLSoup `_ @@ -21,6 +19,15 @@ a well-integrated approach to the issue of expediently auto-generating ad-hoc mappings. +.. tip:: The :ref:`automap_toplevel` extension is geared towards a + "zero declaration" approach, where a complete ORM model including classes + and pre-named relationships can be generated on the fly from a database + schema. For applications that still want to use explicit class declarations + including explicit relationship definitions in conjunction with reflection + of tables, the :class:`.DeferredReflection` class, described at + :ref:`orm_declarative_reflected_deferred_reflection`, is a better choice. + + Basic Use ========= @@ -41,7 +48,7 @@ engine = create_engine("sqlite:///mydatabase.db") # reflect the tables - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) # mapped classes are now created with names by default # matching that of the table name. @@ -121,6 +128,9 @@ Specifying Classes Explicitly ============================= +.. 
tip:: If explicit classes are expected to be prominent in an application, + consider using :class:`.DeferredReflection` instead. + The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined explicitly, in a way similar to that of the :class:`.DeferredReflection` class. Classes that extend from :class:`.AutomapBase` act like regular declarative @@ -151,7 +161,7 @@ class User(Base): # reflect engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) # we still have Address generated from the tablename "address", # but User is the same as Base.classes.User now @@ -215,7 +225,7 @@ def pluralize_collection(base, local_cls, referred_cls, constraint): engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, classname_for_table=camelize_classname, name_for_collection_relationship=pluralize_collection ) @@ -333,7 +343,7 @@ def _gen_relationship(base, direction, return_fn, Base = automap_base() engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, generate_relationship=_gen_relationship) Many-to-Many relationships @@ -464,7 +474,7 @@ def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): return name - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, name_for_scalar_relationship=name_for_scalar_relationship) Alternatively, we can change the name on the column side. The columns @@ -478,7 +488,7 @@ class TableB(Base): __tablename__ = 'table_b' _table_a = Column('table_a', ForeignKey('table_a.id')) - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) Using Automap with Explicit Declarations @@ -547,7 +557,7 @@ def column_reflect(inspector, table, column_info): column_info['key'] = "attr_%s" % column_info['name'].lower() # run reflection - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) .. versionadded:: 1.4.0b2 the :meth:`_events.DDLEvents.column_reflect` event may be applied to a :class:`_schema.MetaData` object. 
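For the ``AsyncSession.invalidate()`` method and the ``asyncio.shield()``-protected ``__aexit__`` added to ``ext/asyncio/session.py`` above, one intended use is cleanup when a task is cancelled mid-operation; the engine URL and the error-handling shape below are assumptions for the sketch::

    import asyncio

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine

    engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

    async def do_work():
        session = AsyncSession(engine)
        try:
            await session.execute(text("SELECT 1"))
            await session.commit()
        except asyncio.CancelledError:
            # discard the session's connections via invalidation instead of
            # running the normal pool release sequence
            await session.invalidate()
            raise
        else:
            await session.close()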
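Relating to the ``Base.prepare(autoload_with=engine)`` calling style used throughout the revised automap examples above, a minimal end-to-end sketch, assuming a SQLite database that already contains a ``user`` table::

    from sqlalchemy import create_engine
    from sqlalchemy.ext.automap import automap_base
    from sqlalchemy.orm import Session

    Base = automap_base()
    engine = create_engine("sqlite:///mydatabase.db")

    # reflect the schema and generate mapped classes; this replaces the
    # older Base.prepare(engine, reflect=True) calling convention
    Base.prepare(autoload_with=engine)

    # generated classes are named after their tables by default
    User = Base.classes.user

    with Session(engine) as session:
        print(session.query(User).count())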
@@ -743,7 +753,7 @@ class that is produced by the :func:`.declarative.declarative_base` are present under the name they were given, e.g.:: Base = automap_base() - Base.prepare(engine=some_engine, reflect=True) + Base.prepare(autoload_with=some_engine) User, Address = Base.classes.User, Base.classes.Address diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py index 61328fce95c..0c9eb7b8c5b 100644 --- a/lib/sqlalchemy/ext/baked.py +++ b/lib/sqlalchemy/ext/baked.py @@ -1,5 +1,5 @@ -# sqlalchemy/ext/baked.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# ext/baked.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index c7eb7cc323b..ffa9f09db58 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -18,7 +18,7 @@ from sqlalchemy.sql.expression import ColumnClause class MyColumn(ColumnClause): - pass + inherit_cache = True @compiles(MyColumn) def compile_mycolumn(element, compiler, **kw): @@ -47,6 +47,7 @@ def compile_mycolumn(element, compiler, **kw): from sqlalchemy.schema import DDLElement class AlterColumn(DDLElement): + inherit_cache = False def __init__(self, column, cmd): self.column = column @@ -64,6 +65,8 @@ def visit_alter_column(element, compiler, **kw): The second ``visit_alter_table`` will be invoked when any ``postgresql`` dialect is used. +.. _compilerext_compiling_subelements: + Compiling sub-elements of a custom expression construct ======================================================= @@ -78,6 +81,8 @@ def visit_alter_column(element, compiler, **kw): from sqlalchemy.sql.expression import Executable, ClauseElement class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + def __init__(self, table, select): self.table = table self.select = select @@ -252,6 +257,7 @@ def compile_varchar(element, compiler, **kw): class timestamp(ColumnElement): type = TIMESTAMP() + inherit_cache = True * :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a ``ColumnElement`` and a "from clause" like object, and represents a SQL @@ -264,6 +270,7 @@ class timestamp(ColumnElement): class coalesce(FunctionElement): name = 'coalesce' + inherit_cache = True @compiles(coalesce) def compile(element, compiler, **kw): @@ -275,18 +282,114 @@ def compile(element, compiler, **kw): raise TypeError("coalesce only supports two arguments on Oracle") return "nvl(%s)" % compiler.process(element.clauses, **kw) -* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions, - like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement`` - subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``. - ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the - ``execute_at()`` method, allowing the construct to be invoked during CREATE - TABLE and DROP TABLE sequences. +* :class:`.DDLElement` - The root of all DDL expressions, + like CREATE TABLE, ALTER TABLE, etc. Compilation of :class:`.DDLElement` + subclasses is issued by a :class:`.DDLCompiler` instead of a + :class:`.SQLCompiler`. 
:class:`.DDLElement` can also be used as an event hook + in conjunction with event hooks like :meth:`.DDLEvents.before_create` and + :meth:`.DDLEvents.after_create`, allowing the construct to be invoked + automatically during CREATE TABLE and DROP TABLE sequences. + + .. seealso:: + + :ref:`metadata_ddl_toplevel` - contains examples of associating + :class:`.DDL` objects (which are themselves :class:`.DDLElement` + instances) with :class:`.DDLEvents` event hooks. * :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be used with any expression class that represents a "standalone" SQL statement that can be passed directly to an ``execute()`` method. It is already implicit within ``DDLElement`` and ``FunctionElement``. +Most of the above constructs also respond to SQL statement caching. A +subclassed construct will want to define the caching behavior for the object, +which usually means setting the flag ``inherit_cache`` to the value of +``False`` or ``True``. See the next section :ref:`compilerext_caching` +for background. + + +.. _compilerext_caching: + +Enabling Caching Support for Custom Constructs +============================================== + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +equivalent SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement. + +For reasons discussed at :ref:`caching_caveats`, the implementation of this +caching system takes a conservative approach towards including custom SQL +constructs and/or subclasses within the caching system. This includes that +any user-defined SQL constructs, including all the examples for this +extension, will not participate in caching by default unless they positively +assert that they are able to do so. The :attr:`.HasCacheKey.inherit_cache` +attribute when set to ``True`` at the class level of a specific subclass +will indicate that instances of this class may be safely cached, using the +cache key generation scheme of the immediate superclass. This applies +for example to the "synopsis" example indicated previously:: + + class MyColumn(ColumnClause): + inherit_cache = True + + @compiles(MyColumn) + def compile_mycolumn(element, compiler, **kw): + return "[%s]" % element.name + +Above, the ``MyColumn`` class does not include any new state that +affects its SQL compilation; the cache key of ``MyColumn`` instances will +make use of that of the ``ColumnClause`` superclass, meaning it will take +into account the class of the object (``MyColumn``), the string name and +datatype of the object:: + + >>> MyColumn("some_name", String())._generate_cache_key() + CacheKey( + key=('0', , + 'name', 'some_name', + 'type', (, + ('length', None), ('collation', None)) + ), bindparams=[]) + +For objects that are likely to be **used liberally as components within many +larger statements**, such as :class:`_schema.Column` subclasses and custom SQL +datatypes, it's important that **caching be enabled as much as possible**, as +this may otherwise negatively affect performance. + +An example of an object that **does** contain state which affects its SQL +compilation is the one illustrated at :ref:`compilerext_compiling_subelements`; +this is an "INSERT FROM SELECT" construct that combines together a +:class:`_schema.Table` as well as a :class:`_sql.Select` construct, each of +which independently affect the SQL string generation of the construct. 
For +this class, the example illustrates that it simply does not participate in +caching:: + + class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + + def __init__(self, table, select): + self.table = table + self.select = select + + @compiles(InsertFromSelect) + def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s (%s)" % ( + compiler.process(element.table, asfrom=True, **kw), + compiler.process(element.select, **kw) + ) + +While it is also possible that the above ``InsertFromSelect`` could be made to +produce a cache key that is composed of that of the :class:`_schema.Table` and +:class:`_sql.Select` components together, the API for this is not at the moment +fully public. However, for an "INSERT FROM SELECT" construct, which is only +used by itself for specific operations, caching is not as critical as in the +previous example. + +For objects that are **used in relative isolation and are generally +standalone**, such as custom :term:`DML` constructs like an "INSERT FROM +SELECT", **caching is generally less critical** as the lack of caching for such +a construct will have only localized implications for that specific operation. + + Further Examples ================ @@ -309,6 +412,7 @@ def compile(element, compiler, **kw): class utcnow(expression.FunctionElement): type = DateTime() + inherit_cache = True @compiles(utcnow, 'postgresql') def pg_utcnow(element, compiler, **kw): @@ -345,6 +449,7 @@ def ms_utcnow(element, compiler, **kw): class greatest(expression.FunctionElement): type = Numeric() name = 'greatest' + inherit_cache = True @compiles(greatest) def default_greatest(element, compiler, **kw): @@ -376,7 +481,7 @@ def case_greatest(element, compiler, **kw): from sqlalchemy.ext.compiler import compiles class sql_false(expression.ColumnElement): - pass + inherit_cache = True @compiles(sql_false) def default_false(element, compiler, **kw): diff --git a/lib/sqlalchemy/ext/declarative/__init__.py b/lib/sqlalchemy/ext/declarative/__init__.py index b1c1d369123..afbce73a494 100644 --- a/lib/sqlalchemy/ext/declarative/__init__.py +++ b/lib/sqlalchemy/ext/declarative/__init__.py @@ -1,5 +1,5 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 1a12b1205fc..0da7cf3a6fb 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -1,5 +1,5 @@ # ext/declarative/extensions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -144,8 +144,8 @@ class AbstractConcreteBase(ConcreteBase): .. note:: - The :class:`.AbstractConcreteBase` class does not intend to set up the - mapping for the base class until all the subclasses have been defined, + The :class:`.AbstractConcreteBase` delays the mapper creation of the + base class until all the subclasses have been defined, as it needs to create a mapping against a selectable that will include all subclass tables. 
In order to achieve this, it waits for the **mapper configuration event** to occur, at which point it scans @@ -155,22 +155,20 @@ class AbstractConcreteBase(ConcreteBase): While this event is normally invoked automatically, in the case of :class:`.AbstractConcreteBase`, it may be necessary to invoke it explicitly after **all** subclass mappings are defined, if the first - operation is to be a query against this base class. To do so, invoke - :func:`.configure_mappers` once all the desired classes have been - configured:: - - from sqlalchemy.orm import configure_mappers - - configure_mappers() - - .. seealso:: - - :func:`_orm.configure_mappers` + operation is to be a query against this base class. To do so, once all + the desired classes have been configured, the + :meth:`_orm.registry.configure` method on the :class:`_orm.registry` + in use can be invoked, which is available in relation to a particular + declarative base class:: + Base.registry.configure() Example:: from sqlalchemy.ext.declarative import AbstractConcreteBase + from sqlalchemy.orm import declarative_base + + Base = declarative_base() class Employee(AbstractConcreteBase, Base): pass @@ -183,9 +181,10 @@ class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager', - 'concrete':True} + 'concrete':True + } - configure_mappers() + Base.registry.configure() The abstract base class is handled by declarative in a special way; at class configuration time, it behaves like a declarative mixin @@ -221,18 +220,17 @@ class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager', - 'concrete':True} + 'concrete':True + } - configure_mappers() + Base.registry.configure() When we make use of our mappings however, both ``Manager`` and ``Employee`` will have an independently usable ``.company`` attribute:: - session.query(Employee).filter(Employee.company.has(id=5)) - - .. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase` - have been reworked to support relationships established directly - on the abstract base, without any special configurational steps. + session.execute( + select(Employee).filter(Employee.company.has(id=5)) + ) .. seealso:: @@ -240,6 +238,8 @@ class Manager(Employee): :ref:`concrete_inheritance` + :ref:`abstract_concrete_base` + """ __no_table__ = True @@ -380,6 +380,11 @@ class YetAnotherClass(ReflectedTwo): ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) + .. seealso:: + + :ref:`orm_declarative_reflected_deferred_reflection` - in the + :ref:`orm_declarative_table_config_toplevel` section. + """ @classmethod diff --git a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py index 5f13ad26890..625e66be159 100644 --- a/lib/sqlalchemy/ext/horizontal_shard.py +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -1,5 +1,5 @@ # ext/horizontal_shard.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index eab3f2b7385..45407427781 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -1,5 +1,5 @@ # ext/hybrid.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -484,8 +484,12 @@ def word_insensitive(cls): ``lt``, ``gt``, etc.) 
using :meth:`.Operators.operate`:: class CaseInsensitiveComparator(Comparator): - def operate(self, op, other): - return op(func.lower(self.__clause_element__()), func.lower(other)) + def operate(self, op, other, **kwargs): + return op( + func.lower(self.__clause_element__()), + func.lower(other), + **kwargs, + ) .. _hybrid_reuse_subclass: @@ -575,10 +579,10 @@ def __init__(self, word): else: self.word = func.lower(word) - def operate(self, op, other): + def operate(self, op, other, **kwargs): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) - return op(self.word, other.word) + return op(self.word, other.word, **kwargs) def __clause_element__(self): return self.word @@ -706,12 +710,14 @@ def grandparent(self): from sqlalchemy.ext.hybrid import Comparator class GrandparentTransformer(Comparator): - def operate(self, op, other): + def operate(self, op, other, **kwargs): def transform(q): cls = self.__clause_element__() parent_alias = aliased(cls) - return q.join(parent_alias, cls.parent).\ - filter(op(parent_alias.parent, other)) + return q.join(parent_alias, cls.parent).filter( + op(parent_alias.parent, other, **kwargs) + ) + return transform Base = declarative_base() @@ -783,8 +789,8 @@ def go(q): return q.join(self.parent_alias, Node.parent) return go - def operate(self, op, other): - return op(self.parent_alias.parent, other) + def operate(self, op, other, **kwargs): + return op(self.parent_alias.parent, other, **kwargs) .. sourcecode:: pycon+sql diff --git a/lib/sqlalchemy/ext/indexable.py b/lib/sqlalchemy/ext/indexable.py index 313ad11af6e..d2ccafecedb 100644 --- a/lib/sqlalchemy/ext/indexable.py +++ b/lib/sqlalchemy/ext/indexable.py @@ -1,5 +1,5 @@ -# ext/index.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# ext/indexable.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/instrumentation.py b/lib/sqlalchemy/ext/instrumentation.py index 54f3e64c5d5..56741a23352 100644 --- a/lib/sqlalchemy/ext/instrumentation.py +++ b/lib/sqlalchemy/ext/instrumentation.py @@ -1,3 +1,9 @@ +# ext/instrumentation.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """Extensible class instrumentation. The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 4eed3b2afe2..ff4f82658a9 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -354,6 +354,7 @@ def __setstate__(self, state): :meth:`MutableBase._parents` collection is restored to all ``Point`` objects. """ +from collections import defaultdict import weakref from .. 
import event @@ -496,13 +497,27 @@ def pickle(state, state_dict): val = state.dict.get(key, None) if val is not None: if "ext.mutable.values" not in state_dict: - state_dict["ext.mutable.values"] = [] - state_dict["ext.mutable.values"].append(val) + state_dict["ext.mutable.values"] = defaultdict(list) + state_dict["ext.mutable.values"][key].append(val) def unpickle(state, state_dict): if "ext.mutable.values" in state_dict: - for val in state_dict["ext.mutable.values"]: - val._parents[state] = key + collection = state_dict["ext.mutable.values"] + if isinstance(collection, list): + # legacy format + for val in collection: + val._parents[state] = key + else: + for val in state_dict["ext.mutable.values"][key]: + val._parents[state] = key + + event.listen( + parent_cls, + "_sa_event_merge_wo_load", + load, + raw=True, + propagate=True, + ) event.listen(parent_cls, "load", load, raw=True, propagate=True) event.listen( diff --git a/lib/sqlalchemy/ext/mypy/__init__.py b/lib/sqlalchemy/ext/mypy/__init__.py index e69de29bb2d..b5827cb8d36 100644 --- a/lib/sqlalchemy/ext/mypy/__init__.py +++ b/lib/sqlalchemy/ext/mypy/__init__.py @@ -0,0 +1,6 @@ +# ext/mypy/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index cf5b4fda257..694c41c8bd6 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -1,4 +1,10 @@ # ext/mypy/apply.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # @@ -36,6 +42,7 @@ from . import infer from . import util +from .names import NAMED_TYPE_SQLA_MAPPED def apply_mypy_mapped_attr( @@ -134,7 +141,7 @@ def re_apply_declarative_assignments( and isinstance(stmt.rvalue.callee.expr, NameExpr) and stmt.rvalue.callee.expr.node is not None and stmt.rvalue.callee.expr.node.fullname - == "sqlalchemy.orm.attributes.Mapped" + == NAMED_TYPE_SQLA_MAPPED and stmt.rvalue.callee.name == "_empty_constructor" and isinstance(stmt.rvalue.args[0], CallExpr) and isinstance(stmt.rvalue.args[0].callee, RefExpr) @@ -163,9 +170,12 @@ def re_apply_declarative_assignments( update_cls_metadata = True - if python_type_for_type is not None: + if python_type_for_type is not None and ( + not isinstance(left_node.type, Instance) + or left_node.type.type.fullname != NAMED_TYPE_SQLA_MAPPED + ): left_node.type = api.named_type( - "__sa_Mapped", [python_type_for_type] + NAMED_TYPE_SQLA_MAPPED, [python_type_for_type] ) if update_cls_metadata: @@ -200,15 +210,23 @@ class User(Base): left_node = lvalue.node assert isinstance(left_node, Var) + # to be completely honest I have no idea what the difference between + # left_node.type and stmt.type is, what it means if these are different + # vs. the same, why in order to get tests to pass I have to assign + # to stmt.type for the second case and not the first. this is complete + # trying every combination until it works stuff. 
+ if left_hand_explicit_type is not None: left_node.type = api.named_type( - "__sa_Mapped", [left_hand_explicit_type] + NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] ) else: lvalue.is_inferred_def = False - left_node.type = api.named_type( - "__sa_Mapped", - [] if python_type_for_type is None else [python_type_for_type], + left_node.type = stmt.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, + [AnyType(TypeOfAny.special_form)] + if python_type_for_type is None + else [python_type_for_type], ) # so to have it skip the right side totally, we can do this: @@ -225,6 +243,11 @@ class User(Base): # internally stmt.rvalue = util.expr_to_mapped_constructor(stmt.rvalue) + if stmt.type is None or python_type_for_type is None: + stmt.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, [AnyType(TypeOfAny.special_form)] + ) + def add_additional_orm_attributes( cls: ClassDef, @@ -292,6 +315,7 @@ def _apply_placeholder_attr_to_class( else: type_ = AnyType(TypeOfAny.special_form) var = Var(attrname) + var._fullname = cls.fullname + "." + attrname var.info = cls.info var.type = type_ cls.info.names[attrname] = SymbolTableNode(MDEF, var) diff --git a/lib/sqlalchemy/ext/mypy/decl_class.py b/lib/sqlalchemy/ext/mypy/decl_class.py index b85ec0f699e..bd12c8d5ce5 100644 --- a/lib/sqlalchemy/ext/mypy/decl_class.py +++ b/lib/sqlalchemy/ext/mypy/decl_class.py @@ -1,4 +1,10 @@ # ext/mypy/decl_class.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # @@ -241,7 +247,20 @@ class MyClass: left_hand_explicit_type: Optional[ProperType] = None - if isinstance(stmt.func.type, CallableType): + if util.name_is_dunder(stmt.name): + # for dunder names like __table_args__, __tablename__, + # __mapper_args__ etc., rewrite these as simple assignment + # statements; otherwise mypy doesn't like if the decorated + # function has an annotation like ``cls: Type[Foo]`` because + # it isn't @classmethod + any_ = AnyType(TypeOfAny.special_form) + left_node = NameExpr(stmt.var.name) + left_node.node = stmt.var + new_stmt = AssignmentStmt([left_node], TempNode(any_)) + new_stmt.type = left_node.node.type + cls.defs.body[dec_index] = new_stmt + return + elif isinstance(stmt.func.type, CallableType): func_type = stmt.func.type.ret_type if isinstance(func_type, UnboundType): type_id = names.type_id_for_unbound_type(func_type, cls, api) @@ -314,7 +333,7 @@ class MyClass: ) left_node.node.type = api.named_type( - "__sa_Mapped", [left_hand_explicit_type] + names.NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] ) # this will ignore the rvalue entirely diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index 6d243b6ec1d..b68308d72e6 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -1,4 +1,10 @@ # ext/mypy/infer.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # @@ -9,11 +15,11 @@ from typing import Sequence from mypy.maptype import map_instance_to_supertype -from mypy.messages import format_type from mypy.nodes import AssignmentStmt from mypy.nodes import CallExpr from mypy.nodes import Expression from mypy.nodes import FuncDef +from 
mypy.nodes import LambdaExpr from mypy.nodes import MemberExpr from mypy.nodes import NameExpr from mypy.nodes import RefExpr @@ -147,7 +153,7 @@ class MyClass: type_is_a_collection = True if python_type_for_type is not None: python_type_for_type = api.named_type( - "__builtins__.list", [python_type_for_type] + names.NAMED_TYPE_BUILTINS_LIST, [python_type_for_type] ) elif ( uselist_arg is None or api.parse_bool(uselist_arg) is True @@ -387,6 +393,10 @@ class MyClass: elif isinstance(column_arg, (StrExpr,)): # x = Column("name", String), go to next argument continue + elif isinstance(column_arg, (LambdaExpr,)): + # x = Column("name", String, default=lambda: uuid.uuid4()) + # go to next argument + continue else: assert False @@ -438,7 +448,7 @@ def _infer_type_from_left_and_inferred_right( if not is_subtype(left_hand_explicit_type, python_type_for_type): effective_type = api.named_type( - "__sa_Mapped", [orig_python_type_for_type] + names.NAMED_TYPE_SQLA_MAPPED, [orig_python_type_for_type] ) msg = ( @@ -449,8 +459,8 @@ def _infer_type_from_left_and_inferred_right( api, msg.format( node.name, - format_type(orig_left_hand_type), - format_type(effective_type), + util.format_type(orig_left_hand_type, api.options), + util.format_type(effective_type, api.options), ), node, ) @@ -507,7 +517,9 @@ def infer_type_from_left_hand_type_only( ) util.fail(api, msg.format(node.name), node) - return api.named_type("__sa_Mapped", [AnyType(TypeOfAny.special_form)]) + return api.named_type( + names.NAMED_TYPE_SQLA_MAPPED, [AnyType(TypeOfAny.special_form)] + ) else: # use type from the left hand side @@ -529,7 +541,7 @@ def extract_python_type_from_typeengine( return Instance(first_arg.node, []) # TODO: support other pep-435 types here else: - return api.named_type("__builtins__.str", []) + return api.named_type(names.NAMED_TYPE_BUILTINS_STR, []) assert node.has_base("sqlalchemy.sql.type_api.TypeEngine"), ( "could not extract Python type from node: %s" % node diff --git a/lib/sqlalchemy/ext/mypy/names.py b/lib/sqlalchemy/ext/mypy/names.py index 3dbfcc77032..9417a98cc7b 100644 --- a/lib/sqlalchemy/ext/mypy/names.py +++ b/lib/sqlalchemy/ext/mypy/names.py @@ -1,4 +1,10 @@ # ext/mypy/names.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # @@ -47,6 +53,12 @@ DECLARATIVE_MIXIN: int = util.symbol("DECLARATIVE_MIXIN") # type: ignore QUERY_EXPRESSION: int = util.symbol("QUERY_EXPRESSION") # type: ignore +# names that must succeed with mypy.api.named_type +NAMED_TYPE_BUILTINS_OBJECT = "builtins.object" +NAMED_TYPE_BUILTINS_STR = "builtins.str" +NAMED_TYPE_BUILTINS_LIST = "builtins.list" +NAMED_TYPE_SQLA_MAPPED = "sqlalchemy.orm.attributes.Mapped" + _lookup: Dict[str, Tuple[int, Set[str]]] = { "Column": ( COLUMN, diff --git a/lib/sqlalchemy/ext/mypy/plugin.py b/lib/sqlalchemy/ext/mypy/plugin.py index 356b0d9489e..37379f7ef10 100644 --- a/lib/sqlalchemy/ext/mypy/plugin.py +++ b/lib/sqlalchemy/ext/mypy/plugin.py @@ -1,4 +1,10 @@ # ext/mypy/plugin.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # @@ -142,7 +148,7 @@ def _dynamic_class_hook(ctx: DynamicClassDefContext) -> None: ) 
info.bases = [Instance(cls_arg.node, [])] else: - obj = ctx.api.named_type("__builtins__.object") + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) info.bases = [obj] @@ -152,7 +158,7 @@ def _dynamic_class_hook(ctx: DynamicClassDefContext) -> None: util.fail( ctx.api, "Not able to calculate MRO for declarative base", ctx.call ) - obj = ctx.api.named_type("__builtins__.object") + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) info.bases = [obj] info.fallback_to_any = True @@ -184,10 +190,13 @@ def _fill_in_decorators(ctx: ClassDefContext) -> None: else: continue - assert isinstance(target.expr, NameExpr) - sym = ctx.api.lookup_qualified( - target.expr.name, target, suppress_errors=True - ) + if isinstance(target.expr, NameExpr): + sym = ctx.api.lookup_qualified( + target.expr.name, target, suppress_errors=True + ) + else: + continue + if sym and sym.node: sym_type = get_proper_type(sym.type) if isinstance(sym_type, Instance): diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index a3825f175f6..956c1c9ea34 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -1,3 +1,10 @@ +# ext/mypy/util.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +import re from typing import Any from typing import Iterable from typing import Iterator @@ -9,6 +16,8 @@ from typing import TypeVar from typing import Union +from mypy import version +from mypy.messages import format_type as _mypy_format_type from mypy.nodes import ARG_POS from mypy.nodes import CallExpr from mypy.nodes import ClassDef @@ -22,6 +31,7 @@ from mypy.nodes import Statement from mypy.nodes import SymbolTableNode from mypy.nodes import TypeInfo +from mypy.options import Options from mypy.plugin import ClassDefContext from mypy.plugin import DynamicClassDefContext from mypy.plugin import SemanticAnalyzerPluginInterface @@ -34,6 +44,11 @@ from mypy.types import UnboundType from mypy.types import UnionType +_vers = tuple( + [int(x) for x in version.__version__.split(".") if re.match(r"^\d+$", x)] +) +mypy_14 = _vers >= (1, 4) + _TArgType = TypeVar("_TArgType", bound=Union[CallExpr, NameExpr]) @@ -63,8 +78,9 @@ def serialize(self) -> JsonDict: } def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None: - """Expands type vars in the context of a subtype when an attribute is inherited - from a generic super type.""" + """Expands type vars in the context of a subtype when an attribute is + inherited from a generic super type. 
+ """ if not isinstance(self.type, TypeVarType): return @@ -82,6 +98,10 @@ def deserialize( return cls(typ=typ, info=info, **data) +def name_is_dunder(name): + return bool(re.match(r"^__.+?__$", name)) + + def _set_info_metadata(info: TypeInfo, key: str, data: Any) -> None: info.metadata.setdefault("sqlalchemy", {})[key] = data @@ -145,6 +165,13 @@ def get_mapped_attributes( return attributes +def format_type(typ_: Type, options: Options) -> str: + if mypy_14: + return _mypy_format_type(typ_, options) # type: ignore + else: + return _mypy_format_type(typ_) # type: ignore + + def set_mapped_attributes( info: TypeInfo, attributes: List[SQLAlchemyAttribute] ) -> None: diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index a5c418e722e..ae097f26fa7 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,5 +1,5 @@ # ext/orderinglist.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 18a54e0798a..478dcd6967d 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,6 +14,11 @@ form, but are instead re-associated with the query structure when it is deserialized. +.. warning:: The serializer extension uses pickle to serialize and + deserialize objects, so the same security consideration mentioned + in the `python documentation + `_ apply. 
+ Usage is nearly the same as that of the standard Python pickle module:: from sqlalchemy.ext.serializer import loads, dumps @@ -66,102 +71,202 @@ from ..util import b64encode from ..util import byte_buffer from ..util import pickle +from ..util import py2k from ..util import text_type - __all__ = ["Serializer", "Deserializer", "dumps", "loads"] -def Serializer(*args, **kw): - pickler = pickle.Pickler(*args, **kw) - - def persistent_id(obj): - # print "serializing:", repr(obj) - if isinstance(obj, Mapper) and not obj.non_primary: - id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) - elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: - id_ = ( - "mapperprop:" - + b64encode(pickle.dumps(obj.parent.class_)) - + ":" - + obj.key - ) - elif isinstance(obj, Table): - if "parententity" in obj._annotations: - id_ = "mapper_selectable:" + b64encode( - pickle.dumps(obj._annotations["parententity"].class_) +if py2k: + + def Serializer(*args, **kw): + pickler = pickle.Pickler(*args, **kw) + + def persistent_id(obj): + # print "serializing:", repr(obj) + if isinstance(obj, Mapper) and not obj.non_primary: + id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif ( + isinstance(obj, MapperProperty) and not obj.parent.non_primary + ): + id_ = ( + "mapperprop:" + + b64encode(pickle.dumps(obj.parent.class_)) + + ":" + + obj.key + ) + elif isinstance(obj, Table): + if "parententity" in obj._annotations: + id_ = "mapper_selectable:" + b64encode( + pickle.dumps(obj._annotations["parententity"].class_) + ) + else: + id_ = "table:" + text_type(obj.key) + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id_ = ( + "column:" + + text_type(obj.table.key) + + ":" + + text_type(obj.key) + ) + elif isinstance(obj, Session): + id_ = "session:" + elif isinstance(obj, Engine): + id_ = "engine:" + else: + return None + return id_ + + pickler.persistent_id = persistent_id + return pickler + + our_ids = re.compile( + r"(mapperprop|mapper|mapper_selectable|table|column|" + r"session|attribute|engine):(.*)" + ) + + def Deserializer(file, metadata=None, scoped_session=None, engine=None): + unpickler = pickle.Unpickler(file) + + def get_engine(): + if engine: + return engine + elif scoped_session and scoped_session().bind: + return scoped_session().bind + elif metadata and metadata.bind: + return metadata.bind + else: + return None + + def persistent_load(id_): + m = our_ids.match(text_type(id_)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == "attribute": + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapper_selectable": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls).__clause_element__() + elif type_ == "mapperprop": + mapper, keyname = args.split(":") + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return metadata.tables[args] + elif type_ == "column": + table, colname = args.split(":") + return metadata.tables[table].c[colname] + elif type_ == "session": + return scoped_session() + elif type_ == "engine": + return get_engine() + else: + raise Exception("Unknown token: %s" % type_) + + unpickler.persistent_load = persistent_load + return unpickler + + +else: + + class Serializer(pickle.Pickler): + def persistent_id(self, obj): + # print "serializing:", repr(obj) + if isinstance(obj, Mapper) and not obj.non_primary: + id_ 
= "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif ( + isinstance(obj, MapperProperty) and not obj.parent.non_primary + ): + id_ = ( + "mapperprop:" + + b64encode(pickle.dumps(obj.parent.class_)) + + ":" + + obj.key + ) + elif isinstance(obj, Table): + if "parententity" in obj._annotations: + id_ = "mapper_selectable:" + b64encode( + pickle.dumps(obj._annotations["parententity"].class_) + ) + else: + id_ = "table:" + text_type(obj.key) + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id_ = ( + "column:" + + text_type(obj.table.key) + + ":" + + text_type(obj.key) ) + elif isinstance(obj, Session): + id_ = "session:" + elif isinstance(obj, Engine): + id_ = "engine:" else: - id_ = "table:" + text_type(obj.key) - elif isinstance(obj, Column) and isinstance(obj.table, Table): - id_ = ( - "column:" + text_type(obj.table.key) + ":" + text_type(obj.key) - ) - elif isinstance(obj, Session): - id_ = "session:" - elif isinstance(obj, Engine): - id_ = "engine:" - else: - return None - return id_ - - pickler.persistent_id = persistent_id - return pickler - - -our_ids = re.compile( - r"(mapperprop|mapper|mapper_selectable|table|column|" - r"session|attribute|engine):(.*)" -) - - -def Deserializer(file, metadata=None, scoped_session=None, engine=None): - unpickler = pickle.Unpickler(file) - - def get_engine(): - if engine: - return engine - elif scoped_session and scoped_session().bind: - return scoped_session().bind - elif metadata and metadata.bind: - return metadata.bind - else: - return None - - def persistent_load(id_): - m = our_ids.match(text_type(id_)) - if not m: - return None - else: - type_, args = m.group(1, 2) - if type_ == "attribute": - key, clsarg = args.split(":") - cls = pickle.loads(b64decode(clsarg)) - return getattr(cls, key) - elif type_ == "mapper": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls) - elif type_ == "mapper_selectable": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls).__clause_element__() - elif type_ == "mapperprop": - mapper, keyname = args.split(":") - cls = pickle.loads(b64decode(mapper)) - return class_mapper(cls).attrs[keyname] - elif type_ == "table": - return metadata.tables[args] - elif type_ == "column": - table, colname = args.split(":") - return metadata.tables[table].c[colname] - elif type_ == "session": - return scoped_session() - elif type_ == "engine": - return get_engine() + return None + return id_ + + our_ids = re.compile( + r"(mapperprop|mapper|mapper_selectable|table|column|" + r"session|attribute|engine):(.*)" + ) + + class Deserializer(pickle.Unpickler): + def __init__( + self, file, metadata=None, scoped_session=None, engine=None + ): + super().__init__(file) + self.metadata = metadata + self.scoped_session = scoped_session + self.engine = engine + + def get_engine(self): + if self.engine: + return self.engine + elif self.scoped_session and self.scoped_session().bind: + return self.scoped_session().bind + elif self.metadata and self.metadata.bind: + return self.metadata.bind else: - raise Exception("Unknown token: %s" % type_) + return None - unpickler.persistent_load = persistent_load - return unpickler + def persistent_load(self, id_): + m = our_ids.match(text_type(id_)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == "attribute": + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapper_selectable": + 
cls = pickle.loads(b64decode(args)) + return class_mapper(cls).__clause_element__() + elif type_ == "mapperprop": + mapper, keyname = args.split(":") + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return self.metadata.tables[args] + elif type_ == "column": + table, colname = args.split(":") + return self.metadata.tables[table].c[colname] + elif type_ == "session": + return self.scoped_session() + elif type_ == "engine": + return self.get_engine() + else: + raise Exception("Unknown token: %s" % type_) def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL): diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py index 9bf4d042df5..a5925383f7e 100644 --- a/lib/sqlalchemy/future/__init__.py +++ b/lib/sqlalchemy/future/__init__.py @@ -1,5 +1,5 @@ -# sql/future/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# future/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/engine.py b/lib/sqlalchemy/future/engine.py index ab890ca4f4c..111f1cd71df 100644 --- a/lib/sqlalchemy/future/engine.py +++ b/lib/sqlalchemy/future/engine.py @@ -1,3 +1,9 @@ +# future/engine.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import util from ..engine import Connection as _LegacyConnection from ..engine import create_engine as _create_engine @@ -353,21 +359,7 @@ def _future_facade(self, legacy_engine): execution_options=legacy_engine._execution_options, ) - class _trans_ctx(object): - def __init__(self, conn): - self.conn = conn - - def __enter__(self): - self.transaction = self.conn.begin() - self.transaction.__enter__() - return self.conn - - def __exit__(self, type_, value, traceback): - try: - self.transaction.__exit__(type_, value, traceback) - finally: - self.conn.close() - + @util.contextmanager def begin(self): """Return a :class:`_future.Connection` object with a transaction begun. @@ -390,8 +382,9 @@ def begin(self): :meth:`_future.Connection.begin` """ - conn = self.connect() - return self._trans_ctx(conn) + with self.connect() as conn: + with conn.begin(): + yield conn def connect(self): """Return a new :class:`_future.Connection` object. 
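The reworked ``begin()`` above relies on ``@util.contextmanager`` in place of the removed ``_trans_ctx`` helper: it connects, begins a transaction, yields the :class:`_future.Connection`, commits on normal exit, rolls back on error, and closes the connection either way.  A minimal usage sketch, assuming a throwaway in-memory SQLite URL and the 1.4 ``future=True`` engine flag purely for illustration::

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://", future=True)

    # connection is opened and a transaction begun; COMMIT occurs on
    # normal exit of the block, ROLLBACK on exception, and the
    # connection is closed in both cases
    with engine.begin() as conn:
        conn.execute(text("CREATE TABLE t (x INTEGER)"))
        conn.execute(text("INSERT INTO t (x) VALUES (1)"))

    with engine.connect() as conn:
        assert conn.execute(text("SELECT x FROM t")).scalar() == 1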
diff --git a/lib/sqlalchemy/future/orm/__init__.py b/lib/sqlalchemy/future/orm/__init__.py index 89b490d7136..65be463b8c7 100644 --- a/lib/sqlalchemy/future/orm/__init__.py +++ b/lib/sqlalchemy/future/orm/__init__.py @@ -1,5 +1,5 @@ -# sql/future/orm/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# future/orm/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/inspection.py b/lib/sqlalchemy/inspection.py index 40b746655cd..17740956d5a 100644 --- a/lib/sqlalchemy/inspection.py +++ b/lib/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ -# sqlalchemy/inspect.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# inspection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 9ec3842a6df..197a86de8cd 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -1,5 +1,5 @@ -# sqlalchemy/log.py -# Copyright (C) 2006-2021 the SQLAlchemy authors and contributors +# log.py +# Copyright (C) 2006-2025 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # @@ -21,6 +21,17 @@ import logging import sys +from .util import py311 +from .util import py38 + +if py38: + STACKLEVEL = True + # needed as of py3.11.0b1 + # #8019 + STACKLEVEL_OFFSET = 2 if py311 else 1 +else: + STACKLEVEL = False + STACKLEVEL_OFFSET = 0 # set initial level to WARN. This so that # log statements don't occur in the absence of explicit @@ -160,6 +171,11 @@ def log(self, level, msg, *args, **kwargs): selected_level = self.logger.getEffectiveLevel() if level >= selected_level: + if STACKLEVEL: + kwargs["stacklevel"] = ( + kwargs.get("stacklevel", 1) + STACKLEVEL_OFFSET + ) + self.logger._log(level, msg, args, **kwargs) def isEnabledFor(self, level): diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index bdc5cbf674d..9e32dd4c263 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -172,18 +172,24 @@ def dynamic_loader(argument, **kw): def backref(name, **kwargs): - """Create a back reference with explicit keyword arguments, which are the - same arguments one can send to :func:`relationship`. + """When using the :paramref:`_orm.relationship.backref` parameter, + provides specific parameters to be used when the new + :func:`_orm.relationship` is generated. - Used with the ``backref`` keyword argument to :func:`relationship` in - place of a string argument, e.g.:: + E.g.:: 'items':relationship( SomeItem, backref=backref('parent', lazy='subquery')) + The :paramref:`_orm.relationship.backref` parameter is generally + considered to be legacy; for modern applications, using + explicit :func:`_orm.relationship` constructs linked together using + the :paramref:`_orm.relationship.back_populates` parameter should be + preferred. + .. 
seealso:: - :ref:`relationships_backref` + :ref:`relationships_backref` - background on backrefs """ diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 513144b8728..98c0742442a 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -54,6 +54,8 @@ from ..sql import roles from ..sql import traversals from ..sql import visitors +from ..sql.traversals import HasCacheKey +from ..sql.visitors import InternalTraversal class NoKey(str): @@ -223,13 +225,16 @@ def expression(self): subclass representing a column expression. """ + entity_namespace = self._entity_namespace + assert isinstance(entity_namespace, HasCacheKey) + if self.key is NO_KEY: - annotations = {"entity_namespace": self._entity_namespace} + annotations = {"entity_namespace": entity_namespace} else: annotations = { "proxy_key": self.key, "proxy_owner": self._parententity, - "entity_namespace": self._entity_namespace, + "entity_namespace": entity_namespace, } ce = self.comparator.__clause_element__() @@ -376,7 +381,8 @@ def _queryable_attribute_unreduce(key, mapped_class, parententity, entity): class Mapped(QueryableAttribute, _Generic_T): - """Represent an ORM mapped :term:`descriptor` attribute for typing purposes. + """Represent an ORM mapped :term:`descriptor` attribute for typing + purposes. This class represents the complete descriptor interface for any class attribute that will have been :term:`instrumented` by the ORM @@ -481,10 +487,22 @@ def __get__(self, instance, owner): return self.impl.get(state, dict_) -HasEntityNamespace = util.namedtuple( - "HasEntityNamespace", ["entity_namespace"] -) -HasEntityNamespace.is_mapper = HasEntityNamespace.is_aliased_class = False +class HasEntityNamespace(HasCacheKey): + __slots__ = ("_entity_namespace",) + + is_mapper = False + is_aliased_class = False + + _traverse_internals = [ + ("_entity_namespace", InternalTraversal.dp_has_cache_key), + ] + + def __init__(self, ent): + self._entity_namespace = ent + + @property + def entity_namespace(self): + return self._entity_namespace.entity_namespace def create_proxied_attribute(descriptor): @@ -526,6 +544,11 @@ def __init__( _is_internal_proxy = True + _cache_key_traversal = [ + ("key", visitors.ExtendedInternalTraversal.dp_string), + ("_parententity", visitors.ExtendedInternalTraversal.dp_multi), + ] + @property def _impl_uses_objects(self): return ( @@ -544,7 +567,7 @@ def _entity_namespace(self): else: # used by hybrid attributes which try to remain # agnostic of any ORM concepts like mappers - return HasEntityNamespace(self.class_) + return HasEntityNamespace(self._parententity) @property def property(self): @@ -569,6 +592,16 @@ def adapt_to_entity(self, adapt_to_entity): adapt_to_entity, ) + def _clone(self, **kw): + return self.__class__( + self.class_, + self.key, + self.descriptor, + self._comparator, + adapt_to_entity=self._adapt_to_entity, + original_property=self.original_property, + ) + def __get__(self, instance, owner): retval = self.descriptor.__get__(instance, owner) # detect if this is a plain Python @property, which just returns @@ -1569,7 +1602,13 @@ def set( self.dispatch.bulk_replace(state, new_values, evt) - old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) + # propagate NO_RAISE in passive through to the get() 
for the + # existing object (ticket #8862) + old = self.get( + state, + dict_, + passive=PASSIVE_ONLY_PERSISTENT ^ (passive & NO_RAISE), + ) if old is PASSIVE_NO_RESULT: old = self._default_value(state, dict_) elif old is orig_iterable: diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index 6553cf66981..bd8d150d86f 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -162,6 +162,12 @@ canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK, ) +PASSIVE_MERGE = util.symbol( + "PASSIVE_OFF | NO_RAISE", + "Symbol used specifically for session.merge() and similar cases", + canonical=PASSIVE_OFF | NO_RAISE, +) + DEFAULT_MANAGER_ATTR = "_sa_class_manager" DEFAULT_STATE_ATTR = "_sa_instance_state" diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 4ec31bcf74e..69e62df70ba 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -1,5 +1,5 @@ -# ext/declarative/clsregistry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# orm/clsregistry.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -164,11 +164,11 @@ def _remove_item(self, ref): def add_item(self, item): # protect against class registration race condition against # asynchronous garbage collection calling _remove_item, - # [ticket:3208] + # [ticket:3208] and [ticket:10782] modules = set( [ cls.__module__ - for cls in [ref() for ref in self.contents] + for cls in [ref() for ref in list(self.contents)] if cls is not None ] ) @@ -257,7 +257,7 @@ def __getattr__(self, key): else: assert isinstance(value, _MultipleClassMarker) return value.attempt_get(self.__parent.path, key) - raise AttributeError( + raise NameError( "Module %r has no mapped classes " "registered under the name %r" % (self.__parent.name, key) ) diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index ec4d00cb0b3..857bdae182e 100644 --- a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -1273,7 +1273,7 @@ def __delslice__(self, start, end): def extend(fn): def extend(self, iterable): - for value in iterable: + for value in list(iterable): self.append(value) _tidy(extend) @@ -1283,7 +1283,7 @@ def __iadd__(fn): def __iadd__(self, iterable): # list.__iadd__ takes any iterable and seems to let TypeError # raise as-is instead of returning NotImplemented - for value in iterable: + for value in list(iterable): self.append(value) return self diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 0a93d993af4..82515285dbd 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1,5 +1,5 @@ # orm/context.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -22,6 +22,7 @@ from .. import inspect from .. import sql from .. 
import util +from ..sql import ClauseElement from ..sql import coercions from ..sql import expression from ..sql import roles @@ -105,9 +106,25 @@ def __init__( self.loaders_require_uniquing = False self.params = params - self.propagated_loader_options = { - o for o in statement._with_options if o.propagate_to_loaders - } + cached_options = compile_state.select_statement._with_options + uncached_options = statement._with_options + + # see issue #7447 , #8399 for some background + # propagated loader options will be present on loaded InstanceState + # objects under state.load_options and are typically used by + # LazyLoader to apply options to the SELECT statement it emits. + # For compile state options (i.e. loader strategy options), these + # need to line up with the ".load_path" attribute which in + # loader.py is pulled from context.compile_state.current_path. + # so, this means these options have to be the ones from the + # *cached* statement that's travelling with compile_state, not the + # *current* statement which won't match up for an ad-hoc + # AliasedClass + self.propagated_loader_options = tuple( + opt._adapt_cached_option_to_uncached_option(self, uncached_opt) + for opt, uncached_opt in zip(cached_options, uncached_options) + if opt.propagate_to_loaders + ) self.attributes = dict(compile_state.attributes) @@ -157,6 +174,7 @@ class default_compile_options(CacheableOptions): ("_set_base_alias", InternalTraversal.dp_boolean), ("_for_refresh_state", InternalTraversal.dp_boolean), ("_render_for_subquery", InternalTraversal.dp_boolean), + ("_is_star", InternalTraversal.dp_boolean), ] # set to True by default from Query._statement_20(), to indicate @@ -181,6 +199,7 @@ class default_compile_options(CacheableOptions): _set_base_alias = False _for_refresh_state = False _render_for_subquery = False + _is_star = False current_path = _path_registry @@ -265,14 +284,9 @@ def orm_pre_session_exec( else: execution_options = execution_options.union(_orm_load_exec_options) - if "yield_per" in execution_options or load_options._yield_per: + if load_options._yield_per: execution_options = execution_options.union( - { - "stream_results": True, - "max_row_buffer": execution_options.get( - "yield_per", load_options._yield_per - ), - } + {"yield_per": load_options._yield_per} ) bind_arguments["clause"] = statement @@ -320,6 +334,8 @@ def orm_setup_cursor_result( load_options = execution_options.get( "_sa_orm_load_options", QueryContext.default_load_options ) + if compile_state.compile_options._is_star: + return result querycontext = QueryContext( compile_state, @@ -345,6 +361,10 @@ def _lead_mapper_entities(self): ] def _create_with_polymorphic_adapter(self, ext_info, selectable): + """given MapperEntity or ORMColumnEntity, setup polymorphic loading + if appropriate + + """ if ( not ext_info.is_aliased_class and ext_info.mapper.persist_selectable @@ -362,6 +382,12 @@ def _mapper_loads_polymorphically_with(self, mapper, adapter): for m in m2.iterate_to_root(): # TODO: redundant ? 
self._polymorphic_adapters[m.local_table] = adapter + @classmethod + def _create_entities_collection(cls, query, legacy): + raise NotImplementedError( + "this method only works for ORMSelectCompileState" + ) + @sql.base.CompileState.plugin_for("orm", "orm_from_statement") class ORMFromStatementCompileState(ORMCompileState): @@ -371,6 +397,7 @@ class ORMFromStatementCompileState(ORMCompileState): _has_orm_entities = False multi_row_eager_loaders = False + eager_adding_joins = False compound_eager_adapter = None extra_criteria_entities = _EMPTY_DICT @@ -468,8 +495,8 @@ def create_for_statement(cls, statement_container, compiler, **kw): entity.setup_compile_state(self) # we did the setup just to get primary columns. - self.statement = expression.TextualSelect( - self.statement, self.primary_columns, positional=False + self.statement = _AdHocColumnsStatement( + self.statement, self.primary_columns ) else: # allow TextualSelect with implicit columns as well @@ -496,6 +523,65 @@ def _get_current_adapter(self): return None +class _AdHocColumnsStatement(ClauseElement): + """internal object created to somewhat act like a SELECT when we + are selecting columns from a DML RETURNING. + + + """ + + __visit_name__ = None + + def __init__(self, text, columns): + self.element = text + self.column_args = [ + coercions.expect(roles.ColumnsClauseRole, c) for c in columns + ] + + def _generate_cache_key(self): + raise NotImplementedError() + + def _gen_cache_key(self, anon_map, bindparams): + raise NotImplementedError() + + def _compiler_dispatch( + self, compiler, compound_index=None, asfrom=False, **kw + ): + """provide a fixed _compiler_dispatch method.""" + + toplevel = not compiler.stack + entry = ( + compiler._default_stack_entry if toplevel else compiler.stack[-1] + ) + + populate_result_map = ( + toplevel + # these two might not be needed + or ( + compound_index == 0 + and entry.get("need_result_map_for_compound", False) + ) + or entry.get("need_result_map_for_nested", False) + ) + + if populate_result_map: + compiler._ordered_columns = ( + compiler._textual_ordered_columns + ) = False + + # enable looser result column matching. this is shown to be + # needed by test_query.py::TextTest + compiler._loose_column_name_matching = True + + for c in self.column_args: + compiler.process( + c, + within_columns_clause=True, + add_to_result_map=compiler._add_to_result_map, + ) + return compiler.process(self.element, **kw) + + @sql.base.CompileState.plugin_for("orm", "select") class ORMSelectCompileState(ORMCompileState, SelectState): _joinpath = _joinpoint = _EMPTY_DICT @@ -507,6 +593,7 @@ class ORMSelectCompileState(ORMCompileState, SelectState): _has_orm_entities = False multi_row_eager_loaders = False + eager_adding_joins = False compound_eager_adapter = None correlate = None @@ -590,10 +677,6 @@ def create_for_statement(cls, statement, compiler, **kw): else: self.label_style = self.select_statement._label_style - self._label_convention = self._column_naming_convention( - statement._label_style, self.use_legacy_query_style - ) - if select_statement._memoized_select_entities: self._memoized_entities = { memoized_entities: _QueryEntity.to_compile_state( @@ -607,6 +690,14 @@ def create_for_statement(cls, statement, compiler, **kw): ) } + # label_convention is stateful and will yield deduping keys if it + # sees the same key twice. 
therefore it's important that it is not + # invoked for the above "memoized" entities that aren't actually + # in the columns clause + self._label_convention = self._column_naming_convention( + statement._label_style, self.use_legacy_query_style + ) + _QueryEntity.to_compile_state( self, select_statement._raw_columns, @@ -761,7 +852,7 @@ def _setup_for_generate(self): for s in query._correlate ) ) - elif query._correlate_except: + elif query._correlate_except is not None: self.correlate_except = tuple( util.flatten_iterator( sql_util.surface_selectables(s) if s is not None else None @@ -775,6 +866,11 @@ def _setup_for_generate(self): self._for_update_arg = query._for_update_arg + if self.compile_options._is_star and (len(self._entities) != 1): + raise sa_exc.CompileError( + "Can't generate ORM query that includes multiple expressions " + "at the same time as '*'; query for '*' alone if present" + ) for entity in self._entities: entity.setup_compile_state(self) @@ -806,7 +902,11 @@ def _setup_for_generate(self): if self.order_by is False: self.order_by = None - if self.multi_row_eager_loaders and self._should_nest_selectable: + if ( + self.multi_row_eager_loaders + and self.eager_adding_joins + and self._should_nest_selectable + ): self.statement = self._compound_eager_statement() else: self.statement = self._simple_statement() @@ -1154,6 +1254,8 @@ def _select_statement( correlate_except, limit_clause, offset_clause, + fetch_clause, + fetch_clause_options, distinct, distinct_on, prefixes, @@ -1186,6 +1288,8 @@ def _select_statement( statement._limit_clause = limit_clause statement._offset_clause = offset_clause + statement._fetch_clause = fetch_clause + statement._fetch_clause_options = fetch_clause_options if prefixes: statement._prefixes = prefixes @@ -1203,7 +1307,7 @@ def _select_statement( if correlate: statement.correlate.non_generative(statement, *correlate) - if correlate_except: + if correlate_except is not None: statement.correlate_except.non_generative( statement, *correlate_except ) @@ -2100,6 +2204,10 @@ def _select_args(self): "prefixes": self.select_statement._prefixes, "suffixes": self.select_statement._suffixes, "group_by": self.group_by or None, + "fetch_clause": self.select_statement._fetch_clause, + "fetch_clause_options": ( + self.select_statement._fetch_clause_options + ), } @property @@ -2123,7 +2231,8 @@ def _get_extra_criteria(self, ext_info): for ae in self.global_attributes[ ("additional_entity_criteria", ext_info.mapper) ] - if ae.include_aliases or ae.entity is ext_info + if (ae.include_aliases or ae.entity is ext_info) + and ae._should_include(self) ) else: return () @@ -2144,6 +2253,7 @@ def _adjust_for_extra_criteria(self): for fromclause in self.from_clauses: ext_info = fromclause._annotations.get("parententity", None) + if ( ext_info and ( @@ -2167,7 +2277,10 @@ def _adjust_for_extra_criteria(self): single_crit = ext_info.mapper._single_table_criterion - additional_entity_criteria = self._get_extra_criteria(ext_info) + if self.compile_options._for_refresh_state: + additional_entity_criteria = [] + else: + additional_entity_criteria = self._get_extra_criteria(ext_info) if single_crit is not None: additional_entity_criteria += (single_crit,) @@ -2354,11 +2467,15 @@ def to_compile_state( entity._select_iterable, entities_collection, idx, + is_current_entities, ) else: if entity._annotations.get("bundle", False): _BundleEntity( - compile_state, entity, entities_collection + compile_state, + entity, + entities_collection, + is_current_entities, ) elif 
entity._is_clause_list: # this is legacy only - test_composites.py @@ -2368,10 +2485,15 @@ def to_compile_state( entity._select_iterable, entities_collection, idx, + is_current_entities, ) else: _ColumnEntity._for_columns( - compile_state, [entity], entities_collection, idx + compile_state, + [entity], + entities_collection, + idx, + is_current_entities, ) elif entity.is_bundle: _BundleEntity(compile_state, entity, entities_collection) @@ -2448,14 +2570,7 @@ def __init__( self._with_polymorphic_mappers = ext_info.with_polymorphic_mappers self._polymorphic_discriminator = ext_info.polymorphic_on - if ( - mapper.with_polymorphic - # controversy - only if inheriting mapper is also - # polymorphic? - # or (mapper.inherits and mapper.inherits.with_polymorphic) - or mapper.inherits - or mapper._requires_row_aliasing - ): + if mapper._should_select_with_poly_adapter: compile_state._create_with_polymorphic_adapter( ext_info, self.selectable ) @@ -2576,6 +2691,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, setup_entities=True, parent_bundle=None, ): @@ -2606,6 +2722,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, parent_bundle=self, ) elif isinstance(expr, Bundle): @@ -2613,6 +2730,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, parent_bundle=self, ) else: @@ -2621,6 +2739,7 @@ def __init__( [expr], entities_collection, None, + is_current_entities, parent_bundle=self, ) @@ -2694,6 +2813,7 @@ def _for_columns( columns, entities_collection, raw_column_index, + is_current_entities, parent_bundle=None, ): for column in columns: @@ -2713,6 +2833,7 @@ def _for_columns( entities_collection, _entity, raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) else: @@ -2722,6 +2843,7 @@ def _for_columns( entities_collection, _entity, raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) else: @@ -2730,6 +2852,7 @@ def _for_columns( column, entities_collection, raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) @@ -2820,12 +2943,17 @@ def __init__( column, entities_collection, raw_column_index, + is_current_entities, parent_bundle=None, ): self.expr = column self.raw_column_index = raw_column_index self.translate_raw_column = raw_column_index is not None - if column._is_text_clause: + + if column._is_star: + compile_state.compile_options += {"_is_star": True} + + if not is_current_entities or column._is_text_clause: self._label_name = None else: self._label_name = compile_state._label_convention(column) @@ -2884,6 +3012,7 @@ def __init__( entities_collection, parententity, raw_column_index, + is_current_entities, parent_bundle=None, ): annotations = column._annotations @@ -2910,9 +3039,13 @@ def __init__( self.translate_raw_column = raw_column_index is not None self.raw_column_index = raw_column_index - self._label_name = compile_state._label_convention( - column, col_name=orm_key - ) + + if is_current_entities: + self._label_name = compile_state._label_convention( + column, col_name=orm_key + ) + else: + self._label_name = None _entity._post_inspect self.entity_zero = self.entity_zero_or_selectable = ezero = _entity @@ -2931,11 +3064,7 @@ def __init__( self._extra_entities = (self.expr, self.column) - if ( - mapper.with_polymorphic - or mapper.inherits - or mapper._requires_row_aliasing - ): + if mapper._should_select_with_poly_adapter: compile_state._create_with_polymorphic_adapter( ezero, ezero.selectable ) diff --git 
a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 94cda236d15..a2e8bbb86d5 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -1,5 +1,5 @@ -# ext/declarative/api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# orm/decl_api.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -54,6 +54,10 @@ def has_inherited_table(cls): class DeclarativeMeta(type): def __init__(cls, classname, bases, dict_, **kw): + # use cls.__dict__, which can be modified by an + # __init_subclass__() method (#7900) + dict_ = cls.__dict__ + # early-consume registry from the initial declarative base, # assign privately to not conflict with subclass attributes named # "registry" @@ -200,7 +204,7 @@ class AddressMixin: :ref:`orm_declarative_dataclasses_mixin` - illustrates special forms for use with Python dataclasses - """ # noqa E501 + """ # noqa: E501 def __init__(self, fget, cascading=False): super(declared_attr, self).__init__(fget) @@ -228,7 +232,8 @@ def __get__(desc, self, cls): # here, we are inside of the declarative scan. use the registry # that is tracking the values of these attributes. - declarative_scan = manager.declarative_scan + declarative_scan = manager.declarative_scan() + assert declarative_scan is not None reg = declarative_scan.declared_attr_reg if desc in reg: @@ -644,7 +649,11 @@ def _dispose_cls(self, cls): def _add_manager(self, manager): self._managers[manager] = True - assert manager.registry is None + if manager.registry is not None and manager.is_mapped: + raise exc.ArgumentError( + "Class '%s' already has a primary mapper defined. " + % manager.class_ + ) manager.registry = self def configure(self, cascade=False): @@ -810,6 +819,14 @@ class Base(metaclass=DeclarativeMeta): if mapper: class_dict["__mapper_cls__"] = mapper + if hasattr(cls, "__class_getitem__"): + + def __class_getitem__(cls, key): + # allow generic classes in py3.9+ + return cls + + class_dict["__class_getitem__"] = __class_getitem__ + return metaclass(name, bases, class_dict) def mapped(self, cls): diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index bf1bc537da4..71dbd722f7f 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -1,5 +1,5 @@ -# ext/declarative/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# orm/decl_base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -152,7 +152,13 @@ def _check_declared_props_nocascade(obj, name, cls): class _MapperConfig(object): - __slots__ = ("cls", "classname", "properties", "declared_attr_reg") + __slots__ = ( + "cls", + "classname", + "properties", + "declared_attr_reg", + "__weakref__", + ) @classmethod def setup_mapping(cls, registry, cls_, dict_, table, mapper_kw): @@ -300,9 +306,12 @@ def __init__( mapper_kw, ): + # grab class dict before the instrumentation manager has been added. 
+ # reduces cycles + self.dict_ = dict(dict_) if dict_ else {} + super(_ClassScanMapperConfig, self).__init__(registry, cls_, mapper_kw) - self.dict_ = dict(dict_) if dict_ else {} self.persist_selectable = None self.declared_columns = set() self.column_copies = {} @@ -450,7 +459,14 @@ def _scan_attributes(self): attribute_is_overridden = self._cls_attr_override_checker(self.cls) + bases = [] + for base in cls.__mro__: + # collect bases and make sure standalone columns are copied + # to be the column they will ultimately be on the class, + # so that declared_attr functions use the right columns. + # need to do this all the way up the hierarchy first + # (see #8190) class_mapped = ( base is not cls @@ -463,9 +479,34 @@ def _scan_attributes(self): local_attributes_for_class = self._cls_attr_resolver(base) if not class_mapped and base is not cls: - self._produce_column_copies( - local_attributes_for_class, attribute_is_overridden + locally_collected_columns = self._produce_column_copies( + local_attributes_for_class, + attribute_is_overridden, ) + else: + locally_collected_columns = {} + + bases.append( + ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, + ) + ) + + for ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, + ) in bases: + + # this transfer can also take place as we scan each name + # for finer-grained control of how collected_attributes is + # populated, as this is what impacts column ordering. + # however it's simpler to get it out of the way here. + dict_.update(locally_collected_columns) for name, obj, is_dataclass in local_attributes_for_class(): if name == "__mapper_args__": @@ -631,6 +672,7 @@ def _produce_column_copies( ): cls = self.cls dict_ = self.dict_ + locally_collected_attributes = {} column_copies = self.column_copies # copy mixin columns to the mapped class @@ -655,7 +697,8 @@ def _produce_column_copies( column_copies[obj] = copy_ = obj._copy() copy_._creation_order = obj._creation_order setattr(cls, name, copy_) - dict_[name] = copy_ + locally_collected_attributes[name] = copy_ + return locally_collected_attributes def _extract_mappable_attributes(self): cls = self.cls diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 27919050ece..7aa812980c4 100644 --- a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py index 535067d88d0..fcfe5c9e89d 100644 --- a/lib/sqlalchemy/orm/descriptor_props.py +++ b/lib/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index 405498aaf69..a50558d21f3 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -302,7 +302,10 @@ def __init__(self, attr, 
state): # is in the FROM. So we purposely put the mapper selectable # in _from_obj[0] to ensure a user-defined join() later on # doesn't fail, and secondary is then in _from_obj[1]. - self._from_obj = (prop.mapper.selectable, prop.secondary) + + # note also, we are using the official ORM-annotated selectable + # from __clause_element__(), see #7868 + self._from_obj = (prop.mapper.__clause_element__(), prop.secondary) self._where_criteria = ( prop._with_parent(instance, alias_secondary=False), @@ -341,10 +344,12 @@ def _iter(self): return result.IteratorResult( result.SimpleResultMetaData([self.attr.class_.__name__]), - self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE, - ).added_items, + iter( + self.attr._get_collection_history( + attributes.instance_state(self.instance), + attributes.PASSIVE_NO_INITIALIZE, + ).added_items + ), _source_supports_scalars=True, ).scalars() else: diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index 69d80dd8bdb..dc5dd1310c3 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -11,6 +11,8 @@ from .. import util from ..sql import and_ from ..sql import operators +from ..sql.sqltypes import Integer +from ..sql.sqltypes import Numeric class UnevaluatableError(Exception): @@ -30,12 +32,6 @@ def reverse_operate(self, *arg, **kw): _straight_ops = set( getattr(operators, op) for op in ( - "add", - "mul", - "sub", - "div", - "mod", - "truediv", "lt", "le", "ne", @@ -45,6 +41,18 @@ def reverse_operate(self, *arg, **kw): ) ) +_math_only_straight_ops = set( + getattr(operators, op) + for op in ( + "add", + "mul", + "sub", + "div", + "mod", + "truediv", + ) +) + _extended_ops = { operators.in_op: (lambda a, b: a in b if a is not _NO_OBJECT else None), operators.not_in_op: ( @@ -62,7 +70,6 @@ def reverse_operate(self, *arg, **kw): "startswith_op", "between_op", "endswith_op", - "concat_op", ) ) @@ -191,6 +198,11 @@ def evaluate(obj): def evaluate(obj): return eval_left(obj) != eval_right(obj) + elif operator is operators.concat_op: + + def evaluate(obj): + return eval_left(obj) + eval_right(obj) + elif operator in _extended_ops: def evaluate(obj): @@ -201,6 +213,28 @@ def evaluate(obj): return _extended_ops[operator](left_val, right_val) + elif operator in _math_only_straight_ops: + if ( + clause.left.type._type_affinity + not in ( + Numeric, + Integer, + ) + or clause.right.type._type_affinity not in (Numeric, Integer) + ): + raise UnevaluatableError( + 'Cannot evaluate math operator "%s" for ' + "datatypes %s, %s" + % (operator.__name__, clause.left.type, clause.right.type) + ) + + def evaluate(obj): + left_val = eval_left(obj) + right_val = eval_right(obj) + if left_val is None or right_val is None: + return None + return operator(eval_left(obj), eval_right(obj)) + elif operator in _straight_ops: def evaluate(obj): diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 2c8d155ad88..0341d08d091 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under 
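The evaluator changes above move the math operators out of ``_straight_ops`` so that in-Python evaluation proceeds only when both operands have an ``Integer`` or ``Numeric`` type affinity, raising ``UnevaluatableError`` otherwise.  A minimal sketch of the code path that exercises this evaluator, a bulk UPDATE using ``synchronize_session="evaluate"`` (the ``Account`` mapping below is hypothetical, for illustration only)::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Account(Base):
        __tablename__ = "account"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        balance = Column(Integer)

    engine = create_engine("sqlite://", future=True)
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        session.add(Account(id=1, name="a", balance=10))
        session.commit()

        acct = session.get(Account, 1)

        # "balance + 5" is applied in Python to already-loaded objects;
        # the evaluator permits the "+" only because both operands have
        # Integer/Numeric type affinity, otherwise UnevaluatableError is
        # raised rather than silently computing a wrong in-memory value
        session.query(Account).filter(Account.balance > 0).update(
            {Account.balance: Account.balance + 5},
            synchronize_session="evaluate",
        )
        assert acct.balance == 15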
@@ -325,6 +325,23 @@ def init_failure(self, target, args, kwargs): """ + def _sa_event_merge_wo_load(self, target, context): + """receive an object instance after it was the subject of a merge() + call, when load=False was passed. + + The target would be the already-loaded object in the Session which + would have had its attributes overwritten by the incoming object. This + overwrite operation does not use attribute events, instead just + populating dict directly. Therefore the purpose of this event is so + that extensions like sqlalchemy.ext.mutable know that object state has + changed and incoming state needs to be set up for "parents" etc. + + This functionality is acceptable to be made public in a later release. + + .. versionadded:: 1.4.41 + + """ + def load(self, target, context): """Receive an object instance after it has been created via ``__new__``, and after initial attribute population has @@ -1351,7 +1368,7 @@ def my_before_commit(session): """ - _target_class_doc = "SomeSessionOrFactory" + _target_class_doc = "SomeSessionClassOrObject" _dispatch_target = Session @@ -1419,15 +1436,31 @@ def wrap(session, state, *arg, **kw): event_key.base_listen(**kw) def do_orm_execute(self, orm_execute_state): - """Intercept statement executions that occur in terms of a :class:`.Session`. - - This event is invoked for all top-level SQL statements invoked - from the :meth:`_orm.Session.execute` method. As of SQLAlchemy 1.4, - all ORM queries emitted on behalf of a :class:`_orm.Session` will - flow through this method, so this event hook provides the single - point at which ORM queries of all types may be intercepted before - they are invoked, and additionally to replace their execution with - a different process. + """Intercept statement executions that occur on behalf of an + ORM :class:`.Session` object. + + This event is invoked for all top-level SQL statements invoked from the + :meth:`_orm.Session.execute` method, as well as related methods such as + :meth:`_orm.Session.scalars` and :meth:`_orm.Session.scalar`. As of + SQLAlchemy 1.4, all ORM queries emitted on behalf of a + :class:`_orm.Session` will flow through this method, so this event hook + provides the single point at which ORM queries of all types may be + intercepted before they are invoked, and additionally to replace their + execution with a different process. + + .. note:: The :meth:`_orm.SessionEvents.do_orm_execute` event hook + is triggered **for ORM statement executions only**, meaning those + invoked via the :meth:`_orm.Session.execute` and similar methods on + the :class:`_orm.Session` object. It does **not** trigger for + statements that are invoked by SQLAlchemy Core only, i.e. statements + invoked directly using :meth:`_engine.Connection.execute` or + otherwise originating from an :class:`_engine.Engine` object without + any :class:`_orm.Session` involved. To intercept **all** SQL + executions regardless of whether the Core or ORM APIs are in use, + see the event hooks at + :class:`.ConnectionEvents`, such as + :meth:`.ConnectionEvents.before_execute` and + :meth:`.ConnectionEvents.before_cursor_execute`. This event is a ``do_`` event, meaning it has the capability to replace the operation that the :meth:`_orm.Session.execute` method normally @@ -1879,7 +1912,8 @@ def after_bulk_delete(self, delete_context): @_lifecycle_event def transient_to_pending(self, session, instance): - """Intercept the "transient to pending" transition for a specific object. 
+ """Intercept the "transient to pending" transition for a specific + object. This event is a specialization of the :meth:`.SessionEvents.after_attach` event which is only invoked @@ -1900,7 +1934,8 @@ def transient_to_pending(self, session, instance): @_lifecycle_event def pending_to_transient(self, session, instance): - """Intercept the "pending to transient" transition for a specific object. + """Intercept the "pending to transient" transition for a specific + object. This less common transition occurs when an pending object that has not been flushed is evicted from the session; this can occur @@ -1921,7 +1956,8 @@ def pending_to_transient(self, session, instance): @_lifecycle_event def persistent_to_transient(self, session, instance): - """Intercept the "persistent to transient" transition for a specific object. + """Intercept the "persistent to transient" transition for a specific + object. This less common transition occurs when an pending object that has has been flushed is evicted from the session; this can occur @@ -1941,7 +1977,8 @@ def persistent_to_transient(self, session, instance): @_lifecycle_event def pending_to_persistent(self, session, instance): - """Intercept the "pending to persistent"" transition for a specific object. + """Intercept the "pending to persistent"" transition for a specific + object. This event is invoked within the flush process, and is similar to scanning the :attr:`.Session.new` collection within @@ -1963,7 +2000,8 @@ def pending_to_persistent(self, session, instance): @_lifecycle_event def detached_to_persistent(self, session, instance): - """Intercept the "detached to persistent" transition for a specific object. + """Intercept the "detached to persistent" transition for a specific + object. This event is a specialization of the :meth:`.SessionEvents.after_attach` event which is only invoked @@ -1999,7 +2037,8 @@ def detached_to_persistent(self, session, instance): @_lifecycle_event def loaded_as_persistent(self, session, instance): - """Intercept the "loaded as persistent" transition for a specific object. + """Intercept the "loaded as persistent" transition for a specific + object. This event is invoked within the ORM loading process, and is invoked very similarly to the :meth:`.InstanceEvents.load` event. However, @@ -2034,7 +2073,8 @@ def loaded_as_persistent(self, session, instance): @_lifecycle_event def persistent_to_deleted(self, session, instance): - """Intercept the "persistent to deleted" transition for a specific object. + """Intercept the "persistent to deleted" transition for a specific + object. This event is invoked when a persistent object's identity is deleted from the database within a flush, however the object @@ -2066,7 +2106,8 @@ def persistent_to_deleted(self, session, instance): @_lifecycle_event def deleted_to_persistent(self, session, instance): - """Intercept the "deleted to persistent" transition for a specific object. + """Intercept the "deleted to persistent" transition for a specific + object. This transition occurs only when an object that's been deleted successfully in a flush is restored due to a call to @@ -2083,7 +2124,8 @@ def deleted_to_persistent(self, session, instance): @_lifecycle_event def deleted_to_detached(self, session, instance): - """Intercept the "deleted to detached" transition for a specific object. + """Intercept the "deleted to detached" transition for a specific + object. This event is invoked when a deleted object is evicted from the session. 
The typical case when this occurs is when @@ -2106,7 +2148,8 @@ def deleted_to_detached(self, session, instance): @_lifecycle_event def persistent_to_detached(self, session, instance): - """Intercept the "persistent to detached" transition for a specific object. + """Intercept the "persistent to detached" transition for a specific + object. This event is invoked when a persistent object is evicted from the session. There are many conditions that cause this @@ -2142,7 +2185,8 @@ class AttributeEvents(event.Events): These are typically defined on the class-bound descriptor for the target class. - e.g.:: + For example, to register a listener that will receive the + :meth:`_orm.AttributeEvents.append` event:: from sqlalchemy import event @@ -2153,7 +2197,8 @@ def my_append_listener(target, value, initiator): Listeners have the option to return a possibly modified version of the value, when the :paramref:`.AttributeEvents.retval` flag is passed to - :func:`.event.listen` or :func:`.event.listens_for`:: + :func:`.event.listen` or :func:`.event.listens_for`, such as below, + illustrated using the :meth:`_orm.AttributeEvents.set` event:: def validate_phone(target, value, oldvalue, initiator): "Strip non-numeric characters from a phone number" diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py index dbb499d5dc0..b1ce8edba53 100644 --- a/lib/sqlalchemy/orm/exc.py +++ b/lib/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py index 6aea0d18547..65798aec573 100644 --- a/lib/sqlalchemy/orm/identity.py +++ b/lib/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index 02fc7379322..5c2345cab44 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -30,6 +30,8 @@ """ +import weakref + from . import base from . import collections from . 
import exc @@ -131,7 +133,7 @@ def _update_state( if registry: registry._add_manager(self) if declarative_scan: - self.declarative_scan = declarative_scan + self.declarative_scan = weakref.ref(declarative_scan) if expired_attribute_loader: self.expired_attribute_loader = expired_attribute_loader @@ -197,7 +199,7 @@ def _loader_impls(self): return frozenset([attr.impl for attr in self.values()]) @util.memoized_property - def mapper(self): + def mapper(self): # noqa: F811 # raises unless self.mapper has been assigned raise exc.UnmappedClassError(self.class_) diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 9eb362c437b..895d932132c 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -754,16 +754,95 @@ class ORMOption(ExecutableOption): _is_strategy_option = False + def _adapt_cached_option_to_uncached_option(self, context, uncached_opt): + """given "self" which is an option from a cached query, as well as the + corresponding option from the uncached version of the same query, + return the option we should use in a new query, in the context of a + loader strategy being asked to load related rows on behalf of that + cached query, which is assumed to be building a new query based on + entities passed to us from the cached query. + + Currently this routine chooses between "self" and "uncached" without + manufacturing anything new. If the option is itself a loader strategy + option which has a path, that path needs to match to the entities being + passed to us by the cached query, so the :class:`_orm.Load` subclass + overrides this to return "self". For all other options, we return the + uncached form which may have changing state, such as a + with_loader_criteria() option which will very often have new state. + + This routine could in the future involve + generating a new option based on both inputs if use cases arise, + such as if with_loader_criteria() needed to match up to + ``AliasedClass`` instances given in the parent query. + + However, longer term it might be better to restructure things such that + ``AliasedClass`` entities are always matched up on their cache key, + instead of identity, in things like paths and such, so that this whole + issue of "the uncached option does not match the entities" goes away. + However this would make ``PathRegistry`` more complicated and difficult + to debug as well as potentially less performant in that it would be + hashing enormous cache keys rather than a simple AliasedInsp. UNLESS, + we could get cache keys overall to be reliably hashed into something + like an md5 key. + + .. versionadded:: 1.4.41 -class LoaderOption(ORMOption): - """Describe a loader modification to an ORM statement at compilation time. - .. versionadded:: 1.4 + """ + if uncached_opt is not None: + return uncached_opt + else: + return self + + +class CompileStateOption(HasCacheKey, ORMOption): + """base for :class:`.ORMOption` classes that affect the compilation of + a SQL query and therefore need to be part of the cache key. + + .. 
note:: :class:`.CompileStateOption` is generally non-public and + should not be used as a base class for user-defined options; instead, + use :class:`.UserDefinedOption`, which is easier to use as it does not + interact with ORM compilation internals or caching. + + :class:`.CompileStateOption` defines an internal attribute + ``_is_compile_state=True`` which has the effect of the ORM compilation + routines for SELECT and other statements will call upon these options when + a SQL string is being compiled. As such, these classes implement + :class:`.HasCacheKey` and need to provide robust ``_cache_key_traversal`` + structures. + + The :class:`.CompileStateOption` class is used to implement the ORM + :class:`.LoaderOption` and :class:`.CriteriaOption` classes. + + .. versionadded:: 1.4.28 + """ _is_compile_state = True + def process_compile_state(self, compile_state): + """Apply a modification to a given :class:`.CompileState`.""" + + def process_compile_state_replaced_entities( + self, compile_state, mapper_entities + ): + """Apply a modification to a given :class:`.CompileState`, + given entities that were replaced by with_only_columns() or + with_entities(). + + .. versionadded:: 1.4.19 + + """ + + +class LoaderOption(CompileStateOption): + """Describe a loader modification to an ORM statement at compilation time. + + .. versionadded:: 1.4 + + """ + def process_compile_state_replaced_entities( self, compile_state, mapper_entities ): @@ -780,7 +859,7 @@ def process_compile_state(self, compile_state): """Apply a modification to a given :class:`.CompileState`.""" -class CriteriaOption(ORMOption): +class CriteriaOption(CompileStateOption): """Describe a WHERE criteria modification to an ORM statement at compilation time. @@ -788,7 +867,6 @@ class CriteriaOption(ORMOption): """ - _is_compile_state = True _is_criteria_option = True def process_compile_state(self, compile_state): diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index bbad98144d2..dc700421fed 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -758,7 +758,7 @@ def _instance_processor( # test.orm.inheritance.test_basic -> # EagerTargetingTest.test_adapt_stringency # OptimizedLoadTest.test_column_expression_joined - # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa E501 + # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa: E501 # adapted_col = adapter.columns[col] diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 4de12b88c77..ffd131a886b 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -37,6 +37,7 @@ from .interfaces import MapperProperty from .interfaces import ORMEntityColumnsClauseRole from .interfaces import ORMFromClauseRole +from .interfaces import StrategizedProperty from .path_registry import PathRegistry from .. import event from .. import exc as sa_exc @@ -91,35 +92,14 @@ class Mapper( sql_base.MemoizedHasCacheKey, InspectionAttr, ): - """Define the correlation of class attributes to database table - columns. 
+ """Defines an association between a Python class and a database table or + other relational structure, so that ORM operations against the class may + proceed. - The :class:`_orm.Mapper` object is instantiated using the - :func:`~sqlalchemy.orm.mapper` function. For information + The :class:`_orm.Mapper` object is instantiated using mapping methods + present on the :class:`_orm.registry` object. For information about instantiating new :class:`_orm.Mapper` objects, see - that function's documentation. - - - When :func:`.mapper` is used - explicitly to link a user defined class with table - metadata, this is referred to as *classical mapping*. - Modern SQLAlchemy usage tends to favor the - :mod:`sqlalchemy.ext.declarative` extension for class - configuration, which - makes usage of :func:`.mapper` behind the scenes. - - Given a particular class known to be mapped by the ORM, - the :class:`_orm.Mapper` which maintains it can be acquired - using the :func:`_sa.inspect` function:: - - from sqlalchemy import inspect - - mapper = inspect(MyClass) - - A class which was mapped by the :mod:`sqlalchemy.ext.declarative` - extension will also have its mapper available via the ``__mapper__`` - attribute. - + :ref:`orm_mapping_classes_toplevel`. """ @@ -225,10 +205,29 @@ class will overwrite all data within object instances that already :param column_prefix: A string which will be prepended to the mapped attribute name when :class:`_schema.Column` objects are automatically assigned as attributes to the - mapped class. Does not affect explicitly specified - column-based properties. - - See the section :ref:`column_prefix` for an example. + mapped class. Does not affect :class:`.Column` objects that + are mapped explicitly in the :paramref:`.mapper.properties` + dictionary. + + This parameter is typically useful with imperative mappings + that keep the :class:`.Table` object separate. Below, assuming + the ``user_table`` :class:`.Table` object has columns named + ``user_id``, ``user_name``, and ``password``:: + + class User(Base): + __table__ = user_table + __mapper_args__ = {'column_prefix':'_'} + + The above mapping will assign the ``user_id``, ``user_name``, and + ``password`` columns to attributes named ``_user_id``, + ``_user_name``, and ``_password`` on the mapped ``User`` class. + + The :paramref:`.mapper.column_prefix` parameter is uncommon in + modern use. For dealing with reflected tables, a more flexible + approach to automating a naming scheme is to intercept the + :class:`.Column` objects as they are reflected; see the section + :ref:`mapper_automated_reflection_schemes` for notes on this usage + pattern. :param concrete: If True, indicates this mapper should use concrete table inheritance with its parent mapper. @@ -322,7 +321,7 @@ class will overwrite all data within object instances that already mapping of the class to an alternate selectable, for loading only. - .. seealso:: + .. seealso:: :ref:`relationship_aliased_class` - the new pattern that removes the need for the :paramref:`_orm.Mapper.non_primary` flag. @@ -516,12 +515,21 @@ def set_identity(instance, *arg, **kw): based on all those :class:`.MapperProperty` instances declared in the declared class body. + .. seealso:: + + :ref:`orm_mapping_properties` - in the + :ref:`orm_mapping_classes_toplevel` + :param primary_key: A list of :class:`_schema.Column` objects which define the primary key to be used against this mapper's selectable unit. This is normally simply the primary key of the ``local_table``, but can be overridden here. 
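As a brief sketch of overriding the mapper-level primary key described above, assuming a declarative ``Base`` and a table that declares no schema-level primary key constraint::

    from sqlalchemy import Column, String


    class GroupUsers(Base):
        __tablename__ = "group_users"

        user_id = Column(String(40))
        group_id = Column(String(40))

        # tell the mapper which columns act as the primary key, since
        # the table itself declares none
        __mapper_args__ = {"primary_key": [user_id, group_id]}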
+ .. seealso:: + + :ref:`mapper_primary_key` - background and example use + :param version_id_col: A :class:`_schema.Column` that will be used to keep a running version id of rows in the table. This is used to detect concurrent updates or @@ -1071,10 +1079,27 @@ def _configure_inheritance(self): else: self.persist_selectable = self.local_table - if self.polymorphic_identity is not None and not self.concrete: - self._identity_class = self.inherits._identity_class - else: + if self.polymorphic_identity is None: + self._identity_class = self.class_ + + if self.inherits.base_mapper.polymorphic_on is not None: + util.warn( + "Mapper %s does not indicate a polymorphic_identity, " + "yet is part of an inheritance hierarchy that has a " + "polymorphic_on column of '%s'. Objects of this type " + "cannot be loaded polymorphically which can lead to " + "degraded or incorrect loading behavior in some " + "scenarios. Please establish a polmorphic_identity " + "for this class, or leave it un-mapped. " + "To omit mapping an intermediary class when using " + "declarative, set the '__abstract__ = True' " + "attribute on that class." + % (self, self.inherits.base_mapper.polymorphic_on) + ) + elif self.concrete: self._identity_class = self.class_ + else: + self._identity_class = self.inherits._identity_class if self.version_id_col is None: self.version_id_col = self.inherits.version_id_col @@ -1251,6 +1276,11 @@ def _configure_class_instrumentation(self): if manager is not None: assert manager.class_ is self.class_ if manager.is_mapped: + # changed in #7579: + # this message is defined in two places as of this change, + # also in decl_api -> _add_manager(). in 2.0, this codepath + # is removed as any calls to mapper() / Mapper without + # the registry setting up first will be rejected. raise sa_exc.ArgumentError( "Class '%s' already has a primary mapper defined. " % self.class_ @@ -1391,17 +1421,17 @@ def _configure_pks(self): # that of the inheriting (unless concrete or explicit) self.primary_key = self.inherits.primary_key else: - # determine primary key from argument or persist_selectable pks - - # reduce to the minimal set of columns + # determine primary key from argument or persist_selectable pks if self._primary_key_argument: - primary_key = sql_util.reduce_columns( - [ - self.persist_selectable.corresponding_column(c) - for c in self._primary_key_argument - ], - ignore_nonexistent_tables=True, - ) + primary_key = [ + self.persist_selectable.corresponding_column(c) + for c in self._primary_key_argument + ] else: + # if heuristically determined PKs, reduce to the minimal set + # of columns by eliminating FK->PK pairs for a multi-table + # expression. 
May over-reduce for some kinds of UNIONs + # / CTEs; use explicit PK argument for these special cases primary_key = sql_util.reduce_columns( self._pks_by_table[self.persist_selectable], ignore_nonexistent_tables=True, @@ -1430,19 +1460,14 @@ def _configure_pks(self): ) def _configure_properties(self): - # Column and other ClauseElement objects which are mapped - # TODO: technically this should be a DedupeColumnCollection - # however DCC needs changes and more tests to fully cover - # storing columns under a separate key name + # TODO: consider using DedupeColumnCollection self.columns = self.c = sql_base.ColumnCollection() # object attribute names mapped to MapperProperty objects self._props = util.OrderedDict() - # table columns mapped to lists of MapperProperty objects - # using a list allows a single column to be defined as - # populating multiple object attributes + # table columns mapped to MapperProperty self._columntoproperty = _ColumnMapping(self) # load custom properties @@ -1775,7 +1800,7 @@ def _configure_property(self, key, prop, init=True, setparent=True): col.key = col._tq_key_label = key self.columns.add(col, key) - for col in prop.columns + prop._orig_columns: + for col in prop.columns: for col in col.proxy_set: self._columntoproperty[col] = prop @@ -2111,12 +2136,47 @@ def _selectable_from_mappers(self, mappers, innerjoin): @HasMemoized.memoized_attribute def _single_table_criterion(self): if self.single and self.inherits and self.polymorphic_on is not None: - return self.polymorphic_on._annotate({"parentmapper": self}).in_( - m.polymorphic_identity for m in self.self_and_descendants - ) + return self.polymorphic_on._annotate( + {"parententity": self, "parentmapper": self} + ).in_(m.polymorphic_identity for m in self.self_and_descendants) else: return None + @HasMemoized.memoized_attribute + def _should_select_with_poly_adapter(self): + """determine if _MapperEntity or _ORMColumnEntity will need to use + polymorphic adaption when setting up a SELECT as well as fetching + rows for mapped classes and subclasses against this Mapper. + + moved here from context.py for #8456 to generalize the ruleset + for this condition. + + """ + + # this has been simplified as of #8456. + # rule is: if we have a with_polymorphic or a concrete-style + # polymorphic selectable, *or* if the base mapper has either of those, + # we turn on the adaption thing. if not, we do *no* adaption. + # + # this splits the behavior among the "regular" joined inheritance + # and single inheritance mappers, vs. the "weird / difficult" + # concrete and joined inh mappings that use a with_polymorphic of + # some kind or polymorphic_union. + # + # note we have some tests in test_polymorphic_rel that query against + # a subclass, then refer to the superclass that has a with_polymorphic + # on it (such as test_join_from_polymorphic_explicit_aliased_three). + # these tests actually adapt the polymorphic selectable (like, the + # UNION or the SELECT subquery with JOIN in it) to be just the simple + # subclass table. Hence even if we are a "plain" inheriting mapper + # but our base has a wpoly on it, we turn on adaption. + return ( + self.with_polymorphic + or self._requires_row_aliasing + or self.base_mapper.with_polymorphic + or self.base_mapper._requires_row_aliasing + ) + @HasMemoized.memoized_attribute def _with_polymorphic_mappers(self): self._check_configure() @@ -2515,6 +2575,24 @@ class in which it first appeared. 
dict(self.class_manager._all_sqla_attributes()) ) + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _pk_synonyms(self): + """return a dictionary of {syn_attribute_name: pk_attr_name} for + all synonyms that refer to primary key columns + + """ + descriptor_props = util.preloaded.orm_descriptor_props + + pk_keys = {prop.key for prop in self._identity_key_props} + + return { + syn.key: syn.name + for k, syn in self._props.items() + if isinstance(syn, descriptor_props.SynonymProperty) + and syn.name in pk_keys + } + @HasMemoized.memoized_attribute @util.preload_module("sqlalchemy.orm.descriptor_props") def synonyms(self): @@ -3024,23 +3102,28 @@ def visit_binary(binary): allconds = [] + start = False + + # as of #7507, from the lowest base table on upwards, + # we include all intermediary tables. + + for mapper in reversed(list(self.iterate_to_root())): + if mapper.local_table in tables: + start = True + elif not isinstance(mapper.local_table, expression.TableClause): + return None + if start and not mapper.single: + allconds.append(mapper.inherit_condition) + tables.add(mapper.local_table) + + # only the bottom table needs its criteria to be altered to fit + # the primary key ident - the rest of the tables upwards to the + # descendant-most class should all be present and joined to each + # other. try: - start = False - for mapper in reversed(list(self.iterate_to_root())): - if mapper.local_table in tables: - start = True - elif not isinstance( - mapper.local_table, expression.TableClause - ): - return None - if start and not mapper.single: - allconds.append( - visitors.cloned_traverse( - mapper.inherit_condition, - {}, - {"binary": visit_binary}, - ) - ) + allconds[0] = visitors.cloned_traverse( + allconds[0], {}, {"binary": visit_binary} + ) except _OptGetColumnsNotAvailable: return None @@ -3100,19 +3183,33 @@ def _subclass_load_via_in(self, entity): assert self.inherits - polymorphic_prop = self._columntoproperty[self.polymorphic_on] - keep_props = set([polymorphic_prop] + self._identity_key_props) + if self.polymorphic_on is not None: + polymorphic_prop = self._columntoproperty[self.polymorphic_on] + keep_props = set([polymorphic_prop] + self._identity_key_props) + else: + keep_props = set(self._identity_key_props) disable_opt = strategy_options.Load(entity) enable_opt = strategy_options.Load(entity) for prop in self.attrs: + + # skip prop keys that are not instrumented on the mapped class. + # this is primarily the "_sa_polymorphic_on" property that gets + # created for an ad-hoc polymorphic_on SQL expression, issue #8704 + if prop.key not in self.class_manager: + continue + if prop.parent is self or prop in keep_props: # "enable" options, to turn on the properties that we want to # load by default (subject to options from the query) + if not isinstance(prop, StrategizedProperty): + continue + enable_opt.set_generic_strategy( # convert string name to an attribute before passing - # to loader strategy + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. (getattr(entity.entity_namespace, prop.key),), dict(prop.strategy_key), ) @@ -3122,7 +3219,8 @@ def _subclass_load_via_in(self, entity): # the options from the query to override them disable_opt.set_generic_strategy( # convert string name to an attribute before passing - # to loader strategy + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. 
(getattr(entity.entity_namespace, prop.key),), {"do_nothing": True}, ) @@ -3525,6 +3623,12 @@ def reconstructor(fn): method that will be called by the ORM after the instance has been loaded from the database or otherwise reconstituted. + .. tip:: + + The :func:`_orm.reconstructor` decorator makes use of the + :meth:`_orm.InstanceEvents.load` event hook, which can be + used directly. + The reconstructor will be invoked with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded @@ -3535,8 +3639,6 @@ def reconstructor(fn): .. seealso:: - :ref:`mapping_constructors` - :meth:`.InstanceEvents.load` """ @@ -3622,6 +3724,7 @@ class _ColumnMapping(dict): __slots__ = ("mapper",) def __init__(self, mapper): + # TODO: weakref would be a good idea here self.mapper = mapper def __missing__(self, column): diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index 6bebbd006e1..a2391474470 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -120,7 +120,7 @@ def pairs(self): def contains_mapper(self, mapper): for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]: - if path_mapper.is_mapper and path_mapper.isa(mapper): + if path_mapper.mapper.isa(mapper): return True else: return False diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 4ba1917f63f..1e7b2b1cc0a 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -40,6 +40,7 @@ from ..sql.base import CompileState from ..sql.base import Options from ..sql.dml import DeleteDMLState +from ..sql.dml import InsertDMLState from ..sql.dml import UpdateDMLState from ..sql.elements import BooleanClauseList from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL @@ -1177,6 +1178,22 @@ def _emit_insert_statements( c.inserted_primary_key_rows, c.returned_defaults_rows or (), ): + if inserted_primary_key is None: + # this is a real problem and means that we didn't + # get back as many PK rows. we can't continue + # since this indicates PK rows were missing, which + # means we likely mis-populated records starting + # at that point with incorrectly matched PK + # values. + raise orm_exc.FlushError( + "Multi-row INSERT statement for %s did not " + "produce " + "the correct number of INSERTed rows for " + "RETURNING. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." % mapper_rec + ) + for pk, col in zip( inserted_primary_key, mapper._pks_by_table[table], @@ -1225,6 +1242,15 @@ def _emit_insert_statements( ) primary_key = result.inserted_primary_key + if primary_key is None: + raise orm_exc.FlushError( + "Single-row INSERT statement for %s " + "did not produce a " + "new primary key result " + "being invoked. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." 
% (mapper_rec,) + ) for pk, col in zip( primary_key, mapper._pks_by_table[table] ): @@ -2112,8 +2138,92 @@ def skip_for_full_returning(orm_context): } +class ORMDMLState: + @classmethod + def get_entity_description(cls, statement): + ext_info = statement.table._annotations["parententity"] + mapper = ext_info.mapper + if ext_info.is_aliased_class: + _label_name = ext_info.name + else: + _label_name = mapper.class_.__name__ + + return { + "name": _label_name, + "type": mapper.class_, + "expr": ext_info.entity, + "entity": ext_info.entity, + "table": mapper.local_table, + } + + @classmethod + def get_returning_column_descriptions(cls, statement): + def _ent_for_col(c): + return c._annotations.get("parententity", None) + + def _attr_for_col(c, ent): + if ent is None: + return c + proxy_key = c._annotations.get("proxy_key", None) + if not proxy_key: + return c + else: + return getattr(ent.entity, proxy_key, c) + + return [ + { + "name": c.key, + "type": c.type, + "expr": _attr_for_col(c, ent), + "aliased": ent.is_aliased_class, + "entity": ent.entity, + } + for c, ent in [ + (c, _ent_for_col(c)) for c in statement._all_selected_columns + ] + ] + + +@CompileState.plugin_for("orm", "insert") +class ORMInsert(ORMDMLState, InsertDMLState): + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_reentrant_invoke, + ): + bind_arguments["clause"] = statement + try: + plugin_subject = statement._propagate_attrs["plugin_subject"] + except KeyError: + assert False, "statement had 'orm' plugin but no plugin_subject" + else: + bind_arguments["mapper"] = plugin_subject.mapper + + return ( + statement, + util.immutabledict(execution_options), + ) + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + return result + + @CompileState.plugin_for("orm", "update") -class BulkORMUpdate(UpdateDMLState, BulkUDCompileState): +class BulkORMUpdate(ORMDMLState, UpdateDMLState, BulkUDCompileState): @classmethod def create_for_statement(cls, statement, compiler, **kw): @@ -2331,7 +2441,7 @@ def _do_post_synchronize_fetch(cls, session, result, update_options): @CompileState.plugin_for("orm", "delete") -class BulkORMDelete(DeleteDMLState, BulkUDCompileState): +class BulkORMDelete(ORMDMLState, DeleteDMLState, BulkUDCompileState): @classmethod def create_for_statement(cls, statement, compiler, **kw): self = cls.__new__(cls) diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index fa230d10930..e701dea7d67 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -20,7 +20,6 @@ from .interfaces import PropComparator from .interfaces import StrategizedProperty from .relationships import RelationshipProperty -from .util import _orm_full_deannotate from .. import log from .. 
import util from ..sql import coercions @@ -49,7 +48,6 @@ class ColumnProperty(StrategizedProperty): _links_to_entity = False __slots__ = ( - "_orig_columns", "columns", "group", "deferred", @@ -155,14 +153,8 @@ def __init__(self, *columns, **kwargs): """ super(ColumnProperty, self).__init__() - self._orig_columns = [ - coercions.expect(roles.LabeledColumnExprRole, c) for c in columns - ] self.columns = [ - coercions.expect( - roles.LabeledColumnExprRole, _orm_full_deannotate(c) - ) - for c in columns + coercions.expect(roles.LabeledColumnExprRole, c) for c in columns ] self.group = kwargs.pop("group", None) self.deferred = kwargs.pop("deferred", False) @@ -205,6 +197,9 @@ def __init__(self, *columns, **kwargs): self.strategy_key += (("raiseload", True),) def _memoized_attr__renders_in_subqueries(self): + if ("query_expression", True) in self.strategy_key: + return self.strategy._have_default_expression + return ("deferred", True) not in self.strategy_key or ( self not in self.parent._readonly_props ) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index bd897211cad..9c0a2c17445 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -511,7 +511,7 @@ def cte(self, name=None, recursive=False, nesting=False): Here is the `PostgreSQL WITH RECURSIVE example - `_. + `_. Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias of it are Core selectables, which means the columns are accessed via the ``.c.`` attribute. The @@ -850,6 +850,10 @@ def yield_per(self, count): level. See the section :ref:`orm_queryguide_yield_per` for further background on this option. + .. seealso:: + + :ref:`orm_queryguide_yield_per` + """ self.load_options += {"_yield_per": count} @@ -1557,12 +1561,21 @@ def execution_options(self, **kwargs): automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` method or execution option is used. + .. versionadded:: 1.4 - added ORM options to + :meth:`_orm.Query.execution_options` + The execution options may also be specified on a per execution basis when using :term:`2.0 style` queries via the :paramref:`_orm.Session.execution_options` parameter. - .. versionadded:: 1.4 - added ORM options to - :meth:`_orm.Query.execution_options` + .. warning:: The + :paramref:`_engine.Connection.execution_options.stream_results` + parameter should not be used at the level of individual ORM + statement executions, as the :class:`_orm.Session` will not track + objects from different schema translate maps within a single + session. For multiple schema translate maps within the scope of a + single :class:`_orm.Session`, see :ref:`examples_sharding`. + .. seealso:: @@ -1707,7 +1720,7 @@ def filter(self, *criterion): self._where_criteria += (criterion,) @util.memoized_property - def _last_joined_entity(self): + def _last_joined_entity(self): # noqa: F811 if self._legacy_setup_joins: return _legacy_determine_last_joined_entity( self._legacy_setup_joins, self._entity_from_pre_ent_zero() @@ -1806,9 +1819,10 @@ def order_by(self, *clauses): q = session.query(Entity).order_by(Entity.id, Entity.name) - All existing ORDER BY criteria may be cancelled by passing - ``None`` by itself. 
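A minimal sketch of the :meth:`_orm.Query.yield_per` batching referenced above, assuming an existing ``session`` and mapped ``User`` class (``handle()`` is a hypothetical stand-in)::

    for user in session.query(User).yield_per(100):
        # rows are buffered and delivered in batches of 100
        handle(user)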
New ORDER BY criteria may then be added by - invoking :meth:`_orm.Query.order_by` again, e.g.:: + Calling this method multiple times is equivalent to calling it once + with all the clauses concatenated. All existing ORDER BY criteria may + be cancelled by passing ``None`` by itself. New ORDER BY criteria may + then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: # will erase all ORDER BY and ORDER BY new_col alone q = q.order_by(None).order_by(new_col) @@ -2884,7 +2898,15 @@ def scalar(self): return None def __iter__(self): - return self._iter().__iter__() + result = self._iter() + try: + for row in result: + yield row + except GeneratorExit: + # issue #8710 - direct iteration is not re-usable after + # an iterable block is broken, so close the result + result._soft_close() + raise def _iter(self): # new style execution. @@ -2964,6 +2986,15 @@ def column_descriptions(self): } ] + .. seealso:: + + This API is available using :term:`2.0 style` queries as well, + documented at: + + * :ref:`queryguide_inspection` + + * :attr:`.Select.column_descriptions` + """ return _column_descriptions(self, legacy=True) @@ -3221,9 +3252,8 @@ def update(self, values, synchronize_session="evaluate", update_args=None): :param values: a dictionary with attributes names, or alternatively mapped attributes or SQL expressions, as keys, and literal values or sql expressions as values. If :ref:`parameter-ordered - mode ` is desired, the values can be - passed as a list of 2-tuples; - this requires that the + mode ` is desired, the values can + be passed as a list of 2-tuples; this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag is passed to the :paramref:`.Query.update.update_args` dictionary as well. @@ -3425,6 +3455,8 @@ def __init__(self, alias): """ + inherit_cache = False + def process_compile_state(self, compile_state): pass diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index d021ac9a298..a9a30a5ffea 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -21,6 +21,7 @@ from . import attributes from .base import _is_mapped_class +from .base import PASSIVE_MERGE from .base import state_str from .interfaces import MANYTOMANY from .interfaces import MANYTOONE @@ -212,7 +213,7 @@ class Parent(Base): :ref:`relationship_config_toplevel` - Full introductory and reference documentation for :func:`_orm.relationship`. - :ref:`orm_tutorial_relationship` - ORM tutorial introduction. + :ref:`tutorial_orm_related_objects` - ORM tutorial introduction. :param argument: A mapped class, or actual :class:`_orm.Mapper` instance, @@ -279,9 +280,6 @@ class name or dotted package-qualified name. :ref:`relationships_many_to_many` - Reference example of "many to many". - :ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to - many-to-many relationships. - :ref:`self_referential_many_to_many` - Specifics on using many-to-many in a self-referential case. @@ -314,41 +312,51 @@ class name or dotted package-qualified name. the "previous" value of the attribute. :param backref: - Indicates the string name of a property to be placed on the related - mapper's class that will handle this relationship in the other - direction. 
The other property will be created automatically - when the mappers are configured. Can also be passed as a - :func:`.backref` object to control the configuration of the - new relationship. + A reference to a string relationship name, or a :func:`_orm.backref` + construct, which will be used to automatically generate a new + :func:`_orm.relationship` on the related class, which then refers to + this one using a bi-directional + :paramref:`_orm.relationship.back_populates` configuration. + + In modern Python, explicit use of :func:`_orm.relationship` with + :paramref:`_orm.relationship.back_populates` should be preferred, as + it is more robust in terms of mapper configuration as well as more + conceptually straightforward. It also integrates with new :pep:`484` + typing features introduced in SQLAlchemy 2.0 which is not possible + with dynamically generated attributes. .. seealso:: - :ref:`relationships_backref` - Introductory documentation and - examples. + :ref:`relationships_backref` - notes on using + :paramref:`_orm.relationship.backref` - :paramref:`_orm.relationship.back_populates` - alternative form - of backref specification. + :ref:`tutorial_orm_related_objects` - in the + :ref:`unified_tutorial`, presents an overview of bi-directional + relationship configuration and behaviors using + :paramref:`_orm.relationship.back_populates` - :func:`.backref` - allows control over :func:`_orm.relationship` - configuration when using :paramref:`_orm.relationship.backref`. + :func:`.backref` - allows control over :func:`_orm.relationship` + configuration when using :paramref:`_orm.relationship.backref`. :param back_populates: - Takes a string name and has the same meaning as - :paramref:`_orm.relationship.backref`, except the complementing - property is **not** created automatically, and instead must be - configured explicitly on the other mapper. The complementing - property should also indicate - :paramref:`_orm.relationship.back_populates` to this relationship to - ensure proper functioning. + Indicates the name of a :func:`_orm.relationship` on the related + class that will be synchronized with this one. It is usually + expected that the :func:`_orm.relationship` on the related class + also refer to this one. This allows objects on both sides of + each :func:`_orm.relationship` to synchronize in-Python state + changes and also provides directives to the :term:`unit of work` + flush process how changes along these relationships should + be persisted. .. seealso:: - :ref:`relationships_backref` - Introductory documentation and - examples. + :ref:`tutorial_orm_related_objects` - in the + :ref:`unified_tutorial`, presents an overview of bi-directional + relationship configuration and behaviors. - :paramref:`_orm.relationship.backref` - alternative form - of backref specification. + :ref:`relationship_patterns` - includes many examples of + :paramref:`_orm.relationship.back_populates`. :param overlaps: A string name or comma-delimited set of names of other relationships @@ -367,20 +375,11 @@ class name or dotted package-qualified name. :ref:`error_qzyx` - usage example :param bake_queries=True: - Enable :ref:`lambda caching ` for loader - strategies, if applicable, which adds a performance gain to the - construction of SQL constructs used by loader strategies, in addition - to the usual SQL statement caching used throughout SQLAlchemy. This - parameter currently applies only to the "lazy" and "selectin" loader - strategies. There is generally no reason to set this parameter to - False. 
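A minimal sketch of the explicit :paramref:`_orm.relationship.back_populates` configuration recommended above, assuming a declarative ``Base``::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.orm import relationship


    class Parent(Base):
        __tablename__ = "parent"

        id = Column(Integer, primary_key=True)
        children = relationship("Child", back_populates="parent")


    class Child(Base):
        __tablename__ = "child"

        id = Column(Integer, primary_key=True)
        parent_id = Column(ForeignKey("parent.id"))
        parent = relationship("Parent", back_populates="children")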
+ Legacy parameter, not used. - .. versionchanged:: 1.4 Relationship loaders no longer use the - previous "baked query" system of query caching. The "lazy" - and "selectin" loaders make use of the "lambda cache" system - for the construction of SQL constructs, - as well as the usual SQL caching system that is throughout - SQLAlchemy as of the 1.4 series. + .. versionchanged:: 1.4.23 the "lambda caching" system is no longer + used by loader strategies and the ``bake_queries`` parameter + has no effect. :param cascade: A comma-separated list of cascade rules which determines how @@ -401,9 +400,6 @@ class name or dotted package-qualified name. :ref:`unitofwork_cascades` - Full detail on each of the available cascade options. - :ref:`tutorial_delete_cascade` - Tutorial example describing - a delete cascade. - :param cascade_backrefs=True: A boolean value indicating if the ``save-update`` cascade should operate along an assignment event intercepted by a backref. @@ -762,9 +758,7 @@ class name or dotted package-qualified name. :param post_update: This indicates that the relationship should be handled by a second UPDATE statement after an INSERT or before a - DELETE. Currently, it also will issue an UPDATE after the - instance was UPDATEd as well, although this technically should - be improved. This flag is used to handle saving bi-directional + DELETE. This flag is used to handle saving bi-directional dependencies between two individual rows (i.e. each row references the other), where it would otherwise be impossible to INSERT or DELETE both rows fully since one row exists before the @@ -1053,7 +1047,7 @@ class name or dotted package-qualified name. if cascade is not False: self.cascade = cascade elif self.viewonly: - self.cascade = "none" + self.cascade = "merge" else: self.cascade = "save-update, merge" @@ -1544,7 +1538,7 @@ def contains(self, other, **kwargs): See :meth:`~.RelationshipProperty.Comparator.any` for a less-performant alternative using EXISTS, or refer to :meth:`_query.Query.outerjoin` - as well as :ref:`ormtutorial_joins` + as well as :ref:`orm_queryguide_joins` for more details on constructing outer joins. kwargs may be ignored by this operator but are required for API @@ -1909,7 +1903,9 @@ def merge( # map for those already present. # also assumes CollectionAttributeImpl behavior of loading # "old" list in any case - dest_state.get_impl(self.key).get(dest_state, dest_dict) + dest_state.get_impl(self.key).get( + dest_state, dest_dict, passive=PASSIVE_MERGE + ) dest_list = [] for current in instances_iterable: @@ -1934,7 +1930,11 @@ def merge( coll.append_without_event(c) else: dest_state.get_impl(self.key).set( - dest_state, dest_dict, dest_list, _adapt=False + dest_state, + dest_dict, + dest_list, + _adapt=False, + passive=PASSIVE_MERGE, ) else: current = source_dict[self.key] @@ -3200,6 +3200,22 @@ def _check_remote_side(self): "condition that are on the remote side of " "the relationship." % (self.prop,) ) + else: + + not_target = util.column_set( + self.parent_persist_selectable.c + ).difference(self.child_persist_selectable.c) + + for _, rmt in self.local_remote_pairs: + if rmt in not_target: + util.warn( + "Expression %s is marked as 'remote', but these " + "column(s) are local to the local side. The " + "remote() annotation is needed only for a " + "self-referential relationship where both sides " + "of the relationship refer to the same tables." 
+ % (rmt,) + ) def _check_foreign_cols(self, join_condition, primary): """Check the foreign key columns collected and emit error diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index df3012df1e4..5be2a82cf5a 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -125,11 +125,11 @@ class scoped_session(ScopedSessionMixin): See :ref:`unitofwork_contextual` for a tutorial. - ..warning:: + .. note:: - When using :ref:`asyncio_toplevel` the async - version :class:`_asyncio.async_scoped_session` should be - used instead. + When using :ref:`asyncio_toplevel`, the async-compatible + :class:`_asyncio.async_scoped_session` class should be + used in place of :class:`.scoped_session`. """ diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index bb12f7021d6..7caf8bca4cd 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -347,7 +347,10 @@ def update_execution_options(self, **opts): def _orm_compile_options(self): if not self.is_select: return None - opts = self.statement._compile_options + try: + opts = self.statement._compile_options + except AttributeError: + return None if opts.isinstance(context.ORMCompileState.default_compile_options): return opts else: @@ -939,6 +942,9 @@ def _transaction_is_active(self): def _transaction_is_closed(self): return self._state is CLOSED + def _rollback_can_be_called(self): + return self._state not in (COMMITTED, CLOSED) + class Session(_SessionClassMethods): """Manages persistence operations for ORM-mapped objects. @@ -1000,6 +1006,10 @@ def __init__( :meth:`~.Session.flush` are rarely needed; you usually only need to call :meth:`~.Session.commit` (which flushes) to finalize changes. + .. seealso:: + + :ref:`session_flushing` - additional background on autoflush + :param bind: An optional :class:`_engine.Engine` or :class:`_engine.Connection` to which this ``Session`` should be bound. When specified, all SQL @@ -1394,8 +1404,22 @@ def rollback(self): def commit(self): """Flush pending changes and commit the current transaction. - If no transaction is in progress, the method will first - "autobegin" a new transaction and commit. + When the COMMIT operation is complete, all objects are fully + :term:`expired`, erasing their internal contents, which will be + automatically re-loaded when the objects are next accessed. In the + interim, these objects are in an expired state and will not function if + they are :term:`detached` from the :class:`.Session`. Additionally, + this re-load operation is not supported when using asyncio-oriented + APIs. The :paramref:`.Session.expire_on_commit` parameter may be used + to disable this behavior. 
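A minimal sketch of disabling the expire-on-commit behavior described above; ``engine`` and ``some_object`` are assumed to exist::

    from sqlalchemy.orm import Session

    with Session(engine, expire_on_commit=False) as session:
        session.add(some_object)
        session.commit()

        # attributes remain loaded; no refresh SELECT is emitted here
        print(some_object.name)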
+ + When there is no transaction in place for the :class:`.Session`, + indicating that no operations were invoked on this :class:`.Session` + since the previous call to :meth:`.Session.commit`, the method will + begin and commit an internal-only "logical" transaction, that does not + normally affect the database unless pending flush changes were + detected, but will still invoke event handlers and object expiration + rules. If :term:`1.x-style` use is in effect and there are currently SAVEPOINTs in progress via :meth:`_orm.Session.begin_nested`, @@ -1420,6 +1444,8 @@ def commit(self): :ref:`unitofwork_transaction` + :ref:`asyncio_orm_avoid_lazyloads` + """ if self._transaction is None: if not self._autobegin(): @@ -1616,6 +1642,8 @@ def execute( bind_arguments.update(kw) elif not bind_arguments: bind_arguments = {} + else: + bind_arguments = dict(bind_arguments) if ( statement._propagate_attrs.get("compile_state_plugin", None) @@ -1741,7 +1769,9 @@ def scalars( :return: a :class:`_result.ScalarResult` object - .. versionadded:: 1.4.24 + .. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars` + + .. versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars` """ @@ -2580,13 +2610,28 @@ def _remove_newly_deleted(self, states): persistent_to_deleted(self, state) def add(self, instance, _warn=True): - """Place an object in the ``Session``. + """Place an object into this :class:`_orm.Session`. + + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. - Its state will be persisted to the database on the next flush - operation. + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. - Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` - is ``expunge()``. + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` """ if _warn and self._warn_on_events: @@ -2603,7 +2648,18 @@ def add(self, instance, _warn=True): self._save_or_update_state(state) def add_all(self, instances): - """Add the given collection of instances to this ``Session``.""" + """Add the given collection of instances to this :class:`_orm.Session`. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + """ if self._warn_on_events: self._flush_warning("Session.add_all()") @@ -2624,7 +2680,22 @@ def _save_or_update_state(self, state): def delete(self, instance): """Mark an instance as deleted. - The database delete operation occurs upon ``flush()``. + The object is assumed to be either :term:`persistent` or + :term:`detached` when passed; after the method is called, the + object will remain in the :term:`persistent` state until the next + flush proceeds. During this time, the object will also be a member + of the :attr:`_orm.Session.deleted` collection. 
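A minimal sketch of the delete / flush / commit sequence described above, assuming an existing ``session`` and a persistent ``some_user`` object::

    session.delete(some_user)            # still persistent for the moment
    assert some_user in session.deleted  # scheduled for deletion
    session.flush()                      # DELETE emitted; object is now "deleted"
    session.commit()                     # object becomes detached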
+ + When the next flush proceeds, the object will move to the + :term:`deleted` state, indicating a ``DELETE`` statement was emitted + for its row within the current transaction. When the transaction + is successfully committed, + the deleted object is moved to the :term:`detached` state and is + no longer present within this :class:`_orm.Session`. + + .. seealso:: + + :ref:`session_deleting` - at :ref:`session_basics` """ if self._warn_on_events: @@ -2682,6 +2753,7 @@ def get( populate_existing=False, with_for_update=None, identity_token=None, + execution_options=None, ): """Return an instance based on the given primary key identifier, or ``None`` if not found. @@ -2762,6 +2834,19 @@ def get( :meth:`_query.Query.with_for_update`. Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + :param execution_options: optional dictionary of execution options, + which will be associated with the query execution if one is emitted. + This dictionary can provide a subset of the options that are + accepted by :meth:`_engine.Connection.execution_options`, and may + also provide additional options understood only in an ORM context. + + .. versionadded:: 1.4.29 + + .. seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + :return: The object instance, or ``None``. """ @@ -2773,6 +2858,7 @@ def get( populate_existing=populate_existing, with_for_update=with_for_update, identity_token=identity_token, + execution_options=execution_options, ) def _get_impl( @@ -2812,6 +2898,21 @@ def _get_impl( ) if is_dict: + + pk_synonyms = mapper._pk_synonyms + + if pk_synonyms: + correct_keys = set(pk_synonyms).intersection( + primary_key_identity + ) + + if correct_keys: + primary_key_identity = dict(primary_key_identity) + for k in correct_keys: + primary_key_identity[ + pk_synonyms[k] + ] = primary_key_identity[k] + try: primary_key_identity = list( primary_key_identity[prop.key] @@ -2823,7 +2924,7 @@ def _get_impl( sa_exc.InvalidRequestError( "Incorrect names of values in identifier to formulate " "primary key for session.get(); primary key attribute " - "names are %s" + "names are %s (synonym names are also accepted)" % ",".join( "'%s'" % prop.key for prop in mapper._identity_key_props @@ -3113,6 +3214,9 @@ def _merge( if not load: # remove any history merged_state._commit_all(merged_dict, self.identity_map) + merged_state.manager.dispatch._sa_event_merge_wo_load( + merged_state, None + ) if new_instance: merged_state.manager.dispatch.load(merged_state, None) @@ -3511,23 +3615,19 @@ def bulk_save_objects( as an alternative newer mass-insert features such as :ref:`orm_dml_returning_objects`. - .. warning:: + .. legacy:: The bulk save feature allows for a lower-latency INSERT/UPDATE of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw INSERT/UPDATES of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. 
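A minimal sketch of the :meth:`.Session.get` additions described above; the mapped ``User`` class and the ``uid`` synonym are assumptions::

    # per-execution options applied to the SELECT that get() may emit
    user = session.get(User, 5, execution_options={"populate_existing": True})

    # a dictionary identity may also name a synonym of a primary key
    # attribute; here "uid" is assumed to be a synonym() of "id"
    same_user = session.get(User, {"uid": 5})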
+ In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** @@ -3584,14 +3684,24 @@ def bulk_save_objects( """ - def key(state): - return (state.mapper, state.key is not None) - obj_states = (attributes.instance_state(obj) for obj in objects) + if not preserve_order: - obj_states = sorted(obj_states, key=key) + # the purpose of this sort is just so that common mappers + # and persistence states are grouped together, so that groupby + # will return a single group for a particular type of mapper. + # it's not trying to be deterministic beyond that. + obj_states = sorted( + obj_states, + key=lambda state: (id(state.mapper), state.key is not None), + ) + + def grouping_key(state): + return (state.mapper, state.key is not None) - for (mapper, isupdate), states in itertools.groupby(obj_states, key): + for (mapper, isupdate), states in itertools.groupby( + obj_states, grouping_key + ): self._bulk_save_mappings( mapper, states, @@ -3622,23 +3732,19 @@ def bulk_insert_mappings( .. versionadded:: 1.0.0 - .. warning:: + .. legacy:: The bulk insert feature allows for a lower-latency INSERT of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw INSERT of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. + In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** @@ -3723,23 +3829,19 @@ def bulk_update_mappings(self, mapper, mappings): .. versionadded:: 1.0.0 - .. warning:: + .. legacy:: The bulk update feature allows for a lower-latency UPDATE of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw UPDATES of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. 
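A minimal usage sketch of :meth:`.Session.bulk_insert_mappings`, assuming an existing ``session`` and mapped ``User`` class::

    session.bulk_insert_mappings(
        User,
        [
            {"id": 1, "name": "spongebob"},
            {"id": 2, "name": "sandy"},
            {"id": 3, "name": "patrick"},
        ],
    )
    session.commit()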
+ In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 994cbe53e7b..6175dc69e7b 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -58,17 +58,19 @@ class InstanceState(interfaces.InspectionAttrInfo): >>> from sqlalchemy import inspect >>> insp = inspect(some_mapped_object) + >>> insp.attrs.nickname.history + History(added=['new nickname'], unchanged=(), deleted=['nickname']) .. seealso:: - :ref:`core_inspection_toplevel` + :ref:`orm_mapper_inspection_instancestate` """ session_id = None key = None runid = None - load_options = util.EMPTY_SET + load_options = () load_path = PathRegistry.root insert_order = None _strong_obj = None diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 2a283caad6e..a560899c9b3 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -27,6 +27,7 @@ from .base import _SET_DEFERRED_EXPIRED from .context import _column_descriptions from .context import ORMCompileState +from .context import ORMSelectCompileState from .context import QueryContext from .interfaces import LoaderStrategy from .interfaces import StrategizedProperty @@ -248,6 +249,7 @@ def create_row_processor( ): # look through list of columns represented here # to see which, if any, is present in the row. + for col in self.columns: if adapter: col = adapter.columns[col] @@ -382,7 +384,26 @@ def create_row_processor( # dictionary. Normally, the DeferredColumnLoader.setup_query() # sets up that data in the "memoized_populators" dictionary # and "create_row_processor()" here is never invoked. 
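A minimal usage sketch of :meth:`.Session.bulk_update_mappings`, assuming an existing ``session`` and mapped ``User`` class; each dictionary must carry the primary key value::

    session.bulk_update_mappings(
        User,
        [
            {"id": 1, "name": "spongebob squarepants"},
            {"id": 2, "name": "sandy cheeks"},
        ],
    )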
- if not self.is_class_level: + + if ( + context.refresh_state + and context.query._compile_options._only_load_props + and self.key in context.query._compile_options._only_load_props + ): + self.parent_property._get_strategy( + (("deferred", False), ("instrument", True)) + ).create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + elif not self.is_class_level: if self.raiseload: set_deferred_for_local_state = ( self.parent_property._raise_column_loader @@ -954,20 +975,18 @@ def _emit_lazyload( if state.load_options or (loadopt and loadopt._extra_criteria): effective_path = state.load_path[self.parent_property] - opts = list(state.load_options) + opts = tuple(state.load_options) if loadopt and loadopt._extra_criteria: use_get = False opts += ( orm_util.LoaderCriteriaOption(self.entity, extra_criteria), ) - stmt._with_options = opts else: # this path is used if there are not already any options # in the query, but an event may want to add them effective_path = state.mapper._path_registry[self.parent_property] - stmt._compile_options += {"_current_path": effective_path} if use_get: @@ -1782,6 +1801,11 @@ def create_row_processor( # the other post loaders, however we have this here for consistency elif self._check_recursive_postload(context, path, self.join_depth): return + elif not isinstance(context.compile_state, ORMSelectCompileState): + # issue 7505 - subqueryload() in 1.3 and previous would silently + # degrade for from_statement() without warning. this behavior + # is restored here + return if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( @@ -1941,6 +1965,9 @@ def setup_query( ) if user_defined_adapter is not False: + + # setup an adapter but dont create any JOIN, assume it's already + # in the query ( clauses, adapter, @@ -1952,6 +1979,11 @@ def setup_query( adapter, user_defined_adapter, ) + + # don't do "wrap" for multi-row, we want to wrap + # limited/distinct SELECT, + # because we want to put the JOIN on the outside. + else: # if not via query option, check for # a cycle @@ -1962,6 +1994,7 @@ def setup_query( elif path.contains_mapper(self.mapper): return + # add the JOIN and create an adapter ( clauses, adapter, @@ -1978,6 +2011,10 @@ def setup_query( chained_from_outerjoin, ) + # for multi-row, we want to wrap limited/distinct SELECT, + # because we want to put the JOIN on the outside. + compile_state.eager_adding_joins = True + with_poly_entity = path.get( compile_state.attributes, "path_with_polymorphic", None ) @@ -2349,6 +2386,11 @@ def _splice_nested_inner_join( self, path, join_obj, clauses, onclause, extra_criteria, splicing=False ): + # recursive fn to splice a nested join into an existing one. + # splicing=False means this is the outermost call, and it + # should return a value. 
splicing= is the recursive + # form, where it can return None to indicate the end of the recursion + if splicing is False: # first call is always handed a join object # from the outside @@ -2363,7 +2405,7 @@ def _splice_nested_inner_join( splicing, ) elif not isinstance(join_obj, orm_util._ORMJoin): - if path[-2] is splicing: + if path[-2].isa(splicing): return orm_util._ORMJoin( join_obj, clauses.aliased_class, @@ -2374,7 +2416,6 @@ def _splice_nested_inner_join( _extra_criteria=extra_criteria, ) else: - # only here if splicing == True return None target_join = self._splice_nested_inner_join( @@ -2397,7 +2438,7 @@ def _splice_nested_inner_join( ) if target_join is None: # should only return None when recursively called, - # e.g. splicing==True + # e.g. splicing refers to a from obj assert ( splicing is not False ), "assertion failed attempting to produce joined eager loads" @@ -2907,29 +2948,25 @@ def _load_for_path( # cached query, meaning it won't match on paths and loader lookups # and loaders like this one will be skipped if it is used in options. # - # Now we want to transfer loader options from the parent query to the - # "selectinload" query we're about to run. Which query do we transfer - # the options from? We use the cached query, because the options in - # that query will be in terms of the effective entity we were just - # handed. + # as it turns out, standard loader options like selectinload(), + # lazyload() that have a path need + # to come from the cached query so that the AliasedInsp etc. objects + # that are in the query line up with the object that's in the path + # of the strategy object. however other options like + # with_loader_criteria() that doesn't have a path (has a fixed entity) + # and needs to have access to the latest closure state in order to + # be correct, we need to use the uncached one. # - # But now the selectinload query we are running is *also* - # cached. What if it's cached and running from some previous iteration - # of that AliasedInsp? Well in that case it will also use the previous - # iteration of the loader options. If the query expires and - # gets generated again, it will be handed the current effective_entity - # and the current _with_options, again in terms of whatever - # compile_state.select_statement happens to be right now, so the - # query will still be internally consistent and loader callables - # will be correctly invoked. + # as of #8399 we let the loader option itself figure out what it + # wants to do given cached and uncached version of itself. effective_path = path[self.parent_property] if orig_query is context.query: - options = new_options = orig_query._with_options - user_defined_options = [] + new_options = orig_query._with_options else: - options = orig_query._with_options + cached_options = orig_query._with_options + uncached_options = context.query._with_options # propagate compile state options from the original query, # updating their "extra_criteria" as necessary. 
@@ -2937,20 +2974,13 @@ def _load_for_path( # "orig" options if extra_criteria is present, because the copy # of extra_criteria will have different boundparam than that of # the QueryableAttribute in the path - new_options = [ - orig_opt._adjust_for_extra_criteria(context) - if orig_opt._is_strategy_option - else orig_opt - for orig_opt in options - if orig_opt._is_compile_state or orig_opt._is_legacy_option - ] - - # propagate user defined options from the current query - user_defined_options = [ - opt - for opt in context.query._with_options - if not opt._is_compile_state and not opt._is_legacy_option + orig_opt._adapt_cached_option_to_uncached_option( + context, uncached_opt + ) + for orig_opt, uncached_opt in zip( + cached_options, uncached_options + ) ] if loadopt and loadopt._extra_criteria: @@ -2961,12 +2991,9 @@ def _load_for_path( ), ) - q = q.options(*new_options)._update_compile_options( - {"_current_path": effective_path} - ) - if user_defined_options: - q = q.options(*user_defined_options) + q = q.options(*new_options) + q = q._update_compile_options({"_current_path": effective_path}) if context.populate_existing: q = q.execution_options(populate_existing=True) diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 675c7218bd6..8157c5b61e5 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# orm/strategy_options.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -115,6 +116,9 @@ def for_existing_path(cls, path): load._extra_criteria = () return load + def _adapt_cached_option_to_uncached_option(self, context, uncached_opt): + return self._adjust_for_extra_criteria(context) + def _generate_extra_criteria(self, context): """Apply the current bound parameters in a QueryContext to the immediate "extra_criteria" stored with this Load object. @@ -260,7 +264,8 @@ def process_compile_state(self, compile_state): self._process( compile_state, compile_state._lead_mapper_entities, - not bool(compile_state.current_path), + not bool(compile_state.current_path) + and not compile_state.compile_options._for_refresh_state, ) def _process(self, compile_state, mapper_entities, raiseerr): @@ -514,7 +519,19 @@ def options(self, *opts): "for 'unbound' loader options" ) for opt in opts: - opt._apply_to_parent(self, apply_cache, bound) + try: + opt._apply_to_parent(self, apply_cache, bound) + except AttributeError as ae: + if not isinstance(opt, Load): + util.raise_( + sa_exc.ArgumentError( + "Loader option %s is not compatible with the " + "Load.options() method." % (opt,) + ), + from_=ae, + ) + else: + raise @_generative def set_relationship_strategy( @@ -919,6 +936,15 @@ def _split_key(key): return (_DEFAULT_TOKEN,) # coerce fooload(".*") into "wildcard on default entity" elif key.startswith("." + _WILDCARD_TOKEN): + util.warn_deprecated( + "The undocumented `.{WILDCARD}` format is deprecated " + "and will be removed in a future version as it is " + "believed to be unused. " + "If you have been using this functionality, please " + "comment on Issue #4390 on the SQLAlchemy project " + "tracker.", + version="1.4", + ) key = key[1:] return key.split(".") else: @@ -1930,10 +1956,17 @@ def with_expression(loadopt, key, expression): .. versionadded:: 1.2 - :param key: Attribute to be undeferred. + :param key: Attribute to be populated. 
:param expr: SQL expression to be applied to the attribute. + .. versionchanged:: 1.4 Loader options such as + :func:`_orm.with_expression` + take effect only at the **outermost** query used, and should not be used + within subqueries or inner elements of a UNION. See the change notes at + :ref:`change_8879` for background on how to correctly add arbitrary + columns to subqueries. + .. note:: the target attribute is populated only if the target object is **not currently loaded** in the current :class:`_orm.Session` unless the :meth:`_query.Query.populate_existing` method is used. diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index 9d684a2a872..c6f2ab99278 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index f29d11bcd59..efb4ed54bc8 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 0e844906809..99a07190b5c 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -70,7 +70,7 @@ class CascadeOptions(frozenset): ) _allowed_cascades = all_cascades - _viewonly_cascades = ["expunge", "all", "none", "refresh-expire"] + _viewonly_cascades = ["expunge", "all", "none", "refresh-expire", "merge"] __slots__ = ( "save_update", @@ -415,7 +415,8 @@ def __init__( def _include_fn(self, elem): entity = elem._annotations.get("parentmapper", None) - return not entity or entity.isa(self.mapper) + + return not entity or entity.isa(self.mapper) or self.mapper.isa(entity) class AliasedClass(object): @@ -494,11 +495,20 @@ def __init__( insp = inspection.inspect(mapped_class_or_ac) mapper = insp.mapper + nest_adapters = False + if alias is None: - alias = mapper._with_polymorphic_selectable._anonymous_fromclause( - name=name, - flat=flat, - ) + if insp.is_aliased_class and insp.selectable._is_subquery: + alias = insp.selectable.alias() + else: + alias = ( + mapper._with_polymorphic_selectable._anonymous_fromclause( + name=name, + flat=flat, + ) + ) + elif insp.is_aliased_class: + nest_adapters = True self._aliased_insp = AliasedInsp( self, @@ -515,6 +525,7 @@ def __init__( use_mapper_path, adapt_on_names, represents_outer_join, + nest_adapters, ) self.__name__ = "AliasedClass_%s" % mapper.class_.__name__ @@ -639,6 +650,19 @@ class AliasedInsp( """ + _cache_key_traversal = [ + ("name", visitors.ExtendedInternalTraversal.dp_string), + ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), + ("_use_mapper_path", visitors.ExtendedInternalTraversal.dp_boolean), + ("_target", visitors.ExtendedInternalTraversal.dp_inspectable), + ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), + ( + "with_polymorphic_mappers", + visitors.InternalTraversal.dp_has_cache_key_list, + 
), + ("polymorphic_on", visitors.InternalTraversal.dp_clauseelement), + ] + def __init__( self, entity, @@ -651,6 +675,7 @@ def __init__( _use_mapper_path, adapt_on_names, represents_outer_join, + nest_adapters, ): mapped_class_or_ac = inspected.entity @@ -666,6 +691,7 @@ def __init__( self._base_alias = weakref.ref(_base_alias or self) self._use_mapper_path = _use_mapper_path self.represents_outer_join = represents_outer_join + self._nest_adapters = nest_adapters if with_polymorphic_mappers: self._is_with_polymorphic = True @@ -696,12 +722,14 @@ def __init__( # make sure the adapter doesn't try to grab other tables that # are not even the thing we are mapping, such as embedded # selectables in subqueries or CTEs. See issue #6060 - adapt_from_selectables=[ - m.selectable for m in self.with_polymorphic_mappers - ], + adapt_from_selectables={ + m.selectable + for m in self.with_polymorphic_mappers + if not adapt_on_names + }, ) - if inspected.is_aliased_class: + if nest_adapters: self._adapter = inspected._adapter.wrap(self._adapter) self._adapt_on_names = adapt_on_names @@ -741,12 +769,6 @@ def __clause_element__(self): def entity_namespace(self): return self.entity - _cache_key_traversal = [ - ("name", visitors.ExtendedInternalTraversal.dp_string), - ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), - ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), - ] - @property def class_(self): """Return the mapped class ultimately represented by this @@ -772,6 +794,7 @@ def __getstate__(self): "base_alias": self._base_alias(), "use_mapper_path": self._use_mapper_path, "represents_outer_join": self.represents_outer_join, + "nest_adapters": self._nest_adapters, } def __setstate__(self, state): @@ -786,6 +809,7 @@ def __setstate__(self, state): state["use_mapper_path"], state["adapt_on_names"], state["represents_outer_join"], + state["nest_adapters"], ) def _adapt_element(self, elem, key=None): @@ -891,6 +915,8 @@ class _WrapUserEntity(object): """ + __slots__ = ("subject",) + def __init__(self, subject): self.subject = subject @@ -1076,12 +1102,18 @@ class of a particular set of mapped classes, to which the rule accepts a target class as an argument, when the given class is a base with many different mapped subclasses. + .. note:: To support pickling, use a module-level Python function to + produce the SQL expression instead of a lambda or a fixed SQL + expression, which tend to not be picklable. + :param include_aliases: if True, apply the rule to :func:`_orm.aliased` constructs as well. :param propagate_to_loaders: defaults to True, apply to relationship - loaders such as lazy loaders. - + loaders such as lazy loaders. This indicates that the + option object itself including SQL expression is carried along with + each loaded instance. Set to ``False`` to prevent the object from + being assigned to individual instances. .. 
seealso:: @@ -1109,6 +1141,7 @@ class of a particular set of mapped classes, to which the rule self.root_entity = None self.entity = entity + self._where_crit_orig = where_criteria if callable(where_criteria): self.deferred_where_criteria = True self.where_criteria = lambdas.DeferredLambdaElement( @@ -1134,7 +1167,30 @@ class of a particular set of mapped classes, to which the rule self.include_aliases = include_aliases self.propagate_to_loaders = propagate_to_loaders + @classmethod + def _unreduce( + cls, entity, where_criteria, include_aliases, propagate_to_loaders + ): + return LoaderCriteriaOption( + entity, + where_criteria, + include_aliases=include_aliases, + propagate_to_loaders=propagate_to_loaders, + ) + + def __reduce__(self): + return ( + LoaderCriteriaOption._unreduce, + ( + self.entity.class_ if self.entity else self.root_entity, + self._where_crit_orig, + self.include_aliases, + self.propagate_to_loaders, + ), + ) + def _all_mappers(self): + if self.entity: for ent in self.entity.mapper.self_and_descendants: yield ent @@ -1149,11 +1205,24 @@ def _all_mappers(self): else: stack.extend(subclass.__subclasses__()) + def _should_include(self, compile_state): + if ( + compile_state.select_statement._annotations.get( + "for_loader_criteria", None + ) + is self + ): + return False + return True + def _resolve_where_criteria(self, ext_info): if self.deferred_where_criteria: - return self.where_criteria._resolve_with_args(ext_info.entity) + crit = self.where_criteria._resolve_with_args(ext_info.entity) else: - return self.where_criteria + crit = self.where_criteria + return sql_util._deep_annotate( + crit, {"for_loader_criteria": self}, detect_subquery_cols=True + ) def process_compile_state_replaced_entities( self, compile_state, mapper_entities @@ -1173,8 +1242,7 @@ def process_compile_state(self, compile_state): "Please migrate code to use the with_polymorphic() standalone " "function before using with_loader_criteria()." ) - if not compile_state.compile_options._for_refresh_state: - self.get_global_criteria(compile_state.global_attributes) + self.get_global_criteria(compile_state.global_attributes) def get_global_criteria(self, attributes): for mp in self._all_mappers(): @@ -1227,8 +1295,6 @@ def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): :ref:`orm_queryguide_orm_aliases` - in the :ref:`queryguide_toplevel` - :ref:`ormtutorial_aliases` - in the legacy :ref:`ormtutorial_toplevel` - :param element: element to be aliased. Is normally a mapped class, but for convenience can also be a :class:`_expression.FromClause` element. @@ -1311,6 +1377,7 @@ def with_polymorphic( flat=False, polymorphic_on=None, aliased=False, + adapt_on_names=False, innerjoin=False, _use_mapper_path=False, _existing_alias=None, @@ -1375,6 +1442,15 @@ def with_polymorphic( :param innerjoin: if True, an INNER JOIN will be used. This should only be specified if querying for one specific subtype only + + :param adapt_on_names: Passes through the + :paramref:`_orm.aliased.adapt_on_names` + parameter to the aliased object. This may be useful in situations where + the given selectable is not directly related to the existing mapped + selectable. + + .. 
versionadded:: 1.4.33 + """ primary_mapper = _class_to_mapper(base) @@ -1402,6 +1478,7 @@ def with_polymorphic( return AliasedClass( base, selectable, + adapt_on_names=adapt_on_names, with_polymorphic_mappers=mappers, with_polymorphic_discriminator=polymorphic_on, use_mapper_path=_use_mapper_path, @@ -1660,9 +1737,8 @@ def __init__( else: prop = None + left_selectable = left_info.selectable if prop: - left_selectable = left_info.selectable - if sql_util.clause_is_present(on_selectable, left_selectable): adapt_from = on_selectable else: @@ -1697,7 +1773,25 @@ def __init__( self._target_adapter = target_adapter - augment_onclause = onclause is None and _extra_criteria + # we don't use the normal coercions logic for _ORMJoin + # (probably should), so do some gymnastics to get the entity. + # logic here is for #8721, which was a major bug in 1.4 + # for almost two years, not reported/fixed until 1.4.43 (!) + if left_info.is_selectable: + parententity = left_selectable._annotations.get( + "parententity", None + ) + elif left_info.is_mapper or left_info.is_aliased_class: + parententity = left_info + else: + parententity = None + + if parententity is not None: + self._annotations = self._annotations.union( + {"parententity": parententity} + ) + + augment_onclause = bool(_extra_criteria) and not prop expression.Join.__init__(self, left, right, onclause, isouter, full) if augment_onclause: @@ -1770,36 +1864,44 @@ def join( left and right selectables may be not only core selectable objects such as :class:`_schema.Table`, but also mapped classes or :class:`.AliasedClass` instances. The "on" clause can - be a SQL expression, or an attribute or string name + be a SQL expression or an ORM mapped attribute referencing a configured :func:`_orm.relationship`. + .. deprecated:: 1.4 using a string relationship name for the "onclause" + is deprecated and will be removed in 2.0; the onclause may be only + an ORM-mapped relationship attribute or a SQL expression construct. + :func:`_orm.join` is not commonly needed in modern usage, as its functionality is encapsulated within that of the - :meth:`_query.Query.join` method, which features a + :meth:`_sql.Select.join` and :meth:`_query.Query.join` + methods. which feature a significant amount of automation beyond :func:`_orm.join` - by itself. Explicit usage of :func:`_orm.join` - with :class:`_query.Query` involves usage of the - :meth:`_query.Query.select_from` method, as in:: + by itself. Explicit use of :func:`_orm.join` + with ORM-enabled SELECT statements involves use of the + :meth:`_sql.Select.select_from` method, as in:: from sqlalchemy.orm import join - session.query(User).\ + stmt = select(User).\ select_from(join(User, Address, User.addresses)).\ filter(Address.email_address=='foo@bar.com') In modern SQLAlchemy the above join can be written more succinctly as:: - session.query(User).\ + stmt = select(User).\ join(User.addresses).\ filter(Address.email_address=='foo@bar.com') - See :meth:`_query.Query.join` for information on modern usage - of ORM level joins. + .. warning:: using :func:`_orm.join` directly may not work properly + with modern ORM options such as :func:`_orm.with_loader_criteria`. + It is strongly recommended to use the idiomatic join patterns + provided by methods such as :meth:`.Select.join` and + :meth:`.Select.join_from` when creating ORM joins. - .. deprecated:: 0.8 + .. seealso:: - the ``join_to_left`` parameter is deprecated, and will be removed - in a future release. The parameter has no effect. 
+ :ref:`orm_queryguide_joins` - in the :ref:`queryguide_toplevel` for + background on idiomatic ORM join patterns """ return _ORMJoin(left, right, onclause, isouter, full) @@ -1824,7 +1926,7 @@ def with_parent(instance, prop, from_entity=None): E.g.:: - stmt = select(Address).where(with_parent(some_user, Address.user)) + stmt = select(Address).where(with_parent(some_user, User.addresses)) The SQL rendered is the same as that rendered when a lazy loader diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py index 5b4f4ebb101..7ad79ee8a32 100644 --- a/lib/sqlalchemy/pool/__init__.py +++ b/lib/sqlalchemy/pool/__init__.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# pool/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index 38b0f67cb88..5004bdda1c7 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# pool/base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -36,6 +36,7 @@ class _ConnDialect(object): """ is_async = False + has_terminate = False def do_rollback(self, dbapi_connection): dbapi_connection.rollback() @@ -43,6 +44,9 @@ def do_rollback(self, dbapi_connection): def do_commit(self, dbapi_connection): dbapi_connection.commit() + def do_terminate(self, dbapi_connection): + dbapi_connection.close() + def do_close(self, dbapi_connection): dbapi_connection.close() @@ -111,34 +115,39 @@ def __init__( logging. :param reset_on_return: Determine steps to take on - connections as they are returned to the pool, which were - not otherwise handled by a :class:`_engine.Connection`. - - reset_on_return can have any of these values: - - * ``"rollback"`` - call rollback() on the connection, - to release locks and transaction resources. - This is the default value. The vast majority - of use cases should leave this value set. - * ``True`` - same as 'rollback', this is here for - backwards compatibility. - * ``"commit"`` - call commit() on the connection, - to release locks and transaction resources. - A commit here may be desirable for databases that - cache query plans if a commit is emitted, - such as Microsoft SQL Server. However, this - value is more dangerous than 'rollback' because - any data changes present on the transaction - are committed unconditionally. - * ``None`` - don't do anything on the connection. - This setting is only appropriate if the database / DBAPI - works in pure "autocommit" mode at all times, or if the - application uses the :class:`_engine.Engine` with consistent - connectivity patterns. See the section - :ref:`pool_reset_on_return` for more details. - - * ``False`` - same as None, this is here for - backwards compatibility. + connections as they are returned to the pool, which were + not otherwise handled by a :class:`_engine.Connection`. + Available from :func:`_sa.create_engine` via the + :paramref:`_sa.create_engine.pool_reset_on_return` parameter. + + :paramref:`_pool.Pool.reset_on_return` can have any of these values: + + * ``"rollback"`` - call rollback() on the connection, + to release locks and transaction resources. + This is the default value. 
The vast majority + of use cases should leave this value set. + * ``"commit"`` - call commit() on the connection, + to release locks and transaction resources. + A commit here may be desirable for databases that + cache query plans if a commit is emitted, + such as Microsoft SQL Server. However, this + value is more dangerous than 'rollback' because + any data changes present on the transaction + are committed unconditionally. + * ``None`` - don't do anything on the connection. + This setting may be appropriate if the database / DBAPI + works in pure "autocommit" mode at all times, or if + a custom reset handler is established using the + :meth:`.PoolEvents.reset` event handler. + * ``True`` - same as 'rollback', this is here for + backwards compatibility. + * ``False`` - same as None, this is here for + backwards compatibility. + + For further customization of reset on return, the + :meth:`.PoolEvents.reset` event hook may be used which can perform + any connection activity desired on reset. (requires version 1.4.43 + or greater) .. seealso:: @@ -240,15 +249,23 @@ def _should_wrap_creator(self, creator): else: return lambda crec: creator() - def _close_connection(self, connection): - self.logger.debug("Closing connection %r", connection) - + def _close_connection(self, connection, terminate=False): + self.logger.debug( + "%s connection %r", + "Hard-closing" if terminate else "Closing", + connection, + ) try: - self._dialect.do_close(connection) - except Exception: + if terminate: + self._dialect.do_terminate(connection) + else: + self._dialect.do_close(connection) + except BaseException as e: self.logger.error( "Exception closing connection %r", connection, exc_info=True ) + if not isinstance(e, Exception): + raise def _create_connection(self): """Called by subclasses to create a new ConnectionRecord.""" @@ -476,16 +493,22 @@ def checkout(cls, pool): rec = pool._do_get() try: dbapi_connection = rec.get_connection() - except Exception as err: + except BaseException as err: with util.safe_reraise(): rec._checkin_failed(err, _fairy_was_created=False) + + # never called, this is for code linters + raise + echo = pool._should_log_debug() fairy = _ConnectionFairy(dbapi_connection, rec, echo) rec.fairy_ref = ref = weakref.ref( fairy, lambda ref: _finalize_fairy - and _finalize_fairy(None, rec, pool, ref, echo, True), + and _finalize_fairy( + None, rec, pool, ref, echo, transaction_was_reset=False + ), ) _strong_ref_connection_records[ref] = rec if echo: @@ -584,7 +607,7 @@ def invalidate(self, e=None, soft=False): if soft: self._soft_invalidate_time = time.time() else: - self.__close() + self.__close(terminate=True) self.dbapi_connection = None def get_connection(self): @@ -630,7 +653,7 @@ def get_connection(self): recycle = True if recycle: - self.__close() + self.__close(terminate=True) self.info.clear() self.__connect() @@ -643,11 +666,13 @@ def _is_hard_or_soft_invalidated(self): or (self._soft_invalidate_time > self.starttime) ) - def __close(self): + def __close(self, terminate=False): self.finalize_callback.clear() if self.__pool.dispatch.close: self.__pool.dispatch.close(self.dbapi_connection, self) - self.__pool._close_connection(self.dbapi_connection) + self.__pool._close_connection( + self.dbapi_connection, terminate=terminate + ) self.dbapi_connection = None def __connect(self): @@ -661,7 +686,7 @@ def __connect(self): self.dbapi_connection = connection = pool._invoke_creator(self) pool.logger.debug("Created new connection %r", connection) self.fresh = True - except Exception as e: + 
except BaseException as e: with util.safe_reraise(): pool.logger.debug("Error on connect(): %s", e) else: @@ -685,7 +710,7 @@ def _finalize_fairy( pool, ref, # this is None when called directly, not by the gc echo, - reset=True, + transaction_was_reset=False, fairy=None, ): """Cleanup for a :class:`._ConnectionFairy` whether or not it's already @@ -709,7 +734,9 @@ def _finalize_fairy( dbapi_connection = connection_record.dbapi_connection # null pool is not _is_asyncio but can be used also with async dialects - dont_restore_gced = pool._dialect.is_async + dont_restore_gced = ( + pool._dialect.is_async and not pool._dialect.has_terminate + ) if dont_restore_gced: detach = not connection_record or ref @@ -721,11 +748,8 @@ def _finalize_fairy( if dbapi_connection is not None: if connection_record and echo: pool.logger.debug( - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", dbapi_connection, - ", transaction state was already reset by caller" - if not reset - else "", ) try: @@ -735,8 +759,8 @@ def _finalize_fairy( echo, ) assert fairy.dbapi_connection is dbapi_connection - if reset and can_manipulate_connection: - fairy._reset(pool) + if can_manipulate_connection: + fairy._reset(pool, transaction_was_reset) if detach: if connection_record: @@ -751,8 +775,10 @@ def _finalize_fairy( else: message = ( "The garbage collector is trying to clean up " - "connection %r. This feature is unsupported on async " - "dbapi, since no IO can be performed at this stage to " + "connection %r. This feature is " + "unsupported on asyncio " 'dbapis that lack a "terminate" feature, ' + "since no IO can be performed at this stage to " "reset the connection. Please close out all " "connections when they are no longer used, calling " "``close()`` or using a context manager to " @@ -887,6 +913,7 @@ def _checkout(cls, pool, threadconns=None, fairy=None): # is not accessible from a connection standpoint, those won't proceed # here.
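The reworked warning above recommends closing asyncio connections explicitly rather than relying on garbage collection, since no IO can be performed at that stage. A brief sketch of the recommended context-manager pattern; the asyncpg URL is only an example, and any asyncio-enabled dialect behaves the same way::

    import asyncio

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine

    # illustrative URL; substitute any asyncio-enabled dialect
    engine = create_async_engine(
        "postgresql+asyncpg://scott:tiger@localhost/test"
    )


    async def main():
        # the context manager returns (and resets) the connection
        # deterministically, so the garbage collector never has to
        async with engine.connect() as conn:
            result = await conn.execute(text("select 1"))
            print(result.scalar())
        await engine.dispose()


    asyncio.run(main())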
attempts = 2 + while attempts > 0: connection_is_fresh = fairy._connection_record.fresh fairy._connection_record.fresh = False @@ -939,7 +966,7 @@ def _checkout(cls, pool, threadconns=None, fairy=None): fairy.dbapi_connection = ( fairy._connection_record.get_connection() ) - except Exception as err: + except BaseException as err: with util.safe_reraise(): fairy._connection_record._checkin_failed( err, @@ -954,6 +981,21 @@ def _checkout(cls, pool, threadconns=None, fairy=None): del fairy attempts -= 1 + except BaseException as be_outer: + with util.safe_reraise(): + rec = fairy._connection_record + if rec is not None: + rec._checkin_failed( + be_outer, + _fairy_was_created=True, + ) + + # prevent _ConnectionFairy from being carried + # in the stack trace, see above + del fairy + + # never called, this is for code linters + raise pool.logger.info("Reconnection attempts exhausted on checkout") fairy.invalidate() @@ -962,14 +1004,14 @@ def _checkout(cls, pool, threadconns=None, fairy=None): def _checkout_existing(self): return _ConnectionFairy._checkout(self._pool, fairy=self) - def _checkin(self, reset=True): + def _checkin(self, transaction_was_reset=False): _finalize_fairy( self.dbapi_connection, self._connection_record, self._pool, None, self._echo, - reset=reset, + transaction_was_reset=transaction_was_reset, fairy=self, ) self.dbapi_connection = None @@ -977,15 +1019,23 @@ def _checkin(self, reset=True): _close = _checkin - def _reset(self, pool): + def _reset(self, pool, transaction_was_reset=False): if pool.dispatch.reset: pool.dispatch.reset(self, self._connection_record) if pool._reset_on_return is reset_rollback: - if self._echo: - pool.logger.debug( - "Connection %s rollback-on-return", self.dbapi_connection - ) - pool._dialect.do_rollback(self) + if transaction_was_reset: + if self._echo: + pool.logger.debug( + "Connection %s reset, transaction already reset", + self.dbapi_connection, + ) + else: + if self._echo: + pool.logger.debug( + "Connection %s rollback-on-return", + self.dbapi_connection, + ) + pool._dialect.do_rollback(self) elif pool._reset_on_return is reset_commit: if self._echo: pool.logger.debug( @@ -1115,7 +1165,7 @@ def close(self): if self._counter == 0: self._checkin() - def _close_no_reset(self): + def _close_special(self, transaction_reset=False): self._counter -= 1 if self._counter == 0: - self._checkin(reset=False) + self._checkin(transaction_was_reset=transaction_reset) diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py index 7dfb59e36e9..930c242514a 100644 --- a/lib/sqlalchemy/pool/dbapi_proxy.py +++ b/lib/sqlalchemy/pool/dbapi_proxy.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/dbapi_proxy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# pool/dbapi_proxy.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index 7c2cae7c5eb..a91c126ffa5 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# pool/events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -51,8 +51,14 @@ def _accept_with(cls, target): return target elif isinstance(target, Engine): return target.pool - else: + elif isinstance(target, Pool): return target + elif 
hasattr(target, "dispatch") and hasattr( + target.dispatch._events, "_no_async_engine_events" + ): + target.dispatch._events._no_async_engine_events() + else: + return None @classmethod def _listen(cls, event_key, **kw): @@ -145,17 +151,37 @@ def checkin(self, dbapi_connection, connection_record): def reset(self, dbapi_connection, connection_record): """Called before the "reset" action occurs for a pooled connection. - This event represents - when the ``rollback()`` method is called on the DBAPI connection - before it is returned to the pool. The behavior of "reset" can - be controlled, including disabled, using the ``reset_on_return`` - pool argument. - - + This event represents when the ``rollback()`` method is called on the + DBAPI connection before it is returned to the pool or discarded. A + custom "reset" strategy may be implemented using this event hook, which + may also be combined with disabling the default "reset" behavior using + the :paramref:`_pool.Pool.reset_on_return` parameter. + + The primary difference between the :meth:`_events.PoolEvents.reset` and + :meth:`_events.PoolEvents.checkin` events are that + :meth:`_events.PoolEvents.reset` is called not just for pooled + connections that are being returned to the pool, but also for + connections that were detached using the + :meth:`_engine.Connection.detach` method. + + Note that the event **is not** invoked for connections that were + invalidated using :meth:`_engine.Connection.invalidate`. These + events may be intercepted using the :meth:`.PoolEvents.soft_invalidate` + and :meth:`.PoolEvents.invalidate` event hooks, and all "connection + close" events may be intercepted using :meth:`.PoolEvents.close`. The :meth:`_events.PoolEvents.reset` event is usually followed by the - :meth:`_events.PoolEvents.checkin` event is called, except in those + :meth:`_events.PoolEvents.checkin` event, except in those cases where the connection is discarded immediately after reset. + In the 1.4 series, the event is also not invoked for asyncio + connections that are being garbage collected without their being + explicitly returned to the pool. This is due to the lack of an event + loop which prevents "reset" operations from taking place. Version 2.0 + will feature an enhanced version of :meth:`.PoolEvents.reset` which is + invoked in this scenario while passing additional contextual + information indicating that an event loop is not guaranteed + to be present. + :param dbapi_connection: a DBAPI connection. The :attr:`._ConnectionRecord.dbapi_connection` attribute. 
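The expanded ``PoolEvents.reset`` documentation above describes pairing the event with ``reset_on_return=None`` to install a fully custom reset. A small sketch of that combination; the database URL is illustrative, and the rollback-based handler simply stands in for whatever application-specific reset is needed::

    from sqlalchemy import create_engine, event

    # disable the pool's built-in rollback-on-return; the event handler
    # below becomes responsible for resetting returned connections
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        pool_reset_on_return=None,
    )


    @event.listens_for(engine, "reset")
    def _reset_dbapi_connection(dbapi_connection, connection_record):
        # 1.4 signature: the raw DBAPI connection plus its pool record;
        # any custom cleanup could be emitted here instead of rollback()
        dbapi_connection.rollback()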
diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py index 3ef33d02d2b..6bc4ad80474 100644 --- a/lib/sqlalchemy/pool/impl.py +++ b/lib/sqlalchemy/pool/impl.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# pool/impl.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py index 0c0aa1bd6c4..92a7da64890 100644 --- a/lib/sqlalchemy/processors.py +++ b/lib/sqlalchemy/processors.py @@ -1,5 +1,5 @@ -# sqlalchemy/processors.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# processors.py +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index eeb7f751abd..37949c7a4de 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py index f374d555d5f..0c8ae3307c5 100644 --- a/lib/sqlalchemy/sql/__init__.py +++ b/lib/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index e6618937a4f..60455956865 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -26,12 +26,16 @@ class SupportsAnnotations(object): @util.memoized_property def _annotations_cache_key(self): anon_map_ = anon_map() + + return self._gen_annotations_cache_key(anon_map_) + + def _gen_annotations_cache_key(self, anon_map): return ( "_annotations", tuple( ( key, - value._gen_cache_key(anon_map_, []) + value._gen_cache_key(anon_map, []) if isinstance(value, HasCacheKey) else value, ) @@ -238,7 +242,21 @@ def entity_namespace(self): annotated_classes = {} -def _deep_annotate(element, annotations, exclude=None): +def _safe_annotate(to_annotate, annotations): + try: + _annotate = to_annotate._annotate + except AttributeError: + # skip objects that don't actually have an `_annotate` + # attribute, namely QueryableAttribute inside of a join + # condition + return to_annotate + else: + return _annotate(annotations) + + +def _deep_annotate( + element, annotations, exclude=None, detect_subquery_cols=False +): """Deep copy the given ClauseElement, annotating each element with the given annotations dictionary. 
@@ -252,6 +270,7 @@ def _deep_annotate(element, annotations, exclude=None): cloned_ids = {} def clone(elem, **kw): + kw["detect_subquery_cols"] = detect_subquery_cols id_ = id(elem) if id_ in cloned_ids: @@ -262,9 +281,14 @@ def clone(elem, **kw): and hasattr(elem, "proxy_set") and elem.proxy_set.intersection(exclude) ): - newelem = elem._clone(**kw) + newelem = elem._clone(clone=clone, **kw) elif annotations != elem._annotations: - newelem = elem._annotate(annotations) + if detect_subquery_cols and elem._is_immutable: + newelem = _safe_annotate( + elem._clone(clone=clone, **kw), annotations + ) + else: + newelem = _safe_annotate(elem, annotations) else: newelem = elem newelem._copy_internals(clone=clone) diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index aba80222a6b..12049c08eaf 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -559,8 +559,9 @@ def _generate(self): cls = self.__class__ s = cls.__new__(cls) if skip: + # ensure this iteration remains atomic s.__dict__ = { - k: v for k, v in self.__dict__.items() if k not in skip + k: v for k, v in self.__dict__.copy().items() if k not in skip } else: s.__dict__ = self.__dict__.copy() @@ -768,11 +769,13 @@ def _generate_cache_key(self): return HasCacheKey._generate_cache_key_for_object(self) -class ExecutableOption(HasCopyInternals, HasCacheKey): +class ExecutableOption(HasCopyInternals): _annotations = util.EMPTY_DICT __visit_name__ = "executable_option" + _is_has_cache_key = False + def _clone(self, **kw): """Create a shallow copy of this ExecutableOption.""" c = self.__class__.__new__(self.__class__) @@ -846,7 +849,8 @@ def options(self, *options): """ self._with_options += tuple( - coercions.expect(roles.HasCacheKeyRole, opt) for opt in options + coercions.expect(roles.ExecutableOptionRole, opt) + for opt in options ) @_generative @@ -1426,7 +1430,7 @@ def embedded(expanded_proxy_set, target_set): operator.add, [ sc._annotations.get("weight", 1) - for sc in col._uncached_proxy_set() + for sc in col._uncached_proxy_list() if sc.shares_lineage(column) ], ) @@ -1434,7 +1438,7 @@ def embedded(expanded_proxy_set, target_set): operator.add, [ sc._annotations.get("weight", 1) - for sc in c._uncached_proxy_set() + for sc in c._uncached_proxy_list() if sc.shares_lineage(column) ], ) diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index f888bad4cac..d6fd8ba6b4a 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -1,5 +1,5 @@ # sql/coercions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -11,6 +11,7 @@ from . import operators from . import roles from . 
import visitors +from .base import ExecutableOption from .base import Options from .traversals import HasCacheKey from .visitors import Visitable @@ -140,7 +141,12 @@ def expect( if not isinstance( element, - (elements.ClauseElement, schema.SchemaItem, schema.FetchedValue), + ( + elements.ClauseElement, + schema.SchemaItem, + schema.FetchedValue, + lambdas.PyWrapper, + ), ): resolved = None @@ -189,6 +195,8 @@ def expect( ) else: resolved = element + elif isinstance(element, lambdas.PyWrapper): + resolved = element._sa__py_wrapper_literal(**kw) else: resolved = element if ( @@ -458,6 +466,21 @@ def _literal_coercion(self, element, **kw): return element +class ExecutableOptionImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, original_element, resolved, argname=None, **kw + ): + if isinstance(original_element, ExecutableOption): + return original_element + else: + self._raise_for_expected(original_element, argname, resolved) + + def _literal_coercion(self, element, **kw): + return element + + class ExpressionElementImpl(_ColumnCoercions, RoleImpl): __slots__ = () @@ -508,9 +531,11 @@ def _literal_coercion( except exc.ArgumentError as err: self._raise_for_expected(element, err=err) - def _post_coercion(self, resolved, expr, **kw): + def _post_coercion(self, resolved, expr, bindparam_type=None, **kw): if resolved.type._isnull and not expr.type._isnull: - resolved = resolved._with_binary_element_type(expr.type) + resolved = resolved._with_binary_element_type( + bindparam_type if bindparam_type is not None else expr.type + ) return resolved diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 0cd568fcc64..ca9ca962869 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -38,6 +38,7 @@ from . import schema from . import selectable from . import sqltypes +from . import util as sql_util from .base import NO_ARG from .base import prefix_anon_map from .elements import quoted_name @@ -165,8 +166,8 @@ "named": ":%(name)s", } -_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]]") -_BIND_TRANSLATE_CHARS = dict(zip("%():[]", "PAZC__")) +_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\] ]") +_BIND_TRANSLATE_CHARS = dict(zip("%():[] ", "PAZC___")) OPERATORS = { # binary @@ -402,6 +403,18 @@ class Compiled(object): """ + dml_compile_state = None + """Optional :class:`.CompileState` assigned at the same point that + .isinsert, .isupdate, or .isdelete is assigned. + + This will normally be the same object as .compile_state, with the + exception of cases like the :class:`.ORMFromStatementCompileState` + object. + + .. versionadded:: 1.4.40 + + """ + cache_key = None _gen_time = None @@ -490,7 +503,9 @@ def __str__(self): return self.string or "" - def construct_params(self, params=None, extracted_parameters=None): + def construct_params( + self, params=None, extracted_parameters=None, escape_names=True + ): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will @@ -597,6 +612,20 @@ class SQLCompiler(Compiled): _textual_ordered_columns = False """tell the result object that the column names as rendered are important, but they are also "ordered" vs. what is in the compiled object here. 
+ + As of 1.4.42 this condition is only present when the statement is a + TextualSelect, e.g. text("....").columns(...), where it is required + that the columns are considered positionally and not by name. + + """ + + _ad_hoc_textual = False + """tell the result that we encountered text() or '*' constructs in the + middle of the result columns, but we also have compiled columns, so + if the number of columns in cursor.description does not match how many + expressions we have, that means we can't rely on positional at all and + should match on name. + """ _ordered_columns = True @@ -670,6 +699,24 @@ class SQLCompiler(Compiled): """ + _post_compile_pattern = re.compile(r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]") + + positiontup = None + """for a compiled construct that uses a positional paramstyle, will be + a sequence of strings, indicating the names of bound parameters in order. + + This is used in order to render bound parameters in their correct order, + and is combined with the :attr:`_sql.Compiled.params` dictionary to + render parameters. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. + + """ + positiontup_level = None + inline = False def __init__( @@ -740,6 +787,7 @@ def __init__( # true if the paramstyle is positional self.positional = dialect.positional if self.positional: + self.positiontup_level = {} self.positiontup = [] self._numeric_binds = dialect.paramstyle == "numeric" self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] @@ -850,6 +898,8 @@ def _init_cte_state(self): self.ctes_recursive = False if self.positional: self.cte_positional = {} + self.cte_level = {} + self.cte_order = collections.defaultdict(list) @contextlib.contextmanager def _nested_result(self): @@ -883,8 +933,12 @@ def _apply_numbered_params(self): @util.memoized_property def _bind_processors(self): + return dict( - (key, value) + ( + key, + value, + ) for key, value in ( ( self.bind_names[bindparam], @@ -913,10 +967,11 @@ def construct_params( _group_number=None, _check=True, extracted_parameters=None, + escape_names=True, ): """return a dictionary of bind parameter keys and values""" - has_escaped_names = bool(self.escaped_bind_names) + has_escaped_names = escape_names and bool(self.escaped_bind_names) if extracted_parameters: # related the bound parameters collected in the original cache key @@ -1091,7 +1146,14 @@ def _lookup_type(typ): @property def params(self): """Return the bind param dictionary embedded into this - compiled object, for those values that are present.""" + compiled object, for those values that are present. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. + + """ return self.construct_params(_check=False) def _process_parameters_for_postcompile( @@ -1109,8 +1171,9 @@ def _process_parameters_for_postcompile( N as a bound parameter. 
""" + if parameters is None: - parameters = self.construct_params() + parameters = self.construct_params(escape_names=False) expanded_parameters = {} if self.positional: @@ -1151,16 +1214,16 @@ def _process_parameters_for_postcompile( if self.escaped_bind_names else name ) + parameter = self.binds[name] if parameter in self.literal_execute_params: if escaped_name not in replacement_expressions: - value = parameters.pop(escaped_name) - - replacement_expressions[ - escaped_name - ] = self.render_literal_bindparam( - parameter, render_literal_value=value - ) + replacement_expressions[ + escaped_name + ] = self.render_literal_bindparam( + parameter, + render_literal_value=parameters.pop(escaped_name), + ) continue if parameter in self.post_compile_params: @@ -1173,7 +1236,12 @@ def _process_parameters_for_postcompile( # process it. the single name is being replaced with # individual numbered parameters for each value in the # param. - values = parameters.pop(escaped_name) + # + # note we are also inserting *escaped* parameter names + # into the given dictionary. default dialect will + # use these param names directly as they will not be + # in the escaped_bind_names dictionary. + values = parameters.pop(name) leep = self._literal_execute_expanding_parameter to_update, replacement_expr = leep( @@ -1226,7 +1294,7 @@ def process_expanding(m): return expr statement = re.sub( - r"\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]", + self._post_compile_pattern, process_expanding, self.string, ) @@ -1268,16 +1336,21 @@ def _create_result_map(self): self._result_columns ) + @util.memoized_property + def _within_exec_param_key_getter(self): + getter = self._key_getters_for_crud_column[2] + return getter + @util.memoized_property @util.preload_module("sqlalchemy.engine.result") def _inserted_primary_key_from_lastrowid_getter(self): result = util.preloaded.engine_result - key_getter = self._key_getters_for_crud_column[2] + param_key_getter = self._within_exec_param_key_getter table = self.statement.table getters = [ - (operator.methodcaller("get", key_getter(col), None), col) + (operator.methodcaller("get", param_key_getter(col), None), col) for col in table.primary_key ] @@ -1293,6 +1366,12 @@ def _inserted_primary_key_from_lastrowid_getter(self): row_fn = result.result_tuple([col.key for col in table.primary_key]) def get(lastrowid, parameters): + """given cursor.lastrowid value and the parameters used for INSERT, + return a "row" that represents the primary key, either by + using the "lastrowid" or by extracting values from the parameters + that were sent along with the INSERT. 
+ + """ if proc is not None: lastrowid = proc(lastrowid) @@ -1311,7 +1390,7 @@ def get(lastrowid, parameters): def _inserted_primary_key_from_returning_getter(self): result = util.preloaded.engine_result - key_getter = self._key_getters_for_crud_column[2] + param_key_getter = self._within_exec_param_key_getter table = self.statement.table ret = {col: idx for idx, col in enumerate(self.returning)} @@ -1319,7 +1398,10 @@ def _inserted_primary_key_from_returning_getter(self): getters = [ (operator.itemgetter(ret[col]), True) if col in ret - else (operator.methodcaller("get", key_getter(col), None), False) + else ( + operator.methodcaller("get", param_key_getter(col), None), + False, + ) for col in table.primary_key ] @@ -1577,6 +1659,17 @@ def visit_textual_select( toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] + new_entry = { + "correlate_froms": set(), + "asfrom_froms": set(), + "selectable": taf, + } + self.stack.append(new_entry) + + if taf._independent_ctes: + for cte in taf._independent_ctes: + cte._compiler_dispatch(self, **kw) + populate_result_map = ( toplevel or ( @@ -1604,7 +1697,20 @@ def visit_textual_select( add_to_result_map=self._add_to_result_map, ) - return self.process(taf.element, **kw) + text = self.process(taf.element, **kw) + if self.ctes: + nesting_level = len(self.stack) if not toplevel else None + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) + + self.stack.pop(-1) + + return text def visit_null(self, expr, **kw): return "NULL" @@ -1710,6 +1816,7 @@ def _format_frame_clause(self, range_, **kw): ) def visit_over(self, over, **kwargs): + text = over.element._compiler_dispatch(self, **kwargs) if over.range_: range_ = "RANGE BETWEEN %s" % self._format_frame_clause( over.range_, **kwargs @@ -1722,7 +1829,7 @@ def visit_over(self, over, **kwargs): range_ = None return "%s OVER (%s)" % ( - over.element._compiler_dispatch(self, **kwargs), + text, " ".join( [ "%s BY %s" @@ -1868,7 +1975,9 @@ def visit_compound_select( nesting_level = len(self.stack) if not toplevel else None text = ( self._render_cte_clause( - nesting_level=nesting_level, include_following_stack=True + nesting_level=nesting_level, + include_following_stack=True, + visiting_cte=kwargs.get("visiting_cte"), ) + text ) @@ -1985,12 +2094,16 @@ def visit_empty_set_expr(self, element_types): ) def _literal_execute_expanding_parameter_literal_binds( - self, parameter, values + self, parameter, values, bind_expression_template=None ): typ_dialect_impl = parameter.type._unwrapped_dialect_impl(self.dialect) if not values: + # empty IN expression. note we don't need to use + # bind_expression_template here because there are no + # expressions to render. 
+ if typ_dialect_impl._is_tuple_type: replacement_expression = ( "VALUES " if self.dialect.tuple_in_values else "" @@ -2003,8 +2116,20 @@ def _literal_execute_expanding_parameter_literal_binds( [parameter.type], parameter.expand_op ) - elif isinstance(values[0], (tuple, list)): - assert typ_dialect_impl._is_tuple_type + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], util.collections_abc.Sequence) + and not isinstance( + values[0], util.string_types + util.binary_types + ) + ): + + if typ_dialect_impl._has_bind_expression: + raise NotImplementedError( + "bind_expression() on TupleType not supported with " + "literal_binds" + ) + replacement_expression = ( "VALUES " if self.dialect.tuple_in_values else "" ) + ", ".join( @@ -2020,11 +2145,29 @@ def _literal_execute_expanding_parameter_literal_binds( for i, tuple_element in enumerate(values) ) else: - assert not typ_dialect_impl._is_tuple_type - replacement_expression = ", ".join( - self.render_literal_value(value, parameter.type) - for value in values - ) + if bind_expression_template: + post_compile_pattern = self._post_compile_pattern + m = post_compile_pattern.search(bind_expression_template) + assert m and m.group( + 2 + ), "unexpected format for expanding parameter" + + tok = m.group(2).split("~~") + be_left, be_right = tok[1], tok[3] + replacement_expression = ", ".join( + "%s%s%s" + % ( + be_left, + self.render_literal_value(value, parameter.type), + be_right, + ) + for value in values + ) + else: + replacement_expression = ", ".join( + self.render_literal_value(value, parameter.type) + for value in values + ) return (), replacement_expression @@ -2049,10 +2192,14 @@ def _literal_execute_expanding_parameter(self, name, parameter, values): [parameter.type], parameter.expand_op ) - elif ( - isinstance(values[0], (tuple, list)) - and not typ_dialect_impl._is_array + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], util.collections_abc.Sequence) + and not isinstance( + values[0], util.string_types + util.binary_types + ) ): + assert not typ_dialect_impl._is_array to_update = [ ("%s_%s_%s" % (name, i, j), value) for i, tuple_element in enumerate(values, 1) @@ -2206,37 +2353,37 @@ def _like_percent_literal(self): def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) + binary.right = percent.concat(binary.right).concat(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) + binary.right = percent.concat(binary.right).concat(percent) return self.visit_not_like_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__radd__(binary.right) + binary.right = percent._rconcat(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__radd__(binary.right) + binary.right = percent._rconcat(binary.right) return self.visit_not_like_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = 
binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) + binary.right = percent.concat(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) + binary.right = percent.concat(binary.right) return self.visit_not_like_op_binary(binary, operator, **kw) def visit_like_op_binary(self, binary, operator, **kw): @@ -2335,7 +2482,7 @@ def visit_bindparam( bind_expression, skip_bind_expression=True, within_columns_clause=within_columns_clause, - literal_binds=literal_binds, + literal_binds=literal_binds and not bindparam.expanding, literal_execute=literal_execute, render_postcompile=render_postcompile, **kwargs @@ -2343,14 +2490,26 @@ def visit_bindparam( if bindparam.expanding: # for postcompile w/ expanding, move the "wrapped" part # of this into the inside + m = re.match( - r"^(.*)\(\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped + r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped ) - wrapped = "([POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( + assert m, "unexpected format for expanding parameter" + wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( m.group(2), m.group(1), m.group(3), ) + + if literal_binds: + ret = self.render_literal_bindparam( + bindparam, + within_columns_clause=True, + bind_expression_template=wrapped, + **kwargs + ) + return "(%s)" % ret + return wrapped if not literal_binds: @@ -2389,6 +2548,15 @@ def visit_bindparam( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % name ) + elif existing.expanding != bindparam.expanding: + raise exc.CompileError( + "Can't reuse bound parameter name '%s' in both " + "'expanding' (e.g. within an IN expression) and " + "non-expanding contexts. If this parameter is to " + "receive a list/array value, set 'expanding=True' on " + "it for expressions that aren't IN, otherwise use " + "a different parameter name." % (name,) + ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " @@ -2441,7 +2609,11 @@ def visit_bindparam( return ret def render_literal_bindparam( - self, bindparam, render_literal_value=NO_ARG, **kw + self, + bindparam, + render_literal_value=NO_ARG, + bind_expression_template=None, + **kw ): if render_literal_value is not NO_ARG: value = render_literal_value @@ -2460,7 +2632,11 @@ def render_literal_bindparam( if bindparam.expanding: leep = self._literal_execute_expanding_parameter_literal_binds - to_update, replacement_expr = leep(bindparam, value) + to_update, replacement_expr = leep( + bindparam, + value, + bind_expression_template=bind_expression_template, + ) return replacement_expr else: return self.render_literal_value(value, bindparam.type) @@ -2478,10 +2654,29 @@ def render_literal_value(self, value, type_): processor = type_._cached_literal_processor(self.dialect) if processor: - return processor(value) + try: + return processor(value) + except Exception as e: + util.raise_( + exc.CompileError( + "Could not render literal value " + '"%s" ' + "with datatype " + "%s; see parent stack trace for " + "more detail." 
+ % ( + sql_util._repr_single_value(value), + type_, + ) + ), + from_=e, + ) + else: - raise NotImplementedError( - "Don't know how to literal-quote value %r" % value + raise exc.CompileError( + "No literal value renderer is available for literal value " + '"%s" with datatype %s' + % (sql_util._repr_single_value(value), type_) ) def _truncate_bindparam(self, bindparam): @@ -2534,7 +2729,8 @@ def bindparam_string( positional_names.append(name) else: self.positiontup.append(name) - elif not escaped_from: + self.positiontup_level[name] = len(self.stack) + if not escaped_from: if _BIND_TRANSLATE_RE.search(name): # not quite the translate use case as we want to @@ -2552,7 +2748,7 @@ def bindparam_string( self.escaped_bind_names = {} self.escaped_bind_names[escaped_from] = name if post_compile: - return "[POSTCOMPILE_%s]" % name + return "__[POSTCOMPILE_%s]" % name else: return self.bindtemplate % {"name": name} @@ -2610,10 +2806,19 @@ def visit_cte( del self.level_name_by_cte[existing_cte_reference_cte] else: - raise exc.CompileError( - "Multiple, unrelated CTEs found with " - "the same name: %r" % cte_name - ) + # if the two CTEs are deep-copy identical, consider them + # the same, **if** they are clones, that is, they came from + # the ORM or other visit method + if ( + cte._is_clone_of is not None + or existing_cte._is_clone_of is not None + ) and cte.compare(existing_cte): + is_new_cte = False + else: + raise exc.CompileError( + "Multiple, unrelated CTEs found with " + "the same name: %r" % cte_name + ) if not asfrom and not is_new_cte: return None @@ -2644,6 +2849,8 @@ def visit_cte( ] } ) + if self.positional: + self.cte_level[cte] = cte_level if pre_alias_cte not in self.ctes: self.visit_cte(pre_alias_cte, **kwargs) @@ -2734,6 +2941,8 @@ def visit_cte( return self.preparer.format_alias(cte, cte_name) def visit_table_valued_alias(self, element, **kw): + if element.joins_implicitly: + kw["from_linter"] = None if element._is_lateral: return self.visit_lateral(element, **kw) else: @@ -2915,7 +3124,7 @@ def get_render_as_alias_suffix(self, alias_name_text): def _add_to_result_map(self, keyname, name, objects, type_): if keyname is None or keyname == "*": self._ordered_columns = False - self._textual_ordered_columns = True + self._ad_hoc_textual = True if type_._is_tuple_type: raise exc.CompileError( "Most backends don't support SELECTing " @@ -2924,7 +3133,9 @@ def _add_to_result_map(self, keyname, name, objects, type_): ) self._result_columns.append((keyname, name, objects, type_)) - def _label_returning_column(self, stmt, column, column_clause_args=None): + def _label_returning_column( + self, stmt, column, column_clause_args=None, **kw + ): """Render a column with necessary labels inside of a RETURNING clause. This method is provided for individual dialects in place of calling @@ -2940,6 +3151,7 @@ def _label_returning_column(self, stmt, column, column_clause_args=None): True, False, {} if column_clause_args is None else column_clause_args, + **kw ) def _label_select_column( @@ -3004,7 +3216,6 @@ def add_to_result_map(keyname, name, objects, type_): "_label_select_column is only relevant within " "the columns clause of a SELECT or RETURNING" ) - if isinstance(column, elements.Label): if col_expr is not column: result_expr = _CompileLabel( @@ -3186,6 +3397,9 @@ def visit_select( # passed in. for ORM use this will convert from an ORM-state # SELECT to a regular "Core" SELECT. other composed operations # such as computation of joins will be performed. 
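Illustrative sketch (not part of the patch), assuming the 1.4-series default dialect where ``DateTime`` has no literal renderer; with the ``render_literal_value`` change above, ``literal_binds`` compilation now fails with ``CompileError`` rather than ``NotImplementedError``::

    import datetime

    from sqlalchemy import exc, literal, select

    stmt = select(literal(datetime.datetime(2021, 1, 1)))
    try:
        # the default dialect has no literal renderer for DateTime,
        # so literal_binds compilation raises CompileError
        print(stmt.compile(compile_kwargs={"literal_binds": True}))
    except exc.CompileError as err:
        print(err)  # "No literal value renderer is available ..."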
+ + kwargs["within_columns_clause"] = False + compile_state = select_stmt._compile_state_factory( select_stmt, self, **kwargs ) @@ -3346,13 +3560,16 @@ def visit_select( if per_dialect: text += " " + self.get_statement_hint_text(per_dialect) - if self.ctes: - # In compound query, CTEs are shared at the compound level - if not is_embedded_select: - nesting_level = len(self.stack) if not toplevel else None - text = ( - self._render_cte_clause(nesting_level=nesting_level) + text + # In compound query, CTEs are shared at the compound level + if self.ctes and (not is_embedded_select or toplevel): + nesting_level = len(self.stack) if not toplevel else None + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kwargs.get("visiting_cte"), ) + + text + ) if select_stmt._suffixes: text += " " + self._generate_prefixes( @@ -3528,6 +3745,7 @@ def _render_cte_clause( self, nesting_level=None, include_following_stack=False, + visiting_cte=None, ): """ include_following_stack @@ -3557,14 +3775,48 @@ def _render_cte_clause( if not ctes: return "" - ctes_recursive = any([cte.recursive for cte in ctes]) if self.positional: - self.positiontup = ( - sum([self.cte_positional[cte] for cte in ctes], []) - + self.positiontup - ) + self.cte_order[visiting_cte].extend(ctes) + + if visiting_cte is None and self.cte_order: + assert self.positiontup is not None + + def get_nested_positional(cte): + if cte in self.cte_order: + children = self.cte_order.pop(cte) + to_add = list( + itertools.chain.from_iterable( + get_nested_positional(child_cte) + for child_cte in children + ) + ) + if cte in self.cte_positional: + return reorder_positional( + self.cte_positional[cte], + to_add, + self.cte_level[children[0]], + ) + else: + return to_add + else: + return self.cte_positional.get(cte, []) + + def reorder_positional(pos, to_add, level): + if not level: + return to_add + pos + index = 0 + for index, name in enumerate(reversed(pos)): + if self.positiontup_level[name] < level: # type: ignore[index] # noqa: E501 + break + return pos[:-index] + to_add + pos[-index:] + + to_add = get_nested_positional(None) + self.positiontup = reorder_positional( + self.positiontup, to_add, nesting_level + ) + cte_text = self.get_cte_preamble(ctes_recursive) + " " cte_text += ", \n".join([txt for txt in ctes.values()]) cte_text += "\n " @@ -3747,6 +3999,8 @@ def visit_insert(self, insert_stmt, **kw): if toplevel: self.isinsert = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state @@ -3834,6 +4088,7 @@ def visit_insert(self, insert_stmt, **kw): self._render_cte_clause( nesting_level=nesting_level, include_following_stack=True, + visiting_cte=kw.get("visiting_cte"), ), select_text, ) @@ -3854,14 +4109,7 @@ def visit_insert(self, insert_stmt, **kw): [value for c, expr, value in crud_params] ) text += " VALUES (%s)" % insert_single_values_expr - if toplevel and insert_stmt._post_values_clause is None: - # don't assign insert_single_values_expr if _post_values_clause - # is present. what this means concretely is that the - # "fast insert executemany helper" won't be used, in other - # words we won't convert "executemany()" of many parameter - # sets into a single INSERT with many elements in VALUES. 
- # We can't apply that optimization safely if for example the - # statement includes a clause like "ON CONFLICT DO UPDATE" + if toplevel: self.insert_single_values_expr = insert_single_values_expr if insert_stmt._post_values_clause is not None: @@ -3878,7 +4126,9 @@ def visit_insert(self, insert_stmt, **kw): nesting_level = len(self.stack) if not toplevel else None text = ( self._render_cte_clause( - nesting_level=nesting_level, include_following_stack=True + nesting_level=nesting_level, + include_following_stack=True, + visiting_cte=kw.get("visiting_cte"), ) + text ) @@ -3924,6 +4174,8 @@ def visit_update(self, update_stmt, **kw): toplevel = not self.stack if toplevel: self.isupdate = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state @@ -4016,7 +4268,13 @@ def visit_update(self, update_stmt, **kw): if self.ctes: nesting_level = len(self.stack) if not toplevel else None - text = self._render_cte_clause(nesting_level=nesting_level) + text + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) self.stack.pop(-1) @@ -4050,6 +4308,8 @@ def visit_delete(self, delete_stmt, **kw): toplevel = not self.stack if toplevel: self.isdelete = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state @@ -4120,7 +4380,13 @@ def visit_delete(self, delete_stmt, **kw): if self.ctes: nesting_level = len(self.stack) if not toplevel else None - text = self._render_cte_clause(nesting_level=nesting_level) + text + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) self.stack.pop(-1) @@ -4194,7 +4460,9 @@ def visit_sequence(self, seq, **kw): def returning_clause(self, stmt, returning_cols): columns = [ - self._label_select_column(None, c, True, False, {}) + self._label_select_column( + None, c, True, False, {}, fallback_label_name=c._non_anon_label + ) for c in base._select_iterables(returning_cols) ] @@ -4231,11 +4499,9 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): return self._generate_generic_binary(binary, " ", **kw) def visit_regexp_replace_op_binary(self, binary, operator, **kw): - replacement = binary.modifiers["replacement"] - return "(%s, %s, %s)" % ( + return "(%s, %s)" % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw), - replacement._compiler_dispatch(self, **kw), ) @@ -4250,7 +4516,9 @@ def sql_compiler(self): def type_compiler(self): return self.dialect.type_compiler - def construct_params(self, params=None, extracted_parameters=None): + def construct_params( + self, params=None, extracted_parameters=None, escape_names=True + ): return None def visit_ddl(self, ddl, **kwargs): @@ -4520,8 +4788,6 @@ def get_identity_options(self, identity_options): text.append("NO MAXVALUE") if identity_options.cache is not None: text.append("CACHE %d" % identity_options.cache) - if identity_options.order is not None: - text.append("ORDER" if identity_options.order else "NO ORDER") if identity_options.cycle is not None: text.append("CYCLE" if identity_options.cycle else "NO CYCLE") return " ".join(text) @@ -5007,7 +5273,7 @@ def symbol_getter(obj): "in schema translate name '%s'" % name ) return quoted_name( - "[SCHEMA_%s]" % (name or "_none"), quote=False + "__[SCHEMA_%s]" % (name or "_none"), quote=False ) else: return obj.schema @@ -5033,7 
+5299,7 @@ def replace(m): ) return self.quote_schema(effective_schema) - return re.sub(r"(\[SCHEMA_([^\]]+)\])", replace, statement) + return re.sub(r"(__\[SCHEMA_([^\]]+)\])", replace, statement) def _escape_identifier(self, value): """Escape an identifier. diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index a9c9cb4c133..49bac18121f 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -1,5 +1,5 @@ # sql/crud.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -16,6 +16,7 @@ from . import dml from . import elements from . import roles +from .selectable import Select from .. import exc from .. import util @@ -76,14 +77,17 @@ def _get_crud_params(compiler, stmt, compile_state, **kw): if compile_state._has_multi_parameters: spd = compile_state._multi_parameters[0] stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} elif compile_state._ordered_values: spd = compile_state._dict_parameters stmt_parameter_tuples = compile_state._ordered_values + spd_str_key = {_column_as_key(key) for key in spd} elif compile_state._dict_parameters: spd = compile_state._dict_parameters stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} else: - stmt_parameter_tuples = spd = None + stmt_parameter_tuples = spd = spd_str_key = None # if we have statement parameters - set defaults in the # compiled params @@ -93,7 +97,7 @@ def _get_crud_params(compiler, stmt, compile_state, **kw): parameters = dict( (_column_as_key(key), REQUIRED) for key in compiler.column_keys - if key not in spd + if key not in spd_str_key ) else: parameters = dict( @@ -339,10 +343,20 @@ def _scan_insert_from_select_cols( if add_select_cols: values.extend(add_select_cols) ins_from_select = compiler.stack[-1]["insert_from_select"] + if not isinstance(ins_from_select, Select): + raise exc.CompileError( + "Can't extend statement for INSERT..FROM SELECT to include " + "additional default-holding column(s) " + "%s. Convert the selectable to a subquery() first, or pass " + "include_defaults=False to Insert.from_select() to skip these " + "columns." + % (", ".join(repr(key) for _, key, _ in add_select_cols),) + ) ins_from_select = ins_from_select._generate() - ins_from_select._raw_columns = tuple( - ins_from_select._raw_columns - ) + tuple(expr for col, col_expr, expr in add_select_cols) + # copy raw_columns + ins_from_select._raw_columns = list(ins_from_select._raw_columns) + [ + expr for col, col_expr, expr in add_select_cols + ] compiler.stack[-1]["insert_from_select"] = ins_from_select diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index f8985548ee0..2c88dc67d53 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -22,6 +22,9 @@ class _DDLCompiles(ClauseElement): + _hierarchy_supports_caching = False + """disable cache warnings for all _DDLCompiles subclasses. """ + def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" @@ -309,8 +312,10 @@ def __init__(self, statement, context=None, bind=None): :param statement: A string or unicode string to be executed. 
Statements will be - processed with Python's string formatting operator. See the - ``context`` argument and the ``execute_at`` method. + processed with Python's string formatting operator using + a fixed set of string substitutions, as well as additional + substitutions provided by the optional :paramref:`.DDL.context` + parameter. A literal '%' in a statement must be escaped as '%%'. @@ -417,6 +422,8 @@ class CreateSchema(_CreateDropBase): __visit_name__ = "create_schema" + stringify_dialect = "default" + def __init__(self, name, quote=None, **kw): """Create a new :class:`.CreateSchema` construct.""" @@ -433,6 +440,8 @@ class DropSchema(_CreateDropBase): __visit_name__ = "drop_schema" + stringify_dialect = "default" + def __init__(self, name, quote=None, cascade=False, **kw): """Create a new :class:`.DropSchema` construct.""" diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 036a96e9fd2..e09c53b636b 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -168,7 +168,11 @@ def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): def _getitem_impl(expr, op, other, **kw): - if isinstance(expr.type, type_api.INDEXABLE): + if ( + isinstance(expr.type, type_api.INDEXABLE) + or isinstance(expr.type, type_api.TypeDecorator) + and isinstance(expr.type.impl, type_api.INDEXABLE) + ): other = coercions.expect( roles.BinaryElementRole, other, expr=expr, operator=op ) @@ -260,41 +264,41 @@ def _collate_impl(expr, op, other, **kw): def _regexp_match_impl(expr, op, pattern, flags, **kw): - if flags is not None: - flags = coercions.expect( + return BinaryExpression( + expr, + coercions.expect( roles.BinaryElementRole, - flags, + pattern, expr=expr, - operator=operators.regexp_replace_op, - ) - return _boolean_compare( - expr, + operator=operators.comma_op, + ), op, - pattern, - flags=flags, - negate=operators.not_regexp_match_op - if op is operators.regexp_match_op - else operators.regexp_match_op, - **kw + negate=operators.not_regexp_match_op, + modifiers={"flags": flags}, ) def _regexp_replace_impl(expr, op, pattern, replacement, flags, **kw): - replacement = coercions.expect( - roles.BinaryElementRole, - replacement, - expr=expr, - operator=operators.regexp_replace_op, - ) - if flags is not None: - flags = coercions.expect( - roles.BinaryElementRole, - flags, - expr=expr, - operator=operators.regexp_replace_op, - ) - return _binary_operate( - expr, op, pattern, replacement=replacement, flags=flags, **kw + return BinaryExpression( + expr, + ClauseList( + coercions.expect( + roles.BinaryElementRole, + pattern, + expr=expr, + operator=operators.comma_op, + ), + coercions.expect( + roles.BinaryElementRole, + replacement, + expr=expr, + operator=operators.comma_op, + ), + operator=operators.comma_op, + group=False, + ), + op, + modifiers={"flags": flags}, ) diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index ebff0df88d1..d25e3f85c62 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -1,5 +1,5 @@ # sql/dml.py -# Copyright (C) 2009-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -48,6 +48,21 @@ class 
DMLState(CompileState): def __init__(self, statement, compiler, **kw): raise NotImplementedError() + @classmethod + def get_entity_description(cls, statement): + return {"name": statement.table.name, "table": statement.table} + + @classmethod + def get_returning_column_descriptions(cls, statement): + return [ + { + "name": c.key, + "type": c.type, + "expr": c, + } + for c in statement._all_selected_columns + ] + @property def dml_table(self): return self.statement.table @@ -174,6 +189,14 @@ def __init__(self, statement, compiler, **kw): if statement._multi_values: self._process_multi_values(statement) + @util.memoized_property + def _insert_col_keys(self): + # this is also done in crud.py -> _key_getters_for_crud_column + return [ + coercions.expect_as_key(roles.DMLColumnRole, col) + for col in self._dict_parameters + ] + @CompileState.plugin_for("default", "update") class UpdateDMLState(DMLState): @@ -424,7 +447,7 @@ def returning(self, *cols): :ref:`tutorial_insert_returning` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 if self._return_defaults: raise exc.InvalidRequestError( "return_defaults() is already configured on this statement" @@ -486,6 +509,89 @@ def with_hint(self, text, selectable=None, dialect_name="*"): self._hints = self._hints.union({(selectable, dialect_name): text}) + @property + def entity_description(self): + """Return a :term:`plugin-enabled` description of the table and/or + entity which this DML construct is operating against. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. + + For a Core statement, the structure returned by this accessor + is derived from the :attr:`.UpdateBase.table` attribute, and + refers to the :class:`.Table` being inserted, updated, or deleted:: + + >>> stmt = insert(user_table) + >>> stmt.entity_description + { + "name": "user_table", + "table": Table("user_table", ...) + } + + .. versionadded:: 1.4.33 + + .. seealso:: + + :attr:`.UpdateBase.returning_column_descriptions` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ + meth = DMLState.get_plugin_class(self).get_entity_description + return meth(self) + + @property + def returning_column_descriptions(self): + """Return a :term:`plugin-enabled` description of the columns + which this DML construct is RETURNING against, in other words + the expressions established as part of :meth:`.UpdateBase.returning`. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. + + For a Core statement, the structure returned by this accessor is + derived from the same objects that are returned by the + :attr:`.UpdateBase.exported_columns` accessor:: + + >>> stmt = insert(user_table).returning(user_table.c.id, user_table.c.name) + >>> stmt.entity_description + [ + { + "name": "id", + "type": Integer, + "expr": Column("id", Integer(), table=, ...) + }, + { + "name": "name", + "type": String(), + "expr": Column("name", String(), table=, ...) + }, + ] + + .. versionadded:: 1.4.33 + + .. 
seealso:: + + :attr:`.UpdateBase.entity_description` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ # noqa: E501 + meth = DMLState.get_plugin_class( + self + ).get_returning_column_descriptions + return meth(self) + class ValuesBase(UpdateBase): """Supplies support for :meth:`.ValuesBase.values` to @@ -605,7 +711,7 @@ def values(self, *args, **kwargs): .. seealso:: - :ref:`execute_multiple` - an introduction to + :ref:`tutorial_multiple_parameters` - an introduction to the traditional Core method of multiple parameter set invocation for INSERTs and other statements. @@ -822,12 +928,12 @@ class Insert(ValuesBase): ("_multi_values", InternalTraversal.dp_dml_multi_values), ("select", InternalTraversal.dp_clauseelement), ("_post_values_clause", InternalTraversal.dp_clauseelement), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ("_return_defaults", InternalTraversal.dp_boolean), ( "_return_defaults_columns", - InternalTraversal.dp_clauseelement_list, + InternalTraversal.dp_clauseelement_tuple, ), ] + HasPrefixes._has_prefixes_traverse_internals @@ -876,9 +982,6 @@ def __init__( .. seealso:: - :ref:`coretutorial_insert_expressions` - in the - :ref:`1.x tutorial ` - :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` @@ -920,9 +1023,7 @@ def __init__( .. seealso:: - :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial - - :ref:`inserts_and_updates` - SQL Expression Tutorial + :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` """ super(Insert, self).__init__(table, values, prefixes) @@ -1037,16 +1138,6 @@ def where(self, *whereclause): .. seealso:: - **1.x Tutorial Examples** - - :ref:`tutorial_1x_correlated_updates` - - :ref:`multi_table_updates` - - :ref:`multi_table_deletes` - - **2.0 Tutorial Examples** - :ref:`tutorial_correlated_updates` :ref:`tutorial_update_from` @@ -1117,16 +1208,16 @@ class Update(DMLWhereBase, ValuesBase): _traverse_internals = ( [ ("table", InternalTraversal.dp_clauseelement), - ("_where_criteria", InternalTraversal.dp_clauseelement_list), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), ("_inline", InternalTraversal.dp_boolean), ("_ordered_values", InternalTraversal.dp_dml_ordered_values), ("_values", InternalTraversal.dp_dml_values), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ("_return_defaults", InternalTraversal.dp_boolean), ( "_return_defaults_columns", - InternalTraversal.dp_clauseelement_list, + InternalTraversal.dp_clauseelement_tuple, ), ] + HasPrefixes._has_prefixes_traverse_internals @@ -1178,15 +1269,6 @@ def __init__( :meth:`_expression.TableClause.update` method on :class:`_schema.Table`. - .. seealso:: - - :ref:`inserts_and_updates` - in the - :ref:`1.x tutorial ` - - :ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial` - - - :param table: A :class:`_schema.Table` object representing the database table to be updated. @@ -1298,7 +1380,7 @@ def ordered_values(self, *args): .. seealso:: - :ref:`updates_order_parameters` - full example of the + :ref:`tutorial_parameter_ordered_updates` - full example of the :meth:`_expression.Update.ordered_values` method. .. 
versionchanged:: 1.4 The :meth:`_expression.Update.ordered_values` @@ -1354,8 +1436,8 @@ class Delete(DMLWhereBase, UpdateBase): _traverse_internals = ( [ ("table", InternalTraversal.dp_clauseelement), - ("_where_criteria", InternalTraversal.dp_clauseelement_list), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ] + HasPrefixes._has_prefixes_traverse_internals diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index e883454de65..6c9bbbaccfc 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -148,7 +148,7 @@ def literal(value, type_=None): def outparam(key, type_=None): - """Create an 'OUT' parameter for usage in functions (stored procedures), + r"""Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. @@ -203,7 +203,8 @@ class ClauseElement( is_clause_element = True is_selectable = False - + _gen_static_annotations_cache_key = False + _is_table = False _is_textual = False _is_from_clause = False _is_returns_rows = False @@ -216,6 +217,7 @@ class ClauseElement( _is_lambda_element = False _is_singleton_constant = False _is_immutable = False + _is_star = False _order_by_label_element = None @@ -241,15 +243,22 @@ def _clone(self, **kw): """ skip = self._memoized_keys c = self.__class__.__new__(self.__class__) - c.__dict__ = {k: v for k, v in self.__dict__.items() if k not in skip} + + if skip: + # ensure this iteration remains atomic + c.__dict__ = { + k: v for k, v in self.__dict__.copy().items() if k not in skip + } + else: + c.__dict__ = self.__dict__.copy() # this is a marker that helps to "equate" clauses to each other # when a Select returns its list of FROM clauses. the cloning # process leaves around a lot of remnants of the previous clause # typically in the form of column expressions still attached to the # old table. 
- c._is_clone_of = self - + cc = self._is_clone_of + c._is_clone_of = cc if cc is not None else self return c def _negate_in_binary(self, negated_op, original_op): @@ -358,6 +367,7 @@ def params(self, *optionaldict, **kwargs): return self._replace_params(False, optionaldict, kwargs) def _replace_params(self, unique, optionaldict, kwargs): + if len(optionaldict) == 1: kwargs.update(optionaldict[0]) elif len(optionaldict) > 1: @@ -373,7 +383,9 @@ def visit_bindparam(bind): bind._convert_to_unique() return cloned_traverse( - self, {"maintain_key": True}, {"bindparam": visit_bindparam} + self, + {"maintain_key": True, "detect_subquery_cols": True}, + {"bindparam": visit_bindparam}, ) def compare(self, other, **kw): @@ -838,6 +850,9 @@ def comparator(self): else: return comparator_factory(self) + def __setstate__(self, state): + self.__dict__.update(state) + def __getattr__(self, key): try: return getattr(self.comparator, key) @@ -890,21 +905,21 @@ def base_columns(self): @util.memoized_property def proxy_set(self): - s = util.column_set([self]) + s = util.column_set([self._deannotate()]) for c in self._proxies: s.update(c.proxy_set) return s - def _uncached_proxy_set(self): + def _uncached_proxy_list(self): """An 'uncached' version of proxy set. This is so that we can read annotations from the list of columns without breaking the caching of the above proxy_set. """ - s = util.column_set([self]) + s = [self] for c in self._proxies: - s.update(c._uncached_proxy_set()) + s.extend(c._uncached_proxy_list()) return s def shares_lineage(self, othercolumn): @@ -995,7 +1010,7 @@ def cast(self, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`_expression.cast` @@ -1198,6 +1213,14 @@ def _dedupe_anon_label_idx(self, idx): else: return self._dedupe_anon_tq_label_idx(idx) + @property + def _proxy_key(self): + wce = self.wrapped_column_expression + + if not wce._is_text_clause: + return wce._proxy_key + return super(WrapsColumnExpression, self)._proxy_key + class BindParameter(roles.InElementRole, ColumnElement): r"""Represent a "bound expression". @@ -1226,6 +1249,7 @@ class BindParameter(roles.InElementRole, ColumnElement): ("type", InternalTraversal.dp_type), ("callable", InternalTraversal.dp_plain_dict), ("value", InternalTraversal.dp_plain_obj), + ("literal_execute", InternalTraversal.dp_boolean), ] _is_crud = False @@ -1453,15 +1477,6 @@ def __init__( .. versionchanged:: 1.3 the "expanding" bound parameter feature now supports empty lists. - - .. seealso:: - - :ref:`coretutorial_bind_param` - - :ref:`coretutorial_insert_expressions` - - :func:`.outparam` - :param literal_execute: if True, the bound parameter will be rendered in the compile phase with a special "POSTCOMPILE" token, and the SQLAlchemy compiler will @@ -1483,6 +1498,11 @@ def __init__( :ref:`change_4808`. + .. seealso:: + + :ref:`tutorial_sending_parameters` - in the + :ref:`unified_tutorial` + """ if required is NO_ARG: required = value is NO_ARG and callable_ is None @@ -1628,6 +1648,15 @@ def _with_binary_element_type(self, type_): def _clone(self, maintain_key=False, **kw): c = ClauseElement._clone(self, **kw) + # ensure all the BindParameter objects stay in cloned set. + # in #7823, we changed "clone" so that a clone only keeps a reference + # to the "original" element, since for column correspondence, that's + # all we need. 
However, for BindParam, _cloned_set is used by + # the "cache key bind match" lookup, which means if any of those + # interim BindParameter objects became part of a cache key in the + # cache, we need it. So here, make sure all clones keep carrying + # forward. + c._cloned_set.update(self._cloned_set) if not maintain_key and self.unique: c.key = _anonymous_label.safe_construct( id(c), c._orig_key or "param", sanitize_key=True @@ -1659,6 +1688,7 @@ def _gen_cache_key(self, anon_map, bindparams): self.__class__, self.type._static_cache_key, self.key % anon_map if self._key_is_anon else self.key, + self.literal_execute, ) def _convert_to_unique(self): @@ -1778,6 +1808,10 @@ def _select_iterable(self): _allow_label_resolve = False + @property + def _is_star(self): + return self.text == "*" + def __init__(self, text, bind=None): self._bind = bind self._bindparams = {} @@ -1884,7 +1918,7 @@ def _create_text(cls, text, bind=None): .. seealso:: - :ref:`sqlexpression_text` - in the Core tutorial + :ref:`tutorial_select_arbitrary_text` """ @@ -2943,28 +2977,18 @@ def __init__(self, *whens, **kw): pass value = kw.pop("value", None) - if value is not None: - whenlist = [ - ( - coercions.expect( - roles.ExpressionElementRole, - c, - apply_propagate_attrs=self, - ).self_group(), - coercions.expect(roles.ExpressionElementRole, r), - ) - for (c, r) in whens - ] - else: - whenlist = [ - ( - coercions.expect( - roles.ColumnArgumentRole, c, apply_propagate_attrs=self - ).self_group(), - coercions.expect(roles.ExpressionElementRole, r), - ) - for (c, r) in whens - ] + + whenlist = [ + ( + coercions.expect( + roles.ExpressionElementRole, + c, + apply_propagate_attrs=self, + ).self_group(), + coercions.expect(roles.ExpressionElementRole, r), + ) + for (c, r) in whens + ] if whenlist: type_ = list(whenlist[-1])[-1].type @@ -3045,7 +3069,7 @@ class Cast(WrapsColumnExpression, ColumnElement): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.cast` @@ -3059,7 +3083,7 @@ class Cast(WrapsColumnExpression, ColumnElement): _traverse_internals = [ ("clause", InternalTraversal.dp_clauseelement), - ("typeclause", InternalTraversal.dp_clauseelement), + ("type", InternalTraversal.dp_type), ] def __init__(self, expression, type_): @@ -3106,7 +3130,7 @@ def __init__(self, expression, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.type_coerce` - an alternative to CAST that coerces the type on the Python side only, which is often sufficient to generate the @@ -3227,7 +3251,7 @@ def __init__(self, expression, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.cast` @@ -3664,6 +3688,8 @@ class CollectionAggregate(UnaryExpression): """ + inherit_cache = True + @classmethod def _create_any(cls, expr): """Produce an ANY expression. 
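Quick usage sketch (not part of the patch) for the ANY collection aggregate whose ``inherit_cache`` flag is enabled above; the column names are made up::

    from sqlalchemy import any_, column
    from sqlalchemy.dialects import postgresql

    expr = column("uid") == any_(column("member_ids"))
    print(expr.compile(dialect=postgresql.dialect()))
    # renders roughly: uid = ANY (member_ids)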
@@ -3858,7 +3884,20 @@ class BinaryExpression(ColumnElement): ( "type", InternalTraversal.dp_type, - ), # affects JSON CAST operators + ), + ] + + _cache_key_traversal = [ + ("left", InternalTraversal.dp_clauseelement), + ("right", InternalTraversal.dp_clauseelement), + ("operator", InternalTraversal.dp_operator), + ("modifiers", InternalTraversal.dp_plain_dict), + # "type" affects JSON CAST operators, so while redundant in most cases, + # is needed for that one + ( + "type", + InternalTraversal.dp_type, + ), ] _is_implicitly_boolean = True @@ -3971,7 +4010,7 @@ class IndexExpression(BinaryExpression): """Represent the class of expressions that are like an "index" operation.""" - pass + inherit_cache = True class GroupedElement(ClauseElement): @@ -3994,6 +4033,10 @@ class Grouping(GroupedElement, ColumnElement): ("type", InternalTraversal.dp_type), ] + _cache_key_traversal = [ + ("element", InternalTraversal.dp_clauseelement), + ] + def __init__(self, element): self.element = element self.type = getattr(element, "type", type_api.NULLTYPE) @@ -4494,6 +4537,11 @@ class Label(roles.LabeledColumnExprRole, ColumnElement): ("_element", InternalTraversal.dp_clauseelement), ] + _cache_key_traversal = [ + ("name", InternalTraversal.dp_anon_name), + ("_element", InternalTraversal.dp_clauseelement), + ] + def __init__(self, name, element, type_=None): """Return a :class:`Label` object for the given :class:`_expression.ColumnElement`. @@ -4614,7 +4662,7 @@ def _make_proxy(self, selectable, name=None, **kw): # when a label name conflicts with other columns and select() # is attempting to disambiguate an explicit label, which is not what # the user would want. See issue #6090. - if key != self.name: + if key != self.name and not isinstance(self.name, _anonymous_label): raise exc.InvalidRequestError( "Label name %s is being renamed to an anonymous label due " "to disambiguation " @@ -4778,6 +4826,10 @@ class is usable by itself in those cases where behavioral requirements _is_multiparam_column = False + @property + def _is_star(self): + return self.is_literal and self.name == "*" + def __init__(self, text, type_=None, is_literal=False, _selectable=None): """Produce a :class:`.ColumnClause` object. 
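Illustrative sketch (not part of the patch) of the ``column()`` / ``literal_column()`` constructs whose docstring is edited above; table and column names are made up::

    from sqlalchemy import column, literal_column, select, table

    users = table("users", column("id"), column("name"))
    stmt = select(
        users.c.id, literal_column("'active'").label("status")
    ).where(users.c.name == "spongebob")
    print(stmt)
    # SELECT users.id, 'active' AS status FROM users WHERE users.name = :name_1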
@@ -4867,7 +4919,7 @@ def __init__(self, text, type_=None, is_literal=False, _selectable=None): :func:`_expression.text` - :ref:`sqlexpression_literal_column` + :ref:`tutorial_select_arbitrary_text` """ self.key = self.name = text @@ -4888,6 +4940,19 @@ def entity_namespace(self): else: return super(ColumnClause, self).entity_namespace + def _clone(self, detect_subquery_cols=False, **kw): + if ( + detect_subquery_cols + and self.table is not None + and self.table._is_subquery + ): + clone = kw.pop("clone") + table = clone(self.table, **kw) + new = table.c.corresponding_column(self) + return new + + return super(ColumnClause, self)._clone(**kw) + @HasMemoized.memoized_attribute def _from_objects(self): t = self.table @@ -5064,14 +5129,17 @@ def __init__(self, ident): class SavepointClause(_IdentifiedClause): __visit_name__ = "savepoint" + inherit_cache = False class RollbackToSavepointClause(_IdentifiedClause): __visit_name__ = "rollback_to_savepoint" + inherit_cache = False class ReleaseSavepointClause(_IdentifiedClause): __visit_name__ = "release_savepoint" + inherit_cache = False class quoted_name(util.MemoizedSlots, util.text_type): @@ -5107,10 +5175,11 @@ class quoted_name(util.MemoizedSlots, util.text_type): an unconditionally quoted name:: from sqlalchemy import create_engine + from sqlalchemy import inspect from sqlalchemy.sql import quoted_name engine = create_engine("oracle+cx_oracle://some_dsn") - engine.has_table(quoted_name("some_table", True)) + print(inspect(engine).has_table(quoted_name("some_table", True))) The above logic will run the "has table" logic against the Oracle backend, passing the name exactly as ``"some_table"`` without converting to @@ -5214,7 +5283,14 @@ def __init__(self, element, values): def _with_annotations(self, values): clone = super(AnnotatedColumnElement, self)._with_annotations(values) - clone.__dict__.pop("comparator", None) + for attr in ( + "comparator", + "_proxy_key", + "_tq_key_label", + "_tq_label", + "_non_anon_label", + ): + clone.__dict__.pop(attr, None) return clone @util.memoized_property @@ -5328,8 +5404,13 @@ def safe_construct( cls, seed, body, enclosing_label=None, sanitize_key=False ): + # need to escape chars that interfere with format + # strings in any case, issue #8724 + body = re.sub(r"[%\(\) \$]+", "_", body) + if sanitize_key: - body = re.sub(r"[%\(\) \$]+", "_", body).strip("_") + # sanitize_key is then an extra step used by BindParameter + body = body.strip("_") label = "%%(%d %s)s" % (seed, body.replace("%", "%%")) if enclosing_label: diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index db80b51e352..63327814c90 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/sql/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# sql/events.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -268,9 +268,6 @@ def receive_column_reflect(inspector, table, column_info): ] ) - A future release will allow it to be associated with a specific - :class:`_schema.MetaData` object as well. - The dictionary of column information as returned by the dialect is passed, and can be modified. 
The dictionary is that returned in each element of the list returned diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index 129e628ab8b..787b698d1b7 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -1,5 +1,5 @@ # sql/expression.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 5729f81f512..b44d5de3079 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -1,5 +1,5 @@ # sql/functions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -71,7 +71,7 @@ class FunctionElement(Executable, ColumnElement, FromClause, Generative): .. seealso:: - :ref:`coretutorial_functions` - in the Core tutorial + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` :class:`.Function` - named SQL function. @@ -167,7 +167,7 @@ def scalar_table_valued(self, name, type_=None): :meth:`_functions.FunctionElement.column_valued` - """ # noqa E501 + """ # noqa: E501 return ScalarFunctionColumn(self, name, type_) @@ -212,8 +212,16 @@ def table_valued(self, *expr, **kw): string name will be added as a column to the .c collection of the resulting :class:`_sql.TableValuedAlias`. + :param joins_implicitly: when True, the table valued function may be + used in the FROM clause without any explicit JOIN to other tables + in the SQL query, and no "cartesian product" warning will be generated. + May be useful for SQL functions such as ``func.json_each()``. + + .. versionadded:: 1.4.33 + .. versionadded:: 1.4.0b2 + .. seealso:: :ref:`tutorial_functions_table_valued` - in the :ref:`unified_tutorial` @@ -229,11 +237,12 @@ def table_valued(self, *expr, **kw): :meth:`_sql.TableValuedAlias.render_derived` - renders the alias using a derived column clause, e.g. ``AS name(col1, col2, ...)`` - """ # noqa 501 + """ # noqa: 501 new_func = self._generate() with_ordinality = kw.pop("with_ordinality", None) + joins_implicitly = kw.pop("joins_implicitly", None) name = kw.pop("name", None) if with_ordinality: @@ -244,9 +253,9 @@ def table_valued(self, *expr, **kw): *expr ) - return new_func.alias(name=name) + return new_func.alias(name=name, joins_implicitly=joins_implicitly) - def column_valued(self, name=None): + def column_valued(self, name=None, joins_implicitly=False): """Return this :class:`_functions.FunctionElement` as a column expression that selects from itself as a FROM clause. @@ -262,6 +271,16 @@ def column_valued(self, name=None): gs = func.generate_series(1, 5, -1).alias().column + :param name: optional name to assign to the alias name that's generated. + If omitted, a unique anonymizing name is used. + + :param joins_implicitly: when True, the "table" portion of the column + valued function may be a member of the FROM clause without any + explicit JOIN to other tables in the SQL query, and no "cartesian + product" warning will be generated. May be useful for SQL functions + such as ``func.json_array_elements()``. + + .. versionadded:: 1.4.46 .. 
seealso:: @@ -271,9 +290,9 @@ def column_valued(self, name=None): :meth:`_functions.FunctionElement.table_valued` - """ # noqa 501 + """ # noqa: 501 - return self.alias(name=name).column + return self.alias(name=name, joins_implicitly=joins_implicitly).column @property def columns(self): @@ -296,7 +315,7 @@ def columns(self): :meth:`_functions.FunctionElement.table_valued` - generates table-valued SQL function expressions. - """ # noqa E501 + """ # noqa: E501 return ColumnCollection( columns=[(col.key, col) for col in self._all_selected_columns] @@ -409,7 +428,8 @@ def filter(self, *criterion): return FunctionFilter(self, *criterion) def as_comparison(self, left_index, right_index): - """Interpret this expression as a boolean comparison between two values. + """Interpret this expression as a boolean comparison between two + values. This method is used for an ORM use case described at :ref:`relationship_custom_operator_sql_function`. @@ -497,7 +517,7 @@ def within_group_type(self, within_group): return None - def alias(self, name=None): + def alias(self, name=None, joins_implicitly=False): r"""Produce a :class:`_expression.Alias` construct against this :class:`.FunctionElement`. @@ -518,7 +538,7 @@ def alias(self, name=None): in the columns or where clause, for a backend such as PostgreSQL. For a full table-valued expression, use the - :meth:`_function.FunctionElement.table_valued` method first to + :meth:`_functions.FunctionElement.table_valued` method first to establish named columns. e.g.:: @@ -539,6 +559,17 @@ def alias(self, name=None): .. versionadded:: 1.4.0b2 Added the ``.column`` accessor + :param name: alias name, will be rendered as ``AS `` in the + FROM clause + + :param joins_implicitly: when True, the table valued function may be + used in the FROM clause without any explicit JOIN to other tables + in the SQL query, and no "cartesian product" warning will be + generated. May be useful for SQL functions such as + ``func.json_each()``. + + .. versionadded:: 1.4.33 + .. seealso:: :ref:`tutorial_functions_table_valued` - @@ -554,7 +585,10 @@ def alias(self, name=None): """ return TableValuedAlias._construct( - self, name, table_value_type=self.type + self, + name, + table_value_type=self.type, + joins_implicitly=joins_implicitly, ) def select(self): @@ -777,7 +811,7 @@ class _FunctionGenerator(object): .. 
seealso:: - :ref:`coretutorial_functions` - in the Core Tutorial + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` :class:`.Function` @@ -977,6 +1011,7 @@ class that is instantiated automatically when called class as_utc(GenericFunction): type = DateTime + inherit_cache = True print(select(func.as_utc())) @@ -991,6 +1026,7 @@ class as_utc(GenericFunction): class as_utc(GenericFunction): type = DateTime package = "time" + inherit_cache = True The above function would be available from :data:`.func` using the package name ``time``:: @@ -1008,6 +1044,7 @@ class GeoBuffer(GenericFunction): package = "geo" name = "ST_Buffer" identifier = "buffer" + inherit_cache = True The above function will render as follows:: @@ -1026,6 +1063,7 @@ class GeoBuffer(GenericFunction): package = "geo" name = quoted_name("ST_Buffer", True) identifier = "buffer" + inherit_cache = True The above function will render as:: @@ -1143,19 +1181,19 @@ class coalesce(ReturnTypeFromArgs): inherit_cache = True -class max(ReturnTypeFromArgs): # noqa A001 +class max(ReturnTypeFromArgs): # noqa: A001 """The SQL MAX() aggregate function.""" inherit_cache = True -class min(ReturnTypeFromArgs): # noqa A001 +class min(ReturnTypeFromArgs): # noqa: A001 """The SQL MIN() aggregate function.""" inherit_cache = True -class sum(ReturnTypeFromArgs): # noqa A001 +class sum(ReturnTypeFromArgs): # noqa: A001 """The SQL SUM() aggregate function.""" inherit_cache = True diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 03cd05f0202..446ceb09ff2 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -1,13 +1,15 @@ # sql/lambdas.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +import inspect import itertools import operator import sys +import threading import types import weakref @@ -16,7 +18,6 @@ from . import roles from . import schema from . import traversals -from . import type_api from . 
import visitors from .base import _clone from .base import Options @@ -217,11 +218,17 @@ def _retrieve_tracker_rec(self, fn, apply_propagate_attrs, opts): if rec is None: if cache_key is not traversals.NO_CACHE: - rec = AnalyzedFunction( - tracker, self, apply_propagate_attrs, fn - ) - rec.closure_bindparams = bindparams - lambda_cache[tracker_key + cache_key] = rec + + with AnalyzedCode._generation_mutex: + key = tracker_key + cache_key + if key not in lambda_cache: + rec = AnalyzedFunction( + tracker, self, apply_propagate_attrs, fn + ) + rec.closure_bindparams = bindparams + lambda_cache[key] = rec + else: + rec = lambda_cache[key] else: rec = NonAnalyzedFunction(self._invoke_user_fn(fn)) @@ -606,6 +613,8 @@ class AnalyzedCode(object): ) _fns = weakref.WeakKeyDictionary() + _generation_mutex = threading.RLock() + @classmethod def get(cls, fn, lambda_element, lambda_kw, **kw): try: @@ -613,12 +622,22 @@ def get(cls, fn, lambda_element, lambda_kw, **kw): return cls._fns[fn.__code__] except KeyError: pass - cls._fns[fn.__code__] = analyzed = AnalyzedCode( - fn, lambda_element, lambda_kw, **kw - ) - return analyzed + + with cls._generation_mutex: + # check for other thread already created object + if fn.__code__ in cls._fns: + return cls._fns[fn.__code__] + + cls._fns[fn.__code__] = analyzed = AnalyzedCode( + fn, lambda_element, lambda_kw, **kw + ) + return analyzed def __init__(self, fn, lambda_element, opts): + if inspect.ismethod(fn): + raise exc.ArgumentError( + "Method %s may not be passed as a SQL expression" % fn + ) closure = fn.__closure__ self.track_bound_values = ( @@ -1195,11 +1214,11 @@ def __call__(self, *arg, **kw): return value def operate(self, op, *other, **kwargs): - elem = object.__getattribute__(self, "__clause_element__")() + elem = object.__getattribute__(self, "_py_wrapper_literal")() return op(elem, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): - elem = object.__getattribute__(self, "__clause_element__")() + elem = object.__getattribute__(self, "_py_wrapper_literal")() return op(other, elem, **kwargs) def _extract_bound_parameters(self, starting_point, result_list): @@ -1212,16 +1231,19 @@ def _extract_bound_parameters(self, starting_point, result_list): element = getter(starting_point) pywrapper._sa__extract_bound_parameters(element, result_list) - def __clause_element__(self): + def _py_wrapper_literal(self, expr=None, operator=None, **kw): param = object.__getattribute__(self, "_param") to_evaluate = object.__getattribute__(self, "_to_evaluate") if param is None: name = object.__getattribute__(self, "_name") self._param = param = elements.BindParameter( - name, required=False, unique=True + name, + required=False, + unique=True, + _compared_to_operator=operator, + _compared_to_type=expr.type if expr is not None else None, ) self._has_param = True - param.type = type_api._resolve_value_to_type(to_evaluate) return param._with_value(to_evaluate, maintain_key=True) def __bool__(self): @@ -1239,6 +1261,7 @@ def __getattribute__(self, key): "__clause_element__", "operate", "reverse_operate", + "_py_wrapper_literal", "__class__", "__dict__", ): diff --git a/lib/sqlalchemy/sql/naming.py b/lib/sqlalchemy/sql/naming.py index d01eabb5883..6b890303081 100644 --- a/lib/sqlalchemy/sql/naming.py +++ b/lib/sqlalchemy/sql/naming.py @@ -1,5 +1,5 @@ -# sqlalchemy/naming.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# sql/naming.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy 
and is released under diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 695e086b88f..189b1e8dce1 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1,5 +1,5 @@ # sql/operators.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -145,22 +145,31 @@ def op( between this element and the expression passed to the generated function. - :param precedence: precedence to apply to the operator, when - parenthesizing expressions. A lower number will cause the expression - to be parenthesized when applied against another operator with - higher precedence. The default value of ``0`` is lower than all - operators except for the comma (``,``) and ``AS`` operators. - A value of 100 will be higher or equal to all operators, and -100 - will be lower than or equal to all operators. - - :param is_comparison: if True, the operator will be considered as a - "comparison" operator, that is which evaluates to a boolean - true/false value, like ``==``, ``>``, etc. This flag should be set + :param precedence: precedence which the database is expected to apply + to the operator in SQL expressions. This integer value acts as a hint + for the SQL compiler to know when explicit parenthesis should be + rendered around a particular operation. A lower number will cause the + expression to be parenthesized when applied against another operator + with higher precedence. The default value of ``0`` is lower than all + operators except for the comma (``,``) and ``AS`` operators. A value + of 100 will be higher or equal to all operators, and -100 will be + lower than or equal to all operators. + + .. seealso:: + + :ref:`faq_sql_expression_op_parenthesis` - detailed description + of how the SQLAlchemy SQL compiler renders parenthesis + + :param is_comparison: legacy; if True, the operator will be considered + as a "comparison" operator, that is which evaluates to a boolean + true/false value, like ``==``, ``>``, etc. This flag is provided so that ORM relationships can establish that the operator is a comparison operator when used in a custom join condition. - .. versionadded:: 0.9.2 - added the - :paramref:`.Operators.op.is_comparison` flag. + Using the ``is_comparison`` parameter is superseded by using the + :meth:`.Operators.bool_op` method instead; this more succinct + operator sets this parameter automatically. In SQLAlchemy 2.0 it + will also provide for improved typing support. :param return_type: a :class:`.TypeEngine` class or object that will force the return type of an expression produced by this operator @@ -171,6 +180,8 @@ def op( .. seealso:: + :meth:`.Operators.bool_op` + :ref:`types_operators` :ref:`relationship_custom_operator` @@ -189,7 +200,9 @@ def bool_op(self, opstring, precedence=0): This method is shorthand for calling :meth:`.Operators.op` and passing the :paramref:`.Operators.op.is_comparison` - flag with True. + flag with True. A key advantage to using :meth:`.Operators.bool_op` + is that when using column constructs, the "boolean" nature of the + returned expression will be present for :pep:`484` purposes. .. 
seealso:: @@ -211,8 +224,8 @@ def operate(self, op, *other, **kwargs): side:: class MyComparator(ColumnOperators): - def operate(self, op, other): - return op(func.lower(self), func.lower(other)) + def operate(self, op, other, **kwargs): + return op(func.lower(self), func.lower(other), **kwargs) :param op: Operator callable. :param \*other: the 'other' side of the operation. Will @@ -280,10 +293,24 @@ def __init__( ) def __eq__(self, other): - return isinstance(other, custom_op) and other.opstring == self.opstring + return ( + isinstance(other, custom_op) + and other._hash_key() == self._hash_key() + ) def __hash__(self): - return id(self) + return hash(self._hash_key()) + + def _hash_key(self): + return ( + self.__class__, + self.opstring, + self.precedence, + self.is_comparison, + self.natural_self_precedent, + self.eager_grouping, + self.return_type._static_cache_key if self.return_type else None, + ) def __call__(self, left, right, **kw): return left.operate(self, right, **kw) @@ -460,6 +487,16 @@ def concat(self, other): """ return self.operate(concat_op, other) + def _rconcat(self, other): + """Implement an 'rconcat' operator. + + this is for internal use at the moment + + .. versionadded:: 1.4.40 + + """ + return self.reverse_operate(concat_op, other) + def like(self, other, escape=None): r"""Implement the ``like`` operator. @@ -1000,8 +1037,8 @@ def regexp_match(self, pattern, flags=None): :param pattern: The regular expression pattern string or column clause. - :param flags: Any regular expression string flags to apply. Flags - tend to be backend specific. It can be a string or a column clause. + :param flags: Any regular expression string flags to apply, passed as + plain Python string only. These flags are backend specific. Some backends, like PostgreSQL and MariaDB, may alternatively specify the flags as part of the pattern. When using the ignore case flag 'i' in PostgreSQL, the ignore case @@ -1009,6 +1046,14 @@ def regexp_match(self, pattern, flags=None): .. versionadded:: 1.4 + .. versionchanged:: 1.4.48, 2.0.18 Note that due to an implementation + error, the "flags" parameter previously accepted SQL expression + objects such as column expressions in addition to plain Python + strings. This implementation did not work correctly with caching + and was removed; strings only should be passed for the "flags" + parameter, as these flags are rendered as literal inline values + within SQL expressions. + .. seealso:: :meth:`_sql.ColumnOperators.regexp_replace` @@ -1043,13 +1088,22 @@ def regexp_replace(self, pattern, replacement, flags=None): :param pattern: The regular expression pattern string or column clause. :param pattern: The replacement string or column clause. - :param flags: Any regular expression string flags to apply. Flags - tend to be backend specific. It can be a string or a column clause. + :param flags: Any regular expression string flags to apply, passed as + plain Python string only. These flags are backend specific. Some backends, like PostgreSQL and MariaDB, may alternatively specify the flags as part of the pattern. .. versionadded:: 1.4 + .. versionchanged:: 1.4.48, 2.0.18 Note that due to an implementation + error, the "flags" parameter previously accepted SQL expression + objects such as column expressions in addition to plain Python + strings. This implementation did not work correctly with caching + and was removed; strings only should be passed for the "flags" + parameter, as these flags are rendered as literal inline values + within SQL expressions. 
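Illustrative sketch (not part of the patch) of the string-only ``flags`` behavior described above, compiled against the PostgreSQL dialect; the rendered SQL shown in comments is approximate::

    from sqlalchemy import column
    from sqlalchemy.dialects import postgresql

    match = column("name").regexp_match(r"^spong", flags="i")
    replace = column("name").regexp_replace(r"\s+", " ", flags="g")

    print(match.compile(dialect=postgresql.dialect()))
    # name ~* %(name_1)s   ('i' selects the case-insensitive operator)
    print(replace.compile(dialect=postgresql.dialect()))
    # REGEXP_REPLACE(name, %(name_1)s, %(name_2)s, 'g')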
+ + .. seealso:: :meth:`_sql.ColumnOperators.regexp_match` @@ -1506,7 +1560,12 @@ def filter_op(a, b): def concat_op(a, b): - return a.concat(b) + try: + concat = a.concat + except AttributeError: + return b._rconcat(a) + else: + return concat(b) def desc_op(a): diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index 70ad4cefa7f..1f952b641c1 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -1,5 +1,5 @@ # sql/roles.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -36,6 +36,11 @@ class HasCacheKeyRole(SQLRole): _role_name = "Cacheable Core or ORM object" +class ExecutableOptionRole(SQLRole): + __slots__ = () + _role_name = "ExecutionOption Core or ORM object" + + class LiteralValueRole(SQLRole): _role_name = "Literal Python value" diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 166ad98cd89..a8812376c0c 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1,5 +1,5 @@ # sql/schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -58,11 +58,20 @@ from .. import util -RETAIN_SCHEMA = util.symbol("retain_schema") +RETAIN_SCHEMA = util.symbol( + "retain_schema" + """Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` + or in some cases a :class:`_schema.ForeignKey` object, in situations + where the object is being copied for a :meth:`.Table.to_metadata` + operation, should retain the schema name that it already has. + + """ +) BLANK_SCHEMA = util.symbol( "blank_schema", - """Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence` + """Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` + or in some cases a :class:`_schema.ForeignKey` object should have 'None' for its schema, even if the parent :class:`_schema.MetaData` has specified a schema. 
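Small sketch (not part of the patch) of the ``BLANK_SCHEMA`` symbol documented above; the table and schema names are made up::

    from sqlalchemy import BLANK_SCHEMA, Column, Integer, MetaData, Table

    src = MetaData(schema="remote")
    user = Table("user", src, Column("id", Integer, primary_key=True))

    # copy into a new MetaData; BLANK_SCHEMA means "use the target
    # MetaData's schema" (None here) rather than keeping "remote"
    dest = MetaData()
    copied = user.to_metadata(dest, schema=BLANK_SCHEMA)
    print(copied.schema)  # None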
@@ -163,10 +172,11 @@ class Table(DialectKWArgs, SchemaItem, TableClause): e.g.:: - mytable = Table("mytable", metadata, - Column('mytable_id', Integer, primary_key=True), - Column('value', String(50)) - ) + mytable = Table( + "mytable", metadata, + Column('mytable_id', Integer, primary_key=True), + Column('value', String(50)) + ) The :class:`_schema.Table` object constructs a unique instance of itself based @@ -534,6 +544,8 @@ def listen_for_reflect(table, column_info): ("schema", InternalTraversal.dp_string) ] + _is_table = True + def _gen_cache_key(self, anon_map, bindparams): if self._annotations: return (self,) + self._annotations_cache_key @@ -763,25 +775,25 @@ def _init_existing(self, *args, **kwargs): ) include_columns = kwargs.pop("include_columns", None) - - resolve_fks = kwargs.pop("resolve_fks", True) - if include_columns is not None: for c in self.c: if c.name not in include_columns: self._columns.remove(c) + resolve_fks = kwargs.pop("resolve_fks", True) + for key in ("quote", "quote_schema"): if key in kwargs: raise exc.ArgumentError( "Can't redefine 'quote' or 'quote_schema' arguments" ) - if "comment" in kwargs: - self.comment = kwargs.pop("comment", None) - - if "info" in kwargs: - self.info = kwargs.pop("info") + # update `self` with these kwargs, if provided + self.comment = kwargs.pop("comment", self.comment) + self.implicit_returning = kwargs.pop( + "implicit_returning", self.implicit_returning + ) + self.info = kwargs.pop("info", self.info) if autoload: if not autoload_replace: @@ -1047,7 +1059,14 @@ def to_metadata( target schema that we are changing to, the :class:`_schema.ForeignKeyConstraint` object, and the existing "target schema" of that constraint. The function should return the - string schema name that should be applied. + string schema name that should be applied. To reset the schema + to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no + change, return ``None`` or :data:`.RETAIN_SCHEMA`. + + .. versionchanged:: 1.4.33 The ``referred_schema_fn`` function + may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA` + symbols. + E.g.:: def referred_schema_fn(table, to_schema, @@ -1202,22 +1221,63 @@ def __init__(self, *args, **kwargs): equivalent keyword argument is available such as ``server_default``, ``default`` and ``unique``. - :param autoincrement: Set up "auto increment" semantics for an integer - primary key column. The default value is the string ``"auto"`` - which indicates that a single-column primary key that is of - an INTEGER type with no stated client-side or python-side defaults - should receive auto increment semantics automatically; - all other varieties of primary key columns will not. This - includes that :term:`DDL` such as PostgreSQL SERIAL or MySQL - AUTO_INCREMENT will be emitted for this column during a table - create, as well as that the column is assumed to generate new - integer primary key values when an INSERT statement invokes which - will be retrieved by the dialect. When used in conjunction with - :class:`.Identity` on a dialect that supports it, this parameter - has no effect. - - The flag may be set to ``True`` to indicate that a column which - is part of a composite (e.g. multi-column) primary key should + :param autoincrement: Set up "auto increment" semantics for an + **integer primary key column with no foreign key dependencies** + (see later in this docstring for a more specific definition). 
+ This may influence the :term:`DDL` that will be emitted for + this column during a table create, as well as how the column + will be considered when INSERT statements are compiled and + executed. + + The default value is the string ``"auto"``, + which indicates that a single-column (i.e. non-composite) primary key + that is of an INTEGER type with no other client-side or server-side + default constructs indicated should receive auto increment semantics + automatically. Other values include ``True`` (force this column to + have auto-increment semantics for a :term:`composite primary key` as + well), ``False`` (this column should never have auto-increment + semantics), and the string ``"ignore_fk"`` (special-case for foreign + key columns, see below). + + The term "auto increment semantics" refers both to the kind of DDL + that will be emitted for the column within a CREATE TABLE statement, + when methods such as :meth:`.MetaData.create_all` and + :meth:`.Table.create` are invoked, as well as how the column will be + considered when an INSERT statement is compiled and emitted to the + database: + + * **DDL rendering** (i.e. :meth:`.MetaData.create_all`, + :meth:`.Table.create`): When used on a :class:`.Column` that has + no other + default-generating construct associated with it (such as a + :class:`.Sequence` or :class:`.Identity` construct), the parameter + will imply that database-specific keywords such as PostgreSQL + ``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server + should also be rendered. Not every database backend has an + "implied" default generator available; for example the Oracle + backend always needs an explicit construct such as + :class:`.Identity` to be included with a :class:`.Column` in order + for the DDL rendered to include auto-generating constructs to also + be produced in the database. + + * **INSERT semantics** (i.e. when a :func:`_sql.insert` construct is + compiled into a SQL string and is then executed on a database using + :meth:`_engine.Connection.execute` or equivalent): A single-row + INSERT statement will be known to produce a new integer primary key + value automatically for this column, which will be accessible + after the statement is invoked via the + :attr:`.CursorResult.inserted_primary_key` attribute upon the + :class:`_result.Result` object. This also applies towards use of the + ORM when ORM-mapped objects are persisted to the database, + indicating that a new integer primary key will be available to + become part of the :term:`identity key` for that object. This + behavior takes place regardless of what DDL constructs are + associated with the :class:`_schema.Column` and is independent + of the "DDL Rendering" behavior discussed in the previous note + above. + + The parameter may be set to ``True`` to indicate that a column which + is part of a composite (i.e. multi-column) primary key should have autoincrement semantics, though note that only one column within a primary key may have this setting. It can also be set to ``True`` to indicate autoincrement semantics on a @@ -1239,7 +1299,6 @@ def __init__(self, *args, **kwargs): that has an explicit client-side or server-side default, subject to limitations of the backend database and dialect. - The setting *only* has an effect for columns which are: * Integer derived (i.e. INT, SMALLINT, BIGINT). 
@@ -1255,34 +1314,72 @@ def __init__(self, *args, **kwargs): Column('id', ForeignKey('other.id'), primary_key=True, autoincrement='ignore_fk') - It is typically not desirable to have "autoincrement" enabled on a - column that refers to another via foreign key, as such a column is - required to refer to a value that originates from elsewhere. + It is typically not desirable to have "autoincrement" enabled on a + column that refers to another via foreign key, as such a column is + required to refer to a value that originates from elsewhere. - The setting has these two effects on columns that meet the + The setting has these effects on columns that meet the above criteria: - * DDL issued for the column will include database-specific + * DDL issued for the column, if the column does not already include + a default generating construct supported by the backend such as + :class:`.Identity`, will include database-specific keywords intended to signify this column as an - "autoincrement" column, such as AUTO INCREMENT on MySQL, - SERIAL on PostgreSQL, and IDENTITY on MS-SQL. It does - *not* issue AUTOINCREMENT for SQLite since this is a - special SQLite flag that is not required for autoincrementing - behavior. - - .. seealso:: - - :ref:`sqlite_autoincrement` - - * The column will be considered to be available using an - "autoincrement" method specific to the backend database, such - as calling upon ``cursor.lastrowid``, using RETURNING in an - INSERT statement to get at a sequence-generated value, or using - special functions such as "SELECT scope_identity()". - These methods are highly specific to the DBAPIs and databases in - use and vary greatly, so care should be taken when associating - ``autoincrement=True`` with a custom default generation function. - + "autoincrement" column for specific backends. Behavior for + primary SQLAlchemy dialects includes: + + * AUTO INCREMENT on MySQL and MariaDB + * SERIAL on PostgreSQL + * IDENTITY on MS-SQL - this occurs even without the + :class:`.Identity` construct as the + :paramref:`.Column.autoincrement` parameter pre-dates this + construct. + * SQLite - SQLite integer primary key columns are implicitly + "auto incrementing" and no additional keywords are rendered; + to render the special SQLite keyword ``AUTOINCREMENT`` + is not included as this is unnecessary and not recommended + by the database vendor. See the section + :ref:`sqlite_autoincrement` for more background. + * Oracle - The Oracle dialect has no default "autoincrement" + feature available at this time, instead the :class:`.Identity` + construct is recommended to achieve this (the :class:`.Sequence` + construct may also be used). + * Third-party dialects - consult those dialects' documentation + for details on their specific behaviors. 
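To make the DDL-rendering behavior above concrete, a short sketch (table and column names are illustrative only) compiling the same table against two dialects::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.dialects import mysql, postgresql
    from sqlalchemy.schema import CreateTable

    metadata = MetaData()

    widget = Table(
        "widget",
        metadata,
        # autoincrement defaults to "auto"; a lone integer primary key with
        # no other default construct receives the backend-specific keyword
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    # renders "id SERIAL NOT NULL" on PostgreSQL
    print(CreateTable(widget).compile(dialect=postgresql.dialect()))

    # renders "id INTEGER NOT NULL AUTO_INCREMENT" on MySQL
    print(CreateTable(widget).compile(dialect=mysql.dialect()))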
+ + * When a single-row :func:`_sql.insert` construct is compiled and + executed, which does not set the :meth:`_sql.Insert.inline` + modifier, newly generated primary key values for this column + will be automatically retrieved upon statement execution + using a method specific to the database driver in use: + + * MySQL, SQLite - calling upon ``cursor.lastrowid()`` + (see + `https://www.python.org/dev/peps/pep-0249/#lastrowid + `_) + * PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent + construct when rendering an INSERT statement, and then retrieving + the newly generated primary key values after execution + * PostgreSQL, Oracle for :class:`_schema.Table` objects that + set :paramref:`_schema.Table.implicit_returning` to False - + for a :class:`.Sequence` only, the :class:`.Sequence` is invoked + explicitly before the INSERT statement takes place so that the + newly generated primary key value is available to the client + * SQL Server for :class:`_schema.Table` objects that + set :paramref:`_schema.Table.implicit_returning` to False - + the ``SELECT scope_identity()`` construct is used after the + INSERT statement is invoked to retrieve the newly generated + primary key value. + * Third-party dialects - consult those dialects' documentation + for details on their specific behaviors. + + * For multiple-row :func:`_sql.insert` constructs invoked with + a list of parameters (i.e. "executemany" semantics), primary-key + retrieving behaviors are generally disabled, however there may + be special APIs that may be used to retrieve lists of new + primary key values for an "executemany", such as the psycopg2 + "fast insertmany" feature. Such features are very new and + may not yet be well covered in documentation. :param default: A scalar, Python callable, or :class:`_expression.ColumnElement` expression representing the @@ -1566,7 +1663,7 @@ def __init__(self, *args, **kwargs): parameter to :class:`_schema.Column`. - """ # noqa E501 + """ # noqa: E501, RST201, RST202 name = kwargs.pop("name", None) type_ = kwargs.pop("type_", None) @@ -1715,6 +1812,17 @@ def __init__(self, *args, **kwargs): """ + @util.memoized_property + def _gen_static_annotations_cache_key(self): + """special attribute used by cache key gen, if true, we will + use a static cache key for the annotations dictionary, else we + will generate a new cache key for annotations each time. + + Added for #8790 + + """ + return self.table is not None and self.table._is_table + def _extra_kwargs(self, **kwargs): self._validate_dialect_kwargs(kwargs) @@ -1955,10 +2063,19 @@ def _make_proxy( information is not transferred. """ + fk = [ - ForeignKey(f.column, _constraint=f.constraint) - for f in self.foreign_keys + ForeignKey( + col if col is not None else f._colspec, + _unresolvable=col is None, + _constraint=f.constraint, + ) + for f, col in [ + (fk, fk._resolve_column(raiseerr=False)) + for fk in self.foreign_keys + ] ] + if name is None and self.name is None: raise exc.InvalidRequestError( "Cannot initialize a sub-selectable" @@ -2058,6 +2175,7 @@ def __init__( link_to_name=False, match=None, info=None, + _unresolvable=False, **dialect_kw ): r""" @@ -2131,6 +2249,7 @@ def __init__( """ self._colspec = coercions.expect(roles.DDLReferredColumnRole, column) + self._unresolvable = _unresolvable if isinstance(self._colspec, util.string_types): self._table_column = None @@ -2213,11 +2332,14 @@ def _get_colspec(self, schema=None, table_name=None): argument first passed to the object's constructor. 
""" - if schema: + if schema not in (None, RETAIN_SCHEMA): _schema, tname, colname = self._column_tokens if table_name is not None: tname = table_name - return "%s.%s.%s" % (schema, tname, colname) + if schema is BLANK_SCHEMA: + return "%s.%s" % (tname, colname) + else: + return "%s.%s.%s" % (schema, tname, colname) elif table_name: schema, tname, colname = self._column_tokens if schema: @@ -2314,6 +2436,11 @@ def _resolve_col_tokens(self): parenttable = self.parent.table + if self._unresolvable: + schema, tname, colname = self._column_tokens + tablekey = _get_table_key(tname, schema) + return parenttable, tablekey, colname + # assertion # basically Column._make_proxy() sends the actual # target Column to the ForeignKey object, so the @@ -2335,10 +2462,6 @@ def _resolve_col_tokens(self): return parenttable, tablekey, colname def _link_to_col_by_colstring(self, parenttable, table, colname): - if not hasattr(self.constraint, "_referred_table"): - self.constraint._referred_table = table - else: - assert self.constraint._referred_table is table _column = None if colname is None: @@ -2346,8 +2469,12 @@ def _link_to_col_by_colstring(self, parenttable, table, colname): # was specified as table name only, in which case we # match the column name to the same column on the # parent. - key = self.parent - _column = table.c.get(self.parent.key, None) + # this use case wasn't working in later 1.x series + # as it had no test coverage; fixed in 2.0 + parent = self.parent + assert parent is not None + key = parent.key + _column = table.c.get(key, None) elif self.link_to_name: key = colname for c in table.c: @@ -2367,10 +2494,10 @@ def _link_to_col_by_colstring(self, parenttable, table, colname): key, ) - self._set_target_column(_column) + return _column def _set_target_column(self, column): - assert isinstance(self.parent.table, Table) + assert self.parent is not None # propagate TypeEngine to parent if it didn't have one if self.parent.type._isnull: @@ -2402,11 +2529,17 @@ def column(self): """ + return self._resolve_column() + + def _resolve_column(self, raiseerr=True): + if isinstance(self._colspec, util.string_types): parenttable, tablekey, colname = self._resolve_col_tokens() - if tablekey not in parenttable.metadata: + if self._unresolvable or tablekey not in parenttable.metadata: + if not raiseerr: + return None raise exc.NoReferencedTableError( "Foreign key associated with column '%s' could not find " "table '%s' with which to generate a " @@ -2415,19 +2548,18 @@ def column(self): tablekey, ) elif parenttable.key not in parenttable.metadata: + if not raiseerr: + return None raise exc.InvalidRequestError( "Table %s is no longer associated with its " "parent MetaData" % parenttable ) else: - raise exc.NoReferencedColumnError( - "Could not initialize target column for " - "ForeignKey '%s' on table '%s': " - "table '%s' has no column named '%s'" - % (self._colspec, parenttable.name, tablekey, colname), - tablekey, - colname, + table = parenttable.metadata.tables[tablekey] + return self._link_to_col_by_colstring( + parenttable, table, colname ) + elif hasattr(self._colspec, "__clause_element__"): _column = self._colspec.__clause_element__() return _column @@ -2447,6 +2579,11 @@ def _set_parent(self, column, **kw): def _set_remote_table(self, table): parenttable, tablekey, colname = self._resolve_col_tokens() self._link_to_col_by_colstring(parenttable, table, colname) + + _column = self._link_to_col_by_colstring(parenttable, table, colname) + self._set_target_column(_column) + assert self.constraint is 
not None + self.constraint._validate_dest_table(table) def _remove_from_metadata(self, metadata): @@ -2485,10 +2622,14 @@ def _set_table(self, column, table): if table_key in parenttable.metadata.tables: table = parenttable.metadata.tables[table_key] try: - self._link_to_col_by_colstring(parenttable, table, colname) + _column = self._link_to_col_by_colstring( + parenttable, table, colname + ) except exc.NoReferencedColumnError: # this is OK, we'll try later pass + else: + self._set_target_column(_column) parenttable.metadata._fk_memos[fk_key].append(self) elif hasattr(self._colspec, "__clause_element__"): _column = self._colspec.__clause_element__() @@ -3622,8 +3763,8 @@ def __init__( ) else: # e.g. FOREIGN KEY (a) REFERENCES r (b, c) - # paraphrasing https://www.postgresql.org/docs/9.2/static/\ - # ddl-constraints.html + # paraphrasing + # https://www.postgresql.org/docs/current/static/ddl-constraints.html raise exc.ArgumentError( "ForeignKeyConstraint number " "of constrained columns must match the number of " @@ -3954,7 +4095,11 @@ def columns_autoinc_first(self): def _autoincrement_column(self): def _validate_autoinc(col, autoinc_true): if col.type._type_affinity is None or not issubclass( - col.type._type_affinity, type_api.INTEGERTYPE._type_affinity + col.type._type_affinity, + ( + type_api.INTEGERTYPE._type_affinity, + type_api.NUMERICTYPE._type_affinity, + ), ): if autoinc_true: raise exc.ArgumentError( @@ -4897,7 +5042,7 @@ class Computed(FetchedValue, SchemaItem): from sqlalchemy import Computed - Table('square', meta, + Table('square', metadata_obj, Column('side', Float, nullable=False), Column('area', Float, Computed('side * side')) ) @@ -4994,7 +5139,7 @@ class Identity(IdentityOptions, FetchedValue, SchemaItem): from sqlalchemy import Identity - Table('foo', meta, + Table('foo', metadata_obj, Column('id', Integer, Identity()) Column('description', Text), ) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index aed6482972a..f302822a5ae 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1,5 +1,5 @@ # sql/selectable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -170,7 +170,7 @@ def lateral(self, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. """ return Lateral._construct(self, name) @@ -607,7 +607,7 @@ def alias(self, name=None, flat=False): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -889,7 +889,7 @@ def _anonymous_fromclause(self, name=None, flat=False): .. versionadded:: 1.4 -""", # noqa E501 +""", # noqa: E501 ) LABEL_STYLE_TABLENAME_PLUS_COL = util.symbol( @@ -901,7 +901,7 @@ def _anonymous_fromclause(self, name=None, flat=False): Below, all column names are given a label so that the two same-named columns ``columna`` are disambiguated as ``table1_columna`` and - ``table2_columna`:: + ``table2_columna``:: >>> from sqlalchemy import table, column, select, true, LABEL_STYLE_TABLENAME_PLUS_COL >>> table1 = table("table1", column("columna"), column("columnb")) @@ -919,7 +919,7 @@ def _anonymous_fromclause(self, name=None, flat=False): .. 
versionadded:: 1.4 -""", # noqa E501 +""", # noqa: E501 ) @@ -1757,13 +1757,14 @@ class TableValuedAlias(Alias): :ref:`tutorial_functions_table_valued` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 __visit_name__ = "table_valued_alias" _supports_derived_columns = True _render_derived = False _render_derived_w_types = False + joins_implicitly = False _traverse_internals = [ ("element", InternalTraversal.dp_clauseelement), @@ -1773,9 +1774,16 @@ class TableValuedAlias(Alias): ("_render_derived_w_types", InternalTraversal.dp_boolean), ] - def _init(self, selectable, name=None, table_value_type=None): + def _init( + self, + selectable, + name=None, + table_value_type=None, + joins_implicitly=False, + ): super(TableValuedAlias, self)._init(selectable, name=name) + self.joins_implicitly = joins_implicitly self._tableval_type = ( type_api.TABLEVALUE if table_value_type is None @@ -1812,15 +1820,22 @@ def alias(self, name=None): """ - tva = TableValuedAlias._construct(self, name=name) + tva = TableValuedAlias._construct( + self, + name=name, + table_value_type=self._tableval_type, + joins_implicitly=self.joins_implicitly, + ) + if self._render_derived: tva._render_derived = True tva._render_derived_w_types = self._render_derived_w_types + return tva def lateral(self, name=None): - """Return a new :class:`_sql.TableValuedAlias` with the lateral flag set, - so that it renders as LATERAL. + """Return a new :class:`_sql.TableValuedAlias` with the lateral flag + set, so that it renders as LATERAL. .. seealso:: @@ -1869,14 +1884,22 @@ def render_derived(self, name=None, with_types=False): datatype specification with each column. This is a special syntax currently known to be required by PostgreSQL for some SQL functions. - """ # noqa E501 + """ # noqa: E501 # note: don't use the @_generative system here, keep a reference # to the original object. otherwise you can have re-use of the # python id() of the original which can cause name conflicts if # a new anon-name grabs the same identifier as the local anon-name # (just saw it happen on CI) - new_alias = TableValuedAlias._construct(self, name=name) + + # construct against original to prevent memory growth + # for repeated generations + new_alias = TableValuedAlias._construct( + self.element, + name=name, + table_value_type=self._tableval_type, + joins_implicitly=self.joins_implicitly, + ) new_alias._render_derived = True new_alias._render_derived_w_types = with_types return new_alias @@ -1897,7 +1920,7 @@ class Lateral(AliasedReturnsRows): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. """ @@ -1924,7 +1947,8 @@ def _factory(cls, selectable, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. + """ return coercions.expect( @@ -2106,7 +2130,7 @@ def alias(self, name=None, flat=False): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -2121,9 +2145,23 @@ def alias(self, name=None, flat=False): _suffixes=self._suffixes, ) - def union(self, other): + def union(self, *other): + r"""Return a new :class:`_expression.CTE` with a SQL ``UNION`` + of the original CTE against the given selectables provided + as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 multiple elements are now accepted. + + .. 
seealso:: + + :meth:`_sql.HasCTE.cte` - examples of calling styles + + """ return CTE._construct( - self.element.union(other), + self.element.union(*other), name=self.name, recursive=self.recursive, nesting=self.nesting, @@ -2132,9 +2170,23 @@ def union(self, other): _suffixes=self._suffixes, ) - def union_all(self, other): + def union_all(self, *other): + r"""Return a new :class:`_expression.CTE` with a SQL ``UNION ALL`` + of the original CTE against the given selectables provided + as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 multiple elements are now accepted. + + .. seealso:: + + :meth:`_sql.HasCTE.cte` - examples of calling styles + + """ return CTE._construct( - self.element.union_all(other), + self.element.union_all(*other), name=self.name, recursive=self.recursive, nesting=self.nesting, @@ -2396,7 +2448,7 @@ def cte(self, name=None, recursive=False, nesting=False): connection.execute(upsert) - Example 4, Nesting CTE:: + Example 4, Nesting CTE (SQLAlchemy 1.4.24 and above):: value_a = select( literal("root").label("n") @@ -2426,6 +2478,44 @@ def cte(self, name=None, recursive=False, nesting=False): SELECT value_a.n AS a, value_b.n AS b FROM value_a, value_b + Example 5, Non-Linear CTE (SQLAlchemy 1.4.28 and above):: + + edge = Table( + "edge", + metadata, + Column("id", Integer, primary_key=True), + Column("left", Integer), + Column("right", Integer), + ) + + root_node = select(literal(1).label("node")).cte( + "nodes", recursive=True + ) + + left_edge = select(edge.c.left).join( + root_node, edge.c.right == root_node.c.node + ) + right_edge = select(edge.c.right).join( + root_node, edge.c.left == root_node.c.node + ) + + subgraph_cte = root_node.union(left_edge, right_edge) + + subgraph = select(subgraph_cte) + + The above query will render 2 UNIONs inside the recursive CTE:: + + WITH RECURSIVE nodes(node) AS ( + SELECT 1 AS node + UNION + SELECT edge."left" AS "left" + FROM edge JOIN nodes ON edge."right" = nodes.node + UNION + SELECT edge."right" AS "right" + FROM edge JOIN nodes ON edge."left" = nodes.node + ) + SELECT nodes.node FROM nodes + .. seealso:: :meth:`_orm.Query.cte` - ORM version of @@ -2595,6 +2685,7 @@ class TableClause(roles.DMLTableRole, Immutable, FromClause): InternalTraversal.dp_fromclause_canonical_column_collection, ), ("name", InternalTraversal.dp_string), + ("schema", InternalTraversal.dp_string), ] named_with_column = True @@ -2740,6 +2831,7 @@ class ForUpdateArg(ClauseElement): ("nowait", InternalTraversal.dp_boolean), ("read", InternalTraversal.dp_boolean), ("skip_locked", InternalTraversal.dp_boolean), + ("key_share", InternalTraversal.dp_boolean), ] @classmethod @@ -2875,7 +2967,7 @@ def alias(self, name, **kw): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -3112,8 +3204,6 @@ def scalar_subquery(self): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`scalar_selects` - in the 1.x tutorial - """ if self._label_style is not LABEL_STYLE_NONE: self = self.set_label_style(LABEL_STYLE_NONE) @@ -3141,7 +3231,7 @@ def lateral(self, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. """ return Lateral._factory(self, name) @@ -3493,7 +3583,7 @@ def set_label_style(self, style): :data:`_sql.LABEL_STYLE_DISAMBIGUATE_ONLY`, :data:`_sql.LABEL_STYLE_TABLENAME_PLUS_COL`, and :data:`_sql.LABEL_STYLE_NONE`. 
The default style is - :data:`_sql.LABEL_STYLE_TABLENAME_PLUS_COL`. + :data:`_sql.LABEL_STYLE_DISAMBIGUATE_ONLY`. In modern SQLAlchemy, there is not generally a need to change the labeling style, as per-expression labels are more effectively used by @@ -3743,7 +3833,7 @@ def slice(self, start, stop): For example, :: - stmt = select(User).order_by(User).id.slice(1, 3) + stmt = select(User).order_by(User.id).slice(1, 3) renders as @@ -3787,9 +3877,10 @@ def order_by(self, *clauses): stmt = select(table).order_by(table.c.id, table.c.name) - All existing ORDER BY criteria may be cancelled by passing - ``None`` by itself. New ORDER BY criteria may then be added by - invoking :meth:`_sql.Select.order_by` again, e.g.:: + Calling this method multiple times is equivalent to calling it once + with all the clauses concatenated. All existing ORDER BY criteria may + be cancelled by passing ``None`` by itself. New ORDER BY criteria may + then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: # will erase all ORDER BY and ORDER BY new_col alone stmt = stmt.order_by(None).order_by(new_col) @@ -3819,6 +3910,8 @@ def group_by(self, *clauses): r"""Return a new selectable with the given list of GROUP BY criterion applied. + All existing GROUP BY settings can be suppressed by passing ``None``. + e.g.:: stmt = select(table.c.name, func.max(table.c.stat)).\ @@ -4312,7 +4405,16 @@ def _plugin_not_implemented(cls): @classmethod def get_column_descriptions(cls, statement): - cls._plugin_not_implemented() + return [ + { + "name": name, + "type": element.type, + "expr": element, + } + for _, name, _, element, _ in ( + statement._generate_columns_plus_names(False) + ) + ] @classmethod def from_statement(cls, statement, from_statement): @@ -4705,7 +4807,8 @@ class _MemoizedSelectEntities( def _clone(self, **kw): c = self.__class__.__new__(self.__class__) c.__dict__ = {k: v for k, v in self.__dict__.items()} - c._is_clone_of = self + + c._is_clone_of = self.__dict__.get("_is_clone_of", self) return c @classmethod @@ -4747,8 +4850,6 @@ class Select( :func:`_sql.select` - :ref:`coretutorial_selecting` - in the 1.x tutorial - :ref:`tutorial_selecting_data` - in the 2.0 tutorial """ @@ -4855,8 +4956,7 @@ def create_legacy_select( .. seealso:: - :ref:`coretutorial_selecting` - Core Tutorial description of - :func:`_expression.select`. + :ref:`tutorial_selecting_data` - in the :ref:`unified_tutorial` :param columns: A list of :class:`_expression.ColumnElement` or @@ -5254,8 +5354,43 @@ def filter_by(self, **kwargs): @property def column_descriptions(self): - """Return a 'column descriptions' structure which may be - :term:`plugin-specific`. + """Return a :term:`plugin-enabled` 'column descriptions' structure + referring to the columns which are SELECTed by this statement. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. 
+ + For a Core-only statement, the structure returned by this accessor + is derived from the same objects that are returned by the + :attr:`.Select.selected_columns` accessor, formatted as a list of + dictionaries which contain the keys ``name``, ``type`` and ``expr``, + which indicate the column expressions to be selected:: + + >>> stmt = select(user_table) + >>> stmt.column_descriptions + [ + { + 'name': 'id', + 'type': Integer(), + 'expr': Column('id', Integer(), ...)}, + { + 'name': 'name', + 'type': String(length=30), + 'expr': Column('name', String(length=30), ...)} + ] + + .. versionchanged:: 1.4.33 The :attr:`.Select.column_descriptions` + attribute returns a structure for a Core-only set of entities, + not just ORM-only entities. + + .. seealso:: + + :attr:`.UpdateBase.entity_description` - entity information for + an :func:`.insert`, :func:`.update`, or :func:`.delete` + + :ref:`queryguide_inspection` - ORM background """ meth = SelectState.get_plugin_class(self).get_column_descriptions @@ -5353,10 +5488,9 @@ def join(self, target, onclause=None, isouter=False, full=False): ) def outerjoin_from(self, from_, target, onclause=None, full=False): - r"""Create a SQL LEFT OUTER JOIN against this :class:`_expression.Select` - object's criterion - and apply generatively, returning the newly resulting - :class:`_expression.Select`. + r"""Create a SQL LEFT OUTER JOIN against this + :class:`_expression.Select` object's criterion and apply generatively, + returning the newly resulting :class:`_expression.Select`. Usage is the same as that of :meth:`_selectable.Select.join_from`. @@ -5761,7 +5895,7 @@ def with_only_columns(self, *columns, **kw): .. versionadded:: 1.4.23 - """ # noqa E501 + """ # noqa: E501 # memoizations should be cleared here as of # I95c560ffcbfa30b26644999412fb6a385125f663 , asserting this @@ -5942,7 +6076,7 @@ def correlate(self, *fromclauses): :meth:`_expression.Select.correlate_except` - :ref:`correlated_subqueries` + :ref:`tutorial_scalar_subquery` """ @@ -5980,7 +6114,7 @@ def correlate_except(self, *fromclauses): :meth:`_expression.Select.correlate` - :ref:`correlated_subqueries` + :ref:`tutorial_scalar_subquery` """ @@ -6059,7 +6193,7 @@ def _ensure_disambiguated_names(self): self = self.set_label_style(LABEL_STYLE_DISAMBIGUATE_ONLY) return self - def _generate_columns_plus_names(self, anon_for_dupe_key): + def _generate_columns_plus_names(self, anon_for_dupe_key, cols=None): """Generate column names as rendered in a SELECT statement by the compiler. @@ -6069,7 +6203,9 @@ def _generate_columns_plus_names(self, anon_for_dupe_key): _column_naming_convention as well. """ - cols = self._all_selected_columns + + if cols is None: + cols = self._all_selected_columns key_naming_convention = SelectState._column_naming_convention( self._label_style @@ -6268,47 +6404,107 @@ def self_group(self, against=None): else: return SelectStatementGrouping(self) - def union(self, other, **kwargs): - """Return a SQL ``UNION`` of this select() construct against - the given selectable. + def union(self, *other, **kwargs): + r"""Return a SQL ``UNION`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. 
""" - return CompoundSelect._create_union(self, other, **kwargs) + return CompoundSelect._create_union(self, *other, **kwargs) + + def union_all(self, *other, **kwargs): + r"""Return a SQL ``UNION ALL`` of this select() construct against + the given selectables provided as positional arguments. - def union_all(self, other, **kwargs): - """Return a SQL ``UNION ALL`` of this select() construct against - the given selectable. + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_union_all(self, other, **kwargs) + return CompoundSelect._create_union_all(self, *other, **kwargs) + + def except_(self, *other, **kwargs): + r"""Return a SQL ``EXCEPT`` of this select() construct against + the given selectable provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. - def except_(self, other, **kwargs): - """Return a SQL ``EXCEPT`` of this select() construct against - the given selectable. + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_except(self, other, **kwargs) + return CompoundSelect._create_except(self, *other, **kwargs) - def except_all(self, other, **kwargs): - """Return a SQL ``EXCEPT ALL`` of this select() construct against - the given selectable. + def except_all(self, *other, **kwargs): + r"""Return a SQL ``EXCEPT ALL`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_except_all(self, other, **kwargs) + return CompoundSelect._create_except_all(self, *other, **kwargs) + + def intersect(self, *other, **kwargs): + r"""Return a SQL ``INTERSECT`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 - def intersect(self, other, **kwargs): - """Return a SQL ``INTERSECT`` of this select() construct against - the given selectable. + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_intersect(self, other, **kwargs) + return CompoundSelect._create_intersect(self, *other, **kwargs) + + def intersect_all(self, *other, **kwargs): + r"""Return a SQL ``INTERSECT ALL`` of this select() construct + against the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 - def intersect_all(self, other, **kwargs): - """Return a SQL ``INTERSECT ALL`` of this select() construct - against the given selectable. + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. 
""" - return CompoundSelect._create_intersect_all(self, other, **kwargs) + return CompoundSelect._create_intersect_all(self, *other, **kwargs) @property @util.deprecated_20( @@ -6363,8 +6559,6 @@ class ScalarSelect(roles.InElementRole, Generative, Grouping): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`scalar_selects` - in the 1.x tutorial - """ _from_objects = [] @@ -6423,8 +6617,6 @@ def correlate(self, *fromclauses): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`correlated_subqueries` - in the 1.x tutorial - """ self.element = self.element.correlate(*fromclauses) @@ -6456,8 +6648,6 @@ def correlate_except(self, *fromclauses): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`correlated_subqueries` - in the 1.x tutorial - """ @@ -6469,6 +6659,9 @@ class Exists(UnaryExpression): See :func:`_sql.exists` for a description of usage. + An ``EXISTS`` clause can also be constructed from a :func:`_sql.select` + instance by calling :meth:`_sql.SelectBase.exists`. + """ _from_objects = [] @@ -6507,7 +6700,10 @@ def __init__(self, *args, **kwargs): :ref:`tutorial_exists` - in the :term:`2.0 style` tutorial. - """ # noqa E501 + :meth:`_sql.SelectBase.exists` - method to transform a ``SELECT`` to an + ``EXISTS`` clause. + + """ # noqa: E501 if args and isinstance(args[0], (SelectBase, ScalarSelect)): s = args[0] else: @@ -6576,7 +6772,8 @@ def select(self, whereclause=None, **kwargs): return Select._create_select_from_fromclause(self, [self], **kwargs) def correlate(self, *fromclause): - """Apply correlation to the subquery noted by this :class:`_sql.Exists`. + """Apply correlation to the subquery noted by this + :class:`_sql.Exists`. .. seealso:: @@ -6590,7 +6787,8 @@ def correlate(self, *fromclause): return e def correlate_except(self, *fromclause): - """Apply correlation to the subquery noted by this :class:`_sql.Exists`. + """Apply correlation to the subquery noted by this + :class:`_sql.Exists`. .. seealso:: @@ -6621,7 +6819,7 @@ def select_from(self, *froms): e.element = self._regroup(lambda element: element.select_from(*froms)) return e - def where(self, clause): + def where(self, *clause): """Return a new :func:`_expression.exists` construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. 
@@ -6634,7 +6832,7 @@ def where(self, clause): """ e = self._clone() - e.element = self._regroup(lambda element: element.where(clause)) + e.element = self._regroup(lambda element: element.where(*clause)) return e @@ -6670,10 +6868,14 @@ class was renamed _label_style = LABEL_STYLE_NONE - _traverse_internals = [ - ("element", InternalTraversal.dp_clauseelement), - ("column_args", InternalTraversal.dp_clauseelement_list), - ] + SupportsCloneAnnotations._clone_annotations_traverse_internals + _traverse_internals = ( + [ + ("element", InternalTraversal.dp_clauseelement), + ("column_args", InternalTraversal.dp_clauseelement_list), + ] + + SupportsCloneAnnotations._clone_annotations_traverse_internals + + HasCTE._has_ctes_traverse_internals + ) _is_textual = True diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index ae589d648a8..5dc901bd219 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1,5 +1,5 @@ # sql/sqltypes.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -587,41 +587,34 @@ class BigInteger(Integer): class Numeric(_LookupExpressionAdapter, TypeEngine): - """A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``. + """Base for non-integer numeric types, such as + ``NUMERIC``, ``FLOAT``, ``DECIMAL``, and other variants. - This type returns Python ``decimal.Decimal`` objects by default, unless - the :paramref:`.Numeric.asdecimal` flag is set to False, in which case - they are coerced to Python ``float`` objects. + The :class:`.Numeric` datatype when used directly will render DDL + corresponding to precision numerics if available, such as + ``NUMERIC(precision, scale)``. The :class:`.Float` subclass will + attempt to render a floating-point datatype such as ``FLOAT(precision)``. - .. note:: + :class:`.Numeric` returns Python ``decimal.Decimal`` objects by default, + based on the default value of ``True`` for the + :paramref:`.Numeric.asdecimal` parameter. If this parameter is set to + False, returned values are coerced to Python ``float`` objects. - The :class:`.Numeric` type is designed to receive data from a database - type that is explicitly known to be a decimal type - (e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point - type (e.g. ``FLOAT``, ``REAL``, others). - If the database column on the server is in fact a floating-point - type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. + The :class:`.Float` subtype, being more specific to floating point, + defaults the :paramref:`.Float.asdecimal` flag to False so that the + default Python datatype is ``float``. .. note:: - The Python ``decimal.Decimal`` class is generally slow - performing; cPython 3.3 has now switched to use the `cdecimal - `_ library natively. 
For - older Python versions, the ``cdecimal`` library can be patched - into any application where it will replace the ``decimal`` - library fully, however this needs to be applied globally and - before any other modules have been imported, as follows:: - - import sys - import cdecimal - sys.modules["decimal"] = cdecimal - - Note that the ``cdecimal`` and ``decimal`` libraries are **not - compatible with each other**, so patching ``cdecimal`` at the - global level is the only way it can be used effectively with - various DBAPIs that hardcode to import the ``decimal`` library. + When using a :class:`.Numeric` datatype against a database type that + returns Python floating point values to the driver, the accuracy of the + decimal conversion indicated by :paramref:`.Numeric.asdecimal` may be + limited. The behavior of specific numeric/floating point datatypes + is a product of the SQL datatype in use, the Python :term:`DBAPI` + in use, as well as strategies that may be present within + the SQLAlchemy dialect in use. Users requiring specific precision/ + scale are encouraged to experiment with the available datatypes + in order to determine the best results. """ @@ -661,8 +654,6 @@ def __init__( value of ".scale" as the default for decimal_return_scale, if not otherwise specified. - .. versionadded:: 0.9.0 - When using the ``Numeric`` type, care should be taken to ensure that the asdecimal setting is appropriate for the DBAPI in use - when Numeric applies a conversion from Decimal->float or float-> @@ -771,16 +762,6 @@ class Float(Numeric): :paramref:`.Float.asdecimal` flag is set to True, in which case they are coerced to ``decimal.Decimal`` objects. - .. note:: - - The :class:`.Float` type is designed to receive data from a database - type that is explicitly known to be a floating point type - (e.g. ``FLOAT``, ``REAL``, others) - and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others). - If the database column on the server is in fact a Numeric - type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. """ @@ -867,6 +848,13 @@ def __init__(self, timezone=False): def get_dbapi_type(self, dbapi): return dbapi.DATETIME + def _resolve_for_literal(self, value): + with_timezone = value.tzinfo is not None + if with_timezone and not self.timezone: + return DATETIME_TIMEZONE + else: + return self + @property def python_type(self): return dt.datetime @@ -874,8 +862,8 @@ def python_type(self): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: {Interval: self.__class__}, @@ -898,8 +886,8 @@ def python_type(self): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. 
return { operators.add: { @@ -937,10 +925,17 @@ def get_dbapi_type(self, dbapi): def python_type(self): return dt.time + def _resolve_for_literal(self, value): + with_timezone = value.tzinfo is not None + if with_timezone and not self.timezone: + return TIME_TIMEZONE + else: + return self + @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: {Date: DateTime, Interval: self.__class__}, @@ -1275,6 +1270,8 @@ class Enum(Emulated, String, SchemaType): a plain-string enumerated type:: import enum + from sqlalchemy import Enum + class MyEnum(enum.Enum): one = 1 two = 2 @@ -1386,8 +1383,9 @@ class was used, its name (converted to lower case) is used by :param native_enum: Use the database's native ENUM type when available. Defaults to True. When False, uses VARCHAR + check - constraint for all backends. The VARCHAR length can be controlled - with :paramref:`.Enum.length` + constraint for all backends. When False, the VARCHAR length can be + controlled with :paramref:`.Enum.length`; currently "length" is + ignored if native_enum=True. :param length: Allows specifying a custom length for the VARCHAR when :paramref:`.Enum.native_enum` is False. By default it uses the @@ -1486,7 +1484,7 @@ def _enum_init(self, enums, kw): self._sort_key_function = kw.pop("sort_key_function", NO_ARG) length_arg = kw.pop("length", NO_ARG) self._omit_aliases = kw.pop("omit_aliases", NO_ARG) - + _disable_warnings = kw.pop("_disable_warnings", False) values, objects = self._parse_into_values(enums, kw) self._setup_for_values(values, objects, kw) @@ -1506,17 +1504,27 @@ def _enum_init(self, enums, kw): _expect_unicode = convert_unicode if self.enums: - length = max(len(x) for x in self.enums) + self._default_length = length = max(len(x) for x in self.enums) else: - length = 0 - if not self.native_enum and length_arg is not NO_ARG: - if length_arg < length: - raise ValueError( - "When provided, length must be larger or equal" - " than the length of the longest enum value. %s < %s" - % (length_arg, length) - ) - length = length_arg + self._default_length = length = 0 + + if length_arg is not NO_ARG: + if self.native_enum: + if not _disable_warnings: + util.warn( + "Enum 'length' argument is currently ignored unless " + "native_enum is specified as False, including for DDL " + "that renders VARCHAR in any case. This may change " + "in a future release." + ) + else: + if not _disable_warnings and length_arg < length: + raise ValueError( + "When provided, length must be larger or equal" + " than the length of the longest enum value. %s < %s" + % (length_arg, length) + ) + length = length_arg self._valid_lookup[None] = self._object_lookup[None] = None @@ -1658,7 +1666,11 @@ def _object_value_for_elem(self, elem): def __repr__(self): return util.generic_repr( self, - additional_kw=[("native_enum", True)], + additional_kw=[ + ("native_enum", True), + ("create_constraint", False), + ("length", self._default_length), + ], to_inspect=[Enum, SchemaType], ) @@ -1672,12 +1684,15 @@ def as_generic(self, allow_nulltype=False): "an `enums` attribute." 
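A short sketch of the :paramref:`.Enum.length` behavior described above (the enumeration itself is made up); ``length`` is honored only when ``native_enum=False``, and passing it otherwise now emits a warning::

    import enum

    from sqlalchemy import Enum

    class Status(enum.Enum):
        active = 1
        archived = 2

    # renders VARCHAR(40) rather than a native ENUM type
    status_type = Enum(Status, native_enum=False, length=40)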
) - return util.constructor_copy(self, self._generic_type_affinity, *args) + return util.constructor_copy( + self, self._generic_type_affinity, *args, _disable_warnings=True + ) def adapt_to_emulated(self, impltype, **kw): kw.setdefault("_expect_unicode", self._expect_unicode) kw.setdefault("validate_strings", self.validate_strings) kw.setdefault("name", self.name) + kw["_disable_warnings"] = True kw.setdefault("schema", self.schema) kw.setdefault("inherit_schema", self.inherit_schema) kw.setdefault("metadata", self.metadata) @@ -1692,6 +1707,7 @@ def adapt_to_emulated(self, impltype, **kw): def adapt(self, impltype, **kw): kw["_enums"] = self._enums_argument + kw["_disable_warnings"] = True return super(Enum, self).adapt(impltype, **kw) def _should_create_constraint(self, compiler, **kw): @@ -1956,10 +1972,10 @@ def python_type(self): def _strict_as_bool(self, value): if value not in self._strict_bools: if not isinstance(value, int): - raise TypeError("Not a boolean value: %r" % value) + raise TypeError("Not a boolean value: %r" % (value,)) else: raise ValueError( - "Value %r is not None, True, or False" % value + "Value %r is not None, True, or False" % (value,) ) return value @@ -1998,8 +2014,8 @@ def result_processor(self, dialect, coltype): class _AbstractInterval(_LookupExpressionAdapter, TypeEngine): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: { @@ -2042,7 +2058,12 @@ class Interval(Emulated, _AbstractInterval, TypeDecorator): """ impl = DateTime - epoch = dt.datetime.utcfromtimestamp(0) + if compat.py2k: + epoch = dt.datetime.utcfromtimestamp(0) + else: + epoch = dt.datetime.fromtimestamp(0, dt.timezone.utc).replace( + tzinfo=None + ) cache_ok = True def __init__(self, native=True, second_precision=None, day_precision=None): @@ -2148,7 +2169,7 @@ class JSON(Indexable, TypeEngine): with engine.connect() as conn: conn.execute( data_table.insert(), - data = {"key1": "value1", "key2": "value2"} + {"data": {"key1": "value1", "key2": "value2"}} ) **JSON-Specific Expression Operators** @@ -2237,27 +2258,33 @@ class JSON(Indexable, TypeEngine): The :class:`_types.JSON` type, when used with the SQLAlchemy ORM, does not detect in-place mutations to the structure. In order to detect these, the - :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will + :mod:`sqlalchemy.ext.mutable` extension must be used, most typically + using the :class:`.MutableDict` class. This extension will allow "in-place" changes to the datastructure to produce events which will be detected by the unit of work. See the example at :class:`.HSTORE` for a simple example involving a dictionary. + Alternatively, assigning a JSON structure to an ORM element that + replaces the old one will always trigger a change event. + **Support for JSON null vs. SQL NULL** - When working with NULL values, the :class:`_types.JSON` - type recommends the + When working with NULL values, the :class:`_types.JSON` type recommends the use of two specific constants in order to differentiate between a column - that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string - of ``"null"``. To insert or select against a value that is SQL NULL, - use the constant :func:`.null`:: + that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string of + ``"null"``. 
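The mutation-tracking note above mentions :class:`.MutableDict`; a minimal sketch of that mapping pattern (class and table names are invented)::

    from sqlalchemy import JSON, Column, Integer
    from sqlalchemy.ext.mutable import MutableDict
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class Document(Base):
        __tablename__ = "document"

        id = Column(Integer, primary_key=True)

        # in-place changes such as doc.data["key"] = "value" emit change
        # events that the unit of work will detect
        data = Column(MutableDict.as_mutable(JSON))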
To insert or select against a value that is SQL NULL, use the + constant :func:`.null`. This symbol may be passed as a parameter value + specifically when using the :class:`_types.JSON` datatype, which contains + special logic that interprets this symbol to mean that the column value + should be SQL NULL as opposed to JSON ``"null"``:: from sqlalchemy import null - conn.execute(table.insert(), json_value=null()) + conn.execute(table.insert(), {"json_value": null()}) To insert or select against a value that is JSON ``"null"``, use the constant :attr:`_types.JSON.NULL`:: - conn.execute(table.insert(), json_value=JSON.NULL) + conn.execute(table.insert(), {"json_value": JSON.NULL}) The :class:`_types.JSON` type supports a flag :paramref:`_types.JSON.none_as_null` which when set to True will result @@ -2358,12 +2385,14 @@ def __init__(self, none_as_null=False): """Construct a :class:`_types.JSON` type. :param none_as_null=False: if True, persist the value ``None`` as a - SQL NULL value, not the JSON encoding of ``null``. Note that - when this flag is False, the :func:`.null` construct can still - be used to persist a NULL value:: + SQL NULL value, not the JSON encoding of ``null``. Note that when this + flag is False, the :func:`.null` construct can still be used to + persist a NULL value, which may be passed directly as a parameter + value that is specially interpreted by the :class:`_types.JSON` type + as SQL NULL:: from sqlalchemy import null - conn.execute(table.insert(), data=null()) + conn.execute(table.insert(), {"data": null()}) .. note:: @@ -2689,7 +2718,7 @@ class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine): connection.execute( mytable.insert(), - data=[1,2,3] + {"data": [1,2,3]} ) The :class:`_types.ARRAY` type can be constructed given a fixed number @@ -2744,6 +2773,31 @@ class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine): :meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific version of :class:`_types.ARRAY` also provides additional operators. + .. container:: topic + + **Detecting Changes in ARRAY columns when using the ORM** + + The :class:`_sqltypes.ARRAY` type, when used with the SQLAlchemy ORM, + does not detect in-place mutations to the array. In order to detect + these, the :mod:`sqlalchemy.ext.mutable` extension must be used, using + the :class:`.MutableList` class:: + + from sqlalchemy import ARRAY + from sqlalchemy.ext.mutable import MutableList + + class SomeOrmClass(Base): + # ... + + data = Column(MutableList.as_mutable(ARRAY(Integer))) + + This extension will allow "in-place" changes such to the array + such as ``.append()`` to produce events which will be detected by the + unit of work. Note that changes to elements **inside** the array, + including subarrays that are mutated in place, are **not** detected. + + Alternatively, assigning a new array value to an ORM element that + replaces the old one will always trigger a change event. + .. versionadded:: 1.1.0 .. seealso:: @@ -2792,6 +2846,13 @@ def _setup_getitem(self, index): return operators.getitem, index, return_type def contains(self, *arg, **kw): + """``ARRAY.contains()`` not implemented for the base ARRAY type. + Use the dialect-specific ARRAY type. + + .. seealso:: + + :class:`_postgresql.ARRAY` - PostgreSQL specific version. 
+ """ raise NotImplementedError( "ARRAY.contains() not implemented for the base " "ARRAY type; please use the dialect-specific ARRAY type" @@ -2833,10 +2894,18 @@ def any(self, other, operator=None): elements = util.preloaded.sql_elements operator = operator if operator else operators.eq + arr_type = self.type + # send plain BinaryExpression so that negate remains at None, # leading to NOT expr for negation. return elements.BinaryExpression( - coercions.expect(roles.ExpressionElementRole, other), + coercions.expect( + roles.BinaryElementRole, + element=other, + operator=operator, + expr=self.expr, + bindparam_type=arr_type.item_type, + ), elements.CollectionAggregate._create_any(self.expr), operator, ) @@ -2877,10 +2946,18 @@ def all(self, other, operator=None): elements = util.preloaded.sql_elements operator = operator if operator else operators.eq + arr_type = self.type + # send plain BinaryExpression so that negate remains at None, # leading to NOT expr for negation. return elements.BinaryExpression( - coercions.expect(roles.ExpressionElementRole, other), + coercions.expect( + roles.BinaryElementRole, + element=other, + operator=operator, + expr=self.expr, + bindparam_type=arr_type.item_type, + ), elements.CollectionAggregate._create_all(self.expr), operator, ) @@ -2966,7 +3043,10 @@ class TupleType(TypeEngine): def __init__(self, *types): self._fully_typed = NULLTYPE not in types - self.types = types + self.types = [ + item_type() if isinstance(item_type, type) else item_type + for item_type in types + ] def _resolve_values_to_types(self, value): if self._fully_typed: @@ -3194,12 +3274,7 @@ class NullType(TypeEngine): _isnull = True def literal_processor(self, dialect): - def process(value): - raise exc.CompileError( - "Don't know how to render literal SQL value: %r" % value - ) - - return process + return None class Comparator(TypeEngine.Comparator): def _adapt_expression(self, op, other_comparator): @@ -3249,8 +3324,11 @@ class MatchType(Boolean): BOOLEANTYPE = Boolean() STRINGTYPE = String() INTEGERTYPE = Integer() +NUMERICTYPE = Numeric() MATCHTYPE = MatchType() TABLEVALUE = TableValueType() +DATETIME_TIMEZONE = DateTime(timezone=True) +TIME_TIMEZONE = Time(timezone=True) _type_map = { int: Integer(), @@ -3289,11 +3367,11 @@ def _resolve_value_to_type(value): insp.__class__ in inspection._registrars ): raise exc.ArgumentError( - "Object %r is not legal as a SQL literal value" % value + "Object %r is not legal as a SQL literal value" % (value,) ) return NULLTYPE else: - return _result_type + return _result_type._resolve_for_literal(value) # back-assign to type_api @@ -3301,6 +3379,7 @@ def _resolve_value_to_type(value): type_api.STRINGTYPE = STRINGTYPE type_api.INTEGERTYPE = INTEGERTYPE type_api.NULLTYPE = NULLTYPE +type_api.NUMERICTYPE = NUMERICTYPE type_api.MATCHTYPE = MATCHTYPE type_api.INDEXABLE = Indexable type_api.TABLEVALUE = TABLEVALUE diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 3d377271f99..b705c6aa191 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -1,3 +1,9 @@ +# sql/traversals.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from collections import deque from collections import namedtuple import itertools @@ -49,7 +55,50 @@ def _preconfigure_traversals(target_hierarchy): class HasCacheKey(object): + """Mixin for objects which can 
produce a cache key. + + .. seealso:: + + :class:`.CacheKey` + + :ref:`sql_caching` + + """ + _cache_key_traversal = NO_CACHE + + _is_has_cache_key = True + + _hierarchy_supports_caching = True + """private attribute which may be set to False to prevent the + inherit_cache warning from being emitted for a hierarchy of subclasses. + + Currently applies to the DDLElement hierarchy which does not implement + caching. + + """ + + inherit_cache = None + """Indicate if this :class:`.HasCacheKey` instance should make use of the + cache key generation scheme used by its immediate superclass. + + The attribute defaults to ``None``, which indicates that a construct has + not yet taken into account whether or not its appropriate for it to + participate in caching; this is functionally equivalent to setting the + value to ``False``, except that a warning is also emitted. + + This flag can be set to ``True`` on a particular class, if the SQL that + corresponds to the object does not change based on attributes which + are local to this class, and not its superclass. + + .. seealso:: + + :ref:`compilerext_caching` - General guideslines for setting the + :attr:`.HasCacheKey.inherit_cache` attribute for third-party or user + defined SQL constructs. + + """ + __slots__ = () @classmethod @@ -60,7 +109,8 @@ def _generate_cache_attrs(cls): so should only be called once per class. """ - inherit = cls.__dict__.get("inherit_cache", False) + inherit_cache = cls.__dict__.get("inherit_cache", None) + inherit = bool(inherit_cache) if inherit: _cache_key_traversal = getattr(cls, "_cache_key_traversal", None) @@ -89,6 +139,23 @@ def _generate_cache_attrs(cls): ) if _cache_key_traversal is None: cls._generated_cache_key_traversal = NO_CACHE + if ( + inherit_cache is None + and cls._hierarchy_supports_caching + ): + util.warn( + "Class %s will not make use of SQL compilation " + "caching as it does not set the 'inherit_cache' " + "attribute to ``True``. This can have " + "significant performance implications including " + "some performance degradations in comparison to " + "prior SQLAlchemy versions. Set this attribute " + "to True if this object can make use of the cache " + "key generated by the superclass. Alternatively, " + "this attribute may be set to False which will " + "disable this warning." % (cls.__name__), + code="cprf", + ) return NO_CACHE return _cache_key_traversal_visitor.generate_dispatch( @@ -185,12 +252,16 @@ def _gen_cache_key(self, anon_map, bindparams): else None, ) elif meth is InternalTraversal.dp_annotations_key: - # obj is here is the _annotations dict. however, we - # want to use the memoized cache key version of it. for - # Columns, this should be long lived. For select() - # statements, not so much, but they usually won't have - # annotations. - result += self._annotations_cache_key + # obj is here is the _annotations dict. Table uses + # a memoized version of it. however in other cases, + # we generate it given anon_map as we may be from a + # Join, Aliased, etc. 
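# --- illustrative sketch (editor's note, not part of the patch) -------------
# How a third-party construct opts in to the caching scheme governed by the
# ``inherit_cache`` attribute documented above.  ``utcnow`` is a hypothetical
# compiler-extension construct: the SQL it renders does not depend on any
# state local to the subclass, so reusing the superclass cache key is safe.
# Leaving ``inherit_cache`` unset would now emit the "cprf" warning added in
# this patch.
from sqlalchemy import DateTime
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.functions import GenericFunction


class utcnow(GenericFunction):
    type = DateTime()
    inherit_cache = True


@compiles(utcnow, "postgresql")
def _pg_utcnow(element, compiler, **kw):
    return "TIMEZONE('utc', CURRENT_TIMESTAMP)"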
+ # see #8790 + + if self._gen_static_annotations_cache_key: # type: ignore # noqa: E501 + result += self._annotations_cache_key # type: ignore # noqa: E501 + else: + result += self._gen_annotations_cache_key(anon_map) # type: ignore # noqa: E501 elif ( meth is InternalTraversal.dp_clauseelement_list or meth is InternalTraversal.dp_clauseelement_tuple @@ -210,7 +281,6 @@ def _gen_cache_key(self, anon_map, bindparams): result += meth( attrname, obj, self, anon_map, bindparams ) - return result def _generate_cache_key(self): @@ -273,6 +343,15 @@ def _generate_cache_key(self): class CacheKey(namedtuple("CacheKey", ["key", "bindparams"])): + """The key used to identify a SQL statement construct in the + SQL compilation cache. + + .. seealso:: + + :ref:`sql_caching` + + """ + def __hash__(self): """CacheKey itself is not hashable - hash the .key portion""" @@ -310,7 +389,10 @@ def to_offline_string(self, statement_cache, statement, parameters): return repr((sql_str, param_tuple)) def __eq__(self, other): - return self.key == other.key + return bool(self.key == other.key) + + def __ne__(self, other): + return not (self.key == other.key) @classmethod def _diff_tuples(cls, left, right): @@ -480,7 +562,19 @@ def visit_has_cache_key_list( tuple(elem._gen_cache_key(anon_map, bindparams) for elem in obj), ) - visit_executable_options = visit_has_cache_key_list + def visit_executable_options( + self, attrname, obj, parent, anon_map, bindparams + ): + if not obj: + return () + return ( + attrname, + tuple( + elem._gen_cache_key(anon_map, bindparams) + for elem in obj + if elem._is_has_cache_key + ), + ) def visit_inspectable_list( self, attrname, obj, parent, anon_map, bindparams @@ -1065,6 +1159,8 @@ def compare(self, obj1, obj2, **kw): return False else: continue + elif right_child is None: + return False comparison = dispatch( left_attrname, left, left_child, right, right_child, **kw @@ -1102,7 +1198,20 @@ def visit_has_cache_key_list( ): return COMPARE_FAILED - visit_executable_options = visit_has_cache_key_list + def visit_executable_options( + self, attrname, left_parent, left, right_parent, right, **kw + ): + for l, r in util.zip_longest(left, right, fillvalue=None): + if ( + l._gen_cache_key(self.anon_map[0], []) + if l._is_has_cache_key + else l + ) != ( + r._gen_cache_key(self.anon_map[1], []) + if r._is_has_cache_key + else r + ): + return COMPARE_FAILED def visit_clauseelement( self, attrname, left_parent, left, right_parent, right, **kw @@ -1202,7 +1311,7 @@ def visit_boolean( def visit_operator( self, attrname, left_parent, left, right_parent, right, **kw ): - return left is right + return left == right def visit_type( self, attrname, left_parent, left, right_parent, right, **kw diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 2a4688bcceb..8d9d2f254d9 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1,5 +1,5 @@ -# sql/types_api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# sql/type_api.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -22,6 +22,7 @@ BOOLEANTYPE = None INTEGERTYPE = None NULLTYPE = None +NUMERICTYPE = None STRINGTYPE = None MATCHTYPE = None INDEXABLE = None @@ -110,8 +111,12 @@ def _adapt_expression(self, op, other_comparator): return op, self.type + # note: this reduce is needed for tests to pass under python 2. + # it does not appear to apply to python 3. 
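# --- illustrative sketch (editor's note, not part of the patch) -------------
# Rough demonstration of the CacheKey semantics documented above: statements
# differing only in bound literal values generate equal keys (CacheKey.__eq__
# compares the .key tuple) while the differing values travel in .bindparams.
# _generate_cache_key() is an internal API, used here only as a debugging aid.
from sqlalchemy import column, select

stmt_a = select(column("x")).where(column("x") > 5)
stmt_b = select(column("x")).where(column("x") > 10)

key_a = stmt_a._generate_cache_key()
key_b = stmt_b._generate_cache_key()

assert key_a == key_b
assert [p.value for p in key_a.bindparams] == [5]
assert [p.value for p in key_b.bindparams] == [10]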
It has however been + # modified to accommodate issue #10213. In SQLA 2 this reduce + # has been removed. def __reduce__(self): - return _reconstitute_comparator, (self.expr,) + return _reconstitute_comparator, (self.expr, self.expr.type) hashable = True """Flag, if False, means values from this type aren't hashable. @@ -178,8 +183,8 @@ def __reduce__(self): """ def evaluates_none(self): - """Return a copy of this type which has the :attr:`.should_evaluate_none` - flag set to True. + """Return a copy of this type which has the + :attr:`.should_evaluate_none` flag set to True. E.g.:: @@ -542,9 +547,25 @@ def with_variant(self, type_, dialect_name): :param dialect_name: base name of the dialect which uses this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) + .. seealso:: + + :ref:`types_with_variant` - illustrates the use of + :meth:`_types.TypeEngine.with_variant`. + """ return Variant(self, {dialect_name: to_instance(type_)}) + def _resolve_for_literal(self, value): + """adjust this type given a literal Python value that will be + stored in a bound parameter. + + Used exclusively by _resolve_value_to_type(). + + .. versionadded:: 1.4.30 or 2.0 + + """ + return self + @util.memoized_property def _type_affinity(self): """Return a rudimental 'affinity' value expressing the general class @@ -733,7 +754,9 @@ def _static_cache_key(self): else self.__dict__[k], ) for k in names - if k in self.__dict__ and not k.startswith("_") + if k in self.__dict__ + and not k.startswith("_") + and self.__dict__[k] is not None ) def adapt(self, cls, **kw): @@ -815,7 +838,185 @@ class VisitableCheckKWArg(util.EnsureKWArgType, TraversibleType): pass -class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): +class ExternalType(object): + """mixin that defines attributes and behaviors specific to third-party + datatypes. + + "Third party" refers to datatypes that are defined outside the scope + of SQLAlchemy within either end-user application code or within + external extensions to SQLAlchemy. + + Subclasses currently include :class:`.TypeDecorator` and + :class:`.UserDefinedType`. + + .. versionadded:: 1.4.28 + + """ + + cache_ok = None + """Indicate if statements using this :class:`.ExternalType` are "safe to + cache". + + The default value ``None`` will emit a warning and then not allow caching + of a statement which includes this type. Set to ``False`` to disable + statements using this type from being cached at all without a warning. + When set to ``True``, the object's class and selected elements from its + state will be used as part of the cache key. For example, using a + :class:`.TypeDecorator`:: + + class MyType(TypeDecorator): + impl = String + + cache_ok = True + + def __init__(self, choices): + self.choices = tuple(choices) + self.internal_only = True + + The cache key for the above type would be equivalent to:: + + >>> MyType(["a", "b", "c"])._static_cache_key + (, ('choices', ('a', 'b', 'c'))) + + The caching scheme will extract attributes from the type that correspond + to the names of parameters in the ``__init__()`` method. Above, the + "choices" attribute becomes part of the cache key but "internal_only" + does not, because there is no parameter named "internal_only". + + The requirements for cacheable elements is that they are hashable + and also that they indicate the same SQL rendered for expressions using + this type every time for a given cache value. 
+ + To accommodate for datatypes that refer to unhashable structures such + as dictionaries, sets and lists, these objects can be made "cacheable" + by assigning hashable structures to the attributes whose names + correspond with the names of the arguments. For example, a datatype + which accepts a dictionary of lookup values may publish this as a sorted + series of tuples. Given a previously un-cacheable type as:: + + class LookupType(UserDefinedType): + '''a custom type that accepts a dictionary as a parameter. + + this is the non-cacheable version, as "self.lookup" is not + hashable. + + ''' + + def __init__(self, lookup): + self.lookup = lookup + + def get_col_spec(self, **kw): + return "VARCHAR(255)" + + def bind_processor(self, dialect): + # ... works with "self.lookup" ... + + Where "lookup" is a dictionary. The type will not be able to generate + a cache key:: + + >>> type_ = LookupType({"a": 10, "b": 20}) + >>> type_._static_cache_key + :1: SAWarning: UserDefinedType LookupType({'a': 10, 'b': 20}) will not + produce a cache key because the ``cache_ok`` flag is not set to True. + Set this flag to True if this type object's state is safe to use + in a cache key, or False to disable this warning. + symbol('no_cache') + + If we **did** set up such a cache key, it wouldn't be usable. We would + get a tuple structure that contains a dictionary inside of it, which + cannot itself be used as a key in a "cache dictionary" such as SQLAlchemy's + statement cache, since Python dictionaries aren't hashable:: + + >>> # set cache_ok = True + >>> type_.cache_ok = True + + >>> # this is the cache key it would generate + >>> key = type_._static_cache_key + >>> key + (, ('lookup', {'a': 10, 'b': 20})) + + >>> # however this key is not hashable, will fail when used with + >>> # SQLAlchemy statement cache + >>> some_cache = {key: "some sql value"} + Traceback (most recent call last): File "", line 1, + in TypeError: unhashable type: 'dict' + + The type may be made cacheable by assigning a sorted tuple of tuples + to the ".lookup" attribute:: + + class LookupType(UserDefinedType): + '''a custom type that accepts a dictionary as a parameter. + + The dictionary is stored both as itself in a private variable, + and published in a public variable as a sorted tuple of tuples, + which is hashable and will also return the same value for any + two equivalent dictionaries. Note it assumes the keys and + values of the dictionary are themselves hashable. + + ''' + + cache_ok = True + + def __init__(self, lookup): + self._lookup = lookup + + # assume keys/values of "lookup" are hashable; otherwise + # they would also need to be converted in some way here + self.lookup = tuple( + (key, lookup[key]) for key in sorted(lookup) + ) + + def get_col_spec(self, **kw): + return "VARCHAR(255)" + + def bind_processor(self, dialect): + # ... works with "self._lookup" ... + + Where above, the cache key for ``LookupType({"a": 10, "b": 20})`` will be:: + + >>> LookupType({"a": 10, "b": 20})._static_cache_key + (, ('lookup', (('a', 10), ('b', 20)))) + + .. versionadded:: 1.4.14 - added the ``cache_ok`` flag to allow + some configurability of caching for :class:`.TypeDecorator` classes. + + .. versionadded:: 1.4.28 - added the :class:`.ExternalType` mixin which + generalizes the ``cache_ok`` flag to both the :class:`.TypeDecorator` + and :class:`.UserDefinedType` classes. + + .. 
seealso:: + + :ref:`sql_caching` + + """ # noqa: E501 + + @property + def _static_cache_key(self): + cache_ok = self.__class__.__dict__.get("cache_ok", None) + + if cache_ok is None: + subtype_idx = self.__class__.__mro__.index(ExternalType) + subtype = self.__class__.__mro__[max(subtype_idx - 1, 0)] + + util.warn( + "%s %r will not produce a cache key because " + "the ``cache_ok`` attribute is not set to True. This can " + "have significant performance implications including some " + "performance degradations in comparison to prior SQLAlchemy " + "versions. Set this attribute to True if this type object's " + "state is safe to use in a cache key, or False to " + "disable this warning." % (subtype.__name__, self), + code="cprf", + ) + elif cache_ok is True: + return super(ExternalType, self)._static_cache_key + + return NO_CACHE + + +class UserDefinedType( + util.with_metaclass(VisitableCheckKWArg, ExternalType, TypeEngine) +): """Base for user defined types. This should be the base of new types. Note that @@ -825,6 +1026,8 @@ class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): import sqlalchemy.types as types class MyType(types.UserDefinedType): + cache_ok = True + def __init__(self, precision = 8): self.precision = precision @@ -843,7 +1046,7 @@ def process(value): Once the type is made, it's immediately usable:: - table = Table('foo', meta, + table = Table('foo', metadata_obj, Column('id', Integer, primary_key=True), Column('data', MyType(16)) ) @@ -860,6 +1063,20 @@ def process(value): the ``get_col_spec()`` method via the keyword argument ``type_expression``, if it receives ``**kw`` in its signature. + The :attr:`.UserDefinedType.cache_ok` class-level flag indicates if this + custom :class:`.UserDefinedType` is safe to be used as part of a cache key. + This flag defaults to ``None`` which will initially generate a warning + when the SQL compiler attempts to generate a cache key for a statement + that uses this type. If the :class:`.UserDefinedType` is not guaranteed + to produce the same bind/result behavior and SQL generation + every time, this flag should be set to ``False``; otherwise if the + class produces the same behavior each time, it may be set to ``True``. + See :attr:`.UserDefinedType.cache_ok` for further notes on how this works. + + .. versionadded:: 1.4.28 Generalized the :attr:`.ExternalType.cache_ok` + flag so that it is available for both :class:`.TypeDecorator` as well + as :class:`.UserDefinedType`. + """ __visit_name__ = "user_defined" @@ -899,7 +1116,8 @@ class Emulated(object): """ def adapt_to_emulated(self, impltype, **kw): - """Given an impl class, adapt this type to the impl assuming "emulated". + """Given an impl class, adapt this type to the impl assuming + "emulated". The impl should also be an "emulated" version of this type, most likely the same class as this type itself. @@ -946,7 +1164,8 @@ def adapt_native_to_emulated(cls, impl, **kw): @classmethod def adapt_emulated_to_native(cls, impl, **kw): - """Given an impl, adapt this type's class to the impl assuming "native". + """Given an impl, adapt this type's class to the impl assuming + "native". The impl will be an :class:`.Emulated` class but not a :class:`.NativeForEmulated`. @@ -957,7 +1176,7 @@ def adapt_emulated_to_native(cls, impl, **kw): return cls(**kw) -class TypeDecorator(SchemaEventTarget, TypeEngine): +class TypeDecorator(ExternalType, SchemaEventTarget, TypeEngine): """Allows the creation of types which add additional functionality to an existing type. 
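# --- illustrative sketch (editor's note, not part of the patch) -------------
# The UserDefinedType notes above mention that get_col_spec() receives the
# enclosing expression as the ``type_expression`` keyword when it accepts
# **kw, and that ``cache_ok`` should now be set explicitly under the
# ExternalType contract.  ``SizedText`` is a hypothetical type showing both.
import sqlalchemy.types as types


class SizedText(types.UserDefinedType):
    cache_ok = True  # rendered DDL/SQL depends only on __init__() state

    def __init__(self, length=50):
        self.length = length

    def get_col_spec(self, **kw):
        # when **kw is accepted, the Column being rendered arrives as
        # ``type_expression``; it is not needed for this simple DDL
        kw.get("type_expression")
        return "VARCHAR(%d)" % self.length

    def bind_processor(self, dialect):
        return None  # pass bound values through unchanged

    def result_processor(self, dialect, coltype):
        return None  # pass result values through unchanged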
@@ -1025,6 +1244,8 @@ class produces the same behavior each time, it may be set to ``True``. class MyEpochType(types.TypeDecorator): impl = types.Integer + cache_ok = True + epoch = datetime.date(1970, 1, 1) def process_bind_param(self, value, dialect): @@ -1059,8 +1280,11 @@ def coerce_compared_value(self, op, value): the default rules of :meth:`.TypeEngine.coerce_compared_value` should be used in order to deal with operators like index operations:: + from sqlalchemy import JSON + from sqlalchemy import TypeDecorator + class MyJsonType(TypeDecorator): - impl = postgresql.JSON + impl = JSON cache_ok = True @@ -1070,6 +1294,24 @@ def coerce_compared_value(self, op, value): Without the above step, index operations such as ``mycol['foo']`` will cause the index value ``'foo'`` to be JSON encoded. + Similarly, when working with the :class:`.ARRAY` datatype, the + type coercion for index operations (e.g. ``mycol[5]``) is also + handled by :meth:`.TypeDecorator.coerce_compared_value`, where + again a simple override is sufficient unless special rules are needed + for particular operators:: + + from sqlalchemy import ARRAY + from sqlalchemy import TypeDecorator + + class MyArrayType(TypeDecorator): + impl = ARRAY + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + """ __visit_name__ = "type_decorator" @@ -1120,47 +1362,6 @@ def __init__(self, *args, **kwargs): """ - cache_ok = None - """Indicate if statements using this :class:`.TypeDecorator` are "safe to - cache". - - The default value ``None`` will emit a warning and then not allow caching - of a statement which includes this type. Set to ``False`` to disable - statements using this type from being cached at all without a warning. - When set to ``True``, the object's class and selected elements from its - state will be used as part of the cache key, e.g.:: - - class MyType(TypeDecorator): - impl = String - - cache_ok = True - - def __init__(self, choices): - self.choices = tuple(choices) - self.internal_only = True - - The cache key for the above type would be equivalent to:: - - (, ('choices', ('a', 'b', 'c'))) - - The caching scheme will extract attributes from the type that correspond - to the names of parameters in the ``__init__()`` method. Above, the - "choices" attribute becomes part of the cache key but "internal_only" - does not, because there is no parameter named "internal_only". - - The requirements for cacheable elements is that they are hashable - and also that they indicate the same SQL rendered for expressions using - this type every time for a given cache value. - - .. versionadded:: 1.4.14 - added the ``cache_ok`` flag to allow - some configurability of caching for :class:`.TypeDecorator` classes. - - .. seealso:: - - :ref:`sql_caching` - - """ - class Comparator(TypeEngine.Comparator): """A :class:`.TypeEngine.Comparator` that is specific to :class:`.TypeDecorator`. @@ -1196,21 +1397,6 @@ def comparator_factory(self): {}, ) - @property - def _static_cache_key(self): - if self.cache_ok is None: - util.warn( - "TypeDecorator %r will not produce a cache key because " - "the ``cache_ok`` flag is not set to True. " - "Set this flag to True if this type object's " - "state is safe to use in a cache key, or False to " - "disable this warning." 
% self - ) - elif self.cache_ok is True: - return super(TypeDecorator, self)._static_cache_key - - return NO_CACHE - def _gen_dialect_impl(self, dialect): """ #todo @@ -1765,8 +1951,14 @@ def comparator_factory(self): return self.impl.comparator_factory -def _reconstitute_comparator(expression): - return expression.comparator +def _reconstitute_comparator(expression, type_=None): + # changed for #10213, added type_ argument. + # for previous pickles, keep type_ optional + if type_ is None: + return expression.comparator + + comparator_factory = type_.comparator_factory + return comparator_factory(expression) def to_instance(typeobj, *arg, **kw): diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 7fcb45709f5..2b6de8fd1e8 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -1,5 +1,5 @@ # sql/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -151,7 +151,7 @@ def find_left_clause_to_join_from(clauses, join_to, onclause): if set(f.c).union(s.c).issuperset(cols_in_onclause): idx.append(i) break - elif Join._can_join(f, s) or onclause is not None: + elif onclause is not None or Join._can_join(f, s): idx.append(i) break @@ -484,6 +484,12 @@ def trunc(self, value): return rep +def _repr_single_value(value): + rp = _repr_base() + rp.max_chars = 300 + return rp.trunc(value) + + class _repr_row(_repr_base): """Provide a string view of a row.""" diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index 7111c5efd70..cd73d369ab3 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -1,5 +1,5 @@ # sql/visitors.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -772,7 +772,7 @@ def clone(elem, **kw): cloned[id(elem)] = newelem return newelem - cloned[id(elem)] = newelem = elem._clone(**kw) + cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw) newelem._copy_internals(clone=clone, **kw) meth = visitors.get(newelem.__visit_name__, None) if meth: diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index d78e241819e..62ca95a5f9e 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -1,5 +1,5 @@ # testing/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -12,6 +12,8 @@ from .assertions import assert_raises_context_ok from .assertions import assert_raises_message from .assertions import assert_raises_message_context_ok +from .assertions import assert_warns +from .assertions import assert_warns_message from .assertions import AssertsCompiledSQL from .assertions import AssertsExecutionResults from .assertions import ComparesTables @@ -26,6 +28,7 @@ from .assertions import expect_raises_message from .assertions import expect_warnings from .assertions import in_ +from .assertions import int_within_variance from .assertions import is_ from .assertions import is_false from .assertions import is_instance_of @@ -46,6 +49,9 @@ from .config import db from .config import fixture from .config import requirements as requires +from .config import skip_test +from .config import Variation 
+from .config import variation from .exclusions import _is_excluded from .exclusions import _server_version from .exclusions import against as _against diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 6bf14aecde9..c08dba41f30 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -1,5 +1,5 @@ # testing/assertions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -7,7 +7,9 @@ from __future__ import absolute_import +from collections import defaultdict import contextlib +from copy import copy import re import sys import warnings @@ -143,14 +145,16 @@ def _expect_warnings( exc_cls, messages, regex=True, + search_msg=False, assert_=True, py2konly=False, raise_on_any_unexpected=False, + squelch_other_warnings=False, ): global _FILTERS, _SEEN, _EXC_CLS - if regex: + if regex or search_msg: filters = [re.compile(msg, re.I | re.S) for msg in messages] else: filters = list(messages) @@ -188,19 +192,25 @@ def our_warn(msg, *arg, **kw): exception = None if not exception or not issubclass(exception, _EXC_CLS): - return real_warn(msg, *arg, **kw) + if not squelch_other_warnings: + return real_warn(msg, *arg, **kw) + else: + return if not filters and not raise_on_any_unexpected: return for filter_ in filters: - if (regex and filter_.match(msg)) or ( - not regex and filter_ == msg + if ( + (search_msg and filter_.search(msg)) + or (regex and filter_.match(msg)) + or (not regex and filter_ == msg) ): seen.discard(filter_) break else: - real_warn(msg, *arg, **kw) + if not squelch_other_warnings: + real_warn(msg, *arg, **kw) with mock.patch("warnings.warn", our_warn), mock.patch( "sqlalchemy.util.SQLALCHEMY_WARN_20", True @@ -235,6 +245,17 @@ def _assert_no_stray_pool_connections(): engines.testing_reaper.assert_all_closed() +def int_within_variance(expected, received, variance): + deviance = int(expected * variance) + assert ( + abs(received - expected) < deviance + ), "Given int value %s is not within %d%% of expected value %s" % ( + received, + variance * 100, + expected, + ) + + def eq_regex(a, b, msg=None): assert re.match(b, a), msg or "%r !~ %r" % (a, b) @@ -313,8 +334,10 @@ def startswith_(a, fragment, msg=None): def eq_ignore_whitespace(a, b, msg=None): a = re.sub(r"^\s+?|\n", "", a) a = re.sub(r" {2,}", " ", a) + a = re.sub(r"\t", "", a) b = re.sub(r"^\s+?|\n", "", b) b = re.sub(r" {2,}", " ", b) + b = re.sub(r"\t", "", b) assert a == b, msg or "%r != %r" % (a, b) @@ -357,6 +380,40 @@ def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): ) +def assert_warns(except_cls, callable_, *args, **kwargs): + """legacy adapter function for functions that were previously using + assert_raises with SAWarning or similar. + + has some workarounds to accommodate the fact that the callable completes + with this approach rather than stopping at the exception raise. + + + """ + with _expect_warnings(except_cls, [".*"], squelch_other_warnings=True): + return callable_(*args, **kwargs) + + +def assert_warns_message(except_cls, msg, callable_, *args, **kwargs): + """legacy adapter function for functions that were previously using + assert_raises with SAWarning or similar. + + has some workarounds to accommodate the fact that the callable completes + with this approach rather than stopping at the exception raise. 
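# --- illustrative sketch (editor's note, not part of the patch) -------------
# Typical use of the legacy adapters added above inside a test.  The warning
# class and message fragment are placeholders; since the adapter matches with
# regex.search(), the fragment only needs to occur somewhere in the warning.
from sqlalchemy import exc
from sqlalchemy.testing import assert_warns_message


def test_cache_warning_is_emitted():
    def go():
        pass  # placeholder for a call expected to emit SAWarning

    assert_warns_message(exc.SAWarning, "will not produce a cache key", go)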
+ + Also uses regex.search() to match the given message to the error string + rather than regex.match(). + + """ + with _expect_warnings( + except_cls, + [msg], + search_msg=True, + regex=False, + squelch_other_warnings=True, + ): + return callable_(*args, **kwargs) + + def assert_raises_message_context_ok( except_cls, msg, callable_, *args, **kwargs ): @@ -378,6 +435,15 @@ class _ErrorContainer(object): @contextlib.contextmanager def _expect_raises(except_cls, msg=None, check_context=False): + if ( + isinstance(except_cls, type) + and issubclass(except_cls, Warning) + or isinstance(except_cls, Warning) + ): + raise TypeError( + "Use expect_warnings for warnings, not " + "expect_raises / assert_raises" + ) ec = _ErrorContainer() if check_context: are_we_already_in_a_traceback = sys.exc_info()[0] @@ -437,6 +503,7 @@ def assert_compile( render_schema_translate=False, default_schema_name=None, from_linting=False, + check_param_order=True, ): if use_default_dialect: dialect = default.DefaultDialect() @@ -450,8 +517,11 @@ def assert_compile( if dialect is None: dialect = config.db.dialect - elif dialect == "default": - dialect = default.DefaultDialect() + elif dialect == "default" or dialect == "default_qmark": + if dialect == "default": + dialect = default.DefaultDialect() + else: + dialect = default.DefaultDialect(paramstyle="qmark") dialect.supports_default_values = supports_default_values dialect.supports_default_metavalue = supports_default_metavalue elif dialect == "default_enhanced": @@ -552,6 +622,15 @@ def _compiler_dispatch(self, compiler, **kwargs): # are the "self.statement" element c = CheckCompilerAccess(clause).compile(dialect=dialect, **kw) + if isinstance(clause, sqltypes.TypeEngine): + cache_key_no_warnings = clause._static_cache_key + if cache_key_no_warnings: + hash(cache_key_no_warnings) + else: + cache_key_no_warnings = clause._generate_cache_key() + if cache_key_no_warnings: + hash(cache_key_no_warnings[0]) + param_str = repr(getattr(c, "params", {})) if util.py3k: param_str = param_str.encode("utf-8").decode("ascii", "ignore") @@ -574,7 +653,7 @@ def _compiler_dispatch(self, compiler, **kwargs): if checkparams is not None: eq_(c.construct_params(params), checkparams) if checkpositional is not None: - p = c.construct_params(params) + p = c.construct_params(params, escape_names=False) eq_(tuple([p[x] for x in c.positiontup]), checkpositional) if check_prefetch is not None: eq_(c.prefetch, check_prefetch) @@ -594,6 +673,58 @@ def _compiler_dispatch(self, compiler, **kwargs): }, check_post_param, ) + if check_param_order and getattr(c, "params", None): + + def get_dialect(paramstyle, positional): + cp = copy(dialect) + cp.paramstyle = paramstyle + cp.positional = positional + return cp + + pyformat_dialect = get_dialect("pyformat", False) + pyformat_c = clause.compile(dialect=pyformat_dialect, **kw) + stmt = re.sub(r"[\n\t]", "", pyformat_c.string) + + qmark_dialect = get_dialect("qmark", True) + qmark_c = clause.compile(dialect=qmark_dialect, **kw) + values = list(qmark_c.positiontup) + escaped = qmark_c.escaped_bind_names + + for post_param in ( + qmark_c.post_compile_params | qmark_c.literal_execute_params + ): + name = qmark_c.bind_names[post_param] + if name in values: + values = [v for v in values if v != name] + positions = [] + pos_by_value = defaultdict(list) + for v in values: + try: + if v in pos_by_value: + start = pos_by_value[v][-1] + else: + start = 0 + esc = escaped.get(v, v) + pos = stmt.index("%%(%s)s" % (esc,), start) + 2 + positions.append(pos) + 
pos_by_value[v].append(pos) + except ValueError: + msg = "Expected to find bindparam %r in %r" % (v, stmt) + assert False, msg + + ordered = all( + positions[i - 1] < positions[i] + for i in range(1, len(positions)) + ) + + expected = [v for _, v in sorted(zip(positions, values))] + + msg = ( + "Order of parameters %s does not match the order " + "in the statement %s. Statement %r" % (values, expected, stmt) + ) + + is_true(ordered, msg) class ComparesTables(object): diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index 4ee4c5844b0..39eeddda7cc 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -1,5 +1,5 @@ # testing/assertsql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/asyncio.py b/lib/sqlalchemy/testing/asyncio.py index 877d1eb94bf..6d4201c4945 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -1,5 +1,5 @@ # testing/asyncio.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -21,16 +21,21 @@ import inspect from . import config -from ..util.concurrency import _util_async_run -from ..util.concurrency import _util_async_run_coroutine_function +from ..util.concurrency import _AsyncUtil # may be set to False if the # --disable-asyncio flag is passed to the test runner. ENABLE_ASYNCIO = True +_async_util = _AsyncUtil() # it has lazy init so just always create one + + +def _shutdown(): + """called when the test finishes""" + _async_util.close() def _run_coroutine_function(fn, *args, **kwargs): - return _util_async_run_coroutine_function(fn, *args, **kwargs) + return _async_util.run(fn, *args, **kwargs) def _assume_async(fn, *args, **kwargs): @@ -47,7 +52,7 @@ def _assume_async(fn, *args, **kwargs): if not ENABLE_ASYNCIO: return fn(*args, **kwargs) - return _util_async_run(fn, *args, **kwargs) + return _async_util.run_in_greenlet(fn, *args, **kwargs) def _maybe_async_provisioning(fn, *args, **kwargs): @@ -63,11 +68,10 @@ def _maybe_async_provisioning(fn, *args, **kwargs): """ if not ENABLE_ASYNCIO: - return fn(*args, **kwargs) if config.any_async: - return _util_async_run(fn, *args, **kwargs) + return _async_util.run_in_greenlet(fn, *args, **kwargs) else: return fn(*args, **kwargs) @@ -88,7 +92,7 @@ def _maybe_async(fn, *args, **kwargs): is_async = config._current.is_async if is_async: - return _util_async_run(fn, *args, **kwargs) + return _async_util.run_in_greenlet(fn, *args, **kwargs) else: return fn(*args, **kwargs) diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index 097eb94e413..ae3061c0d87 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -1,5 +1,5 @@ # testing/config.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -94,6 +94,126 @@ def combinations_list(arg_iterable, **kw): return combinations(*arg_iterable, **kw) +class Variation(object): + __slots__ = ("_name", "_argname") + + def __init__(self, case, argname, case_names): + self._name = case + self._argname = argname + for casename in case_names: + 
setattr(self, casename, casename == case) + + @property + def name(self): + return self._name + + def __bool__(self): + return self._name == self._argname + + def __nonzero__(self): + return not self.__bool__() + + def __str__(self): + return "%s=%r" % (self._argname, self._name) + + def __repr__(self): + return str(self) + + def fail(self): + # can't import util.fail() under py2.x without resolving + # import cycle + assert False, "Unknown %s" % (self,) + + @classmethod + def idfn(cls, variation): + return variation.name + + @classmethod + def generate_cases(cls, argname, cases): + case_names = [ + argname if c is True else "not_" + argname if c is False else c + for c in cases + ] + + typ = type( + argname, + (Variation,), + { + "__slots__": tuple(case_names), + }, + ) + + return [typ(casename, argname, case_names) for casename in case_names] + + +def variation(argname, cases): + """a helper around testing.combinations that provides a single namespace + that can be used as a switch. + + e.g.:: + + @testing.variation("querytyp", ["select", "subquery", "legacy_query"]) + @testing.variation("lazy", ["select", "raise", "raise_on_sql"]) + def test_thing( + self, + querytyp, + lazy, + decl_base + ): + class Thing(decl_base): + __tablename__ = 'thing' + + # use name directly + rel = relationship("Rel", lazy=lazy.name) + + # use as a switch + if querytyp.select: + stmt = select(Thing) + elif querytyp.subquery: + stmt = select(Thing).subquery() + elif querytyp.legacy_query: + stmt = Session.query(Thing) + else: + querytyp.fail() + + + The variable provided is a slots object of boolean variables, as well + as the name of the case itself under the attribute ".name" + + """ + + cases_plus_limitations = [ + entry + if (isinstance(entry, tuple) and len(entry) == 2) + else (entry, None) + for entry in cases + ] + + variations = Variation.generate_cases( + argname, [c for c, l in cases_plus_limitations] + ) + return combinations( + id_="ia", + argnames=argname, + *[ + (variation._name, variation, limitation) + if limitation is not None + else (variation._name, variation) + for variation, (case, limitation) in zip( + variations, cases_plus_limitations + ) + ] + ) + + +def variation_fixture(argname, cases, scope="function"): + return fixture( + params=Variation.generate_cases(argname, cases), + ids=Variation.idfn, + scope=scope, + ) + + def fixture(*arg, **kw): return _fixture_functions.fixture(*arg, **kw) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index a54f70c5e08..8cad9eda32f 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -17,6 +17,7 @@ from .util import gc_collect from .. import event from .. 
import pool +from ..util import await_only class ConnectionKiller(object): @@ -96,7 +97,10 @@ def _drop_testing_engines(self, scope): and proxy_ref._pool is rec.pool ): self._safe(proxy_ref._checkin) - rec.dispose() + if hasattr(rec, "sync_engine"): + await_only(rec.dispose()) + else: + rec.dispose() eng.clear() def after_test(self): @@ -272,11 +276,15 @@ def testing_engine( future=None, asyncio=False, transfer_staticpool=False, + _sqlite_savepoint=False, ): """Produce an engine configured by --options with optional overrides.""" if asyncio: - from sqlalchemy.ext.asyncio import create_async_engine as create_engine + assert not _sqlite_savepoint + from sqlalchemy.ext.asyncio import ( + create_async_engine as create_engine, + ) elif future or ( config.db and config.db._is_future and future is not False ): @@ -288,9 +296,11 @@ def testing_engine( if not options: use_reaper = True scope = "function" + sqlite_savepoint = False else: use_reaper = options.pop("use_reaper", True) scope = options.pop("scope", "function") + sqlite_savepoint = options.pop("sqlite_savepoint", False) url = url or config.db.url @@ -306,10 +316,21 @@ def testing_engine( engine = create_engine(url, **options) + if sqlite_savepoint and engine.name == "sqlite": + # apply SQLite savepoint workaround + @event.listens_for(engine, "connect") + def do_connect(dbapi_connection, connection_record): + dbapi_connection.isolation_level = None + + @event.listens_for(engine, "begin") + def do_begin(conn): + conn.exec_driver_sql("BEGIN") + if transfer_staticpool: from sqlalchemy.pool import StaticPool if config.db is not None and isinstance(config.db.pool, StaticPool): + use_reaper = False engine.pool._transfer_from(config.db.pool) if scope == "global": diff --git a/lib/sqlalchemy/testing/entities.py b/lib/sqlalchemy/testing/entities.py index 9daa5c61f80..6cec155cf23 100644 --- a/lib/sqlalchemy/testing/entities.py +++ b/lib/sqlalchemy/testing/entities.py @@ -1,5 +1,5 @@ # testing/entities.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index d5522289b4f..1aff19c0ea2 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -1,5 +1,5 @@ # testing/exclusions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -429,9 +429,8 @@ def fails(reason=None): return fails_if(BooleanPredicate(True, reason or "expected to fail")) -@decorator -def future(fn, *arg): - return fails_if(LambdaPredicate(fn), "Future feature") +def future(): + return fails_if(BooleanPredicate(True, "Future feature")) def fails_on(db, reason=None): diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index f04056c5e5e..0ba9343b580 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -1,11 +1,12 @@ # testing/fixtures.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php import contextlib +import itertools import re import sys @@ -13,6 +14,8 @@ from . import assertions from . 
import config from . import schema +from .assertions import eq_ +from .assertions import ne_ from .entities import BasicEntity from .entities import ComparableEntity from .entities import ComparableMixin # noqa @@ -24,6 +27,8 @@ from ..orm import registry from ..orm.decl_api import DeclarativeMeta from ..schema import sort_tables_and_constraints +from ..sql import visitors +from ..sql.elements import ClauseElement @config.mark_base_test_class() @@ -50,6 +55,13 @@ class TestBase(object): def assert_(self, val, msg=None): assert val, msg + @config.fixture() + def nocache(self): + _cache = config.db._compiled_cache + config.db._compiled_cache = None + yield + config.db._compiled_cache = _cache + @config.fixture() def connection_no_trans(self): eng = getattr(self, "bind", None) or config.db @@ -78,12 +90,38 @@ def connection(self): # run a close all connections. conn.close() + @config.fixture() + def close_result_when_finished(self): + to_close = [] + to_consume = [] + + def go(result, consume=False): + to_close.append(result) + if consume: + to_consume.append(result) + + yield go + for r in to_consume: + try: + r.all() + except: + pass + for r in to_close: + try: + r.close() + except: + pass + @config.fixture() def registry(self, metadata): reg = registry(metadata=metadata) yield reg reg.dispose() + @config.fixture + def decl_base(self, registry): + return registry.generate_base() + @config.fixture() def future_connection(self, future_engine, connection): # integrate the future_engine and connection fixtures so @@ -131,6 +169,10 @@ def go(**kw): return go + @config.fixture + def fixture_session(self): + return fixture_session() + @config.fixture() def metadata(self, request): """Provide bound MetaData for a single test, dropping afterwards.""" @@ -432,6 +474,10 @@ def _teardown_each_tables(self): elif self.run_create_tables == "each": drop_all_tables_from_metadata(self._tables_metadata, self.bind) + savepoints = getattr(config.requirements, "savepoints", False) + if savepoints: + savepoints = savepoints.enabled + # no need to run deletes if tables are recreated on setup if ( self.run_define_tables != "each" @@ -449,7 +495,11 @@ def _teardown_each_tables(self): ] ): try: - conn.execute(table.delete()) + if savepoints: + with conn.begin_nested(): + conn.execute(table.delete()) + else: + conn.execute(table.delete()) except sa.exc.DBAPIError as ex: util.print_( ("Error emptying table %s: %r" % (table, ex)), @@ -823,3 +873,106 @@ def define_tables(cls, metadata): Computed("normal * 42", persisted=True), ) ) + + +class CacheKeyFixture(object): + def _compare_equal(self, a, b, compare_values): + a_key = a._generate_cache_key() + b_key = b._generate_cache_key() + + if a_key is None: + assert a._annotations.get("nocache") + + assert b_key is None + else: + + eq_(a_key.key, b_key.key) + eq_(hash(a_key.key), hash(b_key.key)) + + for a_param, b_param in zip(a_key.bindparams, b_key.bindparams): + assert a_param.compare(b_param, compare_values=compare_values) + return a_key, b_key + + def _run_cache_key_fixture(self, fixture, compare_values): + case_a = fixture() + case_b = fixture() + + for a, b in itertools.combinations_with_replacement( + range(len(case_a)), 2 + ): + if a == b: + a_key, b_key = self._compare_equal( + case_a[a], case_b[b], compare_values + ) + if a_key is None: + continue + else: + a_key = case_a[a]._generate_cache_key() + b_key = case_b[b]._generate_cache_key() + + if a_key is None or b_key is None: + if a_key is None: + assert case_a[a]._annotations.get("nocache") + if b_key is None: + 
assert case_b[b]._annotations.get("nocache") + continue + + if a_key.key == b_key.key: + for a_param, b_param in zip( + a_key.bindparams, b_key.bindparams + ): + if not a_param.compare( + b_param, compare_values=compare_values + ): + break + else: + # this fails unconditionally since we could not + # find bound parameter values that differed. + # Usually we intended to get two distinct keys here + # so the failure will be more descriptive using the + # ne_() assertion. + ne_(a_key.key, b_key.key) + else: + ne_(a_key.key, b_key.key) + + # ClauseElement-specific test to ensure the cache key + # collected all the bound parameters that aren't marked + # as "literal execute" + if isinstance(case_a[a], ClauseElement) and isinstance( + case_b[b], ClauseElement + ): + assert_a_params = [] + assert_b_params = [] + + for elem in visitors.iterate(case_a[a]): + if elem.__visit_name__ == "bindparam": + assert_a_params.append(elem) + + for elem in visitors.iterate(case_b[b]): + if elem.__visit_name__ == "bindparam": + assert_b_params.append(elem) + + # note we're asserting the order of the params as well as + # if there are dupes or not. ordering has to be + # deterministic and matches what a traversal would provide. + eq_( + sorted(a_key.bindparams, key=lambda b: b.key), + sorted( + util.unique_list(assert_a_params), key=lambda b: b.key + ), + ) + eq_( + sorted(b_key.bindparams, key=lambda b: b.key), + sorted( + util.unique_list(assert_b_params), key=lambda b: b.key + ), + ) + + def _run_cache_key_equal_fixture(self, fixture, compare_values): + case_a = fixture() + case_b = fixture() + + for a, b in itertools.combinations_with_replacement( + range(len(case_a)), 2 + ): + self._compare_equal(case_a[a], case_b[b], compare_values) diff --git a/lib/sqlalchemy/testing/mock.py b/lib/sqlalchemy/testing/mock.py index 8fe08a6789f..d164c5856f4 100644 --- a/lib/sqlalchemy/testing/mock.py +++ b/lib/sqlalchemy/testing/mock.py @@ -1,5 +1,5 @@ # testing/mock.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index 430cb5fb687..e2227e61828 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -1,5 +1,5 @@ # testing/pickleable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,6 +10,8 @@ """ from . 
import fixtures +from ..schema import Column +from ..types import String class User(fixtures.ComparableEntity): @@ -51,6 +53,14 @@ def __init__(self, obj, parent=None): self.parent = parent +class Mixin(object): + email_address = Column(String) + + +class AddressWMixin(Mixin, fixtures.ComparableEntity): + pass + + class Foo(object): def __init__(self, moredata, stuff="im stuff"): self.data = "im data" diff --git a/lib/sqlalchemy/testing/plugin/__init__.py b/lib/sqlalchemy/testing/plugin/__init__.py index e69de29bb2d..ce960be967d 100644 --- a/lib/sqlalchemy/testing/plugin/__init__.py +++ b/lib/sqlalchemy/testing/plugin/__init__.py @@ -0,0 +1,6 @@ +# testing/plugin/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/testing/plugin/bootstrap.py b/lib/sqlalchemy/testing/plugin/bootstrap.py index b4691c57d42..e9cdff4bc04 100644 --- a/lib/sqlalchemy/testing/plugin/bootstrap.py +++ b/lib/sqlalchemy/testing/plugin/bootstrap.py @@ -1,3 +1,9 @@ +# testing/plugin/bootstrap.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """ Bootstrapper for test framework plugins. @@ -12,8 +18,6 @@ SQLAlchemy/Alembic themselves without the need to ship/install a separate package outside of SQLAlchemy. -NOTE: copied/adapted from SQLAlchemy main for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0. """ @@ -27,14 +31,20 @@ def load_file_as_module(name): path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) - if sys.version_info >= (3, 3): - from importlib import machinery - mod = machinery.SourceFileLoader(name, path).load_module() + if sys.version_info >= (3, 5): + import importlib.util + + spec = importlib.util.spec_from_file_location(name, path) + assert spec is not None + assert spec.loader is not None + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) else: import imp mod = imp.load_source(name, path) + return mod diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index d2e4a0f690a..d78c2a76ff9 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -1,5 +1,5 @@ -# plugin/plugin_base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# testing/plugin/plugin_base.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -101,7 +101,7 @@ def setup_options(make_option): make_option( "--dbdriver", action="append", - type="string", + type=str, dest="dbdriver", help="Additional database drivers to include in tests. 
" "These are linked to the existing database URLs by the " @@ -391,7 +391,7 @@ def _init_symbols(options, file_config): config._fixture_functions = _fixture_fn_class() -@post +@pre def _set_disable_asyncio(opt, file_config): if opt.disable_asyncio or not py3k: from sqlalchemy.testing import asyncio diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py index 6c6287060b7..b33dcdb0d84 100644 --- a/lib/sqlalchemy/testing/plugin/pytestplugin.py +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -1,3 +1,9 @@ +# testing/plugin/pytestplugin.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php try: # installed by bootstrap.py import sqla_plugin_base as plugin_base @@ -14,16 +20,10 @@ import os import re import sys +import uuid import pytest -try: - import xdist # noqa - - has_xdist = True -except ImportError: - has_xdist = False - py2k = sys.version_info < (3, 0) if py2k: @@ -84,6 +84,9 @@ def __call__( def pytest_configure(config): + if config.pluginmanager.hasplugin("xdist"): + config.pluginmanager.register(XDistHooks()) + if hasattr(config, "workerinput"): plugin_base.restore_important_follower_config(config.workerinput) plugin_base.configure_follower(config.workerinput["follower_ident"]) @@ -137,6 +140,12 @@ def pytest_sessionfinish(session): collect_types.dump_stats(session.config.option.dump_pyannotate) +def pytest_unconfigure(config): + from sqlalchemy.testing import asyncio + + asyncio._shutdown() + + def pytest_collection_finish(session): if session.config.option.dump_pyannotate: from pyannotate_runtime import collect_types @@ -157,10 +166,8 @@ def _filter(filename): collect_types.init_types_collection(filter_filename=_filter) -if has_xdist: - import uuid - - def pytest_configure_node(node): +class XDistHooks(object): + def pytest_configure_node(self, node): from sqlalchemy.testing import provision from sqlalchemy.testing import asyncio @@ -175,7 +182,7 @@ def pytest_configure_node(node): provision.create_follower_db, node.workerinput["follower_ident"] ) - def pytest_testnodedown(node, error): + def pytest_testnodedown(self, node, error): from sqlalchemy.testing import provision from sqlalchemy.testing import asyncio @@ -205,27 +212,36 @@ def pytest_collection_modifyitems(session, config, items): items[:] = [ item for item in items - if isinstance(item.parent, pytest.Instance) - and not item.parent.parent.name.startswith("_") + if item.getparent(pytest.Class) is not None + and not item.getparent(pytest.Class).name.startswith("_") ] - test_classes = set(item.parent for item in items) + test_classes = set(item.getparent(pytest.Class) for item in items) + + def collect(element): + for inst_or_fn in element.collect(): + if isinstance(inst_or_fn, pytest.Collector): + # no yield from in 2.7 + for el in collect(inst_or_fn): + yield el + else: + yield inst_or_fn def setup_test_classes(): for test_class in test_classes: for sub_cls in plugin_base.generate_sub_tests( - test_class.cls, test_class.parent.module + test_class.cls, test_class.module ): if sub_cls is not test_class.cls: per_cls_dict = rebuilt_items[test_class.cls] # support pytest 5.4.0 and above pytest.Class.from_parent ctor = getattr(pytest.Class, "from_parent", pytest.Class) - for inst in ctor( - name=sub_cls.__name__, parent=test_class.parent.parent - ).collect(): - for t in inst.collect(): - per_cls_dict[t.name].append(t) + 
module = test_class.getparent(pytest.Module) + for fn in collect( + ctor(name=sub_cls.__name__, parent=module) + ): + per_cls_dict[fn.name].append(fn) # class requirements will sometimes need to access the DB to check # capabilities, so need to do this for async @@ -233,8 +249,9 @@ def setup_test_classes(): newitems = [] for item in items: - if item.parent.cls in rebuilt_items: - newitems.extend(rebuilt_items[item.parent.cls][item.name]) + cls_ = item.cls + if cls_ in rebuilt_items: + newitems.extend(rebuilt_items[cls_][item.name]) else: newitems.append(item) @@ -247,8 +264,8 @@ def setup_test_classes(): items[:] = sorted( newitems, key=lambda item: ( - item.parent.parent.parent.name, - item.parent.parent.name, + item.getparent(pytest.Module).name, + item.getparent(pytest.Class).name, item.name, ), ) @@ -268,7 +285,7 @@ def pytest_pycollect_makeitem(collector, name, obj): ] elif ( inspect.isfunction(obj) - and isinstance(collector, pytest.Instance) + and collector.cls is not None and plugin_base.want_method(collector.cls, obj) ): # None means, fall back to default logic, which includes @@ -358,10 +375,6 @@ def _parametrize_cls(module, cls): def pytest_runtest_setup(item): from sqlalchemy.testing import asyncio - from sqlalchemy.util import string_types - - if not isinstance(item, pytest.Function): - return # pytest_runtest_setup runs *before* pytest fixtures with scope="class". # plugin_base.start_test_class_outside_fixtures may opt to raise SkipTest @@ -371,48 +384,66 @@ def pytest_runtest_setup(item): global _current_class - if _current_class is None: + if isinstance(item, pytest.Function) and _current_class is None: asyncio._maybe_async_provisioning( plugin_base.start_test_class_outside_fixtures, - item.parent.parent.cls, + item.cls, ) - _current_class = item.parent.parent + _current_class = item.getparent(pytest.Class) - def finalize(): - global _current_class, _current_report - _current_class = None - try: - asyncio._maybe_async_provisioning( - plugin_base.stop_test_class_outside_fixtures, - item.parent.parent.cls, - ) - except Exception as e: - # in case of an exception during teardown attach the original - # error to the exception message, otherwise it will get lost - if _current_report.failed: - if not e.args: - e.args = ( - "__Original test failure__:\n" - + _current_report.longreprtext, - ) - elif e.args[-1] and isinstance(e.args[-1], string_types): - args = list(e.args) - args[-1] += ( - "\n__Original test failure__:\n" - + _current_report.longreprtext - ) - e.args = tuple(args) - else: - e.args += ( - "__Original test failure__", - _current_report.longreprtext, - ) - raise - finally: - _current_report = None - - item.parent.parent.addfinalizer(finalize) +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_teardown(item, nextitem): + # runs inside of pytest function fixture scope + # after test function runs + from sqlalchemy.testing import asyncio + from sqlalchemy.util import string_types + + asyncio._maybe_async(plugin_base.after_test, item) + + yield + # this is now after all the fixture teardown have run, the class can be + # finalized. Since pytest v7 this finalizer can no longer be added in + # pytest_runtest_setup since the class has not yet been setup at that + # time. 
+ # See https://github.com/pytest-dev/pytest/issues/9343 + global _current_class, _current_report + + if _current_class is not None and ( + # last test or a new class + nextitem is None + or nextitem.getparent(pytest.Class) is not _current_class + ): + _current_class = None + + try: + asyncio._maybe_async_provisioning( + plugin_base.stop_test_class_outside_fixtures, item.cls + ) + except Exception as e: + # in case of an exception during teardown attach the original + # error to the exception message, otherwise it will get lost + if _current_report.failed: + if not e.args: + e.args = ( + "__Original test failure__:\n" + + _current_report.longreprtext, + ) + elif e.args[-1] and isinstance(e.args[-1], string_types): + args = list(e.args) + args[-1] += ( + "\n__Original test failure__:\n" + + _current_report.longreprtext + ) + e.args = tuple(args) + else: + e.args += ( + "__Original test failure__", + _current_report.longreprtext, + ) + raise + finally: + _current_report = None def pytest_runtest_call(item): @@ -424,8 +455,8 @@ def pytest_runtest_call(item): asyncio._maybe_async( plugin_base.before_test, item, - item.parent.module.__name__, - item.parent.cls, + item.module.__name__, + item.cls, item.name, ) @@ -439,15 +470,6 @@ def pytest_runtest_logreport(report): _current_report = report -def pytest_runtest_teardown(item, nextitem): - # runs inside of pytest function fixture scope - # after test function runs - - from sqlalchemy.testing import asyncio - - asyncio._maybe_async(plugin_base.after_test, item) - - @pytest.fixture(scope="class") def setup_class_methods(request): from sqlalchemy.testing import asyncio diff --git a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py index 36b68417bce..a765090135a 100644 --- a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py +++ b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py @@ -1,3 +1,9 @@ +# testing/plugin/reinvent_fixtures_py2k.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """ invent a quick version of pytest autouse fixtures as pytest's unacceptably slow collection/high memory use in pytest 4.6.11, which is the highest version that diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index de4847f2f3d..a116730ec98 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -1,5 +1,5 @@ # testing/profiling.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -251,6 +251,8 @@ def wrap(fn, *args, **kw): with mock.patch.object( deprecations, "SQLALCHEMY_WARN_20", False + ), mock.patch.object( + deprecations, "SILENCE_UBER_WARNING", True ), mock.patch.object( row.LegacyRow, "_default_key_style", row.KEY_OBJECTS_NO_WARN ): diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py index a911ba69cee..370ee12c7eb 100644 --- a/lib/sqlalchemy/testing/provision.py +++ b/lib/sqlalchemy/testing/provision.py @@ -1,3 +1,9 @@ +# testing/provision.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import collections import logging 
@@ -287,13 +293,15 @@ def create_db(cfg, eng, ident): Used when a test run will employ multiple processes, e.g., when run via `tox` or `pytest -n4`. """ - raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) + raise NotImplementedError( + "no DB creation routine for cfg: %s" % (eng.url,) + ) @register.init def drop_db(cfg, eng, ident): """Drop a database that we dynamically created for testing.""" - raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) + raise NotImplementedError("no DB drop routine for cfg: %s" % (eng.url,)) @register.init @@ -306,7 +314,7 @@ def update_db_opts(db_url, db_opts): def post_configure_engine(url, engine, follower_ident): """Perform extra steps after configuring an engine for testing. - (For the internal dialects, currently only used by sqlite, oracle) + (For the internal dialects, currently only used by sqlite, oracle, mssql) """ pass @@ -377,7 +385,7 @@ def temp_table_keyword_args(cfg, eng): ComponentReflectionTest class in suite/test_reflection.py """ raise NotImplementedError( - "no temp table keyword args routine for cfg: %s" % eng.url + "no temp table keyword args routine for cfg: %s" % (eng.url,) ) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index f8b5dd6062a..d8247dc2830 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1,5 +1,5 @@ # testing/requirements.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -18,6 +18,7 @@ import platform import sys +from . import asyncio as _test_asyncio from . import exclusions from . import only_on from .. import util @@ -151,6 +152,17 @@ def implicitly_named_constraints(self): return exclusions.open() + @property + def unusual_column_name_characters(self): + """target database allows column names that have unusual characters + in them, such as dots, spaces, slashes, or percent signs. + + The column names are as always in such a case quoted, however the + DB still needs to support those characters in the name somehow. + + """ + return exclusions.open() + @property def subqueries(self): """Target database must support subqueries.""" @@ -228,9 +240,8 @@ def nullsordering(self): @property def standalone_binds(self): - """target database/driver supports bound parameters as column expressions - without being in the context of a typed column. - + """target database/driver supports bound parameters as column + expressions without being in the context of a typed column. 
""" return exclusions.closed() @@ -664,6 +675,11 @@ def temp_table_names(self): """target dialect supports listing of temporary table names""" return exclusions.closed() + @property + def has_temp_table(self): + """target dialect supports checking a single temp table name""" + return exclusions.closed() + @property def temporary_tables(self): """target database supports temporary tables""" @@ -753,6 +769,29 @@ def datetime(self): return exclusions.open() + @property + def datetime_timezone(self): + """target dialect supports representation of Python + datetime.datetime() with tzinfo with DateTime(timezone=True).""" + + return exclusions.closed() + + @property + def time_timezone(self): + """target dialect supports representation of Python + datetime.time() with tzinfo with Time(timezone=True).""" + + return exclusions.closed() + + @property + def datetime_implicit_bound(self): + """target dialect when given a datetime object will bind it such + that the database server knows the object is a datetime, and not + a plain string. + + """ + return exclusions.open() + @property def datetime_microseconds(self): """target dialect supports representation of Python @@ -767,6 +806,16 @@ def timestamp_microseconds(self): if TIMESTAMP is used.""" return exclusions.closed() + @property + def timestamp_microseconds_implicit_bound(self): + """target dialect when given a datetime object which also includes + a microseconds portion when using the TIMESTAMP data type + will bind it such that the database server knows + the object is a datetime with microseconds, and not a plain string. + + """ + return self.timestamp_microseconds + @property def datetime_historic(self): """target dialect supports representation of Python @@ -980,6 +1029,12 @@ def precision_numerics_retains_significant_digits(self): return exclusions.closed() + @property + def infinity_floats(self): + """The Float type can persist and load float('inf'), float('-inf').""" + + return exclusions.closed() + @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at @@ -988,6 +1043,20 @@ def precision_generic_float_type(self): """ return exclusions.open() + @property + def literal_float_coercion(self): + """target backend will return the exact float value 15.7563 + with only four significant digits from this statement: + + SELECT :param + + where :param is the Python float 15.7563 + + i.e. it does not return 15.75629997253418 + + """ + return exclusions.open() + @property def floats_to_four_decimals(self): """target backend can return a floating-point number with four @@ -1256,12 +1325,36 @@ def python37(self): def dataclasses(self): return self.python37 + @property + def python38(self): + return exclusions.only_if( + lambda: util.py38, "Python 3.8 or above required" + ) + + @property + def not_python314(self): + """This requirement is interim to assist with backporting of + issue #12405. + + SQLAlchemy 1.4 still includes the ``await_fallback()`` method that + makes use of ``asyncio.get_event_loop_policy()``. This is removed + in SQLAlchemy 2.1. 
+ + """ + return exclusions.skip_if( + lambda: util.py314, "Python 3.14 or above not supported" + ) + @property def cpython(self): return exclusions.only_if( lambda: util.cpython, "cPython interpreter needed" ) + @property + def is64bit(self): + return exclusions.only_if(lambda: util.is64bit, "64bit required") + @property def patch_library(self): def check_lib(): @@ -1337,11 +1430,18 @@ def async_dialect(self): return exclusions.closed() + @property + def asyncio(self): + return self.greenlet + @property def greenlet(self): def go(config): + if not _test_asyncio.ENABLE_ASYNCIO: + return False + try: - import greenlet # noqa F401 + import greenlet # noqa: F401 except ImportError: return False else: diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py index 9c6bf9e4c29..1281a27e589 100644 --- a/lib/sqlalchemy/testing/schema.py +++ b/lib/sqlalchemy/testing/schema.py @@ -1,5 +1,5 @@ # testing/schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/__init__.py b/lib/sqlalchemy/testing/suite/__init__.py index 30817e1e445..8435aa004f3 100644 --- a/lib/sqlalchemy/testing/suite/__init__.py +++ b/lib/sqlalchemy/testing/suite/__init__.py @@ -1,3 +1,9 @@ +# testing/suite/__init__.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .test_cte import * # noqa from .test_ddl import * # noqa from .test_deprecations import * # noqa diff --git a/lib/sqlalchemy/testing/suite/test_cte.py b/lib/sqlalchemy/testing/suite/test_cte.py index a94ee55dc03..56180ca8d44 100644 --- a/lib/sqlalchemy/testing/suite/test_cte.py +++ b/lib/sqlalchemy/testing/suite/test_cte.py @@ -1,3 +1,9 @@ +# testing/suite/test_cte.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import fixtures from ..assertions import eq_ from ..schema import Column diff --git a/lib/sqlalchemy/testing/suite/test_ddl.py b/lib/sqlalchemy/testing/suite/test_ddl.py index b3fee551e01..ee8b802ac74 100644 --- a/lib/sqlalchemy/testing/suite/test_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_ddl.py @@ -1,3 +1,9 @@ +# testing/suite/test_ddl.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import random from . import testing diff --git a/lib/sqlalchemy/testing/suite/test_deprecations.py b/lib/sqlalchemy/testing/suite/test_deprecations.py index b36162fa59b..95eed768c32 100644 --- a/lib/sqlalchemy/testing/suite/test_deprecations.py +++ b/lib/sqlalchemy/testing/suite/test_deprecations.py @@ -1,3 +1,9 @@ +# testing/suite/test_deprecations.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. 
import fixtures from ..assertions import eq_ from ..schema import Column diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index c2c17d0ddd1..6f32fee86ed 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -1,4 +1,10 @@ #! coding: utf-8 +# testing/suite/test_dialect.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from . import testing from .. import assert_raises @@ -317,7 +323,7 @@ class FutureWeCanSetDefaultSchemaWEventsTest( class DifficultParametersTest(fixtures.TestBase): __backend__ = True - @testing.combinations( + tough_parameters = testing.combinations( ("boring",), ("per cent",), ("per % cent",), @@ -325,15 +331,30 @@ class DifficultParametersTest(fixtures.TestBase): ("par(ens)",), ("percent%(ens)yah",), ("col:ons",), + ("_starts_with_underscore",), + ("dot.s",), ("more :: %colons%",), + ("_name",), + ("___name",), + ("[BracketsAndCase]",), + ("42numbers",), + ("percent%signs",), + ("has spaces",), ("/slashes/",), ("more/slashes",), ("q?marks",), ("1param",), ("1col:on",), - argnames="name", + argnames="paramname", ) - def test_round_trip(self, name, connection, metadata): + + @tough_parameters + @config.requirements.unusual_column_name_characters + def test_round_trip_same_named_column( + self, paramname, connection, metadata + ): + name = paramname + t = Table( "t", metadata, @@ -359,3 +380,58 @@ def test_round_trip(self, name, connection, metadata): # name works as the key from cursor.description eq_(row._mapping[name], "some name") + + # use expanding IN + stmt = select(t.c[name]).where( + t.c[name].in_(["some name", "some other_name"]) + ) + + row = connection.execute(stmt).first() + + @testing.fixture + def multirow_fixture(self, metadata, connection): + mytable = Table( + "mytable", + metadata, + Column("myid", Integer), + Column("name", String(50)), + Column("desc", String(50)), + ) + + mytable.create(connection) + + connection.execute( + mytable.insert(), + [ + {"myid": 1, "name": "a", "desc": "a_desc"}, + {"myid": 2, "name": "b", "desc": "b_desc"}, + {"myid": 3, "name": "c", "desc": "c_desc"}, + {"myid": 4, "name": "d", "desc": "d_desc"}, + ], + ) + yield mytable + + @tough_parameters + def test_standalone_bindparam_escape( + self, paramname, connection, multirow_fixture + ): + tbl1 = multirow_fixture + stmt = select(tbl1.c.myid).where( + tbl1.c.name == bindparam(paramname, value="x") + ) + res = connection.scalar(stmt, {paramname: "c"}) + eq_(res, 3) + + @tough_parameters + def test_standalone_bindparam_escape_expanding( + self, paramname, connection, multirow_fixture + ): + tbl1 = multirow_fixture + stmt = ( + select(tbl1.c.myid) + .where(tbl1.c.name.in_(bindparam(paramname, value=["a", "b"]))) + .order_by(tbl1.c.myid) + ) + + res = connection.scalars(stmt, {paramname: ["d", "a"]}).all() + eq_(res, [1, 4]) diff --git a/lib/sqlalchemy/testing/suite/test_insert.py b/lib/sqlalchemy/testing/suite/test_insert.py index 3c22f50b27e..ebfdc13d915 100644 --- a/lib/sqlalchemy/testing/suite/test_insert.py +++ b/lib/sqlalchemy/testing/suite/test_insert.py @@ -1,3 +1,9 @@ +# testing/suite/test_insert.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. 
import config from .. import engines from .. import fixtures diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 88189c2d95c..80ce8d69dd0 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1,8 +1,13 @@ +# testing/suite/test_reflection.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import operator import re import sqlalchemy as sa -from sqlalchemy import func from .. import config from .. import engines from .. import eq_ @@ -15,6 +20,7 @@ from ..schema import Table from ... import event from ... import ForeignKey +from ... import func from ... import Identity from ... import inspect from ... import Integer @@ -25,6 +31,7 @@ from ...schema import DDL from ...schema import Index from ...sql.elements import quoted_name +from ...sql.schema import BLANK_SCHEMA from ...testing import is_false from ...testing import is_true @@ -32,7 +39,23 @@ metadata, users = None, None -class HasTableTest(fixtures.TablesTest): +class OneConnectionTablesTest(fixtures.TablesTest): + @classmethod + def setup_bind(cls): + # TODO: when temp tables are subject to server reset, + # this will also have to disable that server reset from + # happening + if config.requirements.independent_connections.enabled: + from sqlalchemy import pool + + return engines.testing_engine( + options=dict(poolclass=pool.StaticPool, scope="class"), + ) + else: + return config.db + + +class HasTableTest(OneConnectionTablesTest): __backend__ = True @classmethod @@ -52,6 +75,64 @@ def define_tables(cls, metadata): schema=config.test_schema, ) + if testing.requires.view_reflection: + cls.define_views(metadata) + if testing.requires.has_temp_table.enabled: + cls.define_temp_tables(metadata) + + @classmethod + def define_views(cls, metadata): + query = "CREATE VIEW vv AS SELECT id, data FROM test_table" + + event.listen(metadata, "after_create", DDL(query)) + event.listen(metadata, "before_drop", DDL("DROP VIEW vv")) + + if testing.requires.schemas.enabled: + query = ( + "CREATE VIEW %s.vv AS SELECT id, data FROM %s.test_table_s" + % ( + config.test_schema, + config.test_schema, + ) + ) + event.listen(metadata, "after_create", DDL(query)) + event.listen( + metadata, + "before_drop", + DDL("DROP VIEW %s.vv" % (config.test_schema)), + ) + + @classmethod + def temp_table_name(cls): + return get_temp_table_name( + config, config.db, "user_tmp_%s" % (config.ident,) + ) + + @classmethod + def define_temp_tables(cls, metadata): + kw = temp_table_keyword_args(config, config.db) + table_name = cls.temp_table_name() + user_tmp = Table( + table_name, + metadata, + Column("id", sa.INT, primary_key=True), + Column("name", sa.VARCHAR(50)), + **kw + ) + if ( + testing.requires.view_reflection.enabled + and testing.requires.temporary_views.enabled + ): + event.listen( + user_tmp, + "after_create", + DDL( + "create temporary view user_tmp_v as " + "select * from user_tmp_%s" % config.ident + ), + ) + event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v")) + def test_has_table(self): with config.db.begin() as conn: is_true(config.db.dialect.has_table(conn, "test_table")) @@ -77,6 +158,40 @@ def test_has_table_schema(self): ) ) + @testing.fails_on( + "oracle", + "per #8700 this remains at its previous behavior of not " + "working within 1.4.", + ) + 
@testing.requires.views + def test_has_table_view(self, connection): + insp = inspect(connection) + is_true(insp.has_table("vv")) + + @testing.requires.has_temp_table + def test_has_table_temp_table(self, connection): + insp = inspect(connection) + temp_table_name = self.temp_table_name() + is_true(insp.has_table(temp_table_name)) + + @testing.requires.has_temp_table + @testing.requires.view_reflection + @testing.requires.temporary_views + def test_has_table_temp_view(self, connection): + insp = inspect(connection) + is_true(insp.has_table("user_tmp_v")) + + @testing.fails_on( + "oracle", + "per #8700 this remains at its previous behavior of not " + "working within 1.4", + ) + @testing.requires.views + @testing.requires.schemas + def test_has_table_view_schema(self, connection): + insp = inspect(connection) + is_true(insp.has_table("vv", config.test_schema)) + class HasIndexTest(fixtures.TablesTest): __backend__ = True @@ -282,22 +397,11 @@ def test_get_check_constraints(self, name): assert insp.get_check_constraints(name) -class ComponentReflectionTest(fixtures.TablesTest): +class ComponentReflectionTest(OneConnectionTablesTest): run_inserts = run_deletes = None __backend__ = True - @classmethod - def setup_bind(cls): - if config.requirements.independent_connections.enabled: - from sqlalchemy import pool - - return engines.testing_engine( - options=dict(poolclass=pool.StaticPool, scope="class"), - ) - else: - return config.db - @classmethod def define_tables(cls, metadata): cls.define_reflected_tables(metadata, None) @@ -512,6 +616,20 @@ def test_get_schema_names(self): self.assert_(testing.config.test_schema in insp.get_schema_names()) + @testing.requires.schema_reflection + def test_get_schema_names_w_translate_map(self, connection): + """test #7300""" + + connection = connection.execution_options( + schema_translate_map={ + "foo": "bar", + BLANK_SCHEMA: testing.config.test_schema, + } + ) + insp = inspect(connection) + + self.assert_(testing.config.test_schema in insp.get_schema_names()) + @testing.requires.schema_reflection def test_dialect_initialize(self): engine = engines.testing_engine() @@ -970,6 +1088,8 @@ def test_get_unique_constraints(self, metadata, connection, use_schema): names_that_duplicate_index = set() + eq_(len(uniques), len(reflected)) + for orig, refl in zip(uniques, reflected): # Different dialects handle duplicate index and constraints # differently, so ignore this flag @@ -1137,7 +1257,9 @@ def test_get_check_constraints(self, metadata, connection, use_schema): metadata, Column("a", Integer()), sa.CheckConstraint("a > 1 AND a < 5", name="cc1"), - sa.CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"), + sa.CheckConstraint( + "a = 1 OR (a > 2 AND a < 5)", name="UsesCasing" + ), schema=schema, ) @@ -1164,8 +1286,8 @@ def normalize(sqltext): eq_( reflected, [ + {"name": "UsesCasing", "sqltext": "a = 1 or a > 2 and a < 5"}, {"name": "cc1", "sqltext": "a > 1 and a < 5"}, - {"name": "cc2", "sqltext": "a = 1 or a > 2 and a < 5"}, ], ) @@ -1187,18 +1309,24 @@ def test_reflect_expression_based_indexes(self, metadata, connection): insp = inspect(connection) expected = [ - {"name": "t_idx_2", "column_names": ["x"], "unique": False} + { + "name": "t_idx_2", + "column_names": ["x"], + "unique": False, + "dialect_options": {}, + } ] + if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"] = { + "%s_include" % connection.engine.name: [] + } with expect_warnings( "Skipped unsupported reflection of 
expression-based index t_idx" ): - eq_( - insp.get_indexes("t"), - expected, - ) + eq_(insp.get_indexes("t"), expected) @testing.requires.index_reflects_included_columns def test_reflect_covering_index(self, metadata, connection): @@ -1223,10 +1351,21 @@ def test_reflect_covering_index(self, metadata, connection): "column_names": ["x"], "include_columns": ["y"], "unique": False, + "dialect_options": { + "%s_include" % connection.engine.name: ["y"] + }, } ], ) + t2 = Table("t", MetaData(), autoload_with=connection) + eq_( + list(t2.indexes)[0].dialect_options[connection.engine.name][ + "include" + ], + ["y"], + ) + def _type_round_trip(self, connection, metadata, *types): t = Table( "t", diff --git a/lib/sqlalchemy/testing/suite/test_results.py b/lib/sqlalchemy/testing/suite/test_results.py index c41a55025d6..26c55297500 100644 --- a/lib/sqlalchemy/testing/suite/test_results.py +++ b/lib/sqlalchemy/testing/suite/test_results.py @@ -1,3 +1,9 @@ +# testing/suite/test_results.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import datetime from .. import engines diff --git a/lib/sqlalchemy/testing/suite/test_rowcount.py b/lib/sqlalchemy/testing/suite/test_rowcount.py index 82e831f4966..346829c1d59 100644 --- a/lib/sqlalchemy/testing/suite/test_rowcount.py +++ b/lib/sqlalchemy/testing/suite/test_rowcount.py @@ -1,3 +1,9 @@ +# testing/suite/test_rowcount.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from sqlalchemy import bindparam from sqlalchemy import Column from sqlalchemy import Integer diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index a3475f651b4..5731207ec75 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -1,3 +1,9 @@ +# testing/suite/test_select.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import itertools from .. import AssertsCompiledSQL @@ -30,11 +36,13 @@ from ... import text from ... import true from ... import tuple_ +from ... import TupleType from ... import union from ... import util from ... import values from ...exc import DatabaseError from ...exc import ProgrammingError +from ...util import collections_abc class CollateTest(fixtures.TablesTest): @@ -882,7 +890,7 @@ def test_compile(self): self.assert_compile( stmt, "SELECT some_table.id FROM some_table " - "WHERE some_table.x = [POSTCOMPILE_q]", + "WHERE some_table.x = __[POSTCOMPILE_q]", {}, ) @@ -1131,6 +1139,41 @@ def test_empty_in_plus_notempty_notin(self): ) self._assert_result(stmt, []) + def test_typed_str_in(self): + """test related to #7292. + + as a type is given to the bound param, there is no ambiguity + to the type of element. + + """ + + stmt = text( + "select id FROM some_table WHERE z IN :q ORDER BY id" + ).bindparams(bindparam("q", type_=String, expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={"q": ["z2", "z3", "z4"]}, + ) + + def test_untyped_str_in(self): + """test related to #7292. + + for untyped expression, we look at the types of elements. + Test for Sequence to detect tuple in. 
but not strings or bytes! + as always.... + + """ + + stmt = text( + "select id FROM some_table WHERE z IN :q ORDER BY id" + ).bindparams(bindparam("q", expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={"q": ["z2", "z3", "z4"]}, + ) + @testing.requires.tuple_in def test_bound_in_two_tuple_bindparam(self): table = self.tables.some_table @@ -1197,6 +1240,73 @@ def test_bound_in_heterogeneous_two_tuple_text_bindparam(self): params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]}, ) + @testing.requires.tuple_in + def test_bound_in_heterogeneous_two_tuple_typed_bindparam_non_tuple(self): + class LikeATuple(collections_abc.Sequence): + def __init__(self, *data): + self._data = data + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, idx): + return self._data[idx] + + def __len__(self): + return len(self._data) + + stmt = text( + "select id FROM some_table WHERE (x, z) IN :q ORDER BY id" + ).bindparams( + bindparam( + "q", type_=TupleType(Integer(), String()), expanding=True + ) + ) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={ + "q": [ + LikeATuple(2, "z2"), + LikeATuple(3, "z3"), + LikeATuple(4, "z4"), + ] + }, + ) + + @testing.requires.tuple_in + def test_bound_in_heterogeneous_two_tuple_text_bindparam_non_tuple(self): + # note this becomes ARRAY if we dont use expanding + # explicitly right now + + class LikeATuple(collections_abc.Sequence): + def __init__(self, *data): + self._data = data + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, idx): + return self._data[idx] + + def __len__(self): + return len(self._data) + + stmt = text( + "select id FROM some_table WHERE (x, z) IN :q ORDER BY id" + ).bindparams(bindparam("q", expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={ + "q": [ + LikeATuple(2, "z2"), + LikeATuple(3, "z3"), + LikeATuple(4, "z4"), + ] + }, + ) + def test_empty_set_against_integer_bindparam(self): table = self.tables.some_table stmt = ( @@ -1677,3 +1787,54 @@ def test_is_or_is_not_distinct_from( len(result), expected_row_count_for_is_not, ) + + +class WindowFunctionTest(fixtures.TablesTest): + __requires__ = ("window_functions",) + + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("col1", Integer), + Column("col2", Integer), + ) + + @classmethod + def insert_data(cls, connection): + connection.execute( + cls.tables.some_table.insert(), + [{"id": i, "col1": i, "col2": i * 5} for i in range(1, 50)], + ) + + def test_window(self, connection): + some_table = self.tables.some_table + rows = connection.execute( + select( + func.max(some_table.c.col2).over( + order_by=[some_table.c.col1.desc()] + ) + ).where(some_table.c.col1 < 20) + ).all() + + eq_(rows, [(95,) for i in range(19)]) + + def test_window_rows_between(self, connection): + some_table = self.tables.some_table + + # note the rows are part of the cache key right now, not handled + # as binds. 
this is issue #11515 + rows = connection.execute( + select( + func.max(some_table.c.col2).over( + order_by=[some_table.c.col1], + rows=(-5, 0), + ) + ) + ).all() + + eq_(rows, [(i,) for i in range(5, 250, 5)]) diff --git a/lib/sqlalchemy/testing/suite/test_sequence.py b/lib/sqlalchemy/testing/suite/test_sequence.py index d6747d25386..8d22d425b61 100644 --- a/lib/sqlalchemy/testing/suite/test_sequence.py +++ b/lib/sqlalchemy/testing/suite/test_sequence.py @@ -1,3 +1,9 @@ +# testing/suite/test_sequence.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import config from .. import fixtures from ..assertions import eq_ diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index 22b85f398d9..046ada282cf 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -1,3 +1,9 @@ +# testing/suite/test_types.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php # coding: utf-8 import datetime @@ -41,6 +47,9 @@ from ... import util from ...orm import declarative_base from ...orm import Session +from ...sql.sqltypes import LargeBinary +from ...sql.sqltypes import PickleType +from ...util import compat from ...util import u @@ -195,6 +204,42 @@ def test_null_strings_text(self, connection): self._test_null_strings(connection) +class BinaryTest(_LiteralRoundTripFixture, fixtures.TablesTest): + __requires__ = ("binary_literals",) + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + "binary_table", + metadata, + Column( + "id", Integer, primary_key=True, test_needs_autoincrement=True + ), + Column("binary_data", LargeBinary), + Column("pickle_data", PickleType), + ) + + def test_binary_roundtrip(self, connection): + binary_table = self.tables.binary_table + + connection.execute( + binary_table.insert(), {"id": 1, "binary_data": b"this is binary"} + ) + row = connection.execute(select(binary_table.c.binary_data)).first() + eq_(row, (b"this is binary",)) + + def test_pickle_roundtrip(self, connection): + binary_table = self.tables.binary_table + + connection.execute( + binary_table.insert(), + {"id": 1, "pickle_data": {"foo": [1, 2, 3], "bar": "bat"}}, + ) + row = connection.execute(select(binary_table.c.pickle_data)).first() + eq_(row, ({"foo": [1, 2, 3], "bar": "bat"},)) + + class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): __requires__ = ("text_type",) __backend__ = True @@ -308,6 +353,11 @@ class Decorated(TypeDecorator): Column("decorated_date_data", Decorated), ) + @testing.requires.datetime_implicit_bound + def test_select_direct(self, connection): + result = connection.scalar(select(literal(self.data))) + eq_(result, self.data) + def test_round_trip(self, connection): date_table = self.tables.date_table @@ -382,6 +432,15 @@ class DateTimeTest(_DateFixture, fixtures.TablesTest): data = datetime.datetime(2012, 10, 15, 12, 57, 18) +class DateTimeTZTest(_DateFixture, fixtures.TablesTest): + __requires__ = ("datetime_timezone",) + __backend__ = True + datatype = DateTime(timezone=True) + data = datetime.datetime( + 2012, 10, 15, 12, 57, 18, tzinfo=compat.timezone.utc + ) + + class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = 
("datetime_microseconds",) __backend__ = True @@ -395,6 +454,11 @@ class TimestampMicrosecondsTest(_DateFixture, fixtures.TablesTest): datatype = TIMESTAMP data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) + @testing.requires.timestamp_microseconds_implicit_bound + def test_select_direct(self, connection): + result = connection.scalar(select(literal(self.data))) + eq_(result, self.data) + class TimeTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time",) @@ -403,6 +467,13 @@ class TimeTest(_DateFixture, fixtures.TablesTest): data = datetime.time(12, 57, 18) +class TimeTZTest(_DateFixture, fixtures.TablesTest): + __requires__ = ("time_timezone",) + __backend__ = True + datatype = Time(timezone=True) + data = datetime.time(12, 57, 18, tzinfo=compat.timezone.utc) + + class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time_microseconds",) __backend__ = True @@ -590,6 +661,16 @@ def test_numeric_as_float(self, do_numeric_test): [15.7563], ) + @testing.requires.infinity_floats + def test_infinity_floats(self, do_numeric_test): + """test for #977, #7283""" + + do_numeric_test( + Float(None), + [float("inf")], + [float("inf")], + ) + @testing.requires.fetch_null_from_numeric def test_numeric_null_as_decimal(self, do_numeric_test): do_numeric_test(Numeric(precision=8, scale=4), [None], [None]) @@ -617,6 +698,7 @@ def test_float_as_float(self, do_numeric_test): filter_=lambda n: n is not None and round(n, 5) or None, ) + @testing.requires.literal_float_coercion def test_float_coerce_round_trip(self, connection): expr = 15.7563 @@ -866,10 +948,7 @@ def _index_fixtures(include_comparison): ("integer", None), ("float", 28.5), ("float", None), - ( - "float", - 1234567.89, - ), + ("float", 1234567.89, testing.requires.literal_float_coercion), ("numeric", 1234567.89), # this one "works" because the float value you see here is # lost immediately to floating point stuff @@ -1408,12 +1487,14 @@ def test_string_cast_crit_against_string_basic(self): __all__ = ( + "BinaryTest", "UnicodeVarcharTest", "UnicodeTextTest", "JSONTest", "JSONLegacyStringCastIndexTest", "DateTest", "DateTimeTest", + "DateTimeTZTest", "TextTest", "NumericTest", "IntegerTest", @@ -1423,6 +1504,7 @@ def test_string_cast_crit_against_string_basic(self): "TimeMicrosecondsTest", "TimestampMicrosecondsTest", "TimeTest", + "TimeTZTest", "DateTimeMicrosecondsTest", "DateHistoricTest", "StringTest", diff --git a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py index a4ae3348ed6..abe19d435bc 100644 --- a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py @@ -1,3 +1,9 @@ +# testing/suite/test_unicode_ddl.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php # coding: utf-8 """verrrrry basic unicode column name testing""" diff --git a/lib/sqlalchemy/testing/suite/test_update_delete.py b/lib/sqlalchemy/testing/suite/test_update_delete.py index f5ee2e02815..90ba651a164 100644 --- a/lib/sqlalchemy/testing/suite/test_update_delete.py +++ b/lib/sqlalchemy/testing/suite/test_update_delete.py @@ -1,3 +1,9 @@ +# testing/suite/test_update_delete.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. 
import fixtures from ..assertions import eq_ from ..schema import Column @@ -8,6 +14,7 @@ class SimpleUpdateDeleteTest(fixtures.TablesTest): run_deletes = "each" + __requires__ = ("sane_rowcount",) __backend__ = True @classmethod diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index a4d55a8f2cb..9bf71eb34a7 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -1,14 +1,17 @@ # testing/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +from collections import deque import decimal import gc +from itertools import chain import random import sys +from sys import getsizeof import types from . import config @@ -456,3 +459,63 @@ def decorate(fn, *arg, **kw): event_cls._clear() return decorate + + +def total_size(o): + """Returns the approximate memory footprint an object and all of its + contents. + + source: https://code.activestate.com/recipes/577504/ + + + """ + + def dict_handler(d): + return chain.from_iterable(d.items()) + + all_handlers = { + tuple: iter, + list: iter, + deque: iter, + dict: dict_handler, + set: iter, + frozenset: iter, + } + seen = set() # track which object id's have already been seen + default_size = getsizeof(0) # estimate sizeof object without __sizeof__ + + def sizeof(o): + if id(o) in seen: # do not double count the same object + return 0 + seen.add(id(o)) + s = getsizeof(o, default_size) + + for typ, handler in all_handlers.items(): + if isinstance(o, typ): + s += sum(map(sizeof, handler(o))) + break + return s + + return sizeof(o) + + +def count_cache_key_tuples(tup): + """given a cache key tuple, counts how many instances of actual + tuples are found. + + used to alert large jumps in cache key complexity. + + """ + stack = [tup] + + sentinel = object() + num_elements = 0 + + while stack: + elem = stack.pop(0) + if elem is sentinel: + num_elements += 1 + elif isinstance(elem, tuple): + if elem: + stack = list(elem) + [sentinel] + stack + return num_elements diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index b5842ad6942..5537bd4f9fc 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -1,5 +1,5 @@ # testing/warnings.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,8 +14,13 @@ from ..util.langhelpers import _warnings_warn -class SATestSuiteWarning(sa_exc.SAWarning): - """warning for a condition detected during tests that is non-fatal""" +class SATestSuiteWarning(Warning): + """warning for a condition detected during tests that is non-fatal + + Currently outside of SAWarning so that we can work around tools like + Alembic doing the wrong thing with warnings. 
+ + """ def warn_test_suite(message): @@ -25,28 +30,27 @@ def warn_test_suite(message): def setup_filters(): """Set global warning behavior for the test suite.""" + # TODO: at this point we can use the normal pytest warnings plugin, + # if we decide the test suite can be linked to pytest only + + origin = r"^(?:test|sqlalchemy)\..*" + warnings.filterwarnings( "ignore", category=sa_exc.SAPendingDeprecationWarning ) warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) warnings.filterwarnings("error", category=sa_exc.SAWarning) + warnings.filterwarnings("always", category=SATestSuiteWarning) - # some selected deprecations... - warnings.filterwarnings("error", category=DeprecationWarning) warnings.filterwarnings( - "ignore", category=DeprecationWarning, message=r".*StopIteration" + "error", category=DeprecationWarning, module=origin ) warnings.filterwarnings( "ignore", category=DeprecationWarning, - message=r".*inspect.get.*argspec", - ) - - warnings.filterwarnings( - "ignore", - category=DeprecationWarning, - message="The loop argument is deprecated", + message=r".*The default (?:date)?(?:time)?(?:stamp)? " + r"(adapter|converter) is deprecated", ) # ignore things that are deprecated *as of* 2.0 :) @@ -67,7 +71,7 @@ def setup_filters(): pass else: warnings.filterwarnings( - "once", category=pytest.PytestDeprecationWarning + "once", category=pytest.PytestDeprecationWarning, module=origin ) diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index ecc351fc948..6182a01c141 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -1,5 +1,5 @@ # types.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,6 +13,7 @@ "TypeEngine", "TypeDecorator", "UserDefinedType", + "ExternalType", "INT", "CHAR", "VARCHAR", @@ -36,6 +37,7 @@ "INTEGER", "DATE", "TIME", + "TupleType", "String", "Integer", "SmallInteger", @@ -103,11 +105,13 @@ from .sql.sqltypes import TIME from .sql.sqltypes import Time from .sql.sqltypes import TIMESTAMP +from .sql.sqltypes import TupleType from .sql.sqltypes import Unicode from .sql.sqltypes import UnicodeText from .sql.sqltypes import VARBINARY from .sql.sqltypes import VARCHAR from .sql.type_api import adapt_type +from .sql.type_api import ExternalType from .sql.type_api import to_instance from .sql.type_api import TypeDecorator from .sql.type_api import TypeEngine diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index bdd69431e0f..b77f70f76a8 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -1,5 +1,5 @@ # util/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -53,6 +53,7 @@ from .compat import b64decode from .compat import b64encode from .compat import binary_type +from .compat import binary_types from .compat import byte_buffer from .compat import callable from .compat import cmp @@ -63,6 +64,7 @@ from .compat import has_refcount_gc from .compat import inspect_getfullargspec from .compat import int_types +from .compat import is64bit from .compat import iterbytes from .compat import itertools_filter from .compat import itertools_filterfalse @@ -76,6 +78,11 @@ from .compat import pickle from .compat import print_ from .compat import py2k +from .compat import py310 
+from .compat import py311 +from .compat import py312 +from .compat import py313 +from .compat import py314 from .compat import py37 from .compat import py38 from .compat import py39 diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index 535ae47802f..d5ac2a64252 100644 --- a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -1,5 +1,5 @@ # util/_collections.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_compat_py3k.py b/lib/sqlalchemy/util/_compat_py3k.py index cd9f3ebc34f..6c4e37c6a6c 100644 --- a/lib/sqlalchemy/util/_compat_py3k.py +++ b/lib/sqlalchemy/util/_compat_py3k.py @@ -1,5 +1,5 @@ # util/_compat_py3k.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index 55fe87c6a78..f20dcb05b51 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -1,5 +1,5 @@ # util/_concurrency_py3k.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,25 +10,22 @@ from typing import Any from typing import Callable from typing import Coroutine +from typing import TypeVar +from typing import Union import greenlet from . import compat from .langhelpers import memoized_property from .. import exc +from ..util import py311 -if compat.py37: - try: - from contextvars import copy_context as _copy_context +_T = TypeVar("_T") - # If greenlet.gr_context is present in current version of greenlet, - # it will be set with a copy of the current context on creation. - # Refs: https://github.com/python-greenlet/greenlet/pull/198 - getattr(greenlet.greenlet, "gr_context") - except (ImportError, AttributeError): - _copy_context = None -else: - _copy_context = None +# If greenlet.gr_context is present in current version of greenlet, +# it will be set with the current context on creation. +# Refs: https://github.com/python-greenlet/greenlet/pull/198 +_has_gr_context = hasattr(greenlet.getcurrent(), "gr_context") def is_exit_exception(e): @@ -45,59 +42,61 @@ def is_exit_exception(e): class _AsyncIoGreenlet(greenlet.greenlet): + + __sqlalchemy_greenlet_provider__ = True + def __init__(self, fn, driver): greenlet.greenlet.__init__(self, fn, driver) - self.driver = driver - if _copy_context is not None: - self.gr_context = _copy_context() + if _has_gr_context: + self.gr_context = driver.gr_context def await_only(awaitable: Coroutine) -> Any: """Awaits an async function in a sync method. The sync method must be inside a :func:`greenlet_spawn` context. - :func:`await_` calls cannot be nested. + :func:`await_only` calls cannot be nested. :param awaitable: The coroutine to call. """ # this is called in the context greenlet while running fn current = greenlet.getcurrent() - if not isinstance(current, _AsyncIoGreenlet): + if not getattr(current, "__sqlalchemy_greenlet_provider__", False): raise exc.MissingGreenlet( - "greenlet_spawn has not been called; can't call await_() here. " - "Was IO attempted in an unexpected place?" 
+ "greenlet_spawn has not been called; can't call await_only() " + "here. Was IO attempted in an unexpected place?" ) # returns the control to the driver greenlet passing it # a coroutine to run. Once the awaitable is done, the driver greenlet # switches back to this greenlet with the result of awaitable that is # then returned to the caller (or raised as error) - return current.driver.switch(awaitable) + return current.parent.switch(awaitable) def await_fallback(awaitable: Coroutine) -> Any: """Awaits an async function in a sync method. The sync method must be inside a :func:`greenlet_spawn` context. - :func:`await_` calls cannot be nested. + :func:`await_fallback` calls cannot be nested. :param awaitable: The coroutine to call. """ # this is called in the context greenlet while running fn current = greenlet.getcurrent() - if not isinstance(current, _AsyncIoGreenlet): + if not getattr(current, "__sqlalchemy_greenlet_provider__", False): loop = get_event_loop() if loop.is_running(): raise exc.MissingGreenlet( "greenlet_spawn has not been called and asyncio event " - "loop is already running; can't call await_() here. " + "loop is already running; can't call await_fallback() here. " "Was IO attempted in an unexpected place?" ) return loop.run_until_complete(awaitable) - return current.driver.switch(awaitable) + return current.parent.switch(awaitable) async def greenlet_spawn( @@ -105,7 +104,7 @@ async def greenlet_spawn( ) -> Any: """Runs a sync function ``fn`` in a new greenlet. - The sync function can then use :func:`await_` to wait for async + The sync function can then use :func:`await_only` to wait for async functions. :param fn: The sync callable to call. @@ -115,28 +114,25 @@ async def greenlet_spawn( context = _AsyncIoGreenlet(fn, greenlet.getcurrent()) # runs the function synchronously in gl greenlet. If the execution - # is interrupted by await_, context is not dead and result is a + # is interrupted by await_only, context is not dead and result is a # coroutine to wait. If the context is dead the function has # returned, and its result can be returned. switch_occurred = False - try: - result = context.switch(*args, **kwargs) - while not context.dead: - switch_occurred = True - try: - # wait for a coroutine from await_ and then return its - # result back to it. - value = await result - except BaseException: - # this allows an exception to be raised within - # the moderated greenlet so that it can continue - # its expected flow. - result = context.throw(*sys.exc_info()) - else: - result = context.switch(value) - finally: - # clean up to avoid cycle resolution by gc - del context.driver + result = context.switch(*args, **kwargs) + while not context.dead: + switch_occurred = True + try: + # wait for a coroutine from await_only and then return its + # result back to it. + value = await result + except BaseException: + # this allows an exception to be raised within + # the moderated greenlet so that it can continue + # its expected flow. 
+ result = context.throw(*sys.exc_info()) + else: + result = context.switch(value) + if _require_await and not switch_occurred: raise exc.AwaitRequired( "The current operation required an async execution but none was " @@ -163,30 +159,6 @@ def __exit__(self, *arg, **kw): self.mutex.release() -def _util_async_run_coroutine_function(fn, *args, **kwargs): - """for test suite/ util only""" - - loop = get_event_loop() - if loop.is_running(): - raise Exception( - "for async run coroutine we expect that no greenlet or event " - "loop is running when we start out" - ) - return loop.run_until_complete(fn(*args, **kwargs)) - - -def _util_async_run(fn, *args, **kwargs): - """for test suite/ util only""" - - loop = get_event_loop() - if not loop.is_running(): - return loop.run_until_complete(greenlet_spawn(fn, *args, **kwargs)) - else: - # allow for a wrapped test function to call another - assert isinstance(greenlet.getcurrent(), _AsyncIoGreenlet) - return fn(*args, **kwargs) - - def get_event_loop(): """vendor asyncio.get_event_loop() for python 3.7 and above. @@ -200,3 +172,50 @@ def get_event_loop(): return asyncio.get_event_loop_policy().get_event_loop() else: return asyncio.get_event_loop() + + +if py311: + _Runner = asyncio.Runner +else: + + class _Runner: # type: ignore[no-redef] + """Runner implementation for test only""" + + _loop: Union[None, asyncio.AbstractEventLoop, bool] + + def __init__(self) -> None: + self._loop = None + + def __enter__(self): + self._lazy_init() + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def close(self) -> None: + if self._loop: + try: + self._loop.run_until_complete( + self._loop.shutdown_asyncgens() + ) + finally: + self._loop.close() + self._loop = False + + def get_loop(self) -> asyncio.AbstractEventLoop: + """Return embedded event loop.""" + self._lazy_init() + assert self._loop + return self._loop + + def run(self, coro: Coroutine[Any, Any, _T]) -> _T: + self._lazy_init() + assert self._loop + return self._loop.run_until_complete(coro) + + def _lazy_init(self) -> None: + if self._loop is False: + raise RuntimeError("Runner is closed") + if self._loop is None: + self._loop = asyncio.new_event_loop() diff --git a/lib/sqlalchemy/util/_preloaded.py b/lib/sqlalchemy/util/_preloaded.py index c8da9230a87..22f1379242a 100644 --- a/lib/sqlalchemy/util/_preloaded.py +++ b/lib/sqlalchemy/util/_preloaded.py @@ -1,5 +1,5 @@ # util/_preloaded.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 5914e8681aa..fe7e7d63526 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -1,5 +1,5 @@ # util/compat.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,6 +14,11 @@ import platform import sys +py314 = sys.version_info >= (3, 14) +py313 = sys.version_info >= (3, 13) +py312 = sys.version_info >= (3, 12) +py311 = sys.version_info >= (3, 11) +py310 = sys.version_info >= (3, 10) py39 = sys.version_info >= (3, 9) py38 = sys.version_info >= (3, 8) py37 = sys.version_info >= (3, 7) @@ -26,6 +31,7 @@ win32 = sys.platform.startswith("win") osx = sys.platform.startswith("darwin") arm = "aarch" in platform.machine().lower() 
+is64bit = sys.maxsize > 2 ** 32 has_refcount_gc = bool(cpython) diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index 37ecfdbc338..82580852168 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -1,5 +1,5 @@ # util/concurrency.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -8,12 +8,13 @@ from . import compat have_greenlet = False +greenlet_error = None if compat.py3k: try: - import greenlet # noqa F401 - except ImportError: - pass + import greenlet # noqa: F401 + except ImportError as e: + greenlet_error = str(e) else: have_greenlet = True from ._concurrency_py3k import await_only @@ -21,18 +22,42 @@ from ._concurrency_py3k import greenlet_spawn from ._concurrency_py3k import is_exit_exception from ._concurrency_py3k import AsyncAdaptedLock - from ._concurrency_py3k import _util_async_run # noqa F401 - from ._concurrency_py3k import ( - _util_async_run_coroutine_function, - ) # noqa F401, E501 - from ._concurrency_py3k import asyncio # noqa F401 + from ._concurrency_py3k import _Runner + from ._concurrency_py3k import asyncio # noqa: F401 + + # does not need greenlet, just Python 3 + from ._compat_py3k import asynccontextmanager # noqa: F401 + + +class _AsyncUtil: + """Asyncio util for test suite/ util only""" + + def __init__(self): + if have_greenlet: + self.runner = _Runner() + + def run(self, fn, *args, **kwargs): + """Run coroutine on the loop""" + return self.runner.run(fn(*args, **kwargs)) + + def run_in_greenlet(self, fn, *args, **kwargs): + """Run sync function in greenlet. Support nested calls""" + if have_greenlet: + if self.runner.get_loop().is_running(): + return fn(*args, **kwargs) + else: + return self.runner.run(greenlet_spawn(fn, *args, **kwargs)) + else: + return fn(*args, **kwargs) + + def close(self): + if have_greenlet: + self.runner.close() - # does not need greennlet, just Python 3 - from ._compat_py3k import asynccontextmanager # noqa F401 if not have_greenlet: - asyncio = None # noqa F811 + asyncio = None # noqa: F811 def _not_implemented(): # this conditional is to prevent pylance from considering @@ -45,25 +70,28 @@ def _not_implemented(): else: raise ValueError( "the greenlet library is required to use this function." 
+ " %s" % greenlet_error + if greenlet_error + else "" ) - def is_exit_exception(e): # noqa F811 + def is_exit_exception(e): # noqa: F811 return not isinstance(e, Exception) - def await_only(thing): # noqa F811 + def await_only(thing): # noqa: F811 _not_implemented() - def await_fallback(thing): # noqa F81 + def await_fallback(thing): # noqa: F811 return thing - def greenlet_spawn(fn, *args, **kw): # noqa F81 + def greenlet_spawn(fn, *args, **kw): # noqa: F811 _not_implemented() - def AsyncAdaptedLock(*args, **kw): # noqa F81 + def AsyncAdaptedLock(*args, **kw): # noqa: F811 _not_implemented() - def _util_async_run(fn, *arg, **kw): # noqa F81 + def _util_async_run(fn, *arg, **kw): # noqa: F811 return fn(*arg, **kw) - def _util_async_run_coroutine_function(fn, *arg, **kw): # noqa F81 + def _util_async_run_coroutine_function(fn, *arg, **kw): # noqa: F811 _not_implemented() diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index 4d3e04fde85..bf537ba9be0 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -1,5 +1,5 @@ # util/deprecations.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,6 +10,7 @@ import os import re +import sys from . import compat from .langhelpers import _hash_limit_string @@ -22,15 +23,29 @@ SQLALCHEMY_WARN_20 = False +SILENCE_UBER_WARNING = False + if os.getenv("SQLALCHEMY_WARN_20", "false").lower() in ("true", "yes", "1"): SQLALCHEMY_WARN_20 = True +if compat.py2k: + SILENCE_UBER_WARNING = True +elif os.getenv("SQLALCHEMY_SILENCE_UBER_WARNING", "false").lower() in ( + "true", + "yes", + "1", +): + SILENCE_UBER_WARNING = True + def _warn_with_version(msg, version, type_, stacklevel, code=None): if ( issubclass(type_, exc.Base20DeprecationWarning) and not SQLALCHEMY_WARN_20 ): + if not SILENCE_UBER_WARNING: + _emit_uber_warning(type_, stacklevel) + return warn = type_(msg, code=code) @@ -39,6 +54,57 @@ def _warn_with_version(msg, version, type_, stacklevel, code=None): _warnings_warn(warn, stacklevel=stacklevel + 1) +def _emit_uber_warning(type_, stacklevel): + global SILENCE_UBER_WARNING + + if SILENCE_UBER_WARNING: + return + + SILENCE_UBER_WARNING = True + + file_ = sys.stderr + + # source: https://github.com/pytest-dev/pytest/blob/326ae0cd88f5e954c8effc2b0c986832e9caff11/src/_pytest/_io/terminalwriter.py#L35-L37 # noqa: E501 + use_color = ( + hasattr(file_, "isatty") + and file_.isatty() + and os.environ.get("TERM") != "dumb" + ) + + msg = ( + "%(red)sDeprecated API features detected! " + "These feature(s) are not compatible with SQLAlchemy 2.0. " + "%(green)sTo prevent incompatible upgrades prior to updating " + "applications, ensure requirements files are " + 'pinned to "sqlalchemy<2.0". ' + "%(cyan)sSet environment variable SQLALCHEMY_WARN_20=1 to show all " + "deprecation warnings. 
Set environment variable " "SQLALCHEMY_SILENCE_UBER_WARNING=1 to silence this message.%(nocolor)s" + ) + + if use_color: + msg = msg % { + "red": "\x1b[31m", + "cyan": "\x1b[36m", + "green": "\x1b[32m", + "magenta": "\x1b[35m", + "nocolor": "\x1b[0m", + } + else: + msg = msg % { + "red": "", + "cyan": "", + "green": "", + "magenta": "", + "nocolor": "", + } + + # note this is a exc.Base20DeprecationWarning subclass, which + # will implicitly add the link to the SQLAlchemy 2.0 page in the message + warn = type_(msg) + _warnings_warn(warn, stacklevel=stacklevel + 1) + + def warn_deprecated(msg, version, stacklevel=3, code=None): _warn_with_version( msg, version, exc.SADeprecationWarning, stacklevel, code=code @@ -356,6 +422,7 @@ def _decorate_cls_with_warning( clsdict = dict(cls.__dict__) clsdict["__doc__"] = doc clsdict.pop("__dict__", None) + clsdict.pop("__weakref__", None) cls = type(cls.__name__, cls.__bases__, clsdict) if constructor is not None: constructor_fn = clsdict[constructor] diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index 89ca4c1ebf6..5d6e89257c4 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1,5 +1,5 @@ # util/langhelpers.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -668,7 +668,7 @@ def instrument(name, clslevel=False): else: code = ( "def %(name)s(%(args)s):\n" - " return %(self_arg)s._proxied.%(name)s(%(apply_kw_proxied)s)" # noqa E501 + " return %(self_arg)s._proxied.%(name)s(%(apply_kw_proxied)s)" # noqa: E501 % metadata ) @@ -1167,7 +1167,11 @@ def _set_memoized_attribute(self, key, value): self._memoized_keys |= {key} class memoized_attribute(object): - """A read-only @property that is only evaluated once.""" + """A read-only @property that is only evaluated once. + + :meta private: + + """ def __init__(self, fget, doc=None): self.fget = fget @@ -1932,9 +1936,9 @@ def repr_tuple_names(names): def has_compiled_ext(): try: - from sqlalchemy import cimmutabledict # noqa F401 - from sqlalchemy import cprocessors # noqa F401 - from sqlalchemy import cresultproxy # noqa F401 + from sqlalchemy import cimmutabledict # noqa: F401 + from sqlalchemy import cprocessors # noqa: F401 + from sqlalchemy import cresultproxy # noqa: F401 return True except ImportError: diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py index 12b37220237..70dc387a990 100644 --- a/lib/sqlalchemy/util/queue.py +++ b/lib/sqlalchemy/util/queue.py @@ -1,5 +1,5 @@ # util/queue.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/tool_support.py b/lib/sqlalchemy/util/tool_support.py new file mode 100644 index 00000000000..407c2d45075 --- /dev/null +++ b/lib/sqlalchemy/util/tool_support.py @@ -0,0 +1,201 @@ +# util/tool_support.py +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls +"""support routines for the helpers in tools/. + +These aren't imported by the enclosing util package as they are not +needed for normal library use. 
+ +""" +from __future__ import annotations + +from argparse import ArgumentParser +from argparse import Namespace +import contextlib +import difflib +import os +from pathlib import Path +import shlex +import shutil +import subprocess +import sys +from typing import Any +from typing import Dict +from typing import Iterator +from typing import Optional +from typing import Union + +from . import compat + + +class code_writer_cmd: + parser: ArgumentParser + args: Namespace + suppress_output: bool + diffs_detected: bool + source_root: Path + pyproject_toml_path: Path + + def __init__(self, tool_script: str): + self.source_root = Path(tool_script).parent.parent + self.pyproject_toml_path = self.source_root / Path("pyproject.toml") + assert self.pyproject_toml_path.exists() + + self.parser = ArgumentParser() + self.parser.add_argument( + "--stdout", + action="store_true", + help="Write to stdout instead of saving to file", + ) + self.parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. " + "Return code 1 means some files would be reformatted", + action="store_true", + ) + + def run_zimports(self, tempfile: str) -> None: + self._run_console_script( + str(tempfile), + { + "entrypoint": "zimports", + "options": f"--toml-config {self.pyproject_toml_path}", + }, + ) + + def run_black(self, tempfile: str) -> None: + self._run_console_script( + str(tempfile), + { + "entrypoint": "black", + "options": f"--config {self.pyproject_toml_path}", + }, + ) + + def _run_console_script(self, path: str, options: Dict[str, Any]) -> None: + """Run a Python console application from within the process. + + Used for black, zimports + + """ + + is_posix = os.name == "posix" + + entrypoint_name = options["entrypoint"] + + for entry in compat.importlib_metadata_get("console_scripts"): + if entry.name == entrypoint_name: + impl = entry + break + else: + raise Exception( + f"Could not find entrypoint console_scripts.{entrypoint_name}" + ) + cmdline_options_str = options.get("options", "") + cmdline_options_list = shlex.split( + cmdline_options_str, posix=is_posix + ) + [path] + + kw: Dict[str, Any] = {} + if self.suppress_output: + kw["stdout"] = kw["stderr"] = subprocess.DEVNULL + + subprocess.run( + [ + sys.executable, + "-c", + "import %s; %s.%s()" % (impl.module, impl.module, impl.attr), + ] + + cmdline_options_list, + cwd=str(self.source_root), + **kw, + ) + + def write_status(self, *text: str) -> None: + if not self.suppress_output: + sys.stderr.write(" ".join(text)) + + def write_output_file_from_text( + self, text: str, destination_path: Union[str, Path] + ) -> None: + if self.args.check: + self._run_diff(destination_path, source=text) + elif self.args.stdout: + print(text) + else: + self.write_status(f"Writing {destination_path}...") + Path(destination_path).write_text( + text, encoding="utf-8", newline="\n" + ) + self.write_status("done\n") + + def write_output_file_from_tempfile( + self, tempfile: str, destination_path: str + ) -> None: + if self.args.check: + self._run_diff(destination_path, source_file=tempfile) + os.unlink(tempfile) + elif self.args.stdout: + with open(tempfile) as tf: + print(tf.read()) + os.unlink(tempfile) + else: + self.write_status(f"Writing {destination_path}...") + shutil.move(tempfile, destination_path) + self.write_status("done\n") + + def _run_diff( + self, + destination_path: Union[str, Path], + *, + source: Optional[str] = None, + source_file: Optional[str] = None, + ) -> None: + if 
source_file: + with open(source_file, encoding="utf-8") as tf: + source_lines = list(tf) + elif source is not None: + source_lines = source.splitlines(keepends=True) + else: + assert False, "source or source_file is required" + + with open(destination_path, encoding="utf-8") as dp: + d = difflib.unified_diff( + list(dp), + source_lines, + fromfile=Path(destination_path).as_posix(), + tofile="", + n=3, + lineterm="\n", + ) + d_as_list = list(d) + if d_as_list: + self.diffs_detected = True + print("".join(d_as_list)) + + @contextlib.contextmanager + def add_arguments(self) -> Iterator[ArgumentParser]: + yield self.parser + + @contextlib.contextmanager + def run_program(self) -> Iterator[None]: + self.args = self.parser.parse_args() + if self.args.check: + self.diffs_detected = False + self.suppress_output = True + elif self.args.stdout: + self.suppress_output = True + else: + self.suppress_output = False + yield + + if self.args.check and self.diffs_detected: + sys.exit(1) + else: + sys.exit(0) diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py index ae4b37426bb..27ee27bfc3a 100644 --- a/lib/sqlalchemy/util/topological.py +++ b/lib/sqlalchemy/util/topological.py @@ -1,5 +1,5 @@ # util/topological.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/pyproject.toml b/pyproject.toml index 0f72578923c..6a93e957332 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,22 @@ +[build-system] +build-backend = "setuptools.build_meta" +requires = [ + "setuptools>=44", +] + [tool.black] line-length = 79 target-version = ['py27', 'py36'] + +[tool.zimports] +black-line-length = 79 +keep-unused-type-checking = true + +# disable isort, for IDEs that just default isort to be turned on, e.g. vscode. +# we use flake8-import-order for import sorting, using zimports to actually +# reformat code. isort is nicer in many ways but doesn't have our +# "import *" fixer and also is not 100% compatible with flake8-import-order. 
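A minimal usage sketch for the new ``code_writer_cmd`` helper added above in lib/sqlalchemy/util/tool_support.py, assuming a hypothetical tools/write_demo_constants.py script placed one directory below the repository root; the ``--name`` option, the generated text, and the destination path are illustrative only and not part of this change:

    # hypothetical tools/write_demo_constants.py
    from sqlalchemy.util.tool_support import code_writer_cmd

    # the script path tells code_writer_cmd where the repo root and
    # pyproject.toml live (tools/<script>.py -> parent.parent)
    cmd = code_writer_cmd(__file__)

    with cmd.add_arguments() as parser:
        # extra options are combined with the built-in --stdout / --check flags
        parser.add_argument("--name", default="demo", help="illustrative option")

    with cmd.run_program():
        text = "# generated file, do not edit\nNAME = %r\n" % cmd.args.name
        # honors --stdout (print instead of writing) and --check (diff against
        # the existing destination file, exit 1 if it would change)
        cmd.write_output_file_from_text(text, "lib/sqlalchemy/_demo_constants.py")

Such a script can then be invoked directly, or with ``--check`` in CI to fail when the generated file is stale, matching the black/zimports-style workflow the helper wraps.
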
+[tool.isort] +skip_glob=['*'] + + diff --git a/regen_callcounts.tox.ini b/regen_callcounts.tox.ini index 80d88aa4544..0379b1cfe8d 100644 --- a/regen_callcounts.tox.ini +++ b/regen_callcounts.tox.ini @@ -21,7 +21,15 @@ commands= db_{oracle}: {env:BASECOMMAND} {env:ORACLE:} {posargs} db_{mssql}: {env:BASECOMMAND} {env:MSSQL:} {posargs} -passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_MYSQL TOX_ORACLE TOX_MSSQL TOX_SQLITE TOX_WORKERS +passenv= + ORACLE_HOME + NLS_LANG + TOX_POSTGRESQL + TOX_MYSQL + TOX_ORACLE + TOX_MSSQL + TOX_SQLITE + TOX_WORKERS # -E : ignore PYTHON* environment variables (such as PYTHONPATH) # -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE diff --git a/setup.cfg b/setup.cfg index f432561b189..e0752d1feb7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,7 +11,7 @@ url = https://www.sqlalchemy.org author = Mike Bayer author_email = mike_mp@zzzcomputing.com license = MIT -license_file = LICENSE +license_files = LICENSE classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers @@ -26,6 +26,10 @@ classifiers = Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 + Programming Language :: Python :: 3.14 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Database :: Front-Ends @@ -50,34 +54,37 @@ mypy = mypy >= 0.910;python_version>="3" sqlalchemy2-stubs mssql = pyodbc -mssql_pymssql = pymssql -mssql_pyodbc = pyodbc + +mssql-pymssql = pymssql +mssql-pyodbc = pyodbc + mysql = mysqlclient>=1.4.0,<2;python_version<"3" mysqlclient>=1.4.0;python_version>="3" -mysql_connector = +mysql-connector = mysql-connector-python -mariadb_connector = - mariadb>=1.0.1;python_version>="3" +mariadb-connector = + mariadb>=1.0.1,!=1.1.2;python_version>="3" oracle = cx_oracle>=7,<8;python_version<"3" cx_oracle>=7;python_version>="3" postgresql = psycopg2>=2.7 -postgresql_pg8000 = pg8000>=1.16.6 -postgresql_asyncpg = +postgresql-pg8000 = + pg8000>=1.16.6,!=1.29.0;python_version>="3" + +postgresql-asyncpg = %(asyncio)s asyncpg;python_version>="3" -postgresql_psycopg2binary = psycopg2-binary -postgresql_psycopg2cffi = psycopg2cffi + pymysql = pymysql;python_version>="3" pymysql<1;python_version<"3" aiomysql = %(asyncio)s - aiomysql;python_version>="3" + aiomysql>=0.2.0;python_version>="3" asyncmy = %(asyncio)s - asyncmy>=0.2.3;python_version>="3" + asyncmy>=0.2.3,!=0.2.4;python_version>="3" aiosqlite = %(asyncio)s aiosqlite;python_version>="3" @@ -85,6 +92,31 @@ aiosqlite = sqlcipher = sqlcipher3_binary;python_version>="3" +# legacy underscore names +# there appears as if there might be some dual-passes through this file +# when tox installs extras, sometimes looking for dashed names and sometimes +# looking first for underscore names. 
so the dash/underscore names here are +# kept entirely independent of each other else things dont seem to want +# to install +mssql_pymssql = pymssql +mssql_pyodbc = pyodbc + +mysql_connector = + mysql-connector-python + +mariadb_connector = + mariadb>=1.0.1,!=1.1.2;python_version>="3" + +postgresql_pg8000 = + pg8000>=1.16.6,!=1.29.0;python_version>="3" + +postgresql_asyncpg = + %(asyncio)s + asyncpg;python_version>="3" + +postgresql_psycopg2binary = psycopg2-binary +postgresql_psycopg2cffi = psycopg2cffi + [egg_info] tag_build = dev @@ -93,7 +125,8 @@ where = lib [tool:pytest] addopts = --tb native -v -r sfxX --maxfail=250 -p no:warnings -p no:logging -python_files = test/*test_*.py +norecursedirs = examples build doc lib +python_files = test_*.py [upload] sign = 1 @@ -105,12 +138,15 @@ enable-extensions = G # E203 is due to https://github.com/PyCQA/pycodestyle/issues/373 ignore = - A003, + A003, A004, A005, A006 D, E203,E305,E711,E712,E721,E722,E741, + FA100,F824 N801,N802,N806, RST304,RST303,RST299,RST399, W503,W504 + U100,U101 + IS001 exclude = .venv,.git,.tox,dist,doc,*egg,build import-order-style = google application-import-names = sqlalchemy,test @@ -170,10 +206,11 @@ aiomysql = mysql+aiomysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 aiomysql_fallback = mysql+aiomysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4&async_fallback=true asyncmy = mysql+asyncmy://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 asyncmy_fallback = mysql+asyncmy://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4&async_fallback=true -mariadb = mariadb://scott:tiger@127.0.0.1:3306/test +mariadb = mariadb+mysqldb://scott:tiger@127.0.0.1:3306/test +mariadb_connector = mariadb+mariadbconnector://scott:tiger@127.0.0.1:3306/test mssql = mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server mssql_pymssql = mssql+pymssql://scott:tiger@ms_2008 docker_mssql = mssql+pymssql://scott:tiger^5HHH@127.0.0.1:1433/test -oracle = oracle://scott:tiger@127.0.0.1:1521 +oracle = oracle://scott:tiger@oracle18c oracle8 = oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0 firebird = firebird://sysdba:mainkey@localhost//Users/classic/foo.fdb diff --git a/setup.py b/setup.py index 55a3cee6f98..243c9696704 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,3 @@ -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError -from distutils.errors import DistutilsExecError -from distutils.errors import DistutilsPlatformError import os import platform import re @@ -10,8 +6,19 @@ from setuptools import Distribution as _Distribution from setuptools import Extension from setuptools import setup -from setuptools.command.test import test as TestCommand - +from setuptools.command.build_ext import build_ext + +# attempt to use pep-632 imports for setuptools symbols; however, +# since these symbols were only added to setuptools as of 59.0.1, +# fall back to the distutils symbols otherwise +try: + from setuptools.errors import CCompilerError + from setuptools.errors import DistutilsExecError + from setuptools.errors import DistutilsPlatformError +except ImportError: + from distutils.errors import CCompilerError + from distutils.errors import DistutilsExecError + from distutils.errors import DistutilsPlatformError cmdclass = {} @@ -87,24 +94,6 @@ def has_ext_modules(self): return True -class UseTox(TestCommand): - RED = 31 - RESET_SEQ = "\033[0m" - BOLD_SEQ = "\033[1m" - COLOR_SEQ = "\033[1;%dm" - - def run_tests(self): - sys.stderr.write( - "%s%spython setup.py test is 
deprecated by pypa. Please invoke " - "'tox' with no arguments for a basic test run.\n%s" - % (self.COLOR_SEQ % self.RED, self.BOLD_SEQ, self.RESET_SEQ) - ) - sys.exit(1) - - -cmdclass["test"] = UseTox - - def status_msgs(*msgs): print("*" * 75) for msg in msgs: diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index 624c12ea225..bcf3475e9d1 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -6,6 +6,7 @@ import sqlalchemy as sa from sqlalchemy import ForeignKey +from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData @@ -16,6 +17,7 @@ from sqlalchemy import util from sqlalchemy.engine import result from sqlalchemy.orm import aliased +from sqlalchemy.orm import attributes from sqlalchemy.orm import clear_mappers from sqlalchemy.orm import configure_mappers from sqlalchemy.orm import declarative_base @@ -209,10 +211,14 @@ def run_plain(*func_args): # return run_plain def run_in_process(*func_args): - queue = multiprocessing.Queue() - proc = multiprocessing.Process( - target=profile, args=(queue, func_args) - ) + # see + # https://docs.python.org/3.14/whatsnew/3.14.html + # #incompatible-changes - the default run type is no longer + # "fork", but since we are running closures in the process + # we need forked mode + ctx = multiprocessing.get_context("fork") + queue = ctx.Queue() + proc = ctx.Process(target=profile, args=(queue, func_args)) proc.start() while True: row = queue.get() @@ -355,6 +361,33 @@ def go(): go() + def test_clone_expression(self): + # this test is for the memory issue "fixed" in #7823, where clones + # no longer carry along all past elements. + # However, due to #7903, we can't at the moment use a + # BindParameter here - these have to continue to carry along all + # the previous clones for now. So the test here only works with + # expressions that dont have BindParameter objects in them. 
+ + root_expr = column("x", Integer) == column("y", Integer) + expr = [root_expr] + + @profile_memory() + def go(): + expr[0] = cloned_traverse(expr[0], {}, {}) + + go() + + def test_tv_render_derived(self): + root_expr = func.some_fn().table_valued() + expr = [root_expr] + + @profile_memory() + def go(): + expr[0] = expr[0].render_derived() + + go() + class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed): @@ -1727,3 +1760,57 @@ def go(): s.close() go() + + +class MiscMemoryIntensiveTests(fixtures.TestBase): + __tags__ = ("memory_intensive",) + + @testing.fixture + def user_fixture(self, decl_base): + class User(decl_base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + decl_base.metadata.create_all(testing.db) + yield User + + @testing.requires.predictable_gc + def test_gced_delete_on_rollback(self, user_fixture): + User = user_fixture + + s = fixture_session() + u1 = User(name="ed") + s.add(u1) + s.commit() + + s.delete(u1) + u1_state = attributes.instance_state(u1) + assert u1_state in s.identity_map.all_states() + assert u1_state in s._deleted + s.flush() + assert u1_state not in s.identity_map.all_states() + assert u1_state not in s._deleted + del u1 + gc_collect() + gc_collect() + gc_collect() + assert u1_state.obj() is None + + s.rollback() + # new in 1.1, not in identity map if the object was + # gc'ed and we restore snapshot; we've changed update_impl + # to just skip this object + assert u1_state not in s.identity_map.all_states() + + # in any version, the state is replaced by the query + # because the identity map would switch it + u1 = s.query(User).filter_by(name="ed").one() + assert u1_state not in s.identity_map.all_states() + + eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 1) + s.delete(u1) + s.flush() + eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 0) + s.commit() diff --git a/test/aaa_profiling/test_orm.py b/test/aaa_profiling/test_orm.py index 5d081b933ec..bbcb6ad5b8b 100644 --- a/test/aaa_profiling/test_orm.py +++ b/test/aaa_profiling/test_orm.py @@ -1225,7 +1225,9 @@ def test_no_bundle(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1239,7 +1241,9 @@ def test_no_entity_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1251,7 +1255,9 @@ def test_no_entity_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1263,7 +1269,9 @@ def test_entity_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1276,7 +1284,9 @@ def test_entity_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1289,7 +1299,9 @@ def test_no_bundle_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from 
previous run + r = q.all() # noqa: F841 go() @@ -1301,7 +1313,9 @@ def test_no_bundle_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1314,7 +1328,9 @@ def test_bundle_wo_annotation(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() @@ -1326,6 +1342,8 @@ def test_bundle_w_annotation(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa: F841 go() diff --git a/test/base/test_concurrency_py3k.py b/test/base/test_concurrency_py3k.py index 0b648aa30bd..c639f24febf 100644 --- a/test/base/test_concurrency_py3k.py +++ b/test/base/test_concurrency_py3k.py @@ -1,3 +1,5 @@ +import asyncio +import random import threading from sqlalchemy import exc @@ -8,7 +10,6 @@ from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_true -from sqlalchemy.util import asyncio from sqlalchemy.util import await_fallback from sqlalchemy.util import await_only from sqlalchemy.util import greenlet_spawn @@ -80,6 +81,7 @@ def go(): with expect_raises_message(ValueError, "sync error"): await greenlet_spawn(go) + @testing.requires.not_python314 def test_await_fallback_no_greenlet(self): to_await = run1() await_fallback(to_await) @@ -89,7 +91,8 @@ async def test_await_only_no_greenlet(self): to_await = run1() with expect_raises_message( exc.MissingGreenlet, - r"greenlet_spawn has not been called; can't call await_\(\) here.", + "greenlet_spawn has not been called; " + r"can't call await_only\(\) here.", ): await_only(to_await) @@ -134,7 +137,8 @@ def go(): with expect_raises_message( exc.InvalidRequestError, - r"greenlet_spawn has not been called; can't call await_\(\) here.", + "greenlet_spawn has not been called; " + r"can't call await_only\(\) here.", ): await greenlet_spawn(go) @@ -147,20 +151,43 @@ async def test_contextvars(self): import contextvars var = contextvars.ContextVar("var") - concurrency = 5 + concurrency = 500 + # NOTE: sleep here is not necessary. 
It's used to simulate IO + # ensuring that task are not run sequentially async def async_inner(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) eq_(val, var.get()) return var.get() + async def async_set(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) + var.set(val) + def inner(val): retval = await_only(async_inner(val)) eq_(val, var.get()) eq_(retval, val) + + # set the value in a sync function + newval = val + concurrency + var.set(newval) + syncset = await_only(async_inner(newval)) + eq_(newval, var.get()) + eq_(syncset, newval) + + # set the value in an async function + retval = val + 2 * concurrency + await_only(async_set(retval)) + eq_(var.get(), retval) + eq_(await_only(async_inner(retval)), retval) + return retval async def task(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) var.set(val) + await asyncio.sleep(random.uniform(0.005, 0.015)) return await greenlet_spawn(inner, val) values = { @@ -169,7 +196,7 @@ async def task(val): [task(i) for i in range(concurrency)] ) } - eq_(values, set(range(concurrency))) + eq_(values, set(range(concurrency * 2, concurrency * 3))) @async_test async def test_require_await(self): diff --git a/test/base/test_events.py b/test/base/test_events.py index 68db5207ca0..e8ed0ff3628 100644 --- a/test/base/test_events.py +++ b/test/base/test_events.py @@ -7,6 +7,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not @@ -197,6 +198,50 @@ def test_exec_once(self): eq_(m1.mock_calls, [call(5, 6), call(9, 10)]) + def test_real_name_wrong_dispatch(self): + m1 = Mock() + + class E1(event.Events): + @classmethod + def _accept_with(cls, target): + if isinstance(target, T1): + return target + else: + m1.yup() + return None + + def event_one(self, x, y): + pass + + def event_two(self, x): + pass + + def event_three(self, x): + pass + + class T1(object): + dispatch = event.dispatcher(E1) + + class T2(object): + pass + + class E2(event.Events): + + _dispatch_target = T2 + + def event_four(self, x): + pass + + with expect_raises_message( + exc.InvalidRequestError, "No such event 'event_three'" + ): + + @event.listens_for(E2, "event_three") + def go(*arg): + pass + + eq_(m1.mock_calls, [call.yup()]) + def test_exec_once_exception(self): m1 = Mock() m1.side_effect = ValueError @@ -632,6 +677,35 @@ def handler2(x, y): eq_(len(SubTarget().dispatch.event_one), 2) + @testing.combinations(True, False, argnames="m1") + @testing.combinations(True, False, argnames="m2") + @testing.combinations(True, False, argnames="m3") + @testing.combinations(True, False, argnames="use_insert") + def test_subclass_gen_after_clslisten(self, m1, m2, m3, use_insert): + """test #8467""" + m1 = Mock() if m1 else None + m2 = Mock() if m2 else None + m3 = Mock() if m3 else None + + if m1: + event.listen(self.TargetOne, "event_one", m1, insert=use_insert) + + class SubTarget(self.TargetOne): + pass + + if m2: + event.listen(SubTarget, "event_one", m2, insert=use_insert) + + if m3: + event.listen(self.TargetOne, "event_one", m3, insert=use_insert) + + st = SubTarget() + st.dispatch.event_one() + + for m in m1, m2, m3: + if m: + eq_(m.mock_calls, [call()]) + def test_lis_multisub_lis(self): @event.listens_for(self.TargetOne, "event_one") def handler1(x, y): diff --git a/test/base/test_except.py b/test/base/test_except.py index 
767fd233c01..d464aa7d747 100644 --- a/test/base/test_except.py +++ b/test/base/test_except.py @@ -538,7 +538,6 @@ def make_combinations(): for cls_list, callable_list in ALL_EXC: unroll.extend(product(cls_list, callable_list)) - print(unroll) return combinations_list(unroll) @make_combinations() diff --git a/test/base/test_result.py b/test/base/test_result.py index d94602203ce..86874b41df0 100644 --- a/test/base/test_result.py +++ b/test/base/test_result.py @@ -5,10 +5,13 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_deprecated from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.util import picklers +from sqlalchemy.util import compat class ResultTupleTest(fixtures.TestBase): @@ -65,7 +68,12 @@ def test_slice_access(self): def test_slices_arent_in_mappings(self): keyed_tuple = self._fixture([1, 2], ["a", "b"]) - assert_raises(TypeError, lambda: keyed_tuple._mapping[0:2]) + if compat.py312: + with expect_raises(KeyError): + keyed_tuple._mapping[0:2] + else: + with expect_raises(TypeError): + keyed_tuple._mapping[0:2] def test_integers_arent_in_mappings(self): keyed_tuple = self._fixture([1, 2], ["a", "b"]) @@ -223,6 +231,21 @@ def _fixture( return res + def test_close_attributes(self): + """test #8710""" + r1 = self._fixture() + + is_false(r1.closed) + is_false(r1._soft_closed) + + r1._soft_close() + is_false(r1.closed) + is_true(r1._soft_closed) + + r1.close() + is_true(r1.closed) + is_true(r1._soft_closed) + def test_class_presented(self): """To support different kinds of objects returned vs. rows, there are two wrapper classes for Result. @@ -484,7 +507,12 @@ def test_first(self): row = result.first() eq_(row, (1, 1, 1)) - eq_(result.all(), []) + # note this is a behavior change in 1.4.27 due to + # adding a real result.close() to Result, previously this would + # return an empty list. this is already the + # behavior with CursorResult, but was mis-implemented for + # other non-cursor result sets. + assert_raises(exc.ResourceClosedError, result.all) def test_one_unique(self): # assert that one() counts rows after uniqueness has been applied. @@ -597,7 +625,12 @@ def test_scalar(self): eq_(result.scalar(), 1) - eq_(result.all(), []) + # note this is a behavior change in 1.4.27 due to + # adding a real result.close() to Result, previously this would + # return an empty list. this is already the + # behavior with CursorResult, but was mis-implemented for + # other non-cursor result sets. 
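The note above describes the 1.4.27 change where a plain (non-cursor) ``Result`` is genuinely closed once ``first()`` or ``scalar()`` has consumed it, matching ``CursorResult``. A minimal sketch of that behavior, assuming the same in-memory result classes this test module already uses; the row values are illustrative:

    from sqlalchemy import exc
    from sqlalchemy.engine import result

    metadata = result.SimpleResultMetaData(["a", "b", "c"])
    r = result.IteratorResult(metadata, iter([(1, 1, 1), (2, 2, 2)]))

    row = r.first()  # returns (1, 1, 1); per the 1.4.27 note, also closes the result

    try:
        r.all()
    except exc.ResourceClosedError:
        print("Result is closed after first(), as with CursorResult")
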
+ assert_raises(exc.ResourceClosedError, result.all) def test_partition(self): result = self._fixture() @@ -1044,12 +1077,49 @@ def test_scalar_mode_columns0_mapping(self, no_tuple_fixture): metadata, no_tuple_fixture, source_supports_scalars=True ) - r = r.columns(0).mappings() + with expect_deprecated( + r"The Result.columns\(\) method has a bug in SQLAlchemy 1.4 " + r"that is causing it to yield scalar values" + ): + r = r.columns(0).mappings() eq_( list(r), [{"a": 1}, {"a": 2}, {"a": 1}, {"a": 1}, {"a": 4}], ) + def test_scalar_mode_columns0_plain(self, no_tuple_fixture): + """test #7953""" + + metadata = result.SimpleResultMetaData(["a", "b", "c"]) + + r = result.ChunkedIteratorResult( + metadata, no_tuple_fixture, source_supports_scalars=True + ) + + with expect_deprecated( + r"The Result.columns\(\) method has a bug in SQLAlchemy 1.4 " + r"that is causing it to yield scalar values" + ): + r = r.columns(0) + eq_( + list(r), + [1, 2, 1, 1, 4], + # [(1,), (2,), (1,), (1,), (4,)], # correct result + ) + + def test_scalar_mode_scalars0(self, no_tuple_fixture): + metadata = result.SimpleResultMetaData(["a", "b", "c"]) + + r = result.ChunkedIteratorResult( + metadata, no_tuple_fixture, source_supports_scalars=True + ) + + r = r.scalars(0) + eq_( + list(r), + [1, 2, 1, 1, 4], + ) + def test_scalar_mode_but_accessed_nonscalar_result(self, no_tuple_fixture): metadata = result.SimpleResultMetaData(["a", "b", "c"]) diff --git a/test/base/test_tutorials.py b/test/base/test_tutorials.py index 494b8a0f67d..05d884b4da6 100644 --- a/test/base/test_tutorials.py +++ b/test/base/test_tutorials.py @@ -9,10 +9,12 @@ from sqlalchemy import testing from sqlalchemy.testing import config from sqlalchemy.testing import fixtures +from sqlalchemy.testing import requires class DocTest(fixtures.TestBase): __requires__ = ("python3",) + __only_on__ = "sqlite+pysqlite" def _setup_logger(self): rootlogger = logging.getLogger("sqlalchemy.engine.Engine") @@ -88,6 +90,7 @@ def _run_doctest(self, *fnames): globs.update(test.globs) assert not runner.failures + @requires.has_json_each def test_20_style(self): self._run_doctest( "tutorial/index.rst", @@ -115,6 +118,9 @@ def test_core_operators(self): def test_orm_queryguide(self): self._run_doctest("orm/queryguide.rst") + def test_orm_quickstart(self): + self._run_doctest("orm/quickstart.rst") + # unicode checker courtesy pytest diff --git a/test/base/test_warnings.py b/test/base/test_warnings.py index 0cbab7f2824..7e5063bdf22 100644 --- a/test/base/test_warnings.py +++ b/test/base/test_warnings.py @@ -1,6 +1,9 @@ +from sqlalchemy import testing +from sqlalchemy.exc import SADeprecationWarning from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated from sqlalchemy.testing import fixtures +from sqlalchemy.util.deprecations import _decorate_cls_with_warning from sqlalchemy.util.deprecations import warn_deprecated_limited from sqlalchemy.util.langhelpers import _hash_limit_string @@ -33,4 +36,47 @@ def test_warn_deprecated_limited_cap(self): messages.add(message) eq_(len(printouts), occurrences) - eq_(len(messages), cap) + assert cap / 2 < len(messages) <= cap + + +class ClsWarningTest(fixtures.TestBase): + @testing.fixture + def dep_cls_fixture(self): + class Connectable(object): + """a docstring""" + + some_member = "foo" + + Connectable = _decorate_cls_with_warning( + Connectable, + None, + SADeprecationWarning, + "a message", + "2.0", + "another message", + ) + + return Connectable + + def test_dep_inspectable(self, dep_cls_fixture): + """test 
#8115""" + + import inspect + + class PlainClass(object): + some_member = "bar" + + pc_keys = dict(inspect.getmembers(PlainClass())) + insp_keys = dict(inspect.getmembers(dep_cls_fixture())) + + assert set(insp_keys).intersection( + ( + "__class__", + "__doc__", + "__eq__", + "__dict__", + "__weakref__", + "some_member", + ) + ) + eq_(set(pc_keys), set(insp_keys)) diff --git a/test/conftest.py b/test/conftest.py index 0db4486a92f..515fff340e0 100755 --- a/test/conftest.py +++ b/test/conftest.py @@ -32,7 +32,11 @@ # We check no_user_site to honor the use of this flag. sys.path.insert( 0, - os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "lib"), + os.path.abspath( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "lib" + ) + ), ) # use bootstrapping so that test plugins are loaded @@ -51,4 +55,4 @@ code = compile(f.read(), "bootstrap.py", "exec") to_bootstrap = "pytest" exec(code, globals(), locals()) - from pytestplugin import * # noqa + from sqla_pytestplugin import * # noqa diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py index cf8894f4242..a385ad12083 100644 --- a/test/dialect/mssql/test_compiler.py +++ b/test/dialect/mssql/test_compiler.py @@ -37,6 +37,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing.assertions import eq_ignore_whitespace +from sqlalchemy.types import TypeEngine tbl = table("t", column("a")) @@ -104,6 +105,34 @@ def test_select_w_order_by_collate(self): "Latin1_General_CS_AS_KS_WS_CI ASC", ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) OUTPUT inserted.name, " + "lower(inserted.value) AS value VALUES (:name, :value)", + ) + def test_join_with_hint(self): t1 = table( "t1", @@ -181,7 +210,7 @@ def test_update_exclude_hint(self): t.update() .where(t.c.somecolumn == "q") .values(somecolumn="x") - .with_hint("XYZ", "mysql"), + .with_hint("XYZ", dialect_name="mysql"), "UPDATE sometable SET somecolumn=:somecolumn " "WHERE sometable.somecolumn = :somecolumn_1", ) @@ -334,8 +363,8 @@ def test_update_to_select_schema(self): @testing.combinations( ( lambda: select(literal("x"), literal("y")), - "SELECT [POSTCOMPILE_param_1] AS anon_1, " - "[POSTCOMPILE_param_2] AS anon_2", + "SELECT __[POSTCOMPILE_param_1] AS anon_1, " + "__[POSTCOMPILE_param_2] AS anon_2", { "check_literal_execute": {"param_1": "x", "param_2": "y"}, "check_post_param": {}, @@ -344,7 +373,7 @@ def test_update_to_select_schema(self): ( lambda t: select(t).where(t.c.foo.in_(["x", "y", "z"])), "SELECT sometable.foo FROM sometable WHERE sometable.foo " - "IN ([POSTCOMPILE_foo_1])", + "IN (__[POSTCOMPILE_foo_1])", { "check_literal_execute": {"foo_1": ["x", "y", "z"]}, "check_post_param": {}, @@ -436,7 +465,8 @@ def test_noorderby_insubquery_limit(self): crit = q.c.myid == table1.c.myid self.assert_compile( select("*").where(crit), - "SELECT * FROM (SELECT TOP 
[POSTCOMPILE_param_1] mytable.myid AS " + "SELECT * FROM (SELECT TOP __[POSTCOMPILE_param_1] " + "mytable.myid AS " "myid FROM mytable ORDER BY mytable.myid) AS foo, mytable WHERE " "foo.myid = mytable.myid", ) @@ -555,6 +585,47 @@ def test_noorderby_parameters_insubquery(self): checkpositional=("bar",), ) + @testing.variation("use_schema_translate", [True, False]) + @testing.combinations( + "abc", "has spaces", "[abc]", "[has spaces]", argnames="schemaname" + ) + def test_schema_single_token_bracketed( + self, use_schema_translate, schemaname + ): + """test for #9133. + + this is not the actual regression case for #9133, which is instead + within the reflection process. However, when we implemented + #2626, we never considered the case of ``[schema]`` without any + dots in it. + + """ + + schema_no_brackets = schemaname.strip("[]") + + if " " in schemaname: + rendered_schema = "[%s]" % (schema_no_brackets,) + else: + rendered_schema = schema_no_brackets + + metadata = MetaData() + tbl = Table( + "test", + metadata, + Column("id", Integer, primary_key=True), + schema=schemaname if not use_schema_translate else None, + ) + + self.assert_compile( + select(tbl), + "SELECT %(name)s.test.id FROM %(name)s.test" + % {"name": rendered_schema}, + schema_translate_map={None: schemaname} + if use_schema_translate + else None, + render_schema_translate=True if use_schema_translate else False, + ) + def test_schema_many_tokens_one(self): metadata = MetaData() tbl = Table( @@ -810,10 +881,10 @@ def test_union(self): self.assert_compile( u, "SELECT t1.col3 AS col3, t1.col4 AS col4 " - "FROM t1 WHERE t1.col2 IN ([POSTCOMPILE_col2_1]) " + "FROM t1 WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) " "UNION SELECT t2.col3 AS col3, " "t2.col4 AS col4 FROM t2 WHERE t2.col2 IN " - "([POSTCOMPILE_col2_2]) ORDER BY col3, col4", + "(__[POSTCOMPILE_col2_2]) ORDER BY col3, col4", checkparams={ "col2_1": ["t1col2r1", "t1col2r2"], "col2_2": ["t2col2r2", "t2col2r3"], @@ -823,9 +894,9 @@ def test_union(self): u.alias("bar").select(), "SELECT bar.col3, bar.col4 FROM (SELECT " "t1.col3 AS col3, t1.col4 AS col4 FROM t1 " - "WHERE t1.col2 IN ([POSTCOMPILE_col2_1]) UNION " + "WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) UNION " "SELECT t2.col3 AS col3, t2.col4 AS col4 " - "FROM t2 WHERE t2.col2 IN ([POSTCOMPILE_col2_2])) AS bar", + "FROM t2 WHERE t2.col2 IN (__[POSTCOMPILE_col2_2])) AS bar", checkparams={ "col2_1": ["t1col2r1", "t1col2r2"], "col2_2": ["t2col2r2", "t2col2r3"], @@ -972,7 +1043,7 @@ def test_limit_using_top(self): self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 10}, ) @@ -1000,7 +1071,7 @@ def test_limit_zero_using_top(self): self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 0}, ) @@ -1201,7 +1272,7 @@ def test_limit_zero_using_window(self): # of zero, so produces TOP 0 self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 0}, ) @@ -1445,21 +1516,21 @@ def test_column_computed(self, text, persisted): 5, 0, {"percent": True}, - "TOP [POSTCOMPILE_param_1] PERCENT", + "TOP __[POSTCOMPILE_param_1] PERCENT", {"param_1": 5}, ), ( 5, None, {"percent": True, "with_ties": True}, - "TOP 
[POSTCOMPILE_param_1] PERCENT WITH TIES", + "TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES", {"param_1": 5}, ), ( 5, 0, {"with_ties": True}, - "TOP [POSTCOMPILE_param_1] WITH TIES", + "TOP __[POSTCOMPILE_param_1] WITH TIES", {"param_1": 5}, ), ( @@ -1537,21 +1608,21 @@ def test_fetch(self, dialect_2012, fetch, offset, fetch_kw, exp, params): 5, 0, {"percent": True}, - "TOP [POSTCOMPILE_param_1] PERCENT", + "TOP __[POSTCOMPILE_param_1] PERCENT", {"param_1": 5}, ), ( 5, None, {"percent": True, "with_ties": True}, - "TOP [POSTCOMPILE_param_1] PERCENT WITH TIES", + "TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES", {"param_1": 5}, ), ( 5, 0, {"with_ties": True}, - "TOP [POSTCOMPILE_param_1] WITH TIES", + "TOP __[POSTCOMPILE_param_1] WITH TIES", {"param_1": 5}, ), ( diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index 5482e261670..32068e504b7 100644 --- a/test/dialect/mssql/test_engine.py +++ b/test/dialect/mssql/test_engine.py @@ -1,6 +1,7 @@ # -*- encoding: utf-8 from decimal import Decimal +import re from sqlalchemy import Column from sqlalchemy import event @@ -23,6 +24,7 @@ from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock @@ -234,25 +236,49 @@ def test_pyodbc_odbc_connect_ignores_other_values(self): connection, ) - def test_pyodbc_token_injection(self): - token1 = "someuser%3BPORT%3D50001" - token2 = "some{strange}pw%3BPORT%3D50001" - token3 = "somehost%3BPORT%3D50001" - token4 = "somedb%3BPORT%3D50001" - - u = url.make_url( - "mssql+pyodbc://%s:%s@%s/%s?driver=foob" - % (token1, token2, token3, token4) - ) - dialect = pyodbc.dialect() - connection = dialect.create_connect_args(u) - eq_( - [ + @testing.combinations( + ( + "original", + ( + "someuser%3BPORT%3D50001", + "some{strange}pw%3BPORT%3D50001", + "somehost%3BPORT%3D50001", + "somedb%3BPORT%3D50001", + ), + ( [ "DRIVER={foob};Server=somehost%3BPORT%3D50001;" "Database=somedb%3BPORT%3D50001;UID={someuser;PORT=50001};" "PWD={some{strange}}pw;PORT=50001}" - ], + ] + ), + ), + ( + "issue_8062", + ( + "larry", + "{moe", + "localhost", + "mydb", + ), + ( + [ + "DRIVER={foob};Server=localhost;" + "Database=mydb;UID=larry;" + "PWD={{moe}" + ] + ), + ), + argnames="tokens, connection_string", + id_="iaa", + ) + def test_pyodbc_token_injection(self, tokens, connection_string): + u = url.make_url("mssql+pyodbc://%s:%s@%s/%s?driver=foob" % tokens) + dialect = pyodbc.dialect() + connection = dialect.create_connect_args(u) + eq_( + [ + connection_string, {}, ], connection, @@ -623,7 +649,12 @@ def test_isolation_level(self, metadata): class IsolationLevelDetectTest(fixtures.TestBase): - def _fixture(self, view): + def _fixture( + self, + view_result, + simulate_perm_failure=False, + simulate_no_system_views=False, + ): class Error(Exception): pass @@ -636,15 +667,34 @@ class Error(Exception): def fail_on_exec( stmt, ): - if view is not None and view in stmt: + result[:] = [] + if "SELECT name FROM sys.system_views" in stmt: + if simulate_no_system_views: + raise dialect.dbapi.Error( + "SQL Server simulated no system_views error" + ) + else: + if view_result: + result.append((view_result,)) + elif re.match( + ".*SELECT CASE transaction_isolation_level.*FROM sys.%s" + % (view_result,), + stmt, + re.S, + ): + if simulate_perm_failure: + raise dialect.dbapi.Error( + "SQL 
Server simulated permission error" + ) result.append(("SERIALIZABLE",)) else: - raise Error("that didn't work") + assert False connection = Mock( cursor=Mock( return_value=Mock( - execute=fail_on_exec, fetchone=lambda: result[0] + execute=fail_on_exec, + fetchone=lambda: result[0] if result else None, ) ) ) @@ -664,13 +714,43 @@ def test_exec_sessions(self): def test_not_supported(self): dialect, connection = self._fixture(None) - with expect_warnings("Could not fetch transaction isolation level"): - assert_raises_message( - NotImplementedError, - "Can't fetch isolation", - dialect.get_isolation_level, - connection, - ) + assert_raises_message( + NotImplementedError, + "Can't fetch isolation level on this particular ", + dialect.get_isolation_level, + connection, + ) + + @testing.combinations(True, False) + def test_no_system_views(self, simulate_perm_failure_also): + dialect, connection = self._fixture( + "dm_pdw_nodes_exec_sessions", + simulate_perm_failure=simulate_perm_failure_also, + simulate_no_system_views=True, + ) + + assert_raises_message( + NotImplementedError, + r"Can\'t fetch isolation level; encountered error SQL Server " + r"simulated no system_views error when attempting to query the " + r'"sys.system_views" view.', + dialect.get_isolation_level, + connection, + ) + + def test_dont_have_table_perms(self): + dialect, connection = self._fixture( + "dm_pdw_nodes_exec_sessions", simulate_perm_failure=True + ) + + assert_raises_message( + NotImplementedError, + r"Can\'t fetch isolation level; encountered error SQL Server " + r"simulated permission error when attempting to query the " + r'"sys.dm_pdw_nodes_exec_sessions" view.', + dialect.get_isolation_level, + connection, + ) class InvalidTransactionFalsePositiveTest(fixtures.TablesTest): @@ -708,3 +788,44 @@ def test_invalid_transaction_detection(self, connection): # "Can't reconnect until invalid transaction is rolled back." result = connection.execute(t.select()).fetchall() eq_(len(result), 1) + + +class IgnoreNotransOnRollbackTest(fixtures.TestBase): + def test_ignore_no_transaction_on_rollback(self): + """test #8231""" + + class ProgrammingError(Exception): + pass + + dialect = base.dialect(ignore_no_transaction_on_rollback=True) + dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) + + connection = mock.Mock( + rollback=mock.Mock( + side_effect=ProgrammingError("Error 111214 happened") + ) + ) + with expect_warnings( + "ProgrammingError 111214 'No corresponding transaction found.' 
" + "has been suppressed via ignore_no_transaction_on_rollback=True" + ): + dialect.do_rollback(connection) + + def test_other_programming_error_on_rollback(self): + """test #8231""" + + class ProgrammingError(Exception): + pass + + dialect = base.dialect(ignore_no_transaction_on_rollback=True) + dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) + + connection = mock.Mock( + rollback=mock.Mock( + side_effect=ProgrammingError("Some other error happened") + ) + ) + with expect_raises_message( + ProgrammingError, "Some other error happened" + ): + dialect.do_rollback(connection) diff --git a/test/dialect/mssql/test_query.py b/test/dialect/mssql/test_query.py index e5e3cd3ad29..3a34bf04cef 100644 --- a/test/dialect/mssql/test_query.py +++ b/test/dialect/mssql/test_query.py @@ -1,4 +1,6 @@ # -*- encoding: utf-8 +import decimal + from sqlalchemy import and_ from sqlalchemy import Column from sqlalchemy import DDL @@ -9,6 +11,7 @@ from sqlalchemy import Identity from sqlalchemy import Integer from sqlalchemy import literal +from sqlalchemy import Numeric from sqlalchemy import or_ from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import select @@ -41,6 +44,13 @@ def define_tables(cls, metadata): Column("description", String(50)), PrimaryKeyConstraint("id", name="PK_cattable"), ) + Table( + "numeric_identity", + metadata, + Column("id", Numeric(18, 0), autoincrement=True), + Column("description", String(50)), + PrimaryKeyConstraint("id", name="PK_numeric_identity"), + ) def test_compiled(self): cattable = self.tables.cattable @@ -63,6 +73,13 @@ def test_execute(self, connection): lastcat = conn.execute(cattable.select().order_by(desc(cattable.c.id))) eq_((10, "PHP"), lastcat.first()) + numeric_identity = self.tables.numeric_identity + # for some reason, T-SQL does not like .values(), but this works + result = conn.execute( + numeric_identity.insert(), dict(description="T-SQL") + ) + eq_(result.inserted_primary_key, (decimal.Decimal("1"),)) + def test_executemany(self, connection): conn = connection cattable = self.tables.cattable @@ -255,6 +272,21 @@ def test_fetchid_trigger(self, metadata, connection): r = connection.execute(t1.insert(), dict(descr="hello")) eq_(r.inserted_primary_key, (100,)) + def test_compiler_symbol_conflict(self, connection, metadata): + t = Table("t", metadata, Column("POSTCOMPILE_DATA", String(50))) + + t.create(connection) + + connection.execute(t.insert().values(POSTCOMPILE_DATA="some data")) + eq_( + connection.scalar( + select(t.c.POSTCOMPILE_DATA).where( + t.c.POSTCOMPILE_DATA.in_(["some data", "some other data"]) + ) + ), + "some data", + ) + @testing.provide_metadata def _test_disable_scope_identity(self): engine = engines.testing_engine(options={"use_scope_identity": False}) diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index 1fa301e282b..125959cf9d8 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -1,6 +1,7 @@ # -*- encoding: utf-8 import datetime import decimal +import random from sqlalchemy import Column from sqlalchemy import DDL @@ -24,6 +25,7 @@ from sqlalchemy.dialects.mssql import base from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode from sqlalchemy.dialects.mssql.information_schema import tables +from sqlalchemy.pool import NullPool from sqlalchemy.schema import CreateIndex from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import ComparesTables @@ -34,6 +36,7 @@ from sqlalchemy.testing import is_ 
from sqlalchemy.testing import is_true from sqlalchemy.testing import mock +from sqlalchemy.testing import provision class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL): @@ -358,6 +361,85 @@ def test_has_table_temp_temp_present_both_sessions(self): "drop table #myveryveryuniquetemptablename" ) + @testing.fixture + def temp_db_alt_collation_fixture( + self, connection_no_trans, testing_engine + ): + temp_db_name = "%s_different_collation" % ( + provision.FOLLOWER_IDENT or "default" + ) + cnxn = connection_no_trans.execution_options( + isolation_level="AUTOCOMMIT" + ) + cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name) + cnxn.exec_driver_sql( + "CREATE DATABASE %s COLLATE Danish_Norwegian_CI_AS" % temp_db_name + ) + eng = testing_engine( + url=testing.db.url.set(database=temp_db_name), + options=dict(poolclass=NullPool, future=True), + ) + + yield eng + + cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name) + + def test_global_temp_different_collation( + self, temp_db_alt_collation_fixture + ): + """test #8035""" + + tname = "##foo%s" % (random.randint(1, 1000000),) + + with temp_db_alt_collation_fixture.connect() as conn: + conn.exec_driver_sql( + "CREATE TABLE %s (id int primary key)" % (tname,) + ) + conn.commit() + + eq_( + inspect(conn).get_columns(tname), + [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.INTEGER), + "nullable": False, + "default": None, + "autoincrement": False, + } + ], + ) + Table(tname, MetaData(), autoload_with=conn) + + @testing.combinations( + ("test_schema"), + ("[test_schema]"), + argnames="schema_value", + ) + @testing.variation( + "reflection_operation", ["has_table", "reflect_table", "get_columns"] + ) + def test_has_table_with_single_token_schema( + self, metadata, connection, schema_value, reflection_operation + ): + """test for #9133""" + tt = Table( + "test", metadata, Column("id", Integer), schema=schema_value + ) + tt.create(connection) + + if reflection_operation.has_table: + is_true(inspect(connection).has_table("test", schema=schema_value)) + elif reflection_operation.reflect_table: + m2 = MetaData() + Table("test", m2, autoload_with=connection, schema=schema_value) + elif reflection_operation.get_columns: + is_true( + inspect(connection).get_columns("test", schema=schema_value) + ) + else: + reflection_operation.fail() + def test_db_qualified_items(self, metadata, connection): Table("foo", metadata, Column("id", Integer, primary_key=True)) Table( @@ -765,7 +847,11 @@ def define_tables(cls, metadata): ), ), Column("id2", Integer, Identity()), - Column("id3", sqltypes.BigInteger, Identity()), + Column( + "id3", + sqltypes.BigInteger, + Identity(start=-9223372036854775808), + ), Column("id4", sqltypes.SmallInteger, Identity()), Column("id5", sqltypes.Numeric, Identity()), ] @@ -787,7 +873,10 @@ def test_reflect_identity(self, connection): eq_(type(col["identity"]["start"]), int) eq_(type(col["identity"]["increment"]), int) elif col["name"] == "id3": - eq_(col["identity"], {"start": 1, "increment": 1}) + eq_( + col["identity"], + {"start": -9223372036854775808, "increment": 1}, + ) eq_(type(col["identity"]["start"]), util.compat.long_type) eq_(type(col["identity"]["increment"]), util.compat.long_type) elif col["name"] == "id4": diff --git a/test/dialect/mssql/test_types.py b/test/dialect/mssql/test_types.py index 7e238dd8196..6004bc295b1 100644 --- a/test/dialect/mssql/test_types.py +++ b/test/dialect/mssql/test_types.py @@ -50,6 +50,7 @@ from sqlalchemy.testing import 
emits_warning_on from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not @@ -531,6 +532,12 @@ def test_binary(self): (mssql.MSVarBinary, [10], {}, "VARBINARY(10)"), (types.VARBINARY, [10], {}, "VARBINARY(10)"), (types.VARBINARY, [], {}, "VARBINARY(max)"), + ( + mssql.MSVarBinary, + [], + {"filestream": True}, + "VARBINARY(max) FILESTREAM", + ), (mssql.MSImage, [], {}, "IMAGE"), (mssql.IMAGE, [], {}, "IMAGE"), (types.LargeBinary, [], {}, "IMAGE"), @@ -554,6 +561,17 @@ def test_binary(self): ) self.assert_(repr(col)) + def test_VARBINARY_init(self): + d = mssql.dialect() + t = mssql.MSVarBinary(length=None, filestream=True) + eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM") + t = mssql.MSVarBinary(length="max", filestream=True) + eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM") + with expect_raises_message( + ValueError, "length must be None or 'max' when setting filestream" + ): + mssql.MSVarBinary(length=1000, filestream=True) + class TypeRoundTripTest( fixtures.TestBase, AssertsExecutionResults, ComparesTables @@ -1012,6 +1030,15 @@ def test_binary_reflection(self, metadata, deprecate_large_types): ), ] + if testing.requires.mssql_filestream.enabled: + columns.append( + ( + mssql.MSVarBinary, + [], + {"filestream": True}, + "VARBINARY(max) FILESTREAM", + ) + ) engine = engines.testing_engine( options={"deprecate_large_types": deprecate_large_types} ) @@ -1254,6 +1281,15 @@ class BinaryTest(fixtures.TestBase): None, False, ), + ( + mssql.VARBINARY(filestream=True), + "binary_data_one.dat", + None, + True, + None, + False, + testing.requires.mssql_filestream, + ), ( sqltypes.LargeBinary, "binary_data_one.dat", diff --git a/test/dialect/mysql/test_compiler.py b/test/dialect/mysql/test_compiler.py index 708039f943e..bb16099cd82 100644 --- a/test/dialect/mysql/test_compiler.py +++ b/test/dialect/mysql/test_compiler.py @@ -1110,12 +1110,12 @@ def test_update_sql_expr(self): ) stmt = stmt.on_duplicate_key_update( bar=func.coalesce(stmt.inserted.bar), - baz=stmt.inserted.baz + "some literal", + baz=stmt.inserted.baz + "some literal" + stmt.inserted.bar, ) expected_sql = ( "INSERT INTO foos (id, bar) VALUES (%s, %s), (%s, %s) ON " "DUPLICATE KEY UPDATE bar = coalesce(VALUES(bar)), " - "baz = (concat(VALUES(baz), %s))" + "baz = (concat(concat(VALUES(baz), %s), VALUES(bar)))" ) self.assert_compile( stmt, @@ -1210,18 +1210,25 @@ def test_regexp_replace_string(self): class RegexpTestMySql(fixtures.TestBase, RegexpCommon): __dialect__ = "mysql" + def test_regexp_match_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "REGEXP_LIKE(mytable.myid, %s, 'i''g')", + checkpositional=("pattern",), + ) + def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "REGEXP_LIKE(mytable.myid, %s, %s)", - checkpositional=("pattern", "ig"), + "REGEXP_LIKE(mytable.myid, %s, 'ig')", + checkpositional=("pattern",), ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "NOT REGEXP_LIKE(mytable.myid, %s, %s)", - checkpositional=("pattern", "ig"), + "NOT REGEXP_LIKE(mytable.myid, %s, 'ig')", + checkpositional=("pattern",), ) def test_regexp_replace_flags(self): @@ -1229,26 +1236,42 @@ def test_regexp_replace_flags(self): 
self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, %s, %s, %s)", - checkpositional=("pattern", "replacement", "ig"), + "REGEXP_REPLACE(mytable.myid, %s, %s, 'ig')", + checkpositional=("pattern", "replacement"), + ) + + def test_regexp_replace_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_replace( + "pattern", "replacement", flags="i'g" + ), + "REGEXP_REPLACE(mytable.myid, %s, %s, 'i''g')", + checkpositional=("pattern", "replacement"), ) class RegexpTestMariaDb(fixtures.TestBase, RegexpCommon): __dialect__ = "mariadb" + def test_regexp_match_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "mytable.myid REGEXP CONCAT('(?', 'i''g', ')', %s)", + checkpositional=("pattern",), + ) + def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid REGEXP CONCAT('(?', %s, ')', %s)", - checkpositional=("ig", "pattern"), + "mytable.myid REGEXP CONCAT('(?', 'ig', ')', %s)", + checkpositional=("pattern",), ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid NOT REGEXP CONCAT('(?', %s, ')', %s)", - checkpositional=("ig", "pattern"), + "mytable.myid NOT REGEXP CONCAT('(?', 'ig', ')', %s)", + checkpositional=("pattern",), ) def test_regexp_replace_flags(self): @@ -1256,8 +1279,8 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, CONCAT('(?', %s, ')', %s), %s)", - checkpositional=("ig", "pattern", "replacement"), + "REGEXP_REPLACE(mytable.myid, CONCAT('(?', 'ig', ')', %s), %s)", + checkpositional=("pattern", "replacement"), ) diff --git a/test/dialect/mysql/test_dialect.py b/test/dialect/mysql/test_dialect.py index f314bd0af55..6f60a215004 100644 --- a/test/dialect/mysql/test_dialect.py +++ b/test/dialect/mysql/test_dialect.py @@ -5,6 +5,7 @@ from sqlalchemy import bindparam from sqlalchemy import Column from sqlalchemy import DateTime +from sqlalchemy import event from sqlalchemy import exc from sqlalchemy import func from sqlalchemy import Integer @@ -33,6 +34,35 @@ class BackendDialectTest( __backend__ = True __only_on__ = "mysql", "mariadb" + @testing.fixture + def mysql_version_dialect(self, testing_engine): + """yield a MySQL engine that will simulate a specific version. 
+ + patches out various methods to not fail + + """ + engine = testing_engine() + _server_version = [None] + with mock.patch.object( + engine.dialect, + "_get_server_version_info", + lambda conn: engine.dialect._parse_server_version( + _server_version[0] + ), + ), mock.patch.object( + engine.dialect, "_set_mariadb", lambda *arg: None + ), mock.patch.object( + engine.dialect, + "get_isolation_level", + lambda *arg: "REPEATABLE READ", + ): + + def go(server_version): + _server_version[0] = server_version + return engine + + yield go + def test_reserved_words_mysql_vs_mariadb( self, mysql_mariadb_reserved_words ): @@ -55,12 +85,11 @@ def test_reserved_words_mysql_vs_mariadb( ) def test_no_show_variables(self): - from sqlalchemy.testing import mock engine = engines.testing_engine() def my_execute(self, statement, *args, **kw): - if statement.startswith("SHOW VARIABLES"): + if statement.startswith("SELECT @@"): statement = "SELECT 1 FROM DUAL WHERE 1=0" return real_exec(self, statement, *args, **kw) @@ -75,7 +104,6 @@ def my_execute(self, statement, *args, **kw): engine.connect() def test_no_default_isolation_level(self): - from sqlalchemy.testing import mock engine = engines.testing_engine() @@ -100,6 +128,43 @@ def fake_isolation_level(connection): ): engine.connect() + @testing.combinations( + "10.5.12-MariaDB", "5.6.49", "5.0.2", argnames="server_version" + ) + def test_variable_fetch(self, mysql_version_dialect, server_version): + """test #7518""" + engine = mysql_version_dialect(server_version) + + fetches = [] + + # the initialize() connection does not seem to use engine-level events. + # not changing that here + + @event.listens_for(engine, "do_execute_no_params") + @event.listens_for(engine, "do_execute") + def do_execute_no_params(cursor, statement, *arg): + if statement.startswith("SHOW VARIABLES") or statement.startswith( + "SELECT @@" + ): + fetches.append(statement) + return None + + engine.connect() + + if server_version == "5.0.2": + eq_( + fetches, + [ + "SHOW VARIABLES LIKE 'sql_mode'", + "SHOW VARIABLES LIKE 'lower_case_table_names'", + ], + ) + else: + eq_( + fetches, + ["SELECT @@sql_mode", "SELECT @@lower_case_table_names"], + ) + def test_autocommit_isolation_level(self): c = testing.db.connect().execution_options( isolation_level="AUTOCOMMIT" @@ -134,6 +199,10 @@ class DialectTest(fixtures.TestBase): (2006, "foo", "OperationalError", "pymysql", True), (2007, "foo", "OperationalError", "mysqldb", False), (2007, "foo", "OperationalError", "pymysql", False), + (4031, "foo", "OperationalError", "mysqldb", True), + (4031, "foo", "OperationalError", "pymysql", True), + (4032, "foo", "OperationalError", "mysqldb", False), + (4032, "foo", "OperationalError", "pymysql", False), ) def test_is_disconnect( self, arg0, message, exc_cls_name, dialect_name, is_disconnect diff --git a/test/dialect/mysql/test_on_duplicate.py b/test/dialect/mysql/test_on_duplicate.py index 65d5b8364e7..5a4e6ca8d5c 100644 --- a/test/dialect/mysql/test_on_duplicate.py +++ b/test/dialect/mysql/test_on_duplicate.py @@ -100,13 +100,19 @@ def test_on_duplicate_key_update_expression_multirow(self, connection): conn.execute(insert(foos).values(dict(id=1, bar="b", baz="bz"))) stmt = insert(foos).values([dict(id=1, bar="ab"), dict(id=2, bar="b")]) stmt = stmt.on_duplicate_key_update( - bar=func.concat(stmt.inserted.bar, "_foo") + bar=func.concat(stmt.inserted.bar, "_foo"), + baz=func.concat(stmt.inserted.bar, "_", foos.c.baz), ) result = conn.execute(stmt) eq_(result.inserted_primary_key, (None,)) eq_( - 
conn.execute(foos.select().where(foos.c.id == 1)).fetchall(), - [(1, "ab_foo", "bz", False)], + conn.execute(foos.select()).fetchall(), + [ + # first entry triggers ON DUPLICATE + (1, "ab_foo", "ab_bz", False), + # second entry must be an insert + (2, "b", None, False), + ], ) def test_on_duplicate_key_update_preserve_order(self, connection): diff --git a/test/dialect/mysql/test_reflection.py b/test/dialect/mysql/test_reflection.py index 60d7e3a5dd3..a297145abe7 100644 --- a/test/dialect/mysql/test_reflection.py +++ b/test/dialect/mysql/test_reflection.py @@ -370,7 +370,10 @@ def test_reflection_with_table_options(self, metadata, connection): assert reflected.comment == comment assert reflected.kwargs["mysql_comment"] == comment - assert reflected.kwargs["mysql_default charset"] == "utf8" + assert reflected.kwargs["mysql_default charset"] in ( + "utf8", + "utf8mb3", + ) assert reflected.kwargs["mysql_avg_row_length"] == "3" assert reflected.kwargs["mysql_connection"] == "fish" @@ -756,6 +759,93 @@ def test_reflect_fulltext(self, metadata, connection): "CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)", ) + def test_reflect_index_col_length(self, metadata, connection): + """test for #9047""" + + tt = Table( + "test_table", + metadata, + Column("signal_type", Integer(), nullable=False), + Column("signal_data", String(200), nullable=False), + Column("signal_data_2", String(200), nullable=False), + Index( + "ix_1", + "signal_type", + "signal_data", + mysql_length={"signal_data": 25}, + mariadb_length={"signal_data": 25}, + ), + ) + Index( + "ix_2", + tt.c.signal_type, + tt.c.signal_data, + tt.c.signal_data_2, + mysql_length={"signal_data": 25, "signal_data_2": 10}, + mariadb_length={"signal_data": 25, "signal_data_2": 10}, + ) + + mysql_length = ( + "mysql_length" + if not connection.dialect.is_mariadb + else "mariadb_length" + ) + eq_( + {idx.name: idx.kwargs[mysql_length] for idx in tt.indexes}, + { + "ix_1": {"signal_data": 25}, + "ix_2": {"signal_data": 25, "signal_data_2": 10}, + }, + ) + + metadata.create_all(connection) + + eq_( + sorted( + inspect(connection).get_indexes("test_table"), + key=lambda rec: rec["name"], + ), + [ + { + "name": "ix_1", + "column_names": ["signal_type", "signal_data"], + "unique": False, + "dialect_options": {mysql_length: {"signal_data": 25}}, + }, + { + "name": "ix_2", + "column_names": [ + "signal_type", + "signal_data", + "signal_data_2", + ], + "unique": False, + "dialect_options": { + mysql_length: { + "signal_data": 25, + "signal_data_2": 10, + } + }, + }, + ], + ) + + new_metadata = MetaData() + reflected_table = Table( + "test_table", new_metadata, autoload_with=connection + ) + + eq_( + { + idx.name: idx.kwargs[mysql_length] + for idx in reflected_table.indexes + }, + { + "ix_1": {"signal_data": 25}, + "ix_2": {"signal_data": 25, "signal_data_2": 10}, + }, + ) + @testing.requires.mysql_ngram_fulltext def test_reflect_fulltext_comment( self, @@ -1119,8 +1209,6 @@ def test_case_sensitive_reflection_dual_case_references( class RawReflectionTest(fixtures.TestBase): - __backend__ = True - def setup_test(self): dialect = mysql.dialect() self.parser = _reflection.MySQLTableDefinitionParser( @@ -1246,3 +1334,18 @@ def test_fk_reflection(self): "SET NULL", ), ) + + @testing.combinations( + ( + "CREATE ALGORITHM=UNDEFINED DEFINER=`scott`@`%` " + "SQL SECURITY DEFINER VIEW `v1` AS SELECT", + True, + ), + ("CREATE VIEW `v1` AS SELECT", True), + ("CREATE TABLE `v1`", False), + ("CREATE TABLE `VIEW`", False), + ("CREATE TABLE `VIEW_THINGS`", False), + 
("CREATE TABLE `A VIEW`", False), + ) + def test_is_view(self, sql, expected): + is_(self.parser._check_view(sql), expected) diff --git a/test/dialect/mysql/test_types.py b/test/dialect/mysql/test_types.py index 7bdf6f8ceb7..358b814b92f 100644 --- a/test/dialect/mysql/test_types.py +++ b/test/dialect/mysql/test_types.py @@ -474,6 +474,8 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults): # fixed in mysql-connector as of 2.0.1, # see https://bugs.mysql.com/bug.php?id=73266 + + @testing.requires.literal_float_coercion def test_precision_float_roundtrip(self, metadata, connection): t = Table( "t", @@ -513,8 +515,8 @@ def test_charset_collate_table(self, metadata, connection): ) t.create(connection) t2 = Table("foo", MetaData(), autoload_with=connection) - eq_(t2.kwargs["mysql_collate"], "utf8_bin") - eq_(t2.kwargs["mysql_default charset"], "utf8") + assert t2.kwargs["mysql_collate"] in ("utf8_bin", "utf8mb3_bin") + assert t2.kwargs["mysql_default charset"] in ("utf8", "utf8mb3") # test [ticket:2906] # in order to test the condition here, need to use @@ -1312,6 +1314,24 @@ def test_broken_enum_returns_blanks(self, metadata, connection): [("", ""), ("", ""), ("two", "two"), (None, None)], ) + @testing.combinations( + ( + [""], + {"retrieve_as_bitwise": True}, + "SET('', retrieve_as_bitwise=True)", + ), + (["a"], {}, "SET('a')"), + (["a", "b", "c"], {}, "SET('a', 'b', 'c')"), + ( + ["a", "b", "c"], + {"collation": "utf8_bin"}, + "SET('a', 'b', 'c', collation='utf8_bin')", + ), + argnames="value,kw,expected", + ) + def test_set_repr(self, value, kw, expected): + eq_(repr(mysql.SET(*value, **kw)), expected) + def colspec(c): return testing.db.dialect.ddl_compiler( diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 08158eed470..737c11f4680 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -8,6 +8,7 @@ from sqlalchemy import func from sqlalchemy import Identity from sqlalchemy import Index +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import literal from sqlalchemy import literal_column @@ -39,6 +40,7 @@ from sqlalchemy.testing.assertions import eq_ignore_whitespace from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.types import TypeEngine class CompileTest(fixtures.TestBase, AssertsCompiledSQL): @@ -101,7 +103,7 @@ def test_bindparam_quote(self): def test_bindparam_quote_works_on_expanding(self): self.assert_compile( bindparam("uid", expanding=True), - "([POSTCOMPILE_uid])", + "(__[POSTCOMPILE_uid])", dialect=cx_oracle.dialect(), ) @@ -164,9 +166,9 @@ def test_limit_one(self): "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " "WHERE ora_rn > " - "[POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, ) @@ -203,14 +205,14 @@ def test_limit_one_firstrows(self): self.assert_compile( s, "SELECT anon_1.col1, anon_1.col2 FROM " - "(SELECT /*+ FIRST_ROWS([POSTCOMPILE_param_1]) */ " + "(SELECT /*+ FIRST_ROWS(__[POSTCOMPILE_param_1]) */ " "anon_2.col1 AS col1, " "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) 
anon_1 " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " "WHERE ora_rn > " - "[POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, dialect=oracle.OracleDialect(optimize_limits=True), ) @@ -229,9 +231,10 @@ def test_limit_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_3 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_2 " - "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1", + "WHERE ora_rn > __[POSTCOMPILE_param_2]) anon_1", checkparams={"param_1": 10, "param_2": 20}, ) @@ -244,9 +247,10 @@ def test_limit_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_3 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_2 " - "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1", + "WHERE ora_rn > __[POSTCOMPILE_param_2]) anon_1", ) c = s2.compile(dialect=oracle.OracleDialect()) eq_(len(c._result_columns), 2) @@ -264,8 +268,8 @@ def test_limit_three(self): "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " "sometable.col2) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, ) c = s.compile(dialect=oracle.OracleDialect()) @@ -281,7 +285,7 @@ def test_limit_four(self): "SELECT anon_1.col1, anon_1.col2 FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " - "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "sometable.col2) anon_1 WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE", checkparams={"param_1": 10}, ) @@ -292,11 +296,11 @@ def test_limit_four_firstrows(self): s = select(t).with_for_update().limit(10).order_by(t.c.col2) self.assert_compile( s, - "SELECT /*+ FIRST_ROWS([POSTCOMPILE_param_1]) */ " + "SELECT /*+ FIRST_ROWS(__[POSTCOMPILE_param_1]) */ " "anon_1.col1, anon_1.col2 FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " - "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "sometable.col2) anon_1 WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE", checkparams={"param_1": 10}, dialect=oracle.OracleDialect(optimize_limits=True), @@ -314,8 +318,8 @@ def test_limit_five(self): "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " "sometable.col2) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] FOR " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2] FOR " "UPDATE", checkparams={"param_1": 10, "param_2": 20}, ) @@ -335,7 +339,7 @@ def test_limit_six(self): "col1, anon_2.col2 AS col2, ROWNUM AS ora_rn FROM " "(SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable ORDER BY sometable.col2) anon_2 WHERE " - "ROWNUM <= [POSTCOMPILE_param_1] + :param_2 + :param_3) anon_1 " + "ROWNUM <= __[POSTCOMPILE_param_1] + :param_2 + :param_3) anon_1 " "WHERE ora_rn > :param_2 + :param_3", checkparams={"param_1": 10, "param_2": 10, "param_3": 20}, ) @@ -357,7 +361,7 @@ def 
test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True)) @@ -369,7 +373,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label("SUM(ABC)_") @@ -381,7 +385,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)_" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)_" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True)) @@ -393,7 +397,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)_" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)_" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) def test_for_update(self): @@ -511,7 +515,7 @@ def test_for_update_of_w_limit_adaption_col_present(self): "SELECT anon_1.myid, anon_1.name FROM " "(SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "myid_1": 7}, ) @@ -527,7 +531,7 @@ def test_for_update_of_w_limit_adaption_col_unpresent(self): "SELECT anon_1.myid FROM " "(SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE OF anon_1.name NOWAIT", ) @@ -545,9 +549,10 @@ def test_for_update_of_w_limit_offset_adaption_col_present(self): "ROWNUM AS ora_rn " "FROM (SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -566,8 +571,9 @@ def test_for_update_of_w_limit_offset_adaption_col_unpresent(self): "FROM (SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_2 " "WHERE " - "ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] " + "ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -587,9 +593,10 @@ def test_for_update_of_w_limit_offset_adaption_partial_col_unpresent(self): "mytable.bar AS bar, " "mytable.foo AS foo FROM mytable " "WHERE mytable.myid = :myid_1) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.foo, anon_1.bar NOWAIT", 
checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -617,7 +624,7 @@ def test_use_binds_for_limits_disabled_one(self): "SELECT anon_1.col1, anon_1.col2 FROM " "(SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -635,7 +642,7 @@ def test_use_binds_for_limits_disabled_two(self): "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_1]", + "WHERE ora_rn > __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -653,9 +660,9 @@ def test_use_binds_for_limits_disabled_three(self): "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + " - "[POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", dialect=dialect, ) @@ -672,7 +679,7 @@ def test_use_binds_for_limits_enabled_one(self): "SELECT anon_1.col1, anon_1.col2 FROM " "(SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_1 WHERE ROWNUM " - "<= [POSTCOMPILE_param_1]", + "<= __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -691,7 +698,7 @@ def test_use_binds_for_limits_enabled_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_1]", + "WHERE ora_rn > __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -710,9 +717,9 @@ def test_use_binds_for_limits_enabled_three(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + " - "[POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", dialect=dialect, checkparams={"param_1": 10, "param_2": 10}, ) @@ -914,9 +921,10 @@ def test_outer_join_five(self): "thirdtable.userid(+) = " "myothertable.otherid AND mytable.myid = " "myothertable.otherid ORDER BY mytable.name) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ora_rn > __[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 5}, dialect=oracle.dialect(use_ansi=False), ) @@ -1144,6 +1152,35 @@ def test_returning_insert_labeled(self): "t1.c2, t1.c3 INTO :ret_0, :ret_1", ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) 
VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) " + "INTO :ret_0, :ret_1", + ) + def test_returning_insert_computed(self): m = MetaData() t1 = Table( @@ -1336,7 +1373,7 @@ def test_column_identity(self): schema.CreateTable(t), "CREATE TABLE t (y INTEGER GENERATED ALWAYS AS IDENTITY " "(INCREMENT BY 7 START WITH 4 NOMINVALUE NOMAXVALUE " - "NOORDER NOCYCLE))", + "NOCYCLE NOORDER))", ) def test_column_identity_no_generated(self): @@ -1436,14 +1473,14 @@ def test_regexp_match_str(self): def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "REGEXP_LIKE(mytable.myid, :myid_1, :myid_2)", - checkparams={"myid_1": "pattern", "myid_2": "ig"}, + "REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", + checkparams={"myid_1": "pattern"}, ) - def test_regexp_match_flags_col(self): + def test_regexp_match_flags_safestring(self): self.assert_compile( - self.table.c.myid.regexp_match("pattern", flags=self.table.c.name), - "REGEXP_LIKE(mytable.myid, :myid_1, mytable.name)", + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "REGEXP_LIKE(mytable.myid, :myid_1, 'i''g')", checkparams={"myid_1": "pattern"}, ) @@ -1468,20 +1505,11 @@ def test_not_regexp_match_str(self): checkparams={"param_1": "string"}, ) - def test_not_regexp_match_flags_col(self): - self.assert_compile( - ~self.table.c.myid.regexp_match( - "pattern", flags=self.table.c.name - ), - "NOT REGEXP_LIKE(mytable.myid, :myid_1, mytable.name)", - checkparams={"myid_1": "pattern"}, - ) - def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "NOT REGEXP_LIKE(mytable.myid, :myid_1, :myid_2)", - checkparams={"myid_1": "pattern", "myid_2": "ig"}, + "NOT REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", + checkparams={"myid_1": "pattern"}, ) def test_regexp_replace(self): @@ -1517,21 +1545,23 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_3, :myid_2)", + "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, 'ig')", checkparams={ "myid_1": "pattern", - "myid_3": "replacement", - "myid_2": "ig", + "myid_2": "replacement", }, ) - def test_regexp_replace_flags_col(self): + def test_regexp_replace_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_replace( - "pattern", "replacement", flags=self.table.c.name + "pattern", "replacement", flags="i'g" ), - "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, mytable.name)", - checkparams={"myid_1": "pattern", "myid_2": "replacement"}, + "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, 'i''g')", + checkparams={ + "myid_1": "pattern", + "myid_2": "replacement", + }, ) @@ -1543,19 +1573,41 @@ def test_scalar_alias_column(self): stmt = select(fn.alias().column) self.assert_compile( stmt, - "SELECT COLUMN_VALUE anon_1 " + "SELECT anon_1.COLUMN_VALUE " "FROM TABLE (scalar_strings(:scalar_strings_1)) anon_1", ) + def test_scalar_alias_multi_columns(self): + fn1 = func.scalar_strings(5) + fn2 = func.scalar_strings(3) + stmt = select(fn1.alias().column, fn2.alias().column) + self.assert_compile( + stmt, + "SELECT anon_1.COLUMN_VALUE, anon_2.COLUMN_VALUE FROM TABLE " + "(scalar_strings(:scalar_strings_1)) anon_1, " + "TABLE (scalar_strings(:scalar_strings_2)) anon_2", + ) + def test_column_valued(self): fn = func.scalar_strings(5) stmt = select(fn.column_valued()) self.assert_compile( stmt, - "SELECT COLUMN_VALUE anon_1 " + "SELECT anon_1.COLUMN_VALUE " "FROM 
TABLE (scalar_strings(:scalar_strings_1)) anon_1", ) + def test_multi_column_valued(self): + fn1 = func.scalar_strings(5) + fn2 = func.scalar_strings(3) + stmt = select(fn1.column_valued(), fn2.column_valued().label("x")) + self.assert_compile( + stmt, + "SELECT anon_1.COLUMN_VALUE, anon_2.COLUMN_VALUE AS x FROM " + "TABLE (scalar_strings(:scalar_strings_1)) anon_1, " + "TABLE (scalar_strings(:scalar_strings_2)) anon_2", + ) + def test_table_valued(self): fn = func.three_pairs().table_valued("string1", "string2") stmt = select(fn.c.string1, fn.c.string2) diff --git a/test/dialect/oracle/test_dialect.py b/test/dialect/oracle/test_dialect.py index 554e5f18b4b..8a388889321 100644 --- a/test/dialect/oracle/test_dialect.py +++ b/test/dialect/oracle/test_dialect.py @@ -1,10 +1,12 @@ # coding: utf-8 +import itertools import re from sqlalchemy import bindparam from sqlalchemy import Computed from sqlalchemy import create_engine +from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func @@ -32,6 +34,7 @@ from sqlalchemy.testing import mock from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import pep435_enum from sqlalchemy.testing.schema import Table from sqlalchemy.testing.suite import test_select from sqlalchemy.util import u @@ -58,7 +61,7 @@ def test_minimum_version(self): exc.InvalidRequestError, "cx_Oracle version 5.2 and above are supported", cx_oracle.OracleDialect_cx_oracle, - dbapi=Mock(), + dbapi=mock.Mock(), ) with mock.patch( @@ -66,13 +69,61 @@ def test_minimum_version(self): "_parse_cx_oracle_ver", lambda self, vers: (5, 3, 1), ): - cx_oracle.OracleDialect_cx_oracle(dbapi=Mock()) + cx_oracle.OracleDialect_cx_oracle(dbapi=mock.Mock()) class DialectWBackendTest(fixtures.TestBase): __backend__ = True __only_on__ = "oracle" + @testing.combinations( + ( + "db is not connected", + None, + True, + ), + ( + "ORA-1234 fake error", + 1234, + False, + ), + ( + "ORA-03114: not connected to ORACLE", + 3114, + True, + ), + ( + "DPI-1010: not connected", + None, + True, + ), + ( + "DPI-1010: make sure we read the code", + None, + True, + ), + ( + "DPI-1080: connection was closed by ORA-3113", + None, + True, + ), + ( + "DPI-1234: some other DPI error", + None, + False, + ), + ) + @testing.only_on("oracle+cx_oracle") + def test_is_disconnect(self, message, code, expected): + + dialect = testing.db.dialect + + exception_obj = dialect.dbapi.InterfaceError() + exception_obj.args = (Exception(message),) + exception_obj.args[0].code = code + + eq_(dialect.is_disconnect(exception_obj, None, None), expected) + def test_hypothetical_not_implemented_isolation_level(self): engine = engines.testing_engine() @@ -516,6 +567,23 @@ def test_numeric_bind_round_trip(self, connection): 4, ) + def test_param_w_processors(self, metadata, connection): + """test #8053""" + + SomeEnum = pep435_enum("SomeEnum") + one = SomeEnum("one", 1) + SomeEnum("two", 2) + + t = Table( + "t", + metadata, + Column("_id", Integer, primary_key=True), + Column("_data", Enum(SomeEnum)), + ) + t.create(connection) + connection.execute(t.insert(), {"_id": 1, "_data": one}) + eq_(connection.scalar(select(t.c._data)), one) + def test_numeric_bind_in_crud(self, metadata, connection): t = Table("asfd", metadata, Column("100K", Integer)) t.create(connection) @@ -532,6 +600,35 @@ def test_expanding_quote_roundtrip(self, metadata, connection): dict(uid=[1, 2, 3]), ) + @testing.combinations(True, False, argnames="executemany") + 
def test_python_side_default(self, metadata, connection, executemany): + """test #7676""" + + ids = ["a", "b", "c"] + + def gen_id(): + return ids.pop(0) + + t = Table( + "has_id", + metadata, + Column("_id", String(50), default=gen_id, primary_key=True), + Column("_data", Integer), + ) + metadata.create_all(connection) + + if executemany: + result = connection.execute( + t.insert(), [{"_data": 27}, {"_data": 28}, {"_data": 29}] + ) + eq_( + connection.execute(t.select().order_by(t.c._id)).all(), + [("a", 27), ("b", 28), ("c", 29)], + ) + else: + result = connection.execute(t.insert(), {"_data": 27}) + eq_(result.inserted_primary_key, ("a",)) + class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL): def _dialect(self, server_version, **kw): @@ -962,7 +1059,7 @@ def scalar_strings(self, connection): connection.exec_driver_sql( r""" CREATE OR REPLACE FUNCTION scalar_strings ( - count_in IN INTEGER) + count_in IN INTEGER, string_in IN VARCHAR2) RETURN strings_t AUTHID DEFINER IS @@ -972,7 +1069,7 @@ def scalar_strings(self, connection): FOR indx IN 1 .. count_in LOOP - l_strings (indx) := 'some string'; + l_strings (indx) := string_in; END LOOP; RETURN l_strings; @@ -1022,7 +1119,8 @@ def two_strings(self, connection): def test_scalar_strings_control(self, scalar_strings, connection): result = ( connection.exec_driver_sql( - "SELECT COLUMN_VALUE my_string FROM TABLE (scalar_strings (5))" + "SELECT COLUMN_VALUE my_string FROM TABLE " + "(scalar_strings (5, 'some string'))" ) .scalars() .all() @@ -1033,7 +1131,7 @@ def test_scalar_strings_named_control(self, scalar_strings, connection): result = ( connection.exec_driver_sql( "SELECT COLUMN_VALUE anon_1 " - "FROM TABLE (scalar_strings (5)) anon_1" + "FROM TABLE (scalar_strings (5, 'some string')) anon_1" ) .scalars() .all() @@ -1041,7 +1139,7 @@ def test_scalar_strings_named_control(self, scalar_strings, connection): eq_(result, ["some string"] * 5) def test_scalar_strings(self, scalar_strings, connection): - fn = func.scalar_strings(5) + fn = func.scalar_strings(5, "some string") result = connection.execute(select(fn.column_valued())).scalars().all() eq_(result, ["some string"] * 5) @@ -1056,6 +1154,15 @@ def test_two_strings(self, two_strings, connection): result = connection.execute(select(fn.c.string1, fn.c.string2)).all() eq_(result, [("a", "b"), ("c", "d"), ("e", "f")]) + def test_two_independent_tables(self, scalar_strings, connection): + fn1 = func.scalar_strings(5, "string one").column_valued() + fn2 = func.scalar_strings(3, "string two").column_valued() + result = connection.execute(select(fn1, fn2).where(fn1 != fn2)).all() + eq_( + result, + list(itertools.product(["string one"] * 5, ["string two"] * 3)), + ) + class OptimizedFetchLimitOffsetTest(test_select.FetchLimitOffsetTest): __only_on__ = "oracle" diff --git a/test/dialect/oracle/test_reflection.py b/test/dialect/oracle/test_reflection.py index acf7d75d549..836edc0e927 100644 --- a/test/dialect/oracle/test_reflection.py +++ b/test/dialect/oracle/test_reflection.py @@ -15,15 +15,24 @@ from sqlalchemy import Numeric from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import Unicode from sqlalchemy import UniqueConstraint +from sqlalchemy.dialects.oracle import NVARCHAR2 +from sqlalchemy.dialects.oracle import VARCHAR2 from sqlalchemy.dialects.oracle.base import BINARY_DOUBLE from sqlalchemy.dialects.oracle.base import BINARY_FLOAT from 
sqlalchemy.dialects.oracle.base import DOUBLE_PRECISION from sqlalchemy.dialects.oracle.base import NUMBER -from sqlalchemy.testing import assert_raises +from sqlalchemy.dialects.oracle.base import RAW +from sqlalchemy.dialects.oracle.base import ROWID +from sqlalchemy.sql.sqltypes import CHAR +from sqlalchemy.sql.sqltypes import NCHAR +from sqlalchemy.sql.sqltypes import NVARCHAR +from sqlalchemy.sql.sqltypes import VARCHAR +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -352,7 +361,7 @@ def test_oracle_has_no_on_update_cascade(self, connection): "foo_id", Integer, ForeignKey("foo.id", onupdate="CASCADE") ), ) - assert_raises(exc.SAWarning, bar.create, connection) + assert_warns(exc.SAWarning, bar.create, connection) bat = Table( "bat", @@ -361,7 +370,7 @@ def test_oracle_has_no_on_update_cascade(self, connection): Column("foo_id", Integer), ForeignKeyConstraint(["foo_id"], ["foo.id"], onupdate="CASCADE"), ) - assert_raises(exc.SAWarning, bat.create, connection) + assert_warns(exc.SAWarning, bat.create, connection) def test_reflect_check_include_all(self, connection): insp = inspect(connection) @@ -817,6 +826,32 @@ def test_float_types( ] self._run_test(metadata, connection, specs, ["precision"]) + def test_string_types( + self, + metadata, + connection, + ): + specs = [ + (String(125), VARCHAR(125)), + (String(42).with_variant(VARCHAR2(42), "oracle"), VARCHAR(42)), + (Unicode(125), VARCHAR(125)), + (Unicode(42).with_variant(NVARCHAR2(42), "oracle"), NVARCHAR(42)), + (CHAR(125), CHAR(125)), + (NCHAR(42), NCHAR(42)), + ] + self._run_test(metadata, connection, specs, ["length"]) + + @testing.combinations(ROWID(), RAW(1), argnames="type_") + def test_misc_types(self, metadata, connection, type_): + t = Table("t1", metadata, Column("x", type_)) + + t.create(connection) + + eq_( + inspect(connection).get_columns("t1")[0]["type"]._type_affinity, + type_._type_affinity, + ) + class IdentityReflectionTest(fixtures.TablesTest): __only_on__ = "oracle" diff --git a/test/dialect/oracle/test_types.py b/test/dialect/oracle/test_types.py index 70b00c06f20..842defb4bab 100644 --- a/test/dialect/oracle/test_types.py +++ b/test/dialect/oracle/test_types.py @@ -1103,62 +1103,67 @@ def _creator(): def teardown_test(self): self.engine.dispose() - def test_were_getting_a_comma(self): - connection = self.engine.pool._creator() - cursor = connection.cursor() - try: - cx_Oracle = self.engine.dialect.dbapi + def test_detection(self): + # revised as of #8744 + with self.engine.connect() as conn: + connection = conn.connection - def output_type_handler( - cursor, name, defaultType, size, precision, scale - ): - return cursor.var( - cx_Oracle.STRING, 255, arraysize=cursor.arraysize - ) + with connection.cursor() as cursor: + cx_Oracle = self.engine.dialect.dbapi - cursor.outputtypehandler = output_type_handler - cursor.execute("SELECT 1.1 FROM DUAL") - row = cursor.fetchone() - eq_(row[0], "1,1") - finally: - cursor.close() - connection.close() + def output_type_handler( + cursor, name, defaultType, size, precision, scale + ): + return cursor.var( + cx_Oracle.STRING, 255, arraysize=cursor.arraysize + ) - def test_output_type_handler(self): - with self.engine.connect() as conn: - for stmt, exp, kw in [ - ("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}), - ("SELECT CAST(15 AS INTEGER) FROM DUAL", 15, {}), - ( - "SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", - decimal.Decimal("15"), - {}, - ), 
- ( - "SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", - decimal.Decimal("0.1"), - {}, - ), - ( - "SELECT :num FROM DUAL", - decimal.Decimal("2.5"), - {"num": decimal.Decimal("2.5")}, - ), - ( - text( - "SELECT CAST(28.532 AS NUMERIC(5, 3)) " - "AS val FROM DUAL" - ).columns(val=Numeric(5, 3, asdecimal=True)), - decimal.Decimal("28.532"), - {}, - ), - ]: - if isinstance(stmt, util.string_types): - test_exp = conn.exec_driver_sql(stmt, kw).scalar() + cursor.outputtypehandler = output_type_handler + cursor.execute("SELECT 1.1 FROM DUAL") + row = cursor.fetchone() + decimal_char = row[0][1] + + if testing.against("+cx_oracle"): + eq_(decimal_char, ",") else: - test_exp = conn.scalar(stmt, **kw) - eq_(test_exp, exp) - assert type(test_exp) is type(exp) + assert decimal_char in ",." + + eq_(conn.dialect._decimal_char, decimal_char) + + @testing.combinations( + ("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}), + ("SELECT CAST(15 AS INTEGER) FROM DUAL", 15, {}), + ( + "SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", + decimal.Decimal("15"), + {}, + ), + ( + "SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", + decimal.Decimal("0.1"), + {}, + ), + ( + "SELECT :num FROM DUAL", + decimal.Decimal("2.5"), + {"num": decimal.Decimal("2.5")}, + ), + ( + text( + "SELECT CAST(28.532 AS NUMERIC(5, 3)) " "AS val FROM DUAL" + ).columns(val=Numeric(5, 3, asdecimal=True)), + decimal.Decimal("28.532"), + {}, + ), + ) + def test_output_type_handler(self, stmt, expected, kw): + with self.engine.connect() as conn: + if isinstance(stmt, str): + test_exp = conn.exec_driver_sql(stmt, kw).scalar() + else: + test_exp = conn.scalar(stmt, **kw) + eq_(test_exp, expected) + assert type(test_exp) is type(expected) class SetInputSizesTest(fixtures.TestBase): diff --git a/test/dialect/postgresql/test_async_pg_py3k.py b/test/dialect/postgresql/test_async_pg_py3k.py index 62c8f5dde98..a7a8af1576c 100644 --- a/test/dialect/postgresql/test_async_pg_py3k.py +++ b/test/dialect/postgresql/test_async_pg_py3k.py @@ -12,7 +12,9 @@ from sqlalchemy.dialects.postgresql import ENUM from sqlalchemy.testing import async_test from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock class AsyncPgTest(fixtures.TestBase): @@ -164,6 +166,55 @@ async def async_setup(engine, enums): ], ) + @testing.variation("trans", ["commit", "rollback"]) + @async_test + async def test_dont_reset_open_transaction( + self, trans, async_testing_engine + ): + """test for #11819""" + + engine = async_testing_engine() + + control_conn = await engine.connect() + await control_conn.execution_options(isolation_level="AUTOCOMMIT") + + conn = await engine.connect() + txid_current = ( + await conn.exec_driver_sql("select txid_current()") + ).scalar() + + with expect_raises(exc.MissingGreenlet): + if trans.commit: + conn.sync_connection.connection.dbapi_connection.commit() + elif trans.rollback: + conn.sync_connection.connection.dbapi_connection.rollback() + else: + trans.fail() + + trans_exists = ( + await control_conn.exec_driver_sql( + f"SELECT count(*) FROM pg_stat_activity " + f"where backend_xid={txid_current}" + ) + ).scalar() + eq_(trans_exists, 1) + + if trans.commit: + await conn.commit() + elif trans.rollback: + await conn.rollback() + else: + trans.fail() + + trans_exists = ( + await control_conn.exec_driver_sql( + f"SELECT count(*) FROM pg_stat_activity " + f"where backend_xid={txid_current}" + ) + ).scalar() + eq_(trans_exists, 0) + await engine.dispose() + @async_test 
async def test_failed_commit_recover(self, metadata, async_testing_engine): @@ -251,3 +302,23 @@ async def test_failed_rollback_recover( await conn.begin() await conn.rollback() + + @testing.combinations( + "setup_asyncpg_json_codec", + "setup_asyncpg_jsonb_codec", + argnames="methname", + ) + @testing.requires.python38 + @async_test + async def test_codec_registration( + self, metadata, async_testing_engine, methname + ): + """test new hooks added for #7284""" + + engine = async_testing_engine() + with mock.patch.object(engine.dialect, methname) as codec_meth: + conn = await engine.connect() + adapted_conn = (await conn.get_raw_connection()).connection + await conn.close() + + eq_(codec_meth.mock_calls, [mock.call(adapted_conn)]) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 5ada6f592f3..a005821cc6e 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -3,6 +3,7 @@ from sqlalchemy import BigInteger from sqlalchemy import bindparam from sqlalchemy import cast +from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import Computed from sqlalchemy import Date @@ -10,6 +11,8 @@ from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import ForeignKeyConstraint from sqlalchemy import func from sqlalchemy import Identity from sqlalchemy import Index @@ -56,6 +59,7 @@ from sqlalchemy.testing.assertions import AssertsCompiledSQL from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_ +from sqlalchemy.types import TypeEngine from sqlalchemy.util import OrderedDict from sqlalchemy.util import u @@ -202,6 +206,35 @@ def test_insert_returning(self): dialect=dialect, ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) " + "VALUES (%(name)s, %(value)s) RETURNING some_table.name, " + "lower(some_table.value) AS value", + ) + def test_create_drop_enum(self): # test escaping and unicode within CREATE TYPE for ENUM typ = postgresql.ENUM( @@ -857,6 +890,62 @@ def test_drop_index_concurrently(self): schema.DropIndex(idx1), "DROP INDEX test_idx1", dialect=dialect_9_1 ) + def test_create_check_constraint_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("data", Integer), + CheckConstraint("data = 0", postgresql_not_valid=True), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (data INTEGER, CHECK (data = 0) NOT VALID)", + ) + + def test_create_foreign_key_constraint_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("a", Integer), + Column("b", Integer), + ForeignKeyConstraint( + "b", ["testtbl.a"], postgresql_not_valid=True + ), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (" + "a INTEGER, " + "b INTEGER, 
" + "FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID" + ")", + ) + + def test_create_foreign_key_column_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("a", Integer), + Column("b", ForeignKey("testtbl.a", postgresql_not_valid=True)), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (" + "a INTEGER, " + "b INTEGER, " + "FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID" + ")", + ) + def test_exclude_constraint_min(self): m = MetaData() tbl = Table("testtbl", m, Column("room", Integer, primary_key=True)) @@ -1464,48 +1553,48 @@ def test_array(self): ) self.assert_compile( postgresql.Any(4, c), - "%(param_1)s = ANY (x)", - checkparams={"param_1": 4}, + "%(x_1)s = ANY (x)", + checkparams={"x_1": 4}, ) self.assert_compile( c.any(5), - "%(param_1)s = ANY (x)", - checkparams={"param_1": 5}, + "%(x_1)s = ANY (x)", + checkparams={"x_1": 5}, ) self.assert_compile( ~c.any(5), - "NOT (%(param_1)s = ANY (x))", - checkparams={"param_1": 5}, + "NOT (%(x_1)s = ANY (x))", + checkparams={"x_1": 5}, ) self.assert_compile( c.all(5), - "%(param_1)s = ALL (x)", - checkparams={"param_1": 5}, + "%(x_1)s = ALL (x)", + checkparams={"x_1": 5}, ) self.assert_compile( ~c.all(5), - "NOT (%(param_1)s = ALL (x))", - checkparams={"param_1": 5}, + "NOT (%(x_1)s = ALL (x))", + checkparams={"x_1": 5}, ) self.assert_compile( c.any(5, operator=operators.ne), - "%(param_1)s != ANY (x)", - checkparams={"param_1": 5}, + "%(x_1)s != ANY (x)", + checkparams={"x_1": 5}, ) self.assert_compile( postgresql.All(6, c, operator=operators.gt), - "%(param_1)s > ALL (x)", - checkparams={"param_1": 6}, + "%(x_1)s > ALL (x)", + checkparams={"x_1": 6}, ) self.assert_compile( c.all(7, operator=operators.lt), - "%(param_1)s < ALL (x)", - checkparams={"param_1": 7}, + "%(x_1)s < ALL (x)", + checkparams={"x_1": 7}, ) @testing.combinations( @@ -2223,41 +2312,103 @@ def test_difficult_update_4(self): ) -class InsertOnConflictTest(fixtures.TestBase, AssertsCompiledSQL): +class InsertOnConflictTest(fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = postgresql.dialect() - def setup_test(self): - self.table1 = table1 = table( + run_create_tables = None + + @classmethod + def define_tables(cls, metadata): + cls.table1 = table1 = table( "mytable", column("myid", Integer), column("name", String(128)), column("description", String(128)), ) - md = MetaData() - self.table_with_metadata = Table( + cls.table_with_metadata = Table( "mytable", - md, + metadata, Column("myid", Integer, primary_key=True), Column("name", String(128)), Column("description", String(128)), ) - self.unique_constr = schema.UniqueConstraint( + cls.unique_constr = schema.UniqueConstraint( table1.c.name, name="uq_name" ) - self.excl_constr = ExcludeConstraint( + cls.excl_constr = ExcludeConstraint( (table1.c.name, "="), (table1.c.description, "&&"), name="excl_thing", ) - self.excl_constr_anon = ExcludeConstraint( - (self.table_with_metadata.c.name, "="), - (self.table_with_metadata.c.description, "&&"), - where=self.table_with_metadata.c.description != "foo", + cls.excl_constr_anon = ExcludeConstraint( + (cls.table_with_metadata.c.name, "="), + (cls.table_with_metadata.c.description, "&&"), + where=cls.table_with_metadata.c.description != "foo", ) - self.goofy_index = Index( + cls.goofy_index = Index( "goofy_index", table1.c.name, postgresql_where=table1.c.name > "m" ) + Table( + "users", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + ) + + Table( + "users_w_key", + metadata, + 
Column("id", Integer, primary_key=True), + Column("name", String(50), key="name_keyed"), + ) + + @testing.combinations("control", "excluded", "dict") + def test_set_excluded(self, scenario): + """test #8014, sending all of .excluded to set""" + + if scenario == "control": + users = self.tables.users + + stmt = insert(users) + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users.primary_key, set_=stmt.excluded + ), + "INSERT INTO users (id, name) VALUES (%(id)s, %(name)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, name = excluded.name", + ) + else: + users_w_key = self.tables.users_w_key + + stmt = insert(users_w_key) + + if scenario == "excluded": + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users_w_key.primary_key, set_=stmt.excluded + ), + "INSERT INTO users_w_key (id, name) " + "VALUES (%(id)s, %(name_keyed)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, name = excluded.name", + ) + else: + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users_w_key.primary_key, + set_={ + "id": stmt.excluded.id, + "name_keyed": stmt.excluded.name_keyed, + }, + ), + "INSERT INTO users_w_key (id, name) " + "VALUES (%(id)s, %(name_keyed)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, name = excluded.name", + ) + def test_on_conflict_do_no_call_twice(self): users = self.table1 @@ -2279,6 +2430,28 @@ def test_on_conflict_do_no_call_twice(self): ): meth() + def test_on_conflict_cte_plus_textual(self): + """test #7798""" + + bar = table("bar", column("id"), column("attr"), column("foo_id")) + s1 = text("SELECT bar.id, bar.attr FROM bar").columns( + bar.c.id, bar.c.attr + ) + s2 = ( + insert(bar) + .from_select(list(s1.selected_columns), s1) + .on_conflict_do_update( + index_elements=[s1.selected_columns.id], + set_={"attr": s1.selected_columns.attr}, + ) + ) + + self.assert_compile( + s2, + "INSERT INTO bar (id, attr) SELECT bar.id, bar.attr " + "FROM bar ON CONFLICT (id) DO UPDATE SET attr = bar.attr", + ) + def test_do_nothing_no_target(self): i = ( @@ -3009,8 +3182,8 @@ def test_regexp_match_str(self): def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid ~ CONCAT('(?', %(myid_1)s, ')', %(myid_2)s)", - checkparams={"myid_2": "pattern", "myid_1": "ig"}, + "mytable.myid ~ CONCAT('(?', 'ig', ')', %(myid_1)s)", + checkparams={"myid_1": "pattern"}, ) def test_regexp_match_flags_ignorecase(self): @@ -3020,13 +3193,6 @@ def test_regexp_match_flags_ignorecase(self): checkparams={"myid_1": "pattern"}, ) - def test_regexp_match_flags_col(self): - self.assert_compile( - self.table.c.myid.regexp_match("pattern", flags=self.table.c.name), - "mytable.myid ~ CONCAT('(?', mytable.name, ')', %(myid_1)s)", - checkparams={"myid_1": "pattern"}, - ) - def test_not_regexp_match(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern"), @@ -3051,8 +3217,8 @@ def test_not_regexp_match_str(self): def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid !~ CONCAT('(?', %(myid_1)s, ')', %(myid_2)s)", - checkparams={"myid_2": "pattern", "myid_1": "ig"}, + "mytable.myid !~ CONCAT('(?', 'ig', ')', %(myid_1)s)", + checkparams={"myid_1": "pattern"}, ) def test_not_regexp_match_flags_ignorecase(self): @@ -3062,15 +3228,6 @@ def test_not_regexp_match_flags_ignorecase(self): checkparams={"myid_1": "pattern"}, ) - def test_not_regexp_match_flags_col(self): - self.assert_compile( - 
~self.table.c.myid.regexp_match( - "pattern", flags=self.table.c.name - ), - "mytable.myid !~ CONCAT('(?', mytable.name, ')', %(myid_1)s)", - checkparams={"myid_1": "pattern"}, - ) - def test_regexp_replace(self): self.assert_compile( self.table.c.myid.regexp_replace("pattern", "replacement"), @@ -3104,22 +3261,23 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_3)s, %(myid_2)s)", + "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, 'ig')", checkparams={ "myid_1": "pattern", - "myid_3": "replacement", - "myid_2": "ig", + "myid_2": "replacement", }, ) - def test_regexp_replace_flags_col(self): + def test_regexp_replace_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_replace( - "pattern", "replacement", flags=self.table.c.name + "pattern", "replacement", flags="i'g" ), - "REGEXP_REPLACE(mytable.myid, %(myid_1)s," - " %(myid_2)s, mytable.name)", - checkparams={"myid_1": "pattern", "myid_2": "replacement"}, + "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, 'i''g')", + checkparams={ + "myid_1": "pattern", + "myid_2": "replacement", + }, ) @testing.combinations( @@ -3204,3 +3362,36 @@ def test_fetch(self, fetch, offset, fetch_kw, exp, params): "SELECT 1 " + exp, checkparams=params, ) + + +class CacheKeyTest(fixtures.CacheKeyFixture, fixtures.TestBase): + def test_aggregate_order_by(self): + """test #8574""" + + self._run_cache_key_fixture( + lambda: ( + aggregate_order_by(column("a"), column("a")), + aggregate_order_by(column("a"), column("b")), + aggregate_order_by(column("a"), column("a").desc()), + aggregate_order_by(column("a"), column("a").nulls_first()), + aggregate_order_by( + column("a"), column("a").desc().nulls_first() + ), + aggregate_order_by(column("a", Integer), column("b")), + aggregate_order_by(column("a"), column("b"), column("c")), + aggregate_order_by(column("a"), column("c"), column("b")), + aggregate_order_by( + column("a"), column("b").desc(), column("c") + ), + aggregate_order_by( + column("a"), column("b").nulls_first(), column("c") + ), + aggregate_order_by( + column("a"), column("b").desc().nulls_first(), column("c") + ), + aggregate_order_by( + column("a", Integer), column("a"), column("b") + ), + ), + compare_values=False, + ) diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index c0eb4410cf9..f32915cbc3f 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -8,6 +8,7 @@ from sqlalchemy import bindparam from sqlalchemy import cast from sqlalchemy import Column +from sqlalchemy import create_engine from sqlalchemy import DateTime from sqlalchemy import DDL from sqlalchemy import event @@ -30,6 +31,7 @@ from sqlalchemy import TypeDecorator from sqlalchemy import util from sqlalchemy.dialects.postgresql import base as postgresql +from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.dialects.postgresql import psycopg2 as psycopg2_dialect from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_BATCH from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_PLAIN @@ -40,6 +42,7 @@ from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.testing import config from sqlalchemy.testing import engines +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false @@ 
-236,24 +239,85 @@ def test_psycopg2_nonempty_connection_string_w_query(self): eq_(cargs, []) eq_(cparams, {"host": "somehost", "any_random_thing": "yes"}) - def test_psycopg2_nonempty_connection_string_w_query_two(self): + @testing.combinations( + ( + "postgresql+psycopg2://USER:PASS@/DB?host=hostA", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB" + "?host=hostA&host=hostB&host=hostC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": ",,", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB" + "?host=hostA&host=hostB:portB&host=hostC:portC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": ",portB,portC", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB?" + "host=hostA:portA&host=hostB:portB&host=hostC:portC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": "portA,portB,portC", + }, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA:portA&host=hostB:portB&host=hostC:portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA:portA&host=hostB:portB&host=hostC:portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA,hostB,hostC&port=portA,portB,portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + argnames="url_string,expected", + ) + def test_psycopg_multi_hosts(self, url_string, expected): dialect = psycopg2_dialect.dialect() - url_string = "postgresql://USER:PASS@/DB?host=hostA" u = url.make_url(url_string) cargs, cparams = dialect.create_connect_args(u) eq_(cargs, []) - eq_(cparams["host"], "hostA") + eq_(cparams, expected) - def test_psycopg2_nonempty_connection_string_w_query_three(self): + @testing.combinations( + "postgresql+psycopg2:///?host=H&host=H&port=5432,5432", + "postgresql+psycopg2://user:pass@/dbname?host=H&host=H&port=5432,5432", + argnames="url_string", + ) + def test_psycopg_no_mix_hosts(self, url_string): dialect = psycopg2_dialect.dialect() - url_string = ( - "postgresql://USER:PASS@/DB" - "?host=hostA:portA&host=hostB&host=hostC" - ) - u = url.make_url(url_string) - cargs, cparams = dialect.create_connect_args(u) - eq_(cargs, []) - eq_(cparams["host"], "hostA:portA,hostB,hostC") + with expect_raises_message( + exc.ArgumentError, "Can't mix 'multihost' formats together" + ): + u = url.make_url(url_string) + dialect.create_connect_args(u) def test_psycopg2_disconnect(self): class Error(Exception): @@ -292,6 +356,41 @@ class Error(Exception): eq_(dialect.is_disconnect("not an error", None, None), False) +class BackendDialectTest(fixtures.TestBase): + __backend__ = True + + @testing.only_on(["+psycopg", "+psycopg2"]) + @testing.combinations( + "host=H:P&host=H:P&host=H:P", + "host=H:P&host=H&host=H", + "host=H:P&host=H&host=H:P", + "host=H&host=H:P&host=H", + "host=H,H,H&port=P,P,P", + ) + def test_connect_psycopg_multiple_hosts(self, pattern): + """test the fix for #4392""" + + tdb_url = testing.db.url + + host = tdb_url.host + if host == "127.0.0.1": + host = "localhost" + port = str(tdb_url.port) if tdb_url.port else "5432" + + query_str = pattern.replace("H", host).replace("P", port) + url_string = "%s://%s:" "%s@/%s?%s" % ( + tdb_url.drivername, + tdb_url.username, + tdb_url.password, + tdb_url.database, + query_str, + ) + + e = create_engine(url_string) + 
with e.connect() as conn: + eq_(conn.exec_driver_sql("select 1").scalar(), 1) + + class PGCodeTest(fixtures.TestBase): __only_on__ = "postgresql" @@ -357,7 +456,10 @@ def define_tables(cls, metadata): Column(ue("\u6e2c\u8a66"), Integer), ) - def test_insert(self, connection): + @testing.combinations( + "insert", "pg_insert", "pg_insert_on_conflict", argnames="insert_type" + ) + def test_insert(self, connection, insert_type): from psycopg2 import extras values_page_size = connection.dialect.executemany_values_page_size @@ -377,11 +479,23 @@ def test_insert(self, connection): else: assert False + if insert_type == "pg_insert_on_conflict": + stmt += " ON CONFLICT DO NOTHING" + with mock.patch.object( extras, meth.__name__, side_effect=meth ) as mock_exec: + if insert_type == "insert": + ins_stmt = self.tables.data.insert() + elif insert_type == "pg_insert": + ins_stmt = pg_insert(self.tables.data) + elif insert_type == "pg_insert_on_conflict": + ins_stmt = pg_insert(self.tables.data).on_conflict_do_nothing() + else: + assert False + connection.execute( - self.tables.data.insert(), + ins_stmt, [ {"x": "x1", "y": "y1"}, {"x": "x2", "y": "y2"}, @@ -1079,6 +1193,23 @@ def test_readonly_flag_engine(self, testing_engine, pre_ping): dbapi_conn.rollback() eq_(val, "off") + @testing.combinations((True,), (False,), argnames="autocommit") + def test_autocommit_pre_ping(self, testing_engine, autocommit): + engine = testing_engine( + options={ + "isolation_level": "AUTOCOMMIT" + if autocommit + else "SERIALIZABLE", + "pool_pre_ping": True, + } + ) + for i in range(4): + with engine.connect() as conn: + conn.execute(text("select 1")).scalar() + + dbapi_conn = conn.connection.dbapi_connection + eq_(dbapi_conn.autocommit, autocommit) + def test_deferrable_flag_engine(self): engine = engines.testing_engine( options={ @@ -1391,6 +1522,11 @@ def test_initial_transaction_state(self): with engine.connect() as conn: ne_(conn.connection.status, STATUS_IN_TRANSACTION) + def test_select_rowcount(self): + conn = testing.db.connect() + cursor = conn.exec_driver_sql("SELECT 1") + eq_(cursor.rowcount, 1) + class AutocommitTextTest(test_deprecations.AutocommitTextTest): __only_on__ = "postgresql" diff --git a/test/dialect/postgresql/test_on_conflict.py b/test/dialect/postgresql/test_on_conflict.py index 508f691c514..ab46342f5fc 100644 --- a/test/dialect/postgresql/test_on_conflict.py +++ b/test/dialect/postgresql/test_on_conflict.py @@ -675,6 +675,45 @@ def test_on_conflict_do_update_exotic_targets_six(self, connection): [(1, "name1", "mail2@gmail.com", "unique_name")], ) + def test_on_conflict_do_update_constraint_can_be_index(self, connection): + """test #9023""" + + users = self.tables.users_xtra + + connection.execute( + insert(users), + dict( + id=1, + name="name1", + login_email="mail1@gmail.com", + lets_index_this="unique_name", + ), + ) + + i = insert(users) + i = i.on_conflict_do_update( + constraint=self.unique_partial_index, + set_=dict( + name=i.excluded.name, login_email=i.excluded.login_email + ), + ) + + connection.execute( + i, + [ + dict( + name="name1", + login_email="mail2@gmail.com", + lets_index_this="unique_name", + ) + ], + ) + + eq_( + connection.execute(users.select()).fetchall(), + [(1, "name1", "mail2@gmail.com", "unique_name")], + ) + def test_on_conflict_do_update_no_row_actually_affected(self, connection): users = self.tables.users_xtra diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py index a1e9c465729..d0f5d429b41 100644 --- 
a/test/dialect/postgresql/test_query.py +++ b/test/dialect/postgresql/test_query.py @@ -75,7 +75,8 @@ def test_foreignkey_missing_insert(self, implicit_returning): # the case here due to the foreign key. with expect_warnings(".*has no Python-side or server-side default.*"): - with engine.begin() as conn: + with engine.connect() as conn: + conn.begin() assert_raises( (exc.IntegrityError, exc.ProgrammingError), conn.execute, @@ -596,7 +597,8 @@ def _assert_data_noautoincrement(self, table): with engine.begin() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -606,8 +608,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), {"data": "d2"}, ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -617,8 +621,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -628,8 +634,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), {"data": "d2"}, ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -639,6 +647,7 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() with engine.begin() as conn: conn.execute( @@ -660,7 +669,8 @@ def _assert_data_noautoincrement(self, table): with engine.begin() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -671,7 +681,8 @@ def _assert_data_noautoincrement(self, table): {"data": "d2"}, ) - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -681,6 +692,7 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() with engine.begin() as conn: conn.execute( diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index fa90ec212fc..807ea128198 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -34,10 +34,13 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises +from sqlalchemy.testing.assertions import assert_warns from sqlalchemy.testing.assertions import AssertsExecutionResults from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_ from sqlalchemy.testing.assertions import is_true +from sqlalchemy.types import NullType class ReflectionFixtures(object): @@ -183,6 +186,7 @@ def test_reflect_index(self, connection): "unique": False, "column_names": ["q"], "include_columns": [], + "dialect_options": {"postgresql_include": []}, } ], ) @@ -198,6 +202,7 @@ def test_reflect_index_from_partition(self, 
connection): { "column_names": ["q"], "include_columns": [], + "dialect_options": {"postgresql_include": []}, "name": mock.ANY, "unique": False, } @@ -298,13 +303,18 @@ def test_get_view_names_empty(self, connection): def test_get_view_definition(self, connection): insp = inspect(connection) + + def normalize(definition): + # pg16 returns "SELECT" without qualifying tablename. + # older pgs include it + definition = re.sub( + r"testtable\.(\w+)", lambda m: m.group(1), definition + ) + return re.sub(r"[\n\t ]+", " ", definition.strip()) + eq_( - re.sub( - r"[\n\t ]+", - " ", - insp.get_view_definition("test_mview").strip(), - ), - "SELECT testtable.id, testtable.data FROM testtable;", + normalize(insp.get_view_definition("test_mview")), + "SELECT id, data FROM testtable;", ) @@ -484,7 +494,7 @@ def test_unknown_types(self, connection): base.PGDialect.ischema_names = {} try: m2 = MetaData() - assert_raises( + assert_warns( exc.SAWarning, Table, "testtable", m2, autoload_with=connection ) @@ -1088,7 +1098,7 @@ def test_index_reflection_with_sorting(self, metadata, connection): # "ASC NULLS LAST" is implicit default for indexes, # and "NULLS FIRST" is implicit default for "DESC". - # (https://www.postgresql.org/docs/11/indexes-ordering.html) + # (https://www.postgresql.org/docs/current/indexes-ordering.html) def compile_exprs(exprs): return list(map(str, exprs)) @@ -1131,6 +1141,7 @@ def test_index_reflection_modified(self, metadata, connection): expected = [{"name": "idx1", "unique": False, "column_names": ["y"]}] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"] = {"postgresql_include": []} eq_(ind, expected) @@ -1163,6 +1174,7 @@ def test_index_reflection_with_storage_options(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(ind, expected) m = MetaData() @@ -1195,6 +1207,7 @@ def test_index_reflection_with_access_method(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(ind, expected) m = MetaData() t1 = Table("t", m, autoload_with=connection) @@ -1229,6 +1242,7 @@ def test_index_reflection_with_include(self, metadata, connection): "unique": False, "column_names": ["x"], "include_columns": ["name"], + "dialect_options": {"postgresql_include": ["name"]}, "name": "idx1", } ], @@ -1604,6 +1618,7 @@ def test_reflection_with_exclude_constraint(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(insp.get_indexes("t"), expected) @@ -1814,6 +1829,21 @@ def test_instancelevel(self): dialect.ischema_names["my_custom_type"] = self.CustomType self._assert_reflected(dialect) + def test_no_format_type(self): + """test #8748""" + + dialect = postgresql.PGDialect() + dialect.ischema_names = dialect.ischema_names.copy() + dialect.ischema_names["my_custom_type"] = self.CustomType + + with expect_warnings( + r"PostgreSQL format_type\(\) returned NULL for column 'colname'" + ): + column_info = dialect._get_column_info( + "colname", None, None, False, {}, {}, "public", None, "", None + ) + assert isinstance(column_info["type"], NullType) + class IntervalReflectionTest(fixtures.TestBase): __only_on__ = "postgresql" 
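[Editorial note, not part of the patch] The reflection changes above add a "dialect_options" entry to each reflected PostgreSQL index dictionary, carrying the "postgresql_include" list for covering indexes. A minimal sketch of how calling code might read that key, assuming a hypothetical table "t" and a reachable PostgreSQL database (names and URL are illustrative, not taken from this patch):

    from sqlalchemy import (
        Column, Index, Integer, MetaData, String, Table, create_engine, inspect
    )

    # hypothetical connection URL; any PostgreSQL 11+ database would do
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    metadata = MetaData()
    t = Table(
        "t",
        metadata,
        Column("x", Integer),
        Column("name", String(20)),
    )
    # covering index: "name" is stored in the index leaf pages via INCLUDE
    Index("idx1", t.c.x, postgresql_include=["name"])
    metadata.create_all(engine)

    with engine.connect() as conn:
        ix = inspect(conn).get_indexes("t")[0]
        # per the tests in the hunk above, reflected index info now carries
        # both the existing "include_columns" key and the new dialect_options
        print(ix["include_columns"])                        # ["name"]
        print(ix["dialect_options"]["postgresql_include"])  # ["name"]

This mirrors what test_index_reflection_with_include asserts; indexes without INCLUDE columns are expected to report an empty list under the same key.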
diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index d1c0361e4f9..1a5cdb6474d 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -47,6 +47,7 @@ from sqlalchemy.exc import CompileError from sqlalchemy.orm import declarative_base from sqlalchemy.orm import Session +from sqlalchemy.sql import bindparam from sqlalchemy.sql import operators from sqlalchemy.sql import sqltypes from sqlalchemy.sql.type_api import Variant @@ -922,11 +923,11 @@ class NumericInterpretationTest(fixtures.TestBase): def test_numeric_codes(self): from sqlalchemy.dialects.postgresql import ( + base, pg8000, pygresql, psycopg2, psycopg2cffi, - base, ) dialects = ( @@ -1208,7 +1209,7 @@ def test_array_in_enum_psycopg2_cast(self): self.assert_compile( expr, - "x IN ([POSTCOMPILE_x_1~~~~REPL~~::myenum[]~~])", + "x IN (__[POSTCOMPILE_x_1~~~~REPL~~::myenum[]~~])", dialect=postgresql.psycopg2.dialect(), ) @@ -1226,7 +1227,7 @@ def test_array_in_str_psycopg2_cast(self): self.assert_compile( expr, - "x IN ([POSTCOMPILE_x_1~~~~REPL~~::VARCHAR(15)[]~~])", + "x IN (__[POSTCOMPILE_x_1~~~~REPL~~::VARCHAR(15)[]~~])", dialect=postgresql.psycopg2.dialect(), ) @@ -1260,16 +1261,16 @@ def test_array_any(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.any(7, operator=operators.lt)), - "SELECT %(param_1)s < ANY (x) AS anon_1", - checkparams={"param_1": 7}, + "SELECT %(x_1)s < ANY (x) AS anon_1", + checkparams={"x_1": 7}, ) def test_array_all(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.all(7, operator=operators.lt)), - "SELECT %(param_1)s < ALL (x) AS anon_1", - checkparams={"param_1": 7}, + "SELECT %(x_1)s < ALL (x) AS anon_1", + checkparams={"x_1": 7}, ) def test_array_contains(self): @@ -1415,9 +1416,11 @@ def test_array_agg_generic(self): argnames="with_enum, using_aggregate_order_by", ) def test_array_agg_specific(self, with_enum, using_aggregate_order_by): - from sqlalchemy.dialects.postgresql import aggregate_order_by - from sqlalchemy.dialects.postgresql import array_agg - from sqlalchemy.dialects.postgresql import ENUM + from sqlalchemy.dialects.postgresql import ( + ENUM, + aggregate_order_by, + array_agg, + ) element_type = ENUM if with_enum else Integer expr = ( @@ -1905,7 +1908,7 @@ def test_array_plus_native_enum_create(self, metadata, connection): t.drop(connection) eq_(inspect(connection).get_enums(), []) - def _type_combinations(exclude_json=False): + def _type_combinations(exclude_json=False, exclude_empty_lists=False): def str_values(x): return ["one", "two: %s" % x, "three", "four", "five"] @@ -1939,6 +1942,9 @@ def enum_values(x): AnEnum.Foo, ] + def empty_list(x): + return [] + class inet_str(str): def __eq__(self, other): return str(self) == str(other) @@ -1954,6 +1960,23 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + difficult_enum = [ + "Value", + "With space", + "With,comma", + 'With"quote', + "With\\escape", + """Various!@#$%^*()"'\\][{};:.<>|_+~chars""", + ] + + def make_difficult_enum(cls_, native): + return cls_( + *difficult_enum, name="difficult_enum", native_enum=native + ) + + def difficult_enum_values(x): + return [v for i, v in enumerate(difficult_enum) if i != x - 1] + elements = [ (sqltypes.Integer, lambda x: [1, x, 3, 4, 5]), (sqltypes.Text, str_values), @@ -2041,8 +2064,29 @@ def __ne__(self, other): (sqltypes.Enum(AnEnum, native_enum=True), enum_values), (sqltypes.Enum(AnEnum, native_enum=False), 
enum_values), (postgresql.ENUM(AnEnum, native_enum=True), enum_values), + ( + make_difficult_enum(sqltypes.Enum, native=True), + difficult_enum_values, + ), + ( + make_difficult_enum(sqltypes.Enum, native=False), + difficult_enum_values, + ), + ( + make_difficult_enum(postgresql.ENUM, native=True), + difficult_enum_values, + ), ] + if not exclude_empty_lists: + elements.extend( + [ + (postgresql.ENUM(AnEnum), empty_list), + (sqltypes.Enum(AnEnum, native_enum=True), empty_list), + (sqltypes.Enum(AnEnum, native_enum=False), empty_list), + (postgresql.ENUM(AnEnum, native_enum=True), empty_list), + ] + ) if not exclude_json: elements.extend( [ @@ -2131,7 +2175,7 @@ def test_type_specific_value_update( connection.scalar(select(table.c.bar).where(table.c.id == 2)), ) - @_type_combinations() + @_type_combinations(exclude_empty_lists=True) def test_type_specific_slice_update( self, type_specific_fixture, connection, type_, gen ): @@ -2158,7 +2202,7 @@ def test_type_specific_slice_update( eq_(rows, [(gen(1),), (sliced_gen,)]) - @_type_combinations(exclude_json=True) + @_type_combinations(exclude_json=True, exclude_empty_lists=True) def test_type_specific_value_delete( self, type_specific_fixture, connection, type_, gen ): @@ -2353,14 +2397,19 @@ class MyEnum(Enum): array_cls(enum_cls(MyEnum)), ), ) + data = [ + {"enum_col": ["foo"], "pyenum_col": [MyEnum.a, MyEnum.b]}, + {"enum_col": ["foo", "bar"], "pyenum_col": [MyEnum.b]}, + ] else: MyEnum = None + data = [ + {"enum_col": ["foo"]}, + {"enum_col": ["foo", "bar"]}, + ] metadata.create_all(connection) - connection.execute( - tbl.insert(), - [{"enum_col": ["foo"]}, {"enum_col": ["foo", "bar"]}], - ) + connection.execute(tbl.insert(), data) return tbl, MyEnum yield go @@ -2377,6 +2426,27 @@ def _enum_combinations(fn): )(fn) ) + @testing.requires.python3 + @_enum_combinations + @testing.combinations("all", "any", argnames="fn") + def test_any_all_roundtrip( + self, array_of_enum_fixture, connection, array_cls, enum_cls, fn + ): + """test #6515""" + + tbl, MyEnum = array_of_enum_fixture(array_cls, enum_cls) + + if fn == "all": + expr = tbl.c.pyenum_col.all(MyEnum.b) + result = [([MyEnum.b],)] + elif fn == "any": + expr = tbl.c.pyenum_col.any(MyEnum.b) + result = [([MyEnum.a, MyEnum.b],), ([MyEnum.b],)] + else: + assert False + sel = select(tbl.c.pyenum_col).where(expr).order_by(tbl.c.id) + eq_(connection.execute(sel).fetchall(), result) + @_enum_combinations def test_array_of_enums_roundtrip( self, array_of_enum_fixture, connection, array_cls, enum_cls @@ -2649,6 +2719,7 @@ def get_col_spec(self): Column("bitstring", postgresql.BIT(4)), Column("addr", postgresql.INET), Column("addr2", postgresql.MACADDR), + Column("addr4", postgresql.MACADDR8), Column("price", postgresql.MONEY), Column("addr3", postgresql.CIDR), Column("doubleprec", postgresql.DOUBLE_PRECISION), @@ -2761,6 +2832,37 @@ def test_round_trip(self, datatype, value1, value2, connection): def test_uuid_array(self, datatype, value1, value2, connection): self.test_round_trip(datatype, value1, value2, connection) + @testing.combinations( + ( + "not_as_uuid", + postgresql.UUID(as_uuid=False), + str(uuid.uuid4()), + ), + ( + "as_uuid", + postgresql.UUID(as_uuid=True), + uuid.uuid4(), + ), + id_="iaa", + argnames="datatype, value1", + ) + def test_uuid_literal(self, datatype, value1, connection): + v1 = connection.execute( + select( + bindparam( + "key", + value=value1, + literal_execute=True, + type_=datatype, + ) + ), + ) + eq_(v1.fetchone()[0], value1) + + def test_python_type(self): + 
eq_(postgresql.UUID(as_uuid=True).python_type, uuid.UUID) + eq_(postgresql.UUID(as_uuid=False).python_type, str) + class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" @@ -3727,6 +3829,33 @@ def _assert_column_is_JSON_NULL(self, conn, column="data"): ).fetchall() eq_([d for d, in data], [None]) + @testing.combinations( + "key", + "réve🐍 illé", + 'name_with"quotes"name', + "name with spaces", + "name with ' single ' quotes", + 'some_key("idx")', + argnames="key", + ) + def test_indexed_special_keys(self, connection, key): + data_table = self.tables.data_table + data_element = {key: "some value"} + + connection.execute( + data_table.insert(), + { + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + row = connection.execute( + select(data_table.c.data[key], data_table.c.nulldata[key]) + ).one() + eq_(row, ("some value", "some value")) + def test_reflect(self, connection): insp = inspect(connection) cols = insp.get_columns("data_table") diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index ed0f11907cf..55833761e9d 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -42,6 +42,7 @@ from sqlalchemy.engine.url import make_url from sqlalchemy.schema import CreateTable from sqlalchemy.schema import FetchedValue +from sqlalchemy.sql.elements import quoted_name from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL @@ -53,6 +54,7 @@ from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.types import Boolean @@ -113,7 +115,7 @@ def test_string_dates_passed_raise(self, connection): ) def test_cant_parse_datetime_message(self, connection): - for (typ, disp) in [ + for typ, disp in [ (Time, "time"), (DateTime, "datetime"), (Date, "date"), @@ -826,7 +828,7 @@ def _fixture(self): Table( "another_created", meta, - Column("bat", Integer), + Column("bat", Integer, unique=True), Column("hoho", String), schema="test_schema", ) @@ -908,6 +910,28 @@ def test_table_names_system(self): {"created", "another_created"}, ) + def test_unique_constraints(self): + self._fixture() + insp = inspect(self.conn) + eq_( + [ + d["column_names"] + for d in insp.get_unique_constraints( + "created", schema="test_schema" + ) + ], + [], + ) + eq_( + [ + d["column_names"] + for d in insp.get_unique_constraints( + "another_created", schema="test_schema" + ) + ], + [["bat"]], + ) + def test_schema_names(self): self._fixture() insp = inspect(self.conn) @@ -969,7 +993,6 @@ def test_col_targeting_union(self): class SQLTest(fixtures.TestBase, AssertsCompiledSQL): - """Tests SQLite-dialect specific compilation.""" __dialect__ = sqlite.dialect() @@ -1159,7 +1182,7 @@ def test_in_tuple(self): .in_([(1, 2), (3, 4)]) .compile(dialect=sqlite.dialect()) ) - eq_(str(compiled), "(q, p) IN ([POSTCOMPILE_param_1])") + eq_(str(compiled), "(q, p) IN (__[POSTCOMPILE_param_1])") eq_( compiled._literal_execute_expanding_parameter( "param_1", @@ -1364,7 +1387,6 @@ def test_on_conflict_clause_primary_key_constraint(self): class InsertTest(fixtures.TestBase, AssertsExecutionResults): - """Tests inserts and autoincrement.""" __only_on__ = "sqlite" @@ -1887,6 +1909,20 @@ def setup_test_class(cls): "ON UPDATE NO ACTION)" ) + 
conn.exec_driver_sql( + "CREATE TABLE deferrable_test (id INTEGER PRIMARY KEY, " + "c1 INTEGER, c2 INTEGER, c3 INTEGER, c4 INTEGER, " + "CONSTRAINT fk1 FOREIGN KEY (c1) REFERENCES a1(id) " + "DEFERRABLE," + "CONSTRAINT fk2 FOREIGN KEY (c2) REFERENCES a1(id) " + "NOT DEFERRABLE," + "CONSTRAINT fk3 FOREIGN KEY (c3) REFERENCES a2(id) " + "ON UPDATE CASCADE " + "DEFERRABLE INITIALLY DEFERRED," + "CONSTRAINT fk4 FOREIGN KEY (c4) REFERENCES a2(id) " + "NOT DEFERRABLE INITIALLY IMMEDIATE)" + ) + conn.exec_driver_sql( "CREATE TABLE cp (" "q INTEGER check (q > 1 AND q < 6),\n" @@ -2257,6 +2293,51 @@ def test_foreign_key_ondelete_onupdate(self): ], ) + def test_foreign_key_deferrable_initially(self): + inspector = inspect(testing.db) + fks = inspector.get_foreign_keys("deferrable_test") + eq_( + fks, + [ + { + "referred_table": "a1", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk1", + "constrained_columns": ["c1"], + "options": {"deferrable": True}, + }, + { + "referred_table": "a1", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk2", + "constrained_columns": ["c2"], + "options": {"deferrable": False}, + }, + { + "referred_table": "a2", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk3", + "constrained_columns": ["c3"], + "options": { + "deferrable": True, + "initially": "DEFERRED", + "onupdate": "CASCADE", + }, + }, + { + "referred_table": "a2", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk4", + "constrained_columns": ["c4"], + "options": {"deferrable": False, "initially": "IMMEDIATE"}, + }, + ], + ) + def test_foreign_key_options_unnamed_inline(self): with testing.db.begin() as conn: conn.exec_driver_sql( @@ -2289,6 +2370,7 @@ def test_dont_reflect_autoindex(self): "unique": 1, "name": "sqlite_autoindex_o_1", "column_names": ["foo"], + "dialect_options": {}, } ], ) @@ -2302,12 +2384,63 @@ def test_create_index_with_schema(self): [ { "unique": 0, - "name": u"ix_main_l_bar", - "column_names": [u"bar"], + "name": "ix_main_l_bar", + "column_names": ["bar"], + "dialect_options": {}, } ], ) + @testing.requires.sqlite_partial_indexes + def test_reflect_partial_indexes(self, connection): + connection.exec_driver_sql( + "create table foo_with_partial_index (x integer, y integer)" + ) + connection.exec_driver_sql( + "create unique index ix_partial on " + "foo_with_partial_index (x) where y > 10" + ) + connection.exec_driver_sql( + "create unique index ix_no_partial on " + "foo_with_partial_index (x)" + ) + connection.exec_driver_sql( + "create unique index ix_partial2 on " + "foo_with_partial_index (x, y) where " + "y = 10 or abs(x) < 5" + ) + + inspector = inspect(connection) + indexes = inspector.get_indexes("foo_with_partial_index") + eq_( + indexes, + [ + { + "unique": 1, + "name": "ix_no_partial", + "column_names": ["x"], + "dialect_options": {}, + }, + { + "unique": 1, + "name": "ix_partial", + "column_names": ["x"], + "dialect_options": {"sqlite_where": mock.ANY}, + }, + { + "unique": 1, + "name": "ix_partial2", + "column_names": ["x", "y"], + "dialect_options": {"sqlite_where": mock.ANY}, + }, + ], + ) + eq_(indexes[1]["dialect_options"]["sqlite_where"].text, "y > 10") + eq_( + indexes[2]["dialect_options"]["sqlite_where"].text, + "y = 10 or abs(x) < 5", + ) + def test_unique_constraint_named(self): inspector = inspect(testing.db) eq_( @@ -2401,9 +2534,57 @@ def test_check_constraint(self): ], ) + @testing.combinations( + ("plain_name", "plain_name"), + ("name with spaces", "name with spaces"), + 
("plainname", "plainname"), + ("[Code]", "[Code]"), + (quoted_name("[Code]", quote=False), "Code"), + argnames="colname,expected", + ) + @testing.combinations( + "uq", "uq_inline", "pk", "ix", argnames="constraint_type" + ) + def test_constraint_cols( + self, colname, expected, constraint_type, connection, metadata + ): + if constraint_type == "uq_inline": + t = Table("t", metadata, Column(colname, Integer)) + connection.exec_driver_sql( + """ + CREATE TABLE t (%s INTEGER UNIQUE) + """ + % connection.dialect.identifier_preparer.quote(colname) + ) + else: + t = Table("t", metadata, Column(colname, Integer)) + if constraint_type == "uq": + constraint = UniqueConstraint(t.c[colname]) + elif constraint_type == "pk": + constraint = PrimaryKeyConstraint(t.c[colname]) + elif constraint_type == "ix": + constraint = Index("some_index", t.c[colname]) + else: + assert False + + t.append_constraint(constraint) + + t.create(connection) + + if constraint_type in ("uq", "uq_inline"): + const = inspect(connection).get_unique_constraints("t")[0] + eq_(const["column_names"], [expected]) + elif constraint_type == "pk": + const = inspect(connection).get_pk_constraint("t") + eq_(const["constrained_columns"], [expected]) + elif constraint_type == "ix": + const = inspect(connection).get_indexes("t")[0] + eq_(const["column_names"], [expected]) + else: + assert False -class SavepointTest(fixtures.TablesTest): +class SavepointTest(fixtures.TablesTest): """test that savepoints work when we use the correct event setup""" __only_on__ = "sqlite" @@ -2704,7 +2885,7 @@ def test_regexp_replace(self): ) -class OnConflictTest(fixtures.TablesTest): +class OnConflictTest(AssertsCompiledSQL, fixtures.TablesTest): __only_on__ = ("sqlite >= 3.24.0",) __backend__ = True @@ -2718,6 +2899,13 @@ def define_tables(cls, metadata): Column("name", String(50)), ) + Table( + "users_w_key", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), key="name_keyed"), + ) + class SpecialType(sqltypes.TypeDecorator): impl = String cache_ok = True @@ -2762,6 +2950,44 @@ def test_bad_args(self): ValueError, insert(self.tables.users).on_conflict_do_update ) + @testing.combinations("control", "excluded", "dict") + def test_set_excluded(self, scenario): + """test #8014, sending all of .excluded to set""" + + if scenario == "control": + users = self.tables.users + + stmt = insert(users) + self.assert_compile( + stmt.on_conflict_do_update(set_=stmt.excluded), + "INSERT INTO users (id, name) VALUES (?, ?) ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + else: + users_w_key = self.tables.users_w_key + + stmt = insert(users_w_key) + + if scenario == "excluded": + self.assert_compile( + stmt.on_conflict_do_update(set_=stmt.excluded), + "INSERT INTO users_w_key (id, name) VALUES (?, ?) " + "ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + else: + self.assert_compile( + stmt.on_conflict_do_update( + set_={ + "id": stmt.excluded.id, + "name_keyed": stmt.excluded.name_keyed, + } + ), + "INSERT INTO users_w_key (id, name) VALUES (?, ?) 
" + "ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + def test_on_conflict_do_no_call_twice(self): users = self.tables.users @@ -3316,3 +3542,101 @@ def test_on_conflict_do_update_special_types_in_set(self, connection): conn.scalar(sql.select(bind_targets.c.data)), "new updated data processed", ) + + +class ComputedReflectionTest(fixtures.TestBase): + __only_on__ = "sqlite" + __requires__ = ("computed_columns",) + __backend__ = True + + @classmethod + def setup_test_class(cls): + tables = [ + """CREATE TABLE test1 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x') + );""", + """CREATE TABLE test2 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x'), + y VARCHAR GENERATED ALWAYS AS (s || 'y') + );""", + """CREATE TABLE test3 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) + );""", + """CREATE TABLE test4 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")), + y INTEGER GENERATED ALWAYS AS (INSTR(x, ",")));""", + """CREATE TABLE test5 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x') STORED + );""", + """CREATE TABLE test6 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x') STORED, + y VARCHAR GENERATED ALWAYS AS (s || 'y') STORED + );""", + """CREATE TABLE test7 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) STORED + );""", + """CREATE TABLE test8 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) STORED, + y INTEGER GENERATED ALWAYS AS (INSTR(x, ",")) STORED + );""", + ] + + with testing.db.begin() as conn: + for ct in tables: + conn.exec_driver_sql(ct) + + @classmethod + def teardown_test_class(cls): + with testing.db.begin() as conn: + for tn in cls.res: + conn.exec_driver_sql("DROP TABLE %s" % tn) + + res = { + "test1": {"x": {"text": "s || 'x'", "stored": False}}, + "test2": { + "x": {"text": "s || 'x'", "stored": False}, + "y": {"text": "s || 'y'", "stored": False}, + }, + "test3": {"x": {"text": 'INSTR(s, ",")', "stored": False}}, + "test4": { + "x": {"text": 'INSTR(s, ",")', "stored": False}, + "y": {"text": 'INSTR(x, ",")', "stored": False}, + }, + "test5": {"x": {"text": "s || 'x'", "stored": True}}, + "test6": { + "x": {"text": "s || 'x'", "stored": True}, + "y": {"text": "s || 'y'", "stored": True}, + }, + "test7": {"x": {"text": 'INSTR(s, ",")', "stored": True}}, + "test8": { + "x": {"text": 'INSTR(s, ",")', "stored": True}, + "y": {"text": 'INSTR(x, ",")', "stored": True}, + }, + } + + def test_reflection(self, connection): + meta = MetaData() + meta.reflect(connection) + eq_(len(meta.tables), len(self.res)) + for tbl in meta.tables.values(): + data = self.res[tbl.name] + seen = set() + for col in tbl.c: + if col.name not in data: + is_(col.computed, None) + else: + info = data[col.name] + seen.add(col.name) + msg = "%s-%s" % (tbl.name, col.name) + is_true(bool(col.computed)) + eq_(col.computed.sqltext.text, info["text"], msg) + eq_(col.computed.persisted, info["stored"], msg) + eq_(seen, set(data.keys())) diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index 4a14cbcca0c..afb7ddd4d41 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -1,6 +1,7 @@ # coding: utf-8 from contextlib import contextmanager +import copy import re import threading import weakref @@ -264,6 +265,58 @@ def test_raw_named(self, connection): (4, "sally"), ] + def test_non_dict_mapping(self, connection): + """ensure arbitrary Mapping works for execute()""" + + class NotADict(collections_abc.Mapping): + def __init__(self, _data): + self._data = _data + + 
def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self): + return self._data.keys() + + nd = NotADict({"a": 10, "b": 15}) + eq_(dict(nd), {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer), bindparam("b", type_=Integer) + ), + nd, + ) + eq_(result.first(), (10, 15)) + + def test_row_works_as_mapping(self, connection): + """ensure the RowMapping object works as a parameter dictionary for + execute.""" + + result = connection.execute( + select(literal(10).label("a"), literal(15).label("b")) + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer).label("a"), + bindparam("b", type_=Integer).label("b"), + ), + row._mapping, + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + def test_dialect_has_table_assertion(self): with expect_raises_message( tsa.exc.ArgumentError, @@ -418,7 +471,7 @@ def test_stmt_exception_bytestring_utf8(self): eq_(str(err), message) # unicode accessor decodes to utf-8 - eq_(unicode(err), util.u("some message méil")) # noqa F821 + eq_(unicode(err), util.u("some message méil")) # noqa: F821 else: eq_(str(err), util.u("some message méil")) @@ -433,7 +486,7 @@ def test_stmt_exception_bytestring_latin1(self): eq_(str(err), message) # unicode accessor decodes to utf-8 - eq_(unicode(err), util.u("some message m\\xe9il")) # noqa F821 + eq_(unicode(err), util.u("some message m\\xe9il")) # noqa: F821 else: eq_(str(err), util.u("some message m\\xe9il")) @@ -444,7 +497,7 @@ def test_stmt_exception_unicode_hook_unicode(self): err = tsa.exc.SQLAlchemyError(message) if util.py2k: - eq_(unicode(err), util.u("some message méil")) # noqa F821 + eq_(unicode(err), util.u("some message méil")) # noqa: F821 else: eq_(str(err), util.u("some message méil")) @@ -453,7 +506,7 @@ def test_stmt_exception_object_arg(self): eq_(str(err), "foo") if util.py2k: - eq_(unicode(err), util.u("fóó")) # noqa F821 + eq_(unicode(err), util.u("fóó")) # noqa: F821 def test_stmt_exception_str_multi_args(self): err = tsa.exc.SQLAlchemyError("some message", 206) @@ -813,16 +866,42 @@ def test_transaction_engine_ctx_commit(self): testing.run_as_contextmanager(ctx, fn, 5, value=8) self._assert_fn(5, value=8) - def test_transaction_engine_ctx_begin_fails(self): + def test_transaction_engine_ctx_begin_fails_dont_enter_enter(self): + """test #7272""" engine = engines.testing_engine() mock_connection = Mock( return_value=Mock(begin=Mock(side_effect=Exception("boom"))) ) - engine._connection_cls = mock_connection - assert_raises(Exception, engine.begin) + with mock.patch.object(engine, "_connection_cls", mock_connection): + if testing.requires.legacy_engine.enabled: + with expect_raises_message(Exception, "boom"): + engine.begin() + else: + # context manager isn't entered, doesn't actually call + # connect() or connection.begin() + engine.begin() + + if testing.requires.legacy_engine.enabled: + eq_(mock_connection.return_value.close.mock_calls, [call()]) + else: + eq_(mock_connection.return_value.close.mock_calls, []) - eq_(mock_connection.return_value.close.mock_calls, [call()]) + def test_transaction_engine_ctx_begin_fails_include_enter(self): + """test #7272""" + engine = engines.testing_engine() + + close_mock = Mock() + with mock.patch.object( + engine._connection_cls, + "begin", + Mock(side_effect=Exception("boom")), + 
), mock.patch.object(engine._connection_cls, "close", close_mock): + with expect_raises_message(Exception, "boom"): + with engine.begin(): + pass + + eq_(close_mock.mock_calls, [call()]) def test_transaction_engine_ctx_rollback(self): fn = self._trans_rollback_fn() @@ -867,6 +946,7 @@ def test_connection_as_ctx(self): self._assert_fn(5, value=8) @testing.fails_on("mysql+oursql", "oursql bug ? getting wrong rowcount") + @testing.requires.legacy_engine def test_connect_as_ctx_noautocommit(self): fn = self._trans_fn() self._assert_no_data() @@ -878,6 +958,12 @@ def test_connect_as_ctx_noautocommit(self): self._assert_no_data() +class FutureConvenienceExecuteTest( + fixtures.FutureEngineMixin, ConvenienceExecuteTest +): + __backend__ = True + + class CompiledCacheTest(fixtures.TestBase): __backend__ = True @@ -1122,12 +1208,12 @@ def test_create_table(self, plain_tables, connection): t1.drop(conn) asserter.assert_( - CompiledSQL("CREATE TABLE [SCHEMA__none].t1 (x INTEGER)"), - CompiledSQL("CREATE TABLE [SCHEMA_foo].t2 (x INTEGER)"), - CompiledSQL("CREATE TABLE [SCHEMA_bar].t3 (x INTEGER)"), - CompiledSQL("DROP TABLE [SCHEMA_bar].t3"), - CompiledSQL("DROP TABLE [SCHEMA_foo].t2"), - CompiledSQL("DROP TABLE [SCHEMA__none].t1"), + CompiledSQL("CREATE TABLE __[SCHEMA__none].t1 (x INTEGER)"), + CompiledSQL("CREATE TABLE __[SCHEMA_foo].t2 (x INTEGER)"), + CompiledSQL("CREATE TABLE __[SCHEMA_bar].t3 (x INTEGER)"), + CompiledSQL("DROP TABLE __[SCHEMA_bar].t3"), + CompiledSQL("DROP TABLE __[SCHEMA_foo].t2"), + CompiledSQL("DROP TABLE __[SCHEMA__none].t1"), ) def test_ddl_hastable(self, plain_tables, connection): @@ -1227,27 +1313,29 @@ def test_option_on_execute(self, plain_tables, connection): conn._execute_20(t3.delete(), execution_options=execution_options) asserter.assert_( - CompiledSQL("INSERT INTO [SCHEMA__none].t1 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_foo].t2 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL( + "UPDATE __[SCHEMA__none].t1 SET x=:x WHERE " + "__[SCHEMA__none].t1.x = :x_1" + ), CompiledSQL( - "UPDATE [SCHEMA__none].t1 SET x=:x WHERE " - "[SCHEMA__none].t1.x = :x_1" + "UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE " + "__[SCHEMA_foo].t2.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_foo].t2 SET x=:x WHERE " - "[SCHEMA_foo].t2.x = :x_1" + "UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE " + "__[SCHEMA_bar].t3.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_bar].t3 SET x=:x WHERE " - "[SCHEMA_bar].t3.x = :x_1" + "SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1" ), - CompiledSQL("SELECT [SCHEMA__none].t1.x FROM [SCHEMA__none].t1"), - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2"), - CompiledSQL("SELECT [SCHEMA_bar].t3.x FROM [SCHEMA_bar].t3"), - CompiledSQL("DELETE FROM [SCHEMA__none].t1"), - CompiledSQL("DELETE FROM [SCHEMA_foo].t2"), - CompiledSQL("DELETE FROM [SCHEMA_bar].t3"), + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"), + CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"), + CompiledSQL("DELETE FROM __[SCHEMA__none].t1"), + CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"), + CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"), ) def test_crud(self, plain_tables, connection): @@ -1285,27 +1373,29 @@ def test_crud(self, plain_tables, connection): conn.execute(t3.delete()) asserter.assert_( - 
CompiledSQL("INSERT INTO [SCHEMA__none].t1 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_foo].t2 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"), CompiledSQL( - "UPDATE [SCHEMA__none].t1 SET x=:x WHERE " - "[SCHEMA__none].t1.x = :x_1" + "UPDATE __[SCHEMA__none].t1 SET x=:x WHERE " + "__[SCHEMA__none].t1.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_foo].t2 SET x=:x WHERE " - "[SCHEMA_foo].t2.x = :x_1" + "UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE " + "__[SCHEMA_foo].t2.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_bar].t3 SET x=:x WHERE " - "[SCHEMA_bar].t3.x = :x_1" + "UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE " + "__[SCHEMA_bar].t3.x = :x_1" ), - CompiledSQL("SELECT [SCHEMA__none].t1.x FROM [SCHEMA__none].t1"), - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2"), - CompiledSQL("SELECT [SCHEMA_bar].t3.x FROM [SCHEMA_bar].t3"), - CompiledSQL("DELETE FROM [SCHEMA__none].t1"), - CompiledSQL("DELETE FROM [SCHEMA_foo].t2"), - CompiledSQL("DELETE FROM [SCHEMA_bar].t3"), + CompiledSQL( + "SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1" + ), + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"), + CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"), + CompiledSQL("DELETE FROM __[SCHEMA__none].t1"), + CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"), + CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"), ) def test_via_engine(self, plain_tables, metadata): @@ -1327,7 +1417,7 @@ def test_via_engine(self, plain_tables, metadata): with eng.connect() as conn: conn.execute(select(t2.c.x)) asserter.assert_( - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2") + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2") ) @@ -2102,6 +2192,30 @@ def test_dispose_event(self, testing_engine): eq_(canary.mock_calls, [call(eng), call(eng)]) + @testing.requires.ad_hoc_engines + @testing.combinations(True, False, argnames="close") + def test_close_parameter(self, testing_engine, close): + eng = testing_engine( + options=dict(pool_size=1, max_overflow=0, poolclass=QueuePool) + ) + + conn = eng.connect() + dbapi_conn_one = conn.connection.dbapi_connection + conn.close() + + eng_copy = copy.copy(eng) + eng_copy.dispose(close=close) + copy_conn = eng_copy.connect() + dbapi_conn_two = copy_conn.connection.dbapi_connection + + is_not(dbapi_conn_one, dbapi_conn_two) + + conn = eng.connect() + if close: + is_not(dbapi_conn_one, conn.connection.dbapi_connection) + else: + is_(dbapi_conn_one, conn.connection.dbapi_connection) + def test_retval_flag(self): canary = [] @@ -3482,6 +3596,58 @@ def define_tables(cls, metadata): test_needs_acid=True, ) + def test_non_dict_mapping(self, connection): + """ensure arbitrary Mapping works for execute()""" + + class NotADict(collections_abc.Mapping): + def __init__(self, _data): + self._data = _data + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self): + return self._data.keys() + + nd = NotADict({"a": 10, "b": 15}) + eq_(dict(nd), {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer), bindparam("b", type_=Integer) + ), + nd, + ) + eq_(result.first(), (10, 15)) + + def test_row_works_as_mapping(self, connection): + """ensure the RowMapping object works as 
a parameter dictionary for + execute.""" + + result = connection.execute( + select(literal(10).label("a"), literal(15).label("b")) + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer).label("a"), + bindparam("b", type_=Integer).label("b"), + ), + row._mapping, + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + @testing.combinations( ({}, {}, {}), ({"a": "b"}, {}, {"a": "b"}), @@ -3823,6 +3989,7 @@ def translate_select_structure(self, select_stmt, **kwargs): class MyDialect(SQLiteDialect_pysqlite): statement_compiler = MyCompiler + supports_statement_cache = False from sqlalchemy.dialects import registry diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py index c5f8b69b64d..ded91490396 100644 --- a/test/engine/test_logging.py +++ b/test/engine/test_logging.py @@ -449,6 +449,14 @@ def _test_queuepool(self, q, dispose=True): conn.close() conn = None + conn = q.connect() + conn._close_special(transaction_reset=True) + conn = None + + conn = q.connect() + conn._close_special(transaction_reset=False) + conn = None + conn = q.connect() conn = None del conn @@ -460,15 +468,21 @@ def _test_queuepool(self, q, dispose=True): [ "Created new connection %r", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", "Connection %s rollback-on-return", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", "Connection %s rollback-on-return", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", + "Connection %s reset, transaction already reset", + "Connection %r checked out from pool", + "Connection %r being returned to pool", + "Connection %s rollback-on-return", + "Connection %r checked out from pool", + "Connection %r being returned to pool", "Connection %s rollback-on-return", - "Closing connection %r", + "%s connection %r", ] + (["Pool disposed. 
%s"] if dispose else []), ) @@ -587,6 +601,30 @@ def test_unnamed_logger_echoflags_execute(self): class TransactionContextLoggingTest(fixtures.TestBase): + __only_on__ = "sqlite" + + @testing.fixture() + def plain_assert_buf(self, plain_logging_engine): + buf = logging.handlers.BufferingHandler(100) + for log in [ + logging.getLogger("sqlalchemy.engine"), + ]: + log.addHandler(buf) + + def go(expected): + assert buf.buffer + + buflines = [rec.msg % rec.args for rec in buf.buffer] + + eq_(buflines, expected) + buf.flush() + + yield go + for log in [ + logging.getLogger("sqlalchemy.engine"), + ]: + log.removeHandler(buf) + @testing.fixture() def assert_buf(self, logging_engine): buf = logging.handlers.BufferingHandler(100) @@ -616,6 +654,28 @@ def logging_engine(self, testing_engine): e.connect().close() return e + @testing.fixture() + def autocommit_iso_logging_engine(self, testing_engine): + kw = {"echo": True, "future": True, "isolation_level": "AUTOCOMMIT"} + e = testing_engine(options=kw) + e.connect().close() + return e + + @testing.fixture() + def plain_logging_engine(self, testing_engine): + # deliver an engine with logging using the plain logging API, + # not the echo parameter + log = logging.getLogger("sqlalchemy.engine") + existing_level = log.level + log.setLevel(logging.DEBUG) + + try: + e = testing_engine(future=True) + e.connect().close() + yield e + finally: + log.setLevel(existing_level) + def test_begin_once_block(self, logging_engine, assert_buf): with logging_engine.begin(): pass @@ -636,6 +696,38 @@ def test_commit_as_you_go_block_rollback(self, logging_engine, assert_buf): assert_buf(["BEGIN (implicit)", "ROLLBACK"]) + def test_commit_as_you_go_block_commit_engine_level_autocommit( + self, autocommit_iso_logging_engine, assert_buf + ): + with autocommit_iso_logging_engine.connect() as conn: + conn.begin() + conn.commit() + + assert_buf( + [ + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", + "COMMIT using DBAPI connection.commit(), DBAPI " + "should ignore due to autocommit mode", + ] + ) + + def test_commit_engine_level_autocommit_exec_opt_nonauto( + self, autocommit_iso_logging_engine, assert_buf + ): + with autocommit_iso_logging_engine.execution_options( + isolation_level=testing.db.dialect.default_isolation_level + ).connect() as conn: + conn.begin() + conn.commit() + + assert_buf( + [ + "BEGIN (implicit)", + "COMMIT", + ] + ) + def test_commit_as_you_go_block_commit_autocommit( self, logging_engine, assert_buf ): @@ -647,7 +739,8 @@ def test_commit_as_you_go_block_commit_autocommit( assert_buf( [ - "BEGIN (implicit)", + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", "COMMIT using DBAPI connection.commit(), DBAPI " "should ignore due to autocommit mode", ] @@ -664,12 +757,80 @@ def test_commit_as_you_go_block_rollback_autocommit( assert_buf( [ - "BEGIN (implicit)", + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", "ROLLBACK using DBAPI connection.rollback(), DBAPI " "should ignore due to autocommit mode", ] ) + def test_logging_compatibility( + self, plain_assert_buf, plain_logging_engine + ): + """ensure plain logging doesn't produce API errors. 
+ + Added as part of #7612 + + """ + e = plain_logging_engine + + with e.connect() as conn: + result = conn.exec_driver_sql("select 1") + result.all() + + plain_assert_buf( + [ + "BEGIN (implicit)", + "select 1", + "[raw sql] ()", + "Col ('1',)", + "Row (1,)", + "ROLLBACK", + ] + ) + + @testing.requires.python38 + def test_log_messages_have_correct_metadata_plain( + self, plain_logging_engine + ): + """test #7612""" + self._test_log_messages_have_correct_metadata(plain_logging_engine) + + @testing.requires.python38 + def test_log_messages_have_correct_metadata_echo(self, logging_engine): + """test #7612""" + self._test_log_messages_have_correct_metadata(logging_engine) + + def _test_log_messages_have_correct_metadata(self, logging_engine): + buf = logging.handlers.BufferingHandler(100) + log = logging.getLogger("sqlalchemy.engine") + try: + log.addHandler(buf) + + with logging_engine.connect().execution_options( + isolation_level="AUTOCOMMIT" + ) as conn: + conn.begin() + conn.rollback() + finally: + log.removeHandler(buf) + + assert len(buf.buffer) >= 2 + + # log messages must originate from functions called 'begin'/'rollback' + logging_functions = {rec.funcName for rec in buf.buffer} + assert any( + "begin" in fn for fn in logging_functions + ), logging_functions + assert any( + "rollback" in fn for fn in logging_functions + ), logging_functions + + # log messages must originate from different lines + log_lines = {rec.lineno for rec in buf.buffer} + assert len(log_lines) > 1, log_lines + buf.flush() + class LoggingTokenTest(fixtures.TestBase): def setup_test(self): diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 67d8369b5dc..28362ba2a1c 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -1,9 +1,12 @@ +import copy + import sqlalchemy as tsa from sqlalchemy import create_engine from sqlalchemy import engine_from_config from sqlalchemy import exc from sqlalchemy import pool from sqlalchemy import testing +from sqlalchemy import util from sqlalchemy.dialects import plugins from sqlalchemy.dialects import registry from sqlalchemy.engine.default import DefaultDialect @@ -11,9 +14,11 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false +from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import expect_deprecated @@ -177,6 +182,17 @@ def test_query_string(self): eq_(u.query, {"arg1=": "param1", "arg2": "param 2"}) eq_(str(u), test_url) + def test_query_string_py2_unicode(self): + url_str = u"dialect://user:pass@host/?arg1=param1&arg2=param2" + if util.py2k: + # just to make sure linters / formatters etc. 
don't erase the + # 'u' above + assert isinstance(url_str, unicode) # noqa + u = url.make_url(url_str) + eq_(u.query, {"arg1": "param1", "arg2": "param2"}) + eq_(u.database, "") + eq_(str(u), "dialect://user:pass@host/?arg1=param1&arg2=param2") + def test_comparison(self): common_url = ( "dbtype://username:password" @@ -193,6 +209,25 @@ def test_comparison(self): is_true(url1 != url3) is_false(url1 == url3) + def test_copy(self): + url1 = url.make_url( + "dialect://user:pass@host/db?arg1%3D=param1&arg2=param+2" + ) + url2 = copy.copy(url1) + eq_(url1, url2) + is_not(url1, url2) + + def test_deepcopy(self): + url1 = url.make_url( + "dialect://user:pass@host/db?arg1%3D=param1&arg2=param+2" + ) + url2 = copy.deepcopy(url1) + eq_(url1, url2) + is_not(url1, url2) + is_not(url1.query, url2.query) # immutabledict of immutable k/v, + # but it copies it on constructor + # in any case if params are present + @testing.combinations( "drivername", "username", @@ -239,6 +274,17 @@ def test_update_query_dict(self, starting, update_with, expected, append): url.make_url("drivername:///?%s" % expected), ) + @testing.combinations( + "drivername://", + "drivername://?foo=bar", + "drivername://?foo=bar&foo=bat", + ) + def test_query_dict_immutable(self, urlstr): + url_obj = url.make_url(urlstr) + + with expect_raises_message(TypeError, ".*immutable"): + url_obj.query["foo"] = "hoho" + @testing.combinations( ( "foo1=bar1&foo2=bar2", diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 43ec9cc3ffa..fea65fe4c4a 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -5,6 +5,7 @@ import weakref import sqlalchemy as tsa +from sqlalchemy import create_engine from sqlalchemy import event from sqlalchemy import pool from sqlalchemy import select @@ -14,7 +15,7 @@ from sqlalchemy.pool.base import _ConnDialect from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_context_ok -from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises from sqlalchemy.testing import fixtures @@ -92,10 +93,13 @@ def _queuepool_fixture(self, **kw): def _queuepool_dbapi_fixture(self, **kw): dbapi = MockDBAPI() _is_asyncio = kw.pop("_is_asyncio", False) + _has_terminate = kw.pop("_has_terminate", False) p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw) if _is_asyncio: p._is_asyncio = True p._dialect = _AsyncConnDialect() + if _has_terminate: + p._dialect.has_terminate = True return dbapi, p @@ -468,8 +472,10 @@ def checkout(*arg, **kw): return p, canary - def _checkin_event_fixture(self, _is_asyncio=False): - p = self._queuepool_fixture(_is_asyncio=_is_asyncio) + def _checkin_event_fixture(self, _is_asyncio=False, _has_terminate=False): + p = self._queuepool_fixture( + _is_asyncio=_is_asyncio, _has_terminate=_has_terminate + ) canary = [] @event.listens_for(p, "checkin") @@ -744,9 +750,13 @@ def test_invalidate_event_exception(self): assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc - @testing.combinations((True, testing.requires.python3), (False,)) - def test_checkin_event_gc(self, detach_gced): - p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced) + @testing.variation("is_asyncio", [(True, testing.requires.asyncio), False]) + @testing.variation("has_terminate", [True, False]) + @testing.requires.python3 + def test_checkin_event_gc(self, is_asyncio, has_terminate): + p, 
canary = self._checkin_event_fixture( + _is_asyncio=is_asyncio, _has_terminate=has_terminate + ) c1 = p.connect() @@ -756,6 +766,8 @@ def test_checkin_event_gc(self, detach_gced): del c1 lazy_gc() + detach_gced = is_asyncio and not has_terminate + if detach_gced: # "close_detached" is not called because for asyncio the # connection is just lost. @@ -846,18 +858,34 @@ def listen_three(*args): p2.connect() eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"]) - def test_connect_event_fails_invalidates(self): + @testing.variation("exc_type", ["plain", "base_exception"]) + def test_connect_event_fails_invalidates(self, exc_type): fail = False + if exc_type.plain: + + class RegularThing(Exception): + pass + + exc_cls = RegularThing + elif exc_type.base_exception: + + class TimeoutThing(BaseException): + pass + + exc_cls = TimeoutThing + else: + exc_type.fail() + def listen_one(conn, rec): if fail: - raise Exception("it failed") + raise exc_cls("it failed") def listen_two(conn, rec): rec.info["important_flag"] = True p1 = pool.QueuePool( - creator=MockDBAPI().connect, pool_size=1, max_overflow=0 + creator=MockDBAPI().connect, pool_size=1, max_overflow=0, timeout=5 ) event.listen(p1, "connect", listen_one) event.listen(p1, "connect", listen_two) @@ -868,7 +896,9 @@ def listen_two(conn, rec): conn.close() fail = True - assert_raises(Exception, p1.connect) + + # if the failed checkin is not reverted, the pool is blocked + assert_raises(exc_cls, p1.connect) fail = False @@ -1494,7 +1524,7 @@ def assert_no_wr_callback( return patch.object(pool, "_finalize_fairy", assert_no_wr_callback) - def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): + def _assert_cleanup_on_pooled_reconnect(self, dbapi, p, exc_cls=Exception): # p is QueuePool with size=1, max_overflow=2, # and one connection in the pool that will need to # reconnect when next used (either due to recycle or invalidate) @@ -1503,7 +1533,7 @@ def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): eq_(p.checkedout(), 0) eq_(p._overflow, 0) dbapi.shutdown(True) - assert_raises_context_ok(Exception, p.connect) + assert_raises_context_ok(exc_cls, p.connect) eq_(p._overflow, 0) eq_(p.checkedout(), 0) # and not 1 @@ -1621,20 +1651,45 @@ def checkout(conn, conn_rec, conn_f): c = p.connect() c.close() - def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self): + @testing.variation("exc_type", ["plain", "base_exception"]) + def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self, exc_type): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2) c1 = p.connect() c1.close() - @event.listens_for(p, "checkout") - def handle_checkout_event(dbapi_con, con_record, con_proxy): - if dbapi.is_shutdown: - raise tsa.exc.DisconnectionError() + if exc_type.plain: - self._assert_cleanup_on_pooled_reconnect(dbapi, p) + @event.listens_for(p, "checkout") + def handle_checkout_event(dbapi_con, con_record, con_proxy): + if dbapi.is_shutdown: + raise tsa.exc.DisconnectionError() + + elif exc_type.base_exception: + + class TimeoutThing(BaseException): + pass + + @event.listens_for(p, "checkout") + def handle_checkout_event(dbapi_con, con_record, con_proxy): + if dbapi.is_shutdown: + raise TimeoutThing() + + else: + exc_type.fail() + + self._assert_cleanup_on_pooled_reconnect( + dbapi, + p, + exc_cls=TimeoutThing if exc_type.base_exception else Exception, + ) - @testing.combinations((True, testing.requires.python3), (False,)) + @testing.variation( + "detach_gced", + [("detached_gc", testing.requires.asyncio), "normal_gc"], 
+ ) + @testing.emits_warning("The garbage collector") + @testing.requires.python3 def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced): dbapi, pool = self._queuepool_dbapi_fixture( pool_size=1, max_overflow=2, _is_asyncio=detach_gced @@ -1821,7 +1876,7 @@ def test_no_double_checkin(self): c1 = p.connect() rec = c1._connection_record c1.close() - assert_raises_message( + assert_warns_message( Warning, "Double checkin attempted on %s" % rec, rec.checkin ) @@ -1911,14 +1966,90 @@ def _fixture(self, **kw): pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw), ) - def test_plain_rollback(self): + def _engine_fixture(self, **kw): + dbapi = Mock() + + return dbapi, create_engine( + "postgresql://", + module=dbapi, + creator=lambda: dbapi.connect("foo.db"), + _initialize=False, + ) + + def test_custom(self): + dbapi, p = self._fixture(reset_on_return=None) + + @event.listens_for(p, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = p.connect() + c1.close() + + assert dbapi.connect().special_reset_method.called + assert not dbapi.connect().rollback.called + assert not dbapi.connect().commit.called + + @testing.combinations(True, False, argnames="assert_w_event") + @testing.combinations(True, False, argnames="use_engine_transaction") + def test_custom_via_engine(self, assert_w_event, use_engine_transaction): + dbapi, engine = self._engine_fixture(reset_on_return=None) + + if assert_w_event: + + @event.listens_for(engine, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = engine.connect() + if use_engine_transaction: + c1.begin() + c1.close() + assert dbapi.connect().rollback.called + + if assert_w_event: + assert dbapi.connect().special_reset_method.called + + @testing.combinations(True, False, argnames="assert_w_event") + def test_plain_rollback(self, assert_w_event): dbapi, p = self._fixture(reset_on_return="rollback") + if assert_w_event: + + @event.listens_for(p, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + c1 = p.connect() c1.close() assert dbapi.connect().rollback.called assert not dbapi.connect().commit.called + if assert_w_event: + assert dbapi.connect().special_reset_method.called + + @testing.combinations(True, False, argnames="assert_w_event") + @testing.combinations(True, False, argnames="use_engine_transaction") + def test_plain_rollback_via_engine( + self, assert_w_event, use_engine_transaction + ): + dbapi, engine = self._engine_fixture(reset_on_return="rollback") + + if assert_w_event: + + @event.listens_for(engine, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = engine.connect() + if use_engine_transaction: + c1.begin() + c1.close() + assert dbapi.connect().rollback.called + + if assert_w_event: + assert dbapi.connect().special_reset_method.called + def test_plain_commit(self): dbapi, p = self._fixture(reset_on_return="commit") diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index 51da845b397..2079fbe7df9 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -15,6 +15,7 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import assert_raises_message_context_ok +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises @@ -967,6 +968,7 @@ def 
get_default_schema_name(connection): util.warn("Exception attempting to detect") eng.dialect._get_default_schema_name = get_default_schema_name + eng.dialect._check_unicode_description = mock.Mock() return eng def test_cursor_explode(self): @@ -982,11 +984,13 @@ def test_cursor_explode(self): def test_cursor_shutdown_in_initialize(self): db = self._fixture(True, True) - assert_raises_message_context_ok( + assert_warns_message( exc.SAWarning, "Exception attempting to detect", db.connect ) + # there's legacy py2k stuff happening here making this + # less smooth and probably buggy eq_( - db.pool.logger.error.mock_calls, + db.pool.logger.error.mock_calls[0:1], [call("Error closing cursor", exc_info=True)], ) @@ -1349,6 +1353,9 @@ def test_pre_ping_db_stays_shutdown(self): class InvalidateDuringResultTest(fixtures.TestBase): __backend__ = True + # test locks SQLite file databases due to unconsumed results + __requires__ = ("ad_hoc_engines",) + def setup_test(self): self.engine = engines.reconnecting_engine() self.meta = MetaData() @@ -1371,31 +1378,25 @@ def teardown_test(self): self.meta.drop_all(conn) self.engine.dispose() - @testing.crashes( - "oracle", - "cx_oracle 6 doesn't allow a close like this due to open cursors", - ) - @testing.fails_if( - [ - "+mariadbconnector", - "+mysqlconnector", - "+mysqldb", - "+cymysql", - "+pymysql", - "+pg8000", - "+asyncpg", - "+aiosqlite", - "+aiomysql", - "+asyncmy", - ], - "Buffers the result set and doesn't check for connection close", - ) def test_invalidate_on_results(self): conn = self.engine.connect() - result = conn.exec_driver_sql("select * from sometable") + result = conn.exec_driver_sql( + "select * from sometable", + ) for x in range(20): result.fetchone() + + real_cursor = result.cursor self.engine.test_shutdown() + + def produce_side_effect(): + # will fail because connection was closed, with an exception + # that should trigger disconnect routines + real_cursor.execute("select * from sometable") + + result.cursor = Mock( + fetchone=mock.Mock(side_effect=produce_side_effect) + ) try: _assert_invalidated(result.fetchone) assert conn.invalidated diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py index 0a46ddeecaa..64a3bc4d329 100644 --- a/test/engine/test_reflection.py +++ b/test/engine/test_reflection.py @@ -12,6 +12,7 @@ from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import schema +from sqlalchemy import select from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import testing @@ -23,6 +24,7 @@ from sqlalchemy.testing import config from sqlalchemy.testing import eq_ from sqlalchemy.testing import eq_regex +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ @@ -253,41 +255,6 @@ def test_nonexistent(self, connection): ) assert "nonexistent" not in meta.tables - def test_include_columns(self, connection, metadata): - meta = metadata - foo = Table( - "foo", - meta, - *[Column(n, sa.String(30)) for n in ["a", "b", "c", "d", "e", "f"]] - ) - meta.create_all(connection) - meta2 = MetaData() - foo = Table( - "foo", - meta2, - autoload_with=connection, - include_columns=["b", "f", "e"], - ) - # test that cols come back in original order - eq_([c.name for c in foo.c], ["b", "e", "f"]) - for c in ("b", "f", "e"): - assert c in foo.c - for c in ("a", "c", "d"): - assert c not in foo.c - - # test against a table which is already reflected - meta3 = 
MetaData() - foo = Table("foo", meta3, autoload_with=connection) - - foo = Table( - "foo", meta3, include_columns=["b", "f", "e"], extend_existing=True - ) - eq_([c.name for c in foo.c], ["b", "e", "f"]) - for c in ("b", "f", "e"): - assert c in foo.c - for c in ("a", "c", "d"): - assert c not in foo.c - def test_extend_existing(self, connection, metadata): meta = metadata @@ -2236,3 +2203,156 @@ def test_table_reflection(self): is_true(table.c.id1.identity is not None) eq_(table.c.id1.identity.start, 2) eq_(table.c.id1.identity.increment, 3) + + +class IncludeColsFksTest(AssertsCompiledSQL, fixtures.TestBase): + __dialect__ = "default" + + @testing.fixture + def tab_wo_fks(self, connection, metadata): + meta = metadata + foo = Table( + "foo", + meta, + *[Column(n, sa.String(30)) for n in ["a", "b", "c", "d", "e", "f"]] + ) + meta.create_all(connection) + + return foo + + @testing.fixture + def tab_w_fks(self, connection, metadata): + Table( + "a", + metadata, + Column("x", Integer, primary_key=True), + test_needs_fk=True, + ) + + b = Table( + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("q", Integer), + Column("p", Integer), + Column("r", Integer, ForeignKey("a.x")), + Column("s", Integer), + Column("t", Integer), + test_needs_fk=True, + ) + + metadata.create_all(connection) + + return b + + def test_include_columns(self, connection, tab_wo_fks): + foo = tab_wo_fks + meta2 = MetaData() + foo = Table( + "foo", + meta2, + autoload_with=connection, + include_columns=["b", "f", "e"], + ) + # test that cols come back in original order + eq_([c.name for c in foo.c], ["b", "e", "f"]) + for c in ("b", "f", "e"): + assert c in foo.c + for c in ("a", "c", "d"): + assert c not in foo.c + + # test against a table which is already reflected + meta3 = MetaData() + foo = Table("foo", meta3, autoload_with=connection) + + foo = Table( + "foo", meta3, include_columns=["b", "f", "e"], extend_existing=True + ) + eq_([c.name for c in foo.c], ["b", "e", "f"]) + for c in ("b", "f", "e"): + assert c in foo.c + for c in ("a", "c", "d"): + assert c not in foo.c + + @testing.emits_warning + @testing.combinations(True, False, argnames="resolve_fks") + def test_include_cols_skip_fk_col( + self, connection, tab_w_fks, resolve_fks + ): + """test #8100""" + + m2 = MetaData() + + b2 = Table( + "b", + m2, + autoload_with=connection, + resolve_fks=resolve_fks, + include_columns=["x", "q", "p"], + ) + + eq_([c.name for c in b2.c], ["x", "q", "p"]) + + # no FK, whether or not resolve_fks was called + eq_(b2.constraints, set((b2.primary_key,))) + + b2a = b2.alias() + eq_([c.name for c in b2a.c], ["x", "q", "p"]) + + self.assert_compile(select(b2), "SELECT b.x, b.q, b.p FROM b") + self.assert_compile( + select(b2.alias()), + "SELECT b_1.x, b_1.q, b_1.p FROM b AS b_1", + ) + + def test_table_works_minus_fks(self, connection, tab_w_fks): + """test #8101""" + + m2 = MetaData() + + b2 = Table( + "b", + m2, + autoload_with=connection, + resolve_fks=False, + ) + + eq_([c.name for c in b2.c], ["x", "q", "p", "r", "s", "t"]) + + b2a = b2.alias() + eq_([c.name for c in b2a.c], ["x", "q", "p", "r", "s", "t"]) + + self.assert_compile( + select(b2), "SELECT b.x, b.q, b.p, b.r, b.s, b.t FROM b" + ) + b2a_1 = b2.alias() + self.assert_compile( + select(b2a_1), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t FROM b AS b_1", + ) + + # reflecting the related table + a2 = Table("a", m2, autoload_with=connection) + + # the existing alias doesn't know about it + with expect_raises_message( + sa.exc.InvalidRequestError, + 
"Foreign key associated with column 'anon_1.r' could not find " + "table 'a' with which to generate a foreign key to target " + "column 'x'", + ): + select(b2a_1).join(a2).compile() + + # can still join manually (needed to fix inside of util for this...) + self.assert_compile( + select(b2a_1).join(a2, b2a_1.c.r == a2.c.x), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t " + "FROM b AS b_1 JOIN a ON b_1.r = a.x", + ) + + # a new alias does know about it however + self.assert_compile( + select(b2.alias()).join(a2), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t " + "FROM b AS b_1 JOIN a ON a.x = b_1.r", + ) diff --git a/test/engine/test_transaction.py b/test/engine/test_transaction.py index b8e7edc6522..85e39c49815 100644 --- a/test/engine/test_transaction.py +++ b/test/engine/test_transaction.py @@ -346,7 +346,10 @@ def test_savepoint_rollback_fails_flat(self, local_connection): with testing.expect_warnings("nested transaction already"): s1.rollback() # no error (though it warns) - t1.commit() # no error + # this test was previously calling "commit", but note relies on + # buggy behavior in PostgreSQL as the transaction block is in fact + # aborted. pg8000 enforces this on the client as of 1.29 + t1.rollback() # no error @testing.requires.savepoints_w_release def test_savepoint_release_fails_flat(self): @@ -368,7 +371,10 @@ def test_savepoint_release_fails_flat(self): assert not s1.is_active s1.rollback() # no error. prior to 1.4 this would try to rollback - t1.commit() # no error + # this test was previously calling "commit", but note relies on + # buggy behavior in PostgreSQL as the transaction block is in fact + # aborted. pg8000 enforces this on the client as of 1.29 + t1.rollback() # no error @testing.requires.savepoints_w_release def test_savepoint_release_fails_ctxmanager(self, local_connection): @@ -487,6 +493,36 @@ def test_close2(self, local_connection): result = connection.exec_driver_sql("select * from users") assert len(result.fetchall()) == 0 + @testing.requires.independent_connections + def test_no_rollback_in_deactive(self, local_connection): + """test #7388""" + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object(testing.db.dialect, "do_commit", fail): + with expect_raises_message(BaseException, "some base exception"): + with local_connection.begin(): + pass + + @testing.requires.independent_connections + @testing.requires.savepoints + def test_no_rollback_in_deactive_savepoint(self, local_connection): + """test #7388""" + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object( + testing.db.dialect, "do_release_savepoint", fail + ): + with local_connection.begin(): + with expect_raises_message( + BaseException, "some base exception" + ): + with local_connection.begin_nested(): + pass + @testing.requires.savepoints def test_nested_subtransaction_rollback(self, local_connection): connection = local_connection @@ -1568,8 +1604,10 @@ def test_no_autocommit_w_begin(self): with testing.db.begin() as conn: assert_raises_message( exc.InvalidRequestError, - "This connection has already begun a transaction; " - "isolation_level may not be altered until transaction end", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; " + r"isolation_level may not be altered unless rollback\(\) or " + r"commit\(\) is called first.", conn.execution_options, isolation_level="AUTOCOMMIT", ) @@ -1582,8 +1620,10 @@ def 
test_no_autocommit_w_autobegin(self): assert_raises_message( exc.InvalidRequestError, - "This connection has already begun a transaction; " - "isolation_level may not be altered until transaction end", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; " + r"isolation_level may not be altered unless rollback\(\) or " + r"commit\(\) is called first.", conn.execution_options, isolation_level="AUTOCOMMIT", ) @@ -1822,7 +1862,10 @@ def test_no_double_begin(self): assert_raises_message( exc.InvalidRequestError, - "a transaction is already begun for this connection", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; can't " + r"call begin\(\) here unless rollback\(\) or commit\(\) is " + r"called first.", conn.begin, ) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 3c260f9e5d9..cb79fa3826c 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -1,4 +1,5 @@ import asyncio +import inspect as stdlib_inspect from sqlalchemy import Column from sqlalchemy import create_engine @@ -14,8 +15,11 @@ from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union_all +from sqlalchemy.engine import cursor as _cursor +from sqlalchemy.ext.asyncio import async_engine_from_config from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.ext.asyncio import engine as _async_engine +from sqlalchemy.ext.asyncio import exc as async_exc from sqlalchemy.ext.asyncio import exc as asyncio_exc from sqlalchemy.ext.asyncio.base import ReversibleProxy from sqlalchemy.ext.asyncio.engine import AsyncConnection @@ -173,6 +177,11 @@ class EngineFixture(AsyncFixture, fixtures.TablesTest): def async_engine(self): return engines.testing_engine(asyncio=True, transfer_staticpool=True) + @testing.fixture + def async_connection(self, async_engine): + with async_engine.sync_engine.connect() as conn: + yield AsyncConnection(async_engine, conn) + @classmethod def define_tables(cls, metadata): Table( @@ -219,6 +228,30 @@ def test_proxied_attrs_engine(self, async_engine): eq_(async_engine.driver, sync_engine.driver) eq_(async_engine.echo, sync_engine.echo) + @async_test + async def test_run_async(self, async_engine): + async def test_meth(async_driver_connection): + # there's no method that's guaranteed to be on every + # driver, so just stringify it and compare that to the + # outside + return str(async_driver_connection) + + def run_sync_to_async(connection): + connection_fairy = connection.connection + async_return = connection_fairy.run_async( + lambda driver_connection: test_meth(driver_connection) + ) + assert not stdlib_inspect.iscoroutine(async_return) + return async_return + + async with async_engine.connect() as conn: + driver_connection = ( + await conn.get_raw_connection() + ).driver_connection + res = await conn.run_sync(run_sync_to_async) + assert not stdlib_inspect.iscoroutine(res) + eq_(res, str(driver_connection)) + @async_test async def test_engine_eq_ne(self, async_engine): e2 = _async_engine.AsyncEngine(async_engine.sync_engine) @@ -331,56 +364,53 @@ def test_execution_options(self, async_engine): @async_test async def test_proxied_attrs_connection(self, async_engine): - conn = await async_engine.connect() - - sync_conn = conn.sync_connection + async with async_engine.connect() as conn: + sync_conn = conn.sync_connection - is_(conn.engine, async_engine) - is_(conn.closed, 
sync_conn.closed) - is_(conn.dialect, async_engine.sync_engine.dialect) - eq_(conn.default_isolation_level, sync_conn.default_isolation_level) + is_(conn.engine, async_engine) + is_(conn.closed, sync_conn.closed) + is_(conn.dialect, async_engine.sync_engine.dialect) + eq_( + conn.default_isolation_level, sync_conn.default_isolation_level + ) @async_test - async def test_transaction_accessor(self, async_engine): - async with async_engine.connect() as conn: - is_none(conn.get_transaction()) - is_false(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + async def test_transaction_accessor(self, async_connection): + conn = async_connection + is_none(conn.get_transaction()) + is_false(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - trans = await conn.begin() + trans = await conn.begin() - is_true(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - is_( - trans.sync_transaction, conn.get_transaction().sync_transaction - ) + is_(trans.sync_transaction, conn.get_transaction().sync_transaction) - nested = await conn.begin_nested() + nested = await conn.begin_nested() - is_true(conn.in_transaction()) - is_true(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_true(conn.in_nested_transaction()) - is_( - conn.get_nested_transaction().sync_transaction, - nested.sync_transaction, - ) - eq_(conn.get_nested_transaction(), nested) + is_( + conn.get_nested_transaction().sync_transaction, + nested.sync_transaction, + ) + eq_(conn.get_nested_transaction(), nested) - is_( - trans.sync_transaction, conn.get_transaction().sync_transaction - ) + is_(trans.sync_transaction, conn.get_transaction().sync_transaction) - await nested.commit() + await nested.commit() - is_true(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - await trans.rollback() + await trans.rollback() - is_none(conn.get_transaction()) - is_false(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_none(conn.get_transaction()) + is_false(conn.in_transaction()) + is_false(conn.in_nested_transaction()) @testing.requires.queue_pool @async_test @@ -403,31 +433,26 @@ async def test_invalidate(self, async_engine): is_not(new_fairy, connection_fairy) is_(new_fairy.is_valid, True) is_(connection_fairy.is_valid, False) + await conn.close() @async_test - async def test_get_dbapi_connection_raise(self, async_engine): - - conn = await async_engine.connect() - + async def test_get_dbapi_connection_raise(self, async_connection): with testing.expect_raises_message( exc.InvalidRequestError, "AsyncConnection.connection accessor is not " "implemented as the attribute", ): - conn.connection + async_connection.connection @async_test - async def test_get_raw_connection(self, async_engine): + async def test_get_raw_connection(self, async_connection): - conn = await async_engine.connect() - - pooled = await conn.get_raw_connection() - is_(pooled, conn.sync_connection.connection) + pooled = await async_connection.get_raw_connection() + is_(pooled, async_connection.sync_connection.connection) @async_test - async def test_isolation_level(self, async_engine): - conn = await async_engine.connect() - + async def test_isolation_level(self, async_connection): + conn = async_connection sync_isolation_level = await greenlet_spawn( conn.sync_connection.get_isolation_level ) @@ -440,8 +465,6 @@ async def test_isolation_level(self, 
async_engine): eq_(isolation_level, "SERIALIZABLE") - await conn.close() - @testing.requires.queue_pool @async_test async def test_dispose(self, async_engine): @@ -464,9 +487,9 @@ async def test_dispose(self, async_engine): @testing.requires.independent_connections @async_test async def test_init_once_concurrency(self, async_engine): - c1 = async_engine.connect() - c2 = async_engine.connect() - await asyncio.wait([c1, c2]) + async with async_engine.connect() as c1, async_engine.connect() as c2: + coro = asyncio.gather(c1.scalar(select(1)), c2.scalar(select(2))) + eq_(await coro, [1, 2]) @async_test async def test_connect_ctxmanager(self, async_engine): @@ -591,6 +614,16 @@ async def test_create_async_engine_server_side_cursor(self, async_engine): server_side_cursors=True, ) + def test_async_engine_from_config(self): + config = { + "sqlalchemy.url": str(testing.db.url), + "sqlalchemy.echo": "true", + } + engine = async_engine_from_config(config) + assert engine.url == testing.db.url + assert engine.echo is True + assert engine.dialect.is_async is True + class AsyncEventTest(EngineFixture): """The engine events all run in their normal synchronous context. @@ -612,8 +645,29 @@ async def test_no_async_listeners(self, async_engine): ): event.listen(async_engine, "before_cursor_execute", mock.Mock()) - conn = await async_engine.connect() + async with async_engine.connect() as conn: + with testing.expect_raises_message( + NotImplementedError, + "asynchronous events are not implemented " + "at this time. Apply synchronous listeners to the " + "AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes.", + ): + event.listen(conn, "before_cursor_execute", mock.Mock()) + + @async_test + async def test_no_async_listeners_dialect_event(self, async_engine): + with testing.expect_raises_message( + NotImplementedError, + "asynchronous events are not implemented " + "at this time. Apply synchronous listeners to the " + "AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes.", + ): + event.listen(async_engine, "do_execute", mock.Mock()) + @async_test + async def test_no_async_listeners_pool_event(self, async_engine): with testing.expect_raises_message( NotImplementedError, "asynchronous events are not implemented " @@ -621,7 +675,7 @@ async def test_no_async_listeners(self, async_engine): "AsyncEngine.sync_engine or " "AsyncConnection.sync_connection attributes.", ): - event.listen(conn, "before_cursor_execute", mock.Mock()) + event.listen(async_engine, "checkout", mock.Mock()) @async_test async def test_sync_before_cursor_execute_engine(self, async_engine): @@ -690,6 +744,32 @@ async def test_inspect_connection(self, async_engine): class AsyncResultTest(EngineFixture): + @async_test + async def test_no_ss_cursor_w_execute(self, async_engine): + users = self.tables.users + async with async_engine.connect() as conn: + conn = await conn.execution_options(stream_results=True) + with expect_raises_message( + async_exc.AsyncMethodRequired, + r"Can't use the AsyncConnection.execute\(\) method with a " + r"server-side cursor. 
Use the AsyncConnection.stream\(\) " + r"method for an async streaming result set.", + ): + await conn.execute(select(users)) + + @async_test + async def test_no_ss_cursor_w_exec_driver_sql(self, async_engine): + async with async_engine.connect() as conn: + conn = await conn.execution_options(stream_results=True) + with expect_raises_message( + async_exc.AsyncMethodRequired, + r"Can't use the AsyncConnection.exec_driver_sql\(\) " + r"method with a " + r"server-side cursor. Use the AsyncConnection.stream\(\) " + r"method for an async streaming result set.", + ): + await conn.exec_driver_sql("SELECT * FROM users") + @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) @@ -795,20 +875,53 @@ async def test_columns_all(self, async_engine): @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) + @testing.combinations(None, 2, 5, 10, argnames="yield_per") + @testing.combinations("method", "opt", argnames="yield_per_type") @async_test - async def test_partitions(self, async_engine, filter_): + async def test_partitions( + self, async_engine, filter_, yield_per, yield_per_type + ): users = self.tables.users async with async_engine.connect() as conn: - result = await conn.stream(select(users)) + stmt = select(users) + if yield_per and yield_per_type == "opt": + stmt = stmt.execution_options(yield_per=yield_per) + result = await conn.stream(stmt) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars(1) + if yield_per and yield_per_type == "method": + result = result.yield_per(yield_per) + check_result = [] - async for partition in result.partitions(5): - check_result.append(partition) + + # stream() sets stream_results unconditionally + assert isinstance( + result._real_result.cursor_strategy, + _cursor.BufferedRowCursorFetchStrategy, + ) + + if yield_per: + partition_size = yield_per + + eq_(result._real_result.cursor_strategy._bufsize, yield_per) + + async for partition in result.partitions(): + check_result.append(partition) + else: + eq_(result._real_result.cursor_strategy._bufsize, 5) + + partition_size = 5 + async for partition in result.partitions(partition_size): + check_result.append(partition) + + ranges = [ + (i, min(20, i + partition_size)) + for i in range(1, 21, partition_size) + ] if filter_ == "mappings": eq_( @@ -818,23 +931,20 @@ async def test_partitions(self, async_engine, filter_): {"user_id": i, "user_name": "name%d" % i} for i in range(a, b) ] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] + for (a, b) in ranges ], ) elif filter_ == "scalars": eq_( check_result, - [ - ["name%d" % i for i in range(a, b)] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] - ], + [["name%d" % i for i in range(a, b)] for (a, b) in ranges], ) else: eq_( check_result, [ [(i, "name%d" % i) for i in range(a, b)] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] + for (a, b) in ranges ], ) @@ -905,6 +1015,8 @@ async def test_scalars(self, async_engine, filter_): class TextSyncDBAPI(fixtures.TestBase): + __requires__ = ("asyncio",) + def test_sync_dbapi_raises(self): with expect_raises_message( exc.InvalidRequestError, @@ -1035,16 +1147,16 @@ async def test_gc_conn(self, testing_engine): def test_regen_conn_but_not_engine(self, async_engine): - sync_conn = async_engine.sync_engine.connect() + with async_engine.sync_engine.connect() as sync_conn: - async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) - async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn) + 
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) + async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn) - is_(async_conn, async_conn2) - is_(async_conn.engine, async_engine) + is_(async_conn, async_conn2) + is_(async_conn.engine, async_engine) - def test_regen_trans_but_not_conn(self, async_engine): - sync_conn = async_engine.sync_engine.connect() + def test_regen_trans_but_not_conn(self, connection_no_trans): + sync_conn = connection_no_trans async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) @@ -1057,3 +1169,23 @@ def test_regen_trans_but_not_conn(self, async_engine): async_t2 = async_conn.get_transaction() is_(async_t1, async_t2) + + +class PoolRegenTest(EngineFixture): + @testing.requires.queue_pool + @async_test + @testing.variation("do_dispose", [True, False]) + async def test_gather_after_dispose(self, testing_engine, do_dispose): + engine = testing_engine( + asyncio=True, options=dict(pool_size=10, max_overflow=10) + ) + + async def thing(engine): + async with engine.connect() as conn: + await conn.exec_driver_sql("select 1") + + if do_dispose: + await engine.dispose() + + tasks = [thing(engine) for _ in range(10)] + await asyncio.gather(*tasks) diff --git a/test/ext/asyncio/test_session_py3k.py b/test/ext/asyncio/test_session_py3k.py index 4e475b2122e..f04b87f3718 100644 --- a/test/ext/asyncio/test_session_py3k.py +++ b/test/ext/asyncio/test_session_py3k.py @@ -11,6 +11,7 @@ from sqlalchemy import update from sqlalchemy.ext.asyncio import async_object_session from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.ext.asyncio import exc as async_exc from sqlalchemy.ext.asyncio.base import ReversibleProxy from sqlalchemy.orm import relationship from sqlalchemy.orm import selectinload @@ -19,9 +20,11 @@ from sqlalchemy.testing import async_test from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import is_ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import is_false from .test_engine_py3k import AsyncFixture as _AsyncFixture from ...orm import _fixtures @@ -164,6 +167,28 @@ async def test_stream_partitions(self, async_session, kw): ], ) + @testing.combinations("statement", "execute", argnames="location") + @async_test + async def test_no_ss_cursor_w_execute(self, async_session, location): + User = self.classes.User + + stmt = select(User) + if location == "statement": + stmt = stmt.execution_options(stream_results=True) + + with expect_raises_message( + async_exc.AsyncMethodRequired, + r"Can't use the AsyncSession.execute\(\) method with a " + r"server-side cursor. 
Use the AsyncSession.stream\(\) " + r"method for an async streaming result set.", + ): + if location == "execute": + await async_session.execute( + stmt, execution_options={"stream_results": True} + ) + else: + await async_session.execute(stmt) + class AsyncSessionTransactionTest(AsyncFixture): run_inserts = None @@ -488,6 +513,22 @@ def end_savepoint(session, transaction): result = await async_session.execute(select(User)) eq_(result.all(), []) + @async_test + @testing.requires.independent_connections + async def test_invalidate(self, async_session): + await async_session.execute(select(1)) + conn = async_session.sync_session.connection() + fairy = conn.connection + connection_rec = fairy._connection_record + + is_false(conn.closed) + is_false(connection_rec._is_hard_or_soft_invalidated()) + await async_session.invalidate() + is_true(conn.closed) + is_true(connection_rec._is_hard_or_soft_invalidated()) + + eq_(async_session.in_transaction(), False) + class AsyncCascadesTest(AsyncFixture): run_inserts = None diff --git a/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py b/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py index 1a89041474b..9ee9c76f467 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py @@ -16,5 +16,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa: E501 p: str = u1.name diff --git a/test/ext/mypy/files/ensure_descriptor_type_noninferred.py b/test/ext/mypy/files/ensure_descriptor_type_noninferred.py index b1dabe8dc9b..e8ce35114e7 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_noninferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_noninferred.py @@ -19,5 +19,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "Optional[int]") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "Optional[int]") # noqa: E501 p: Optional[int] = u1.name diff --git a/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py b/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py index 2154ff074c6..d72649b62a4 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py @@ -22,5 +22,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa: E501 p: str = u1.name diff --git a/test/ext/mypy/files/invalid_noninferred_lh_type.py b/test/ext/mypy/files/invalid_noninferred_lh_type.py index 5084de7225e..e9ff303ca78 100644 --- a/test/ext/mypy/files/invalid_noninferred_lh_type.py +++ b/test/ext/mypy/files/invalid_noninferred_lh_type.py @@ -11,5 +11,5 @@ class User: __tablename__ = "user" id = Column(Integer(), primary_key=True) - # EXPECTED: Left hand assignment 'name: "int"' not compatible with ORM mapped expression # noqa E501 + # EXPECTED: Left hand assignment 'name: "int"' not compatible with ORM mapped expression # noqa: E501 name: int = Column(String()) diff --git a/test/ext/mypy/files/issue_7321.py 
b/test/ext/mypy/files/issue_7321.py new file mode 100644 index 00000000000..d4cd7f2c435 --- /dev/null +++ b/test/ext/mypy/files/issue_7321.py @@ -0,0 +1,22 @@ +from typing import Any +from typing import Dict + +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import declared_attr + + +Base = declarative_base() + + +class Foo(Base): + @declared_attr + def __tablename__(cls) -> str: + return "name" + + @declared_attr + def __mapper_args__(cls) -> Dict[Any, Any]: + return {} + + @declared_attr + def __table_args__(cls) -> Dict[Any, Any]: + return {} diff --git a/test/ext/mypy/files/issue_7321_part2.py b/test/ext/mypy/files/issue_7321_part2.py new file mode 100644 index 00000000000..4227f2797e8 --- /dev/null +++ b/test/ext/mypy/files/issue_7321_part2.py @@ -0,0 +1,28 @@ +from typing import Any +from typing import Dict +from typing import Type + +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import declared_attr + + +Base = declarative_base() + + +class Foo(Base): + # no mypy error emitted regarding the + # Type[Foo] part + @declared_attr + def __tablename__(cls: Type["Foo"]) -> str: + return "name" + + @declared_attr + def __mapper_args__(cls: Type["Foo"]) -> Dict[Any, Any]: + return {} + + # this was a workaround that works if there's no plugin present, make + # sure that doesn't crash anything + @classmethod + @declared_attr + def __table_args__(cls: Type["Foo"]) -> Dict[Any, Any]: + return {} diff --git a/test/ext/mypy/files/issue_9102.py b/test/ext/mypy/files/issue_9102.py new file mode 100644 index 00000000000..a9eea7c606b --- /dev/null +++ b/test/ext/mypy/files/issue_9102.py @@ -0,0 +1,18 @@ +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy.orm import registry + + +class BackendMeta: + __abstract__ = True + mapped_registry: registry = registry() + metadata = mapped_registry.metadata + + +# this decorator is not picked up now, but at least it doesn't crash +@BackendMeta.mapped_registry.mapped +class User: + __tablename__ = "user" + + # EXPECTED_MYPY: Incompatible types in assignment (expression has type "Column[Integer]", variable has type "int") # noqa: E501 + id: int = Column(Integer(), primary_key=True) diff --git a/test/ext/mypy/files/issue_9102_workaround.py b/test/ext/mypy/files/issue_9102_workaround.py new file mode 100644 index 00000000000..3682d29b237 --- /dev/null +++ b/test/ext/mypy/files/issue_9102_workaround.py @@ -0,0 +1,19 @@ +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy.orm import registry + + +class BackendMeta: + __abstract__ = True + mapped_registry: registry = registry() + metadata = mapped_registry.metadata + + +reg: registry = BackendMeta.mapped_registry + + +@reg.mapped +class User: + __tablename__ = "user" + + id: int = Column(Integer(), primary_key=True) diff --git a/test/ext/mypy/files/lambda_default.py b/test/ext/mypy/files/lambda_default.py new file mode 100644 index 00000000000..a1019f0d02f --- /dev/null +++ b/test/ext/mypy/files/lambda_default.py @@ -0,0 +1,11 @@ +import uuid + +from sqlalchemy import Column +from sqlalchemy import String +from sqlalchemy.orm import declarative_base + +Base = declarative_base() + + +class MyClass(Base): + id = Column(String, default=lambda: uuid.uuid4(), primary_key=True) diff --git a/test/ext/mypy/files/relationship_err3.py b/test/ext/mypy/files/relationship_err3.py index aa76ae1f0e0..1c7cd9f303d 100644 --- a/test/ext/mypy/files/relationship_err3.py +++ b/test/ext/mypy/files/relationship_err3.py @@ -24,7 +24,7 @@ class A(Base): 
id = Column(Integer, primary_key=True) data = Column(String) - # EXPECTED: Left hand assignment 'bs: "Set[B]"' not compatible with ORM mapped expression of type "Mapped[List[B]]" # noqa + bs: Set[B] = relationship(B, uselist=True, back_populates="a") # EXPECTED: Left hand assignment 'another_bs: "Set[B]"' not compatible with ORM mapped expression of type "Mapped[B]" # noqa diff --git a/test/ext/mypy/files/typeless_fk_col_cant_infer.py b/test/ext/mypy/files/typeless_fk_col_cant_infer.py index beb4a7a5d0c..0b933db4785 100644 --- a/test/ext/mypy/files/typeless_fk_col_cant_infer.py +++ b/test/ext/mypy/files/typeless_fk_col_cant_infer.py @@ -20,6 +20,6 @@ class Address: __tablename__ = "address" id = Column(Integer, primary_key=True) - # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'user_id'; # noqa E501 + # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'user_id'; # noqa: E501 user_id = Column(ForeignKey("user.id")) email_address = Column(String) diff --git a/test/ext/mypy/files/typing_err3.py b/test/ext/mypy/files/typing_err3.py index 5383f89560c..a81ea067c79 100644 --- a/test/ext/mypy/files/typing_err3.py +++ b/test/ext/mypy/files/typing_err3.py @@ -22,6 +22,7 @@ class User(Base): id = Column(Integer, primary_key=True) + # note this goes away in 2.0 for the moment # EXPECTED_MYPY: Unexpected keyword argument "wrong_arg" for "RelationshipProperty" # noqa addresses: Mapped[List["Address"]] = relationship( "Address", wrong_arg="imwrong" diff --git a/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py b/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py index 4f29932e569..161dce08757 100644 --- a/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py +++ b/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py @@ -1,8 +1,10 @@ from sqlalchemy import Column from sqlalchemy import Enum -from sqlalchemy.orm import declarative_base, Mapped +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import Mapped from . 
import enum_col_import1 -from .enum_col_import1 import IntEnum, StrEnum +from .enum_col_import1 import IntEnum +from .enum_col_import1 import StrEnum Base = declarative_base() diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index 681c9d57bab..a92aee1e712 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -5,6 +5,7 @@ import tempfile from sqlalchemy import testing +from sqlalchemy import util from sqlalchemy.testing import config from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -63,9 +64,25 @@ def run(path, use_plugin=True, incremental=False): ), ] - args.append(path) - - return api.run(args) + if incremental: + args.append(path) + else: + # mypy as of 0.990 is more aggressively blocking messaging + # for paths that are in sys.path, and as pytest puts currdir, + # test/ etc in sys.path, just copy the source file to the + # tempdir we are working in so that we don't have to try to + # manipulate sys.path and/or guess what mypy is doing + filename = os.path.basename(path) + test_program = os.path.join(cachedir, filename) + shutil.copyfile(path, test_program) + args.append(test_program) + + # I set this locally but for the suite here needs to be + # disabled + os.environ.pop("MYPY_FORCE_COLOR", None) + + result = api.run(args) + return result return run @@ -156,6 +173,9 @@ def test_mypy(self, mypy_runner, path): expected_errors = [] expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?: (.+)") py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)") + + from sqlalchemy.ext.mypy.util import mypy_14 + with open(path) as file_: for num, line in enumerate(file_, 1): m = py_ver_re.match(line) @@ -174,15 +194,41 @@ def test_mypy(self, mypy_runner, path): if m: is_mypy = bool(m.group(1)) expected_msg = m.group(2) - expected_msg = re.sub(r"# noqa ?.*", "", m.group(2)) + expected_msg = re.sub(r"# noqa[:]? 
?.*", "", m.group(2)) + + if mypy_14 and util.py39: + # use_lowercase_names, py39 and above + # https://github.com/python/mypy/blob/304997bfb85200fb521ac727ee0ce3e6085e5278/mypy/options.py#L363 # noqa: E501 + + # skip first character which could be capitalized + # "List item x not found" type of message + expected_msg = expected_msg[0] + re.sub( + r"\b(List|Tuple|Dict|Set|Type)\b", + lambda m: m.group(1).lower(), + expected_msg[1:], + ) + + if mypy_14 and util.py310: + # use_or_syntax, py310 and above + # https://github.com/python/mypy/blob/304997bfb85200fb521ac727ee0ce3e6085e5278/mypy/options.py#L368 # noqa: E501 + expected_msg = re.sub( + r"Optional\[(.*?)\]", + lambda m: f"{m.group(1)} | None", + expected_msg, + ) + expected_errors.append( (num, is_mypy, expected_msg.strip()) ) result = mypy_runner(path, use_plugin=use_plugin) + not_located = [] + if expected_errors: - eq_(result[2], 1, msg=result) + # mypy 0.990 changed how return codes work, so don't assume a + # 1 or a 0 return code here, could be either depending on if + # errors were generated or not print(result[0]) @@ -201,9 +247,14 @@ def test_mypy(self, mypy_runner, path): ): break else: + not_located.append(msg) continue del errors[idx] + if not_located: + print(f"Couldn't locate expected messages: {not_located}") + assert False, "expected messages not found, see stdout" + assert not errors, "errors remain: %s" % "\n".join(errors) else: diff --git a/test/ext/test_associationproxy.py b/test/ext/test_associationproxy.py index 0b05fe0387e..44f3890de88 100644 --- a/test/ext/test_associationproxy.py +++ b/test/ext/test_associationproxy.py @@ -34,6 +34,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.mock import call from sqlalchemy.testing.mock import Mock @@ -3343,6 +3344,10 @@ class A(Base): b_data = association_proxy("bs", "value") well_behaved_b_data = association_proxy("bs", "well_behaved_value") + fails_on_class_access = association_proxy( + "bs", "fails_on_class_access" + ) + class B(Base): __tablename__ = "b" @@ -3386,6 +3391,10 @@ def well_behaved_w_expr(self, value): def well_behaved_w_expr(cls): return cast(cls.data, Integer) + @hybrid_property + def fails_on_class_access(self): + return len(self.data) + class C(Base): __tablename__ = "c" @@ -3394,6 +3403,19 @@ class C(Base): _b = relationship("B") attr = association_proxy("_b", "well_behaved_w_expr") + def test_msg_fails_on_cls_access(self): + A, B = self.classes("A", "B") + + a1 = A(bs=[B(data="b1")]) + + with expect_raises_message( + exc.InvalidRequestError, + "Association proxy received an unexpected error when trying to " + 'retreive attribute "B.fails_on_class_access" from ' + r'class "B": .* no len\(\)', + ): + a1.fails_on_class_access + def test_get_ambiguous(self): A, B = self.classes("A", "B") diff --git a/test/ext/test_automap.py b/test/ext/test_automap.py index eaafa347785..9227405e91d 100644 --- a/test/ext/test_automap.py +++ b/test/ext/test_automap.py @@ -501,6 +501,7 @@ def _chaos(self): finally: e.dispose() + @testing.requires.timing_intensive def test_concurrent_automaps_w_configure(self): self._success = True threads = [threading.Thread(target=self._chaos) for i in range(30)] diff --git a/test/ext/test_baked.py b/test/ext/test_baked.py index 977fb363909..a7fb1ec766e 100644 --- a/test/ext/test_baked.py +++ b/test/ext/test_baked.py @@ -1043,6 
+1043,7 @@ def _option_fixture(self): from sqlalchemy.orm.interfaces import UserDefinedOption class RelationshipCache(UserDefinedOption): + inherit_cache = True propagate_to_loaders = True diff --git a/test/ext/test_compiler.py b/test/ext/test_compiler.py index 7fb0213292c..99679712200 100644 --- a/test/ext/test_compiler.py +++ b/test/ext/test_compiler.py @@ -37,6 +37,8 @@ class UserDefinedTest(fixtures.TestBase, AssertsCompiledSQL): def test_column(self): class MyThingy(ColumnClause): + inherit_cache = False + def __init__(self, arg=None): super(MyThingy, self).__init__(arg or "MYTHINGY!") @@ -96,7 +98,7 @@ def visit_pg_type(type_, compiler, **kw): def test_no_compile_for_col_label(self): class MyThingy(FunctionElement): - pass + inherit_cache = True @compiles(MyThingy) def visit_thingy(thingy, compiler, **kw): @@ -120,6 +122,8 @@ def visit_thingy_pg(thingy, compiler, **kw): def test_stateful(self): class MyThingy(ColumnClause): + inherit_cache = False + def __init__(self): super(MyThingy, self).__init__("MYTHINGY!") @@ -142,6 +146,8 @@ def visit_thingy(thingy, compiler, **kw): def test_callout_to_compiler(self): class InsertFromSelect(ClauseElement): + inherit_cache = False + def __init__(self, table, select): self.table = table self.select = select @@ -162,7 +168,7 @@ def visit_insert_from_select(element, compiler, **kw): def test_no_default_but_has_a_visit(self): class MyThingy(ColumnClause): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -172,7 +178,7 @@ def visit_thingy(thingy, compiler, **kw): def test_no_default_has_no_visit(self): class MyThingy(TypeEngine): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -189,6 +195,7 @@ def visit_thingy(thingy, compiler, **kw): @testing.combinations((True,), (False,)) def test_no_default_proxy_generation(self, named): class my_function(FunctionElement): + inherit_cache = False if named: name = "my_function" type = Numeric() @@ -215,7 +222,7 @@ def sqlite_my_function(element, compiler, **kw): def test_no_default_message(self): class MyThingy(ClauseElement): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -314,7 +321,7 @@ def test_functions(self): from sqlalchemy.dialects import postgresql class MyUtcFunction(FunctionElement): - pass + inherit_cache = True @compiles(MyUtcFunction) def visit_myfunc(element, compiler, **kw): @@ -335,7 +342,7 @@ def visit_myfunc_pg(element, compiler, **kw): def test_functions_args_noname(self): class myfunc(FunctionElement): - pass + inherit_cache = True @compiles(myfunc) def visit_myfunc(element, compiler, **kw): @@ -351,6 +358,7 @@ def test_function_calls_base(self): class greatest(FunctionElement): type = Numeric() name = "greatest" + inherit_cache = True @compiles(greatest) def default_greatest(element, compiler, **kw): @@ -380,12 +388,15 @@ def case_greatest(element, compiler, **kw): def test_function_subclasses_one(self): class Base(FunctionElement): + inherit_cache = True name = "base" class Sub1(Base): + inherit_cache = True name = "sub1" class Sub2(Base): + inherit_cache = True name = "sub2" @compiles(Base) @@ -407,6 +418,7 @@ class Base(FunctionElement): name = "base" class Sub1(Base): + inherit_cache = True name = "sub1" @compiles(Base) @@ -414,9 +426,11 @@ def visit_base(element, compiler, **kw): return element.name class Sub2(Base): + inherit_cache = True name = "sub2" class SubSub1(Sub1): + inherit_cache = True name = 
"subsub1" self.assert_compile( @@ -545,7 +559,7 @@ def define_tables(cls, metadata): @testing.fixture() def insert_fixture(self): class MyInsert(Executable, ClauseElement): - pass + inherit_cache = True @compiles(MyInsert) def _run_myinsert(element, compiler, **kw): @@ -556,7 +570,7 @@ def _run_myinsert(element, compiler, **kw): @testing.fixture() def select_fixture(self): class MySelect(Executable, ClauseElement): - pass + inherit_cache = True @compiles(MySelect) def _run_myinsert(element, compiler, **kw): diff --git a/test/ext/test_extendedattr.py b/test/ext/test_extendedattr.py index c762754bc58..d895f74a9ee 100644 --- a/test/ext/test_extendedattr.py +++ b/test/ext/test_extendedattr.py @@ -156,7 +156,8 @@ def __sa_instrumentation_manager__(cls): ) # This proves SA can handle a class with non-string dict keys - if util.cpython: + # Since python 3.13 non-string key raise a runtime warning. + if util.cpython and not util.py313: locals()[42] = 99 # Don't remove this line! def __init__(self, **kwargs): diff --git a/test/ext/test_hybrid.py b/test/ext/test_hybrid.py index ad8d92b9b37..caee584a0e7 100644 --- a/test/ext/test_hybrid.py +++ b/test/ext/test_hybrid.py @@ -5,6 +5,7 @@ from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import LABEL_STYLE_DISAMBIGUATE_ONLY from sqlalchemy import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy import literal_column from sqlalchemy import Numeric @@ -245,6 +246,67 @@ class B(Base): return A, B + @testing.fixture + def _related_polymorphic_attr_fixture(self): + """test for #7425""" + + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + + bs = relationship("B", back_populates="a", lazy="joined") + + class B(Base): + __tablename__ = "poly" + __mapper_args__ = { + "polymorphic_on": "type", + # if with_polymorphic is removed, issue does not occur + "with_polymorphic": "*", + } + name = Column(String, primary_key=True) + type = Column(String) + a_id = Column(ForeignKey(A.id)) + + a = relationship(A, back_populates="bs") + + @hybrid.hybrid_property + def is_foo(self): + return self.name == "foo" + + return A, B + + def test_cloning_in_polymorphic_any( + self, _related_polymorphic_attr_fixture + ): + A, B = _related_polymorphic_attr_fixture + + session = fixture_session() + + # in the polymorphic case, A.bs.any() does a traverse() / clone() + # on the expression. so the proxedattribute coming from the hybrid + # has to support this. 
+ + self.assert_compile( + session.query(A).filter(A.bs.any(B.name == "foo")), + "SELECT a.id AS a_id, poly_1.name AS poly_1_name, poly_1.type " + "AS poly_1_type, poly_1.a_id AS poly_1_a_id FROM a " + "LEFT OUTER JOIN poly AS poly_1 ON a.id = poly_1.a_id " + "WHERE EXISTS (SELECT 1 FROM poly WHERE a.id = poly.a_id " + "AND poly.name = :name_1)", + ) + + # SQL should be identical + self.assert_compile( + session.query(A).filter(A.bs.any(B.is_foo)), + "SELECT a.id AS a_id, poly_1.name AS poly_1_name, poly_1.type " + "AS poly_1_type, poly_1.a_id AS poly_1_a_id FROM a " + "LEFT OUTER JOIN poly AS poly_1 ON a.id = poly_1.a_id " + "WHERE EXISTS (SELECT 1 FROM poly WHERE a.id = poly.a_id " + "AND poly.name = :name_1)", + ) + @testing.fixture def _unnamed_expr_fixture(self): Base = declarative_base() @@ -261,6 +323,21 @@ def name(self): return A + @testing.fixture + def _unnamed_expr_matches_col_fixture(self): + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + foo = Column(String) + + @hybrid.hybrid_property + def bar(self): + return self.foo + + return A + def test_labeling_for_unnamed(self, _unnamed_expr_fixture): A = _unnamed_expr_fixture @@ -280,6 +357,39 @@ def test_labeling_for_unnamed(self, _unnamed_expr_fixture): "a.lastname AS name FROM a) AS anon_1", ) + @testing.variation("pre_populate_col_proxy", [True, False]) + def test_labeling_for_unnamed_matches_col( + self, _unnamed_expr_matches_col_fixture, pre_populate_col_proxy + ): + """test #11728""" + + A = _unnamed_expr_matches_col_fixture + + if pre_populate_col_proxy: + pre_stmt = select(A.id, A.foo) + pre_stmt.subquery().c + + stmt = select(A.id, A.bar) + self.assert_compile( + stmt, + "SELECT a.id, a.foo FROM a", + ) + + compile_state = stmt._compile_state_factory(stmt, None) + eq_( + compile_state._column_naming_convention( + LABEL_STYLE_DISAMBIGUATE_ONLY, legacy=False + )(list(stmt.inner_columns)[1]), + "bar", + ) + eq_(stmt.subquery().c.keys(), ["id", "bar"]) + + self.assert_compile( + select(stmt.subquery()), + "SELECT anon_1.id, anon_1.foo FROM " + "(SELECT a.id AS id, a.foo AS foo FROM a) AS anon_1", + ) + def test_labeling_for_unnamed_tablename_plus_col( self, _unnamed_expr_fixture ): diff --git a/test/ext/test_mutable.py b/test/ext/test_mutable.py index 1d88deb7a0e..70b076c55ea 100644 --- a/test/ext/test_mutable.py +++ b/test/ext/test_mutable.py @@ -4,7 +4,10 @@ from sqlalchemy import event from sqlalchemy import ForeignKey from sqlalchemy import func +from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import JSON +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import util @@ -16,6 +19,7 @@ from sqlalchemy.orm import column_property from sqlalchemy.orm import composite from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import Session from sqlalchemy.orm.instrumentation import ClassManager from sqlalchemy.orm.mapper import Mapper from sqlalchemy.testing import assert_raises @@ -23,6 +27,7 @@ from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -41,6 +46,10 @@ class SubFoo(Foo): pass +class Foo2(fixtures.BasicEntity): + pass + + class FooWithEq(object): def __init__(self, **kw): for k in kw: @@ -102,6 +111,101 @@ def 
teardown_test(self): ClassManager.dispatch._clear() +class MiscTest(fixtures.TestBase): + @testing.combinations(True, False, argnames="pickleit") + def test_pickle_parent_multi_attrs(self, registry, connection, pickleit): + """test #8133""" + + local_foo = Table( + "lf", + registry.metadata, + Column("id", Integer, primary_key=True), + Column("j1", MutableDict.as_mutable(PickleType)), + Column("j2", MutableDict.as_mutable(PickleType)), + Column("j3", MutableDict.as_mutable(PickleType)), + Column("j4", MutableDict.as_mutable(PickleType)), + ) + + registry.map_imperatively(Foo2, local_foo) + registry.metadata.create_all(connection) + + with Session(connection) as sess: + + data = dict( + j1={"a": 1}, + j2={"b": 2}, + j3={"c": 3}, + j4={"d": 4}, + ) + lf = Foo2(**data) + sess.add(lf) + sess.commit() + + all_attrs = {"j1", "j2", "j3", "j4"} + for attr in all_attrs: + for loads, dumps in picklers(): + with Session(connection) as sess: + f1 = sess.scalars(select(Foo2)).first() + if pickleit: + f2 = loads(dumps(f1)) + else: + f2 = f1 + + existing_dict = getattr(f2, attr) + existing_dict["q"] = "c" + eq_( + inspect(f2).attrs[attr].history, + ([existing_dict], (), ()), + ) + for other_attr in all_attrs.difference([attr]): + a = inspect(f2).attrs[other_attr].history + b = ((), [data[other_attr]], ()) + eq_(a, b) + + @testing.combinations("key_present", "key_non_present", argnames="present") + @testing.combinations( + ("transient", True), + ("detached", True), + ("detached", False), + argnames="merge_subject, load", + ) + @testing.requires.json_type + def test_session_merge( + self, decl_base, connection, present, load, merge_subject + ): + """test #8446""" + + class Thing(decl_base): + __tablename__ = "thing" + id = Column(Integer, primary_key=True) + data = Column(MutableDict.as_mutable(JSON)) + + decl_base.metadata.create_all(connection) + + with Session(connection) as sess: + sess.add(Thing(id=1, data={"foo": "bar"})) + sess.commit() + + if merge_subject == "transient": + t1_to_merge = Thing(id=1, data={"foo": "bar"}) + elif merge_subject == "detached": + with Session(connection) as sess: + t1_to_merge = sess.get(Thing, 1) + + with Session(connection) as sess: + already_present = None + if present == "key_present": + already_present = sess.get(Thing, 1) + + t1_merged = sess.merge(t1_to_merge, load=load) + + t1_merged.data["foo"] = "bat" + if present == "key_present": + is_(t1_merged, already_present) + + is_true(inspect(t1_merged).attrs.data.history.added) + + class _MutableDictTestBase(_MutableDictTestFixture): run_define_tables = "each" diff --git a/test/orm/declarative/test_basic.py b/test/orm/declarative/test_basic.py index a405b9f2c6a..2ab787aa676 100644 --- a/test/orm/declarative/test_basic.py +++ b/test/orm/declarative/test_basic.py @@ -406,7 +406,13 @@ class Foo4(MyMixin2, Base): id = Column(Integer, primary_key=True) def test_column_named_twice(self): - def go(): + with assertions.expect_deprecated( + "A column with name 'x' is already present in table 'foo'" + ), expect_warnings( + "On class 'Foo', Column object 'x' named directly multiple times, " + "only one will be used: x, y", + ): + class Foo(Base): __tablename__ = "foo" @@ -414,15 +420,14 @@ class Foo(Base): x = Column("x", Integer) y = Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, + def test_column_repeated_under_prop(self): + with assertions.expect_deprecated( + "A column with name 'x' is already present in table 'foo'" + ), expect_warnings( "On class 'Foo', Column object 'x' named directly multiple times, " 
- "only one will be used: x, y", - go, - ) + "only one will be used: x, y, z", + ): - def test_column_repeated_under_prop(self): - def go(): class Foo(Base): __tablename__ = "foo" @@ -431,13 +436,6 @@ class Foo(Base): y = column_property(x) z = Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, - "On class 'Foo', Column object 'x' named directly multiple times, " - "only one will be used: x, y, z", - go, - ) - def test_using_explicit_prop_in_schema_objects(self): class Foo(Base): __tablename__ = "foo" @@ -2200,15 +2198,14 @@ class Test(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) - assert_raises_message( - sa.exc.SAWarning, + with expect_warnings( "This declarative base already contains a class with ", - lambda: type(Base)( + ): + type(Base)( "Test", (Base,), dict(__tablename__="b", id=Column(Integer, primary_key=True)), - ), - ) + ) @testing.teardown_events(MapperEvents) @testing.teardown_events(InstrumentationEvents) diff --git a/test/orm/declarative/test_clsregistry.py b/test/orm/declarative/test_clsregistry.py index b9d41ee5325..17e3624b7bb 100644 --- a/test/orm/declarative/test_clsregistry.py +++ b/test/orm/declarative/test_clsregistry.py @@ -1,12 +1,18 @@ +from sqlalchemy import Column from sqlalchemy import exc +from sqlalchemy import Integer from sqlalchemy import MetaData +from sqlalchemy import testing from sqlalchemy.orm import clsregistry from sqlalchemy.orm import registry +from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.util import gc_collect @@ -34,16 +40,16 @@ def test_same_module_same_name(self): clsregistry.add_class("Foo", f1, base._class_registry) gc_collect() - assert_raises_message( - exc.SAWarning, + with expect_warnings( "This declarative base already contains a class with the " "same class name and module name as foo.bar.Foo, and " - "will be replaced in the string-lookup table.", - clsregistry.add_class, - "Foo", - f2, - base._class_registry, - ) + "will be replaced in the string-lookup table." 
+ ): + clsregistry.add_class( + "Foo", + f2, + base._class_registry, + ) def test_resolve(self): base = registry() @@ -107,6 +113,36 @@ def test_fragment_ambiguous(self): name_resolver("alt.Foo"), ) + @testing.combinations( + ("NonExistentFoo",), + ("nonexistent.Foo",), + ("existent.nonexistent.Foo",), + ("existent.NonExistentFoo",), + ("nonexistent.NonExistentFoo",), + ("existent.existent.NonExistentFoo",), + argnames="name", + ) + def test_name_resolution_failures(self, name, registry): + + Base = registry.generate_base() + + f1 = MockClass(registry, "existent.Foo") + f2 = MockClass(registry, "existent.existent.Foo") + clsregistry.add_class("Foo", f1, registry._class_registry) + clsregistry.add_class("Foo", f2, registry._class_registry) + + class MyClass(Base): + __tablename__ = "my_table" + id = Column(Integer, primary_key=True) + foo = relationship(name) + + with expect_raises_message( + exc.InvalidRequestError, + r"When initializing mapper .*MyClass.*, expression '%s' " + r"failed to locate a name" % (name,), + ): + registry.configure() + def test_no_fns_in_name_resolve(self): base = registry() f1 = MockClass(base, "foo.bar.Foo") @@ -240,7 +276,7 @@ def test_module_reg_cleanout_race(self): f_resolver = resolver("foo") del mod_entry.contents["Foo"] assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Foo'", lambda: f_resolver().bar.Foo, @@ -248,7 +284,7 @@ def test_module_reg_cleanout_race(self): f_resolver = name_resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Foo'", lambda: f_resolver().bar.Foo, @@ -263,7 +299,7 @@ def test_module_reg_no_class(self): name_resolver, resolver = clsregistry._resolver(f1, MockProp()) f_resolver = resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Bat'", lambda: f_resolver().bar.Bat, @@ -271,7 +307,7 @@ def test_module_reg_no_class(self): f_resolver = name_resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Bat'", lambda: f_resolver().bar.Bat, diff --git a/test/orm/declarative/test_inheritance.py b/test/orm/declarative/test_inheritance.py index 7e43e255954..7f1b47f3758 100644 --- a/test/orm/declarative/test_inheritance.py +++ b/test/orm/declarative/test_inheritance.py @@ -38,6 +38,7 @@ def teardown_test(self): class DeclarativeInheritanceTest(DeclarativeTestBase): + @testing.emits_warning(r".*does not indicate a polymorphic_identity") def test_we_must_copy_mapper_args(self): class Person(Base): @@ -673,6 +674,9 @@ class Employee(Person): __tablename__ = "employee" id = Column(Integer, ForeignKey(Person.id), primary_key=True) + __mapper_args__ = { + "polymorphic_identity": "employee", + } class Engineer(Employee): __mapper_args__ = {"polymorphic_identity": "engineer"} @@ -1007,9 +1011,15 @@ class Manager(Person): __mapper_args__ = {"polymorphic_identity": "manager"} id = Column(Integer, ForeignKey("people.id"), primary_key=True) golf_swing = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "manager", + } class Boss(Manager): boss_name = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "boss", + } is_( Boss.__mapper__.column_attrs["boss_name"].columns[0], diff --git a/test/orm/declarative/test_mixin.py b/test/orm/declarative/test_mixin.py index 664c0063038..f8e0cf5adb3 100644 --- 
a/test/orm/declarative/test_mixin.py +++ b/test/orm/declarative/test_mixin.py @@ -38,7 +38,11 @@ mapper_registry = None -class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): +class DeclarativeTestBase( + testing.AssertsCompiledSQL, + fixtures.TestBase, + testing.AssertsExecutionResults, +): def setup_test(self): global Base, mapper_registry @@ -53,6 +57,20 @@ def teardown_test(self): class DeclarativeMixinTest(DeclarativeTestBase): + @testing.requires.python3 + def test_init_subclass_works(self, registry): + class Base: + def __init_subclass__(cls): + cls.id = Column(Integer, primary_key=True) + + Base = registry.generate_base(cls=Base) + + class Foo(Base): + __tablename__ = "foo" + name = Column(String) + + self.assert_compile(select(Foo), "SELECT foo.name, foo.id FROM foo") + def test_simple_wbase(self): class MyMixin(object): @@ -1639,7 +1657,7 @@ class Derived(Base): self.assert_compile( s.query(Derived.data_syn).filter(Derived.data_syn == "foo"), "SELECT test.data AS test_data FROM test WHERE test.data = " - ":data_1 AND test.type IN ([POSTCOMPILE_type_1])", + ":data_1 AND test.type IN (__[POSTCOMPILE_type_1])", dialect="default", checkparams={"type_1": ["derived"], "data_1": "foo"}, ) @@ -1856,14 +1874,11 @@ class Mixin(object): def my_prop(cls): return Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, + with expect_warnings( "Unmanaged access of declarative attribute my_prop " - "from non-mapped class Mixin", - getattr, - Mixin, - "my_prop", - ) + "from non-mapped class Mixin" + ): + Mixin.my_prop def test_can_we_access_the_mixin_straight_special_names(self): class Mixin(object): @@ -2125,6 +2140,53 @@ class User(Base, HasAddressCount): "> :param_1", ) + def test_multilevel_mixin_attr_refers_to_column_copies(self): + """test #8190. + + This test is the same idea as test_mixin_attr_refers_to_column_copies + but tests the column copies from superclasses. 
+ + """ + counter = mock.Mock() + + class SomeOtherMixin: + status = Column(String) + + class HasAddressCount(SomeOtherMixin): + id = Column(Integer, primary_key=True) + + @declared_attr + def address_count(cls): + counter(cls.id) + counter(cls.status) + return column_property( + select(func.count(Address.id)) + .where(Address.user_id == cls.id) + .where(cls.status == "some status") + .scalar_subquery() + ) + + class Address(Base): + __tablename__ = "address" + id = Column(Integer, primary_key=True) + user_id = Column(ForeignKey("user.id")) + + class User(Base, HasAddressCount): + __tablename__ = "user" + + eq_(counter.mock_calls, [mock.call(User.id), mock.call(User.status)]) + + sess = fixture_session() + self.assert_compile( + sess.query(User).having(User.address_count > 5), + "SELECT (SELECT count(address.id) AS count_1 FROM address " + 'WHERE address.user_id = "user".id AND "user".status = :param_1) ' + 'AS anon_1, "user".status AS user_status, "user".id AS user_id ' + 'FROM "user" HAVING (SELECT count(address.id) AS count_1 ' + 'FROM address WHERE address.user_id = "user".id ' + 'AND "user".status = :param_1) > :param_2', + ) + class AbstractTest(DeclarativeTestBase): def test_abstract_boolean(self): diff --git a/test/orm/declarative/test_typing_py3k.py b/test/orm/declarative/test_typing_py3k.py new file mode 100644 index 00000000000..595194512d9 --- /dev/null +++ b/test/orm/declarative/test_typing_py3k.py @@ -0,0 +1,44 @@ +from typing import Generic +from typing import Type +from typing import TypeVar + +from sqlalchemy import Column +from sqlalchemy import inspect +from sqlalchemy import Integer +from sqlalchemy.orm import as_declarative +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_ +from sqlalchemy.testing.assertions import expect_raises + + +class DeclarativeBaseTest(fixtures.TestBase): + __requires__ = ("python37",) + + def test_class_getitem(self): + T = TypeVar("T", bound="CommonBase") # noqa + + class CommonBase(Generic[T]): + @classmethod + def boring(cls: Type[T]) -> Type[T]: + return cls + + @classmethod + def more_boring(cls: Type[T]) -> int: + return 27 + + @as_declarative() + class Base(CommonBase[T]): + foo = 1 + + class Tab(Base["Tab"]): + __tablename__ = "foo" + a = Column(Integer, primary_key=True) + + eq_(Tab.foo, 1) + is_(Tab.__table__, inspect(Tab).local_table) + eq_(Tab.boring(), Tab) + eq_(Tab.more_boring(), 27) + + with expect_raises(AttributeError): + Tab.non_existent diff --git a/test/orm/inheritance/_poly_fixtures.py b/test/orm/inheritance/_poly_fixtures.py index 7efc99913ad..7ba611f958c 100644 --- a/test/orm/inheritance/_poly_fixtures.py +++ b/test/orm/inheritance/_poly_fixtures.py @@ -350,9 +350,10 @@ def setup_mappers(cls): inherits=Person, polymorphic_identity="engineer", properties={ + "company": relationship(Company, viewonly=True), "machines": relationship( Machine, order_by=machines.c.machine_id - ) + ), }, ) diff --git a/test/orm/inheritance/test_assorted_poly.py b/test/orm/inheritance/test_assorted_poly.py index 729e1ee0479..3d17d702382 100644 --- a/test/orm/inheritance/test_assorted_poly.py +++ b/test/orm/inheritance/test_assorted_poly.py @@ -2252,7 +2252,7 @@ class A(Base): id = Column(Integer, primary_key=True) class MySpecialColumn(Column): - pass + inherit_cache = True class B(A): __tablename__ = "b" diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index ac1661fdd16..e2348bb8a49 100644 --- a/test/orm/inheritance/test_basic.py +++ 
b/test/orm/inheritance/test_basic.py @@ -32,9 +32,11 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import assert_warns_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import Conditional @@ -169,6 +171,7 @@ def test_group_by(self): rows = ( s.query(B.id.expressions[0], B.id.expressions[1], func.sum(B.data)) .group_by(*B.id.expressions) + .order_by(B.id.expressions[0]) .all() ) eq_(rows, [(1, 1, 5), (2, 2, 7)]) @@ -903,7 +906,7 @@ def test_invalid_assignment_downwards(self): c1 = C() c1.class_name = "b" sess.add(c1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " @@ -922,7 +925,7 @@ def test_invalid_assignment_upwards(self): b1 = B() b1.class_name = "c" sess.add(b1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'c'; the object may not " @@ -938,7 +941,7 @@ def test_entirely_oob_assignment(self): b1 = B() b1.class_name = "xyz" sess.add(b1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'xyz'; the object may not " @@ -968,7 +971,7 @@ def test_validate_on_upate(self): sess.expire(c1) c1.class_name = "b" - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " @@ -2061,8 +2064,8 @@ class Sub(Base): class DistinctPKTest(fixtures.MappedTest): - """test the construction of mapper.primary_key when an inheriting relationship - joins on a column other than primary key column.""" + """test the construction of mapper.primary_key when an inheriting + relationship joins on a column other than primary key column.""" run_inserts = "once" run_deletes = None @@ -2143,7 +2146,7 @@ def test_explicit_composite_pk(self): properties=dict(id=[employee_table.c.eid, person_table.c.id]), primary_key=[person_table.c.id, employee_table.c.eid], ) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"On mapper mapped class Employee->employees, " "primary key column 'persons.id' is being " @@ -2441,7 +2444,7 @@ def go(): Sub, subtable_two, inherits=Base ) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Implicitly combining column base.base_id with " "column subtable_two.base_id under attribute 'base_id'", @@ -2696,6 +2699,70 @@ def _key_fallback(self, key, raiseerr): eq_(s1.sub, "s1sub") + def test_optimized_get_blank_intermediary(self, registry, connection): + """test #7507""" + + Base = registry.generate_base() + + class A(Base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + a = Column(String(20), nullable=False) + type_ = Column(String(20)) + __mapper_args__ = { + "polymorphic_on": type_, + "polymorphic_identity": "a", + } + + class B(A): + __tablename__ = "b" + __mapper_args__ = {"polymorphic_identity": "b"} + + id = Column(Integer, ForeignKey("a.id"), primary_key=True) + b = Column(String(20), nullable=False) + + class C(B): + __tablename__ = "c" + __mapper_args__ = {"polymorphic_identity": "c"} + + id = Column(Integer, 
ForeignKey("b.id"), primary_key=True) + + class D(C): + __tablename__ = "d" + __mapper_args__ = {"polymorphic_identity": "d"} + + id = Column(Integer, ForeignKey("c.id"), primary_key=True) + c = Column(String(20), nullable=False) + + Base.metadata.create_all(connection) + + session = Session(connection) + session.add(D(a="x", b="y", c="z")) + session.commit() + + with self.sql_execution_asserter(connection) as asserter: + d = session.query(A).one() + eq_(d.c, "z") + asserter.assert_( + CompiledSQL( + "SELECT a.id AS a_id, a.a AS a_a, a.type_ AS a_type_ FROM a", + [], + ), + Or( + CompiledSQL( + "SELECT d.c AS d_c, b.b AS b_b FROM d, b, c WHERE " + ":param_1 = b.id AND b.id = c.id AND c.id = d.id", + [{"param_1": 1}], + ), + CompiledSQL( + "SELECT b.b AS b_b, d.c AS d_c FROM b, d, c WHERE " + ":param_1 = b.id AND b.id = c.id AND c.id = d.id", + [{"param_1": 1}], + ), + ), + ) + def test_optimized_passes(self): """ "test that the 'optimized load' routine doesn't crash when a column in the join condition is not available.""" @@ -3029,7 +3096,7 @@ class C(P): pass self.mapper_registry.map_imperatively(P, parent) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Could not assemble any primary keys for locally mapped " "table 'child' - no rows will be persisted in this Table.", @@ -3349,7 +3416,12 @@ def setup_mappers(cls): cls.mapper_registry.map_imperatively( A, base, polymorphic_on=base.c.type ) - cls.mapper_registry.map_imperatively(B, inherits=A) + + with expect_warnings( + r"Mapper mapped class B->base does not indicate a " + "polymorphic_identity," + ): + cls.mapper_registry.map_imperatively(B, inherits=A) cls.mapper_registry.map_imperatively( C, inherits=B, polymorphic_identity="c" ) @@ -3359,6 +3431,28 @@ def setup_mappers(cls): cls.mapper_registry.map_imperatively( E, inherits=A, polymorphic_identity="e" ) + cls.mapper_registry.configure() + + def test_warning(self, decl_base): + """test #7545""" + + class A(decl_base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + type = Column(String) + + __mapper_args__ = {"polymorphic_on": type} + + class B(A): + __mapper_args__ = {"polymorphic_identity": "b"} + + with expect_warnings( + r"Mapper mapped class C->a does not indicate a " + "polymorphic_identity," + ): + + class C(A): + __mapper_args__ = {} def test_load_from_middle(self): C, B = self.classes.C, self.classes.B diff --git a/test/orm/inheritance/test_concrete.py b/test/orm/inheritance/test_concrete.py index d9dfa3d9e6c..a7ff156c544 100644 --- a/test/orm/inheritance/test_concrete.py +++ b/test/orm/inheritance/test_concrete.py @@ -5,24 +5,34 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union from sqlalchemy import union_all +from sqlalchemy.ext.declarative import AbstractConcreteBase from sqlalchemy.ext.hybrid import hybrid_property +from sqlalchemy.orm import aliased from sqlalchemy.orm import attributes from sqlalchemy.orm import class_mapper from sqlalchemy.orm import clear_mappers +from sqlalchemy.orm import composite from sqlalchemy.orm import configure_mappers +from sqlalchemy.orm import contains_eager +from sqlalchemy.orm import declared_attr from sqlalchemy.orm import joinedload from sqlalchemy.orm import polymorphic_union from sqlalchemy.orm import relationship -from sqlalchemy.orm.util import with_polymorphic +from sqlalchemy.orm import Session +from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import assert_raises from sqlalchemy.testing import 
assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock +from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from test.orm.test_events import _RemoveListeners class ConcreteTest(fixtures.MappedTest): @@ -185,6 +195,7 @@ def test_basic(self): "Manager Sally knows how to manage things", ] ) + assert set([repr(x) for x in session.query(Manager)]) == set( ["Manager Sally knows how to manage things"] ) @@ -1434,3 +1445,240 @@ class Office(Location): eq_(sess.get(Refugee, 2).name, "refugee2") eq_(sess.get(Office, 1).name, "office1") eq_(sess.get(Office, 2).name, "office2") + + +class AdaptOnNamesTest(_RemoveListeners, fixtures.DeclarativeMappedTest): + """test the full integration case for #7805""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + Basic = cls.Basic + + class Metadata(ComparableEntity, Base): + __tablename__ = "metadata" + id = Column( + Integer, + primary_key=True, + ) + + some_data = Column(String(50)) + + class BaseObj(ComparableEntity, AbstractConcreteBase, Base): + """abstract concrete base with a custom polymorphic_union. + + Additionally, at query time it needs to use a new version of this + union each time in order to add filter criteria. this is because + polymorphic_union() is of course very inefficient in its form + and if someone actually has to use this, it's likely better for + filter criteria to be within each sub-select. The current use + case here does not really have easy answers as we don't have + a built-in widget that does this. The complexity / little use + ratio doesn't justify it unfortunately. + + This use case might be easier if we were mapped to something that + can be adapted. however, we are using adapt_on_names here as this + is usually what's more accessible to someone trying to get into + this, or at least we should make that feature work as well as it + can. 
+ + """ + + @declared_attr + def id(cls): + return Column(Integer, primary_key=True) + + @declared_attr + def metadata_id(cls): + return Column(ForeignKey(Metadata.id), nullable=False) + + @classmethod + def _create_polymorphic_union(cls, mappers, discriminator_name): + return cls.make_statement().subquery() + + @declared_attr + def related_metadata(cls): + return relationship(Metadata) + + @classmethod + def make_statement(cls, *filter_cond, **kw): + include_metadata = kw.pop("include_metadata", False) + a_stmt = ( + select( + A.id, + A.metadata_id, + A.thing1, + A.x1, + A.y1, + null().label("thing2"), + null().label("x2"), + null().label("y2"), + literal("a").label("type"), + ) + .join(Metadata) + .filter(*filter_cond) + ) + if include_metadata: + a_stmt = a_stmt.add_columns(Metadata.__table__) + + b_stmt = ( + select( + B.id, + B.metadata_id, + null().label("thing1"), + null().label("x1"), + null().label("y1"), + B.thing2, + B.x2, + B.y2, + literal("b").label("type"), + ) + .join(Metadata) + .filter(*filter_cond) + ) + if include_metadata: + b_stmt = b_stmt.add_columns(Metadata.__table__) + + return union(a_stmt, b_stmt) + + class XYThing(Basic): + def __init__(self, x, y): + self.x = x + self.y = y + + def __composite_values__(self): + return (self.x, self.y) + + def __eq__(self, other): + return ( + isinstance(other, XYThing) + and other.x == self.x + and other.y == self.y + ) + + def __ne__(self, other): + return not self.__eq__(other) + + class A(BaseObj): + __tablename__ = "a" + thing1 = Column(String(50)) + comp1 = composite( + XYThing, Column("x1", Integer), Column("y1", Integer) + ) + + __mapper_args__ = {"polymorphic_identity": "a", "concrete": True} + + class B(BaseObj): + __tablename__ = "b" + thing2 = Column(String(50)) + comp2 = composite( + XYThing, Column("x2", Integer), Column("y2", Integer) + ) + + __mapper_args__ = {"polymorphic_identity": "b", "concrete": True} + + @classmethod + def insert_data(cls, connection): + Metadata, A, B = cls.classes("Metadata", "A", "B") + XYThing = cls.classes.XYThing + + with Session(connection) as sess: + sess.add_all( + [ + Metadata(id=1, some_data="m1"), + Metadata(id=2, some_data="m2"), + ] + ) + sess.flush() + + sess.add_all( + [ + A( + id=5, + metadata_id=1, + thing1="thing1", + comp1=XYThing(1, 2), + ), + B( + id=6, + metadata_id=2, + thing2="thing2", + comp2=XYThing(3, 4), + ), + ] + ) + sess.commit() + + def test_contains_eager(self): + Metadata, A, B = self.classes("Metadata", "A", "B") + BaseObj = self.classes.BaseObj + XYThing = self.classes.XYThing + + alias = BaseObj.make_statement( + Metadata.id < 3, include_metadata=True + ).subquery() + ac = with_polymorphic( + BaseObj, + [A, B], + selectable=alias, + adapt_on_names=True, + ) + + mt = aliased(Metadata, alias=alias) + + sess = fixture_session() + + with self.sql_execution_asserter() as asserter: + objects = sess.scalars( + select(ac) + .options( + contains_eager(ac.A.related_metadata.of_type(mt)), + contains_eager(ac.B.related_metadata.of_type(mt)), + ) + .order_by(ac.id) + ).all() + + eq_( + objects, + [ + A( + id=5, + metadata_id=1, + thing1="thing1", + comp1=XYThing(1, 2), + related_metadata=Metadata(id=1, some_data="m1"), + ), + B( + id=6, + metadata_id=2, + thing2="thing2", + comp2=XYThing(3, 4), + related_metadata=Metadata(id=2, some_data="m2"), + ), + ], + ) + asserter.assert_( + CompiledSQL( + "SELECT anon_1.id, anon_1.metadata_id, anon_1.thing1, " + "anon_1.x1, anon_1.y1, anon_1.thing2, anon_1.x2, anon_1.y2, " + "anon_1.type, anon_1.id_1, anon_1.some_data FROM " + 
"(SELECT a.id AS id, a.metadata_id AS metadata_id, " + "a.thing1 AS thing1, a.x1 AS x1, a.y1 AS y1, " + "NULL AS thing2, NULL AS x2, NULL AS y2, :param_1 AS type, " + "metadata.id AS id_1, metadata.some_data AS some_data " + "FROM a JOIN metadata ON metadata.id = a.metadata_id " + "WHERE metadata.id < :id_2 UNION SELECT b.id AS id, " + "b.metadata_id AS metadata_id, NULL AS thing1, NULL AS x1, " + "NULL AS y1, b.thing2 AS thing2, b.x2 AS x2, b.y2 AS y2, " + ":param_2 AS type, metadata.id AS id_1, " + "metadata.some_data AS some_data FROM b " + "JOIN metadata ON metadata.id = b.metadata_id " + "WHERE metadata.id < :id_3) AS anon_1 ORDER BY anon_1.id", + # tip: whether or not there is "id_2" and "id_3" here, + # or just "id_2", is based on whether or not the two + # queries had polymorphic adaption proceed, so that the + # two filter criterias are different vs. the same object. see + # mapper._should_select_with_poly_adapter added in #8456. + [{"param_1": "a", "id_2": 3, "param_2": "b", "id_3": 3}], + ) + ) diff --git a/test/orm/inheritance/test_deprecations.py b/test/orm/inheritance/test_deprecations.py index 8c807c1152e..6f370d5e47d 100644 --- a/test/orm/inheritance/test_deprecations.py +++ b/test/orm/inheritance/test_deprecations.py @@ -532,7 +532,7 @@ def test_of_type_aliased_fromjoinpoint(self): "companies.name AS companies_name FROM companies " "LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) @@ -739,7 +739,7 @@ def test_query_wpoly_single_inh_subclass(self): "engineer.engineer_info AS engineer_engineer_info, " "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " - "AS anon_1 WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "AS anon_1 WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 35822a29e9f..fc5743a7330 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -1,23 +1,39 @@ -from sqlalchemy import Column +from sqlalchemy import exc from sqlalchemy import ForeignKey +from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import literal +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union from sqlalchemy.orm import backref +from sqlalchemy.orm import column_property +from sqlalchemy.orm import composite +from sqlalchemy.orm import defaultload +from sqlalchemy.orm import immediateload from sqlalchemy.orm import joinedload +from sqlalchemy.orm import lazyload from sqlalchemy.orm import relationship from sqlalchemy.orm import selectin_polymorphic from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session +from sqlalchemy.orm import subqueryload from sqlalchemy.orm import with_polymorphic +from sqlalchemy.orm.interfaces import CompileStateOption from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from sqlalchemy.testing import assertsql from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import EachOf from sqlalchemy.testing.assertsql 
import Or +from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session +from sqlalchemy.testing.schema import Column from ._poly_fixtures import _Polymorphic from ._poly_fixtures import Company from ._poly_fixtures import Engineer @@ -110,7 +126,7 @@ def _assert_all_selectin(self, q): "a.type AS a_type, " "asub.asubdata AS asub_asubdata FROM a JOIN asub " "ON a.id = asub.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", {"primary_keys": [2]}, ), @@ -123,13 +139,13 @@ def _assert_all_selectin(self, q): "SELECT c.a_sub_id AS c_a_sub_id, " "c.id AS c_id " "FROM c WHERE c.a_sub_id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [2]}, ), ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id FROM b " - "WHERE b.a_id IN ([POSTCOMPILE_primary_keys])", + "WHERE b.a_id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1, 2]}, ), ), @@ -209,7 +225,7 @@ def test_person_selectin_subclasses(self): "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers " "ON people.person_id = engineers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2, 5]}, ), @@ -221,7 +237,7 @@ def test_person_selectin_subclasses(self): "managers.manager_name AS managers_manager_name " "FROM people JOIN managers " "ON people.person_id = managers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [3, 4]}, ), @@ -255,7 +271,7 @@ def test_load_company_plus_employees(self): "people.person_id AS people_person_id, " "people.name AS people_name, people.type AS people_type " "FROM people WHERE people.company_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2]}, ), @@ -269,7 +285,7 @@ def test_load_company_plus_employees(self): "managers.manager_name AS managers_manager_name " "FROM people JOIN managers " "ON people.person_id = managers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [3, 4]}, ), @@ -283,7 +299,7 @@ def test_load_company_plus_employees(self): "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers " "ON people.person_id = engineers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2, 5]}, ), @@ -338,7 +354,8 @@ def test_threelevel_selectin_to_inline_mapped(self): "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( @@ -346,7 +363,8 @@ def test_threelevel_selectin_to_inline_mapped(self): "c.c_data AS c_c_data, " "c.d_data AS c_d_data, c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), @@ -393,7 +411,8 @@ 
def test_threelevel_selectin_to_inline_options(self): "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( @@ -401,7 +420,8 @@ def test_threelevel_selectin_to_inline_options(self): "c.c_data AS c_c_data, c.d_data AS c_d_data, " "c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), @@ -409,7 +429,10 @@ def test_threelevel_selectin_to_inline_options(self): with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) - def test_threelevel_selectin_to_inline_awkward_alias_options(self): + @testing.combinations((True,), (False,)) + def test_threelevel_selectin_to_inline_awkward_alias_options( + self, use_aliased_class + ): self._fixture_from_geometry( { "a": { @@ -440,57 +463,99 @@ def test_threelevel_selectin_to_inline_awkward_alias_options(self): ) c_alias = with_polymorphic(c, (d, e), poly) - q = ( - sess.query(a) - .options(selectin_polymorphic(a, [b, c_alias])) - .order_by(a.id) - ) - result = self.assert_sql_execution( - testing.db, - q.all, - CompiledSQL( - "SELECT a.type AS a_type, a.id AS a_id, " - "a.a_data AS a_a_data FROM a ORDER BY a.id", - {}, - ), - Or( - # here, the test is that the adaptation of "a" takes place + if use_aliased_class: + opt = selectin_polymorphic(a, [b, c_alias]) + else: + opt = selectin_polymorphic( + a, + [b, c_alias, d, e], + ) + q = sess.query(a).options(opt).order_by(a.id) + + if use_aliased_class: + result = self.assert_sql_execution( + testing.db, + q.all, CompiledSQL( - "SELECT poly.a_type AS poly_a_type, " - "poly.c_id AS poly_c_id, " - "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, " - "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data, " - "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data " - "FROM (SELECT a.id AS a_id, a.type AS a_type, " - "c.id AS c_id, " - "c.c_data AS c_c_data, d.id AS d_id, " - "d.d_data AS d_d_data, " - "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " - "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " - "LEFT OUTER JOIN e ON c.id = e.id) AS poly " - "WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) " - "ORDER BY poly.a_id", - [{"primary_keys": [1, 2]}], + "SELECT a.type AS a_type, a.id AS a_id, " + "a.a_data AS a_a_data FROM a ORDER BY a.id", + {}, + ), + Or( + # here, the test is that the adaptation of "a" takes place + CompiledSQL( + "SELECT poly.a_type AS poly_a_type, " + "poly.c_id AS poly_c_id, " + "poly.a_id AS poly_a_id, poly.c_c_data " + "AS poly_c_c_data, " + "poly.e_id AS poly_e_id, poly.e_e_data " + "AS poly_e_e_data, " + "poly.d_id AS poly_d_id, poly.d_d_data " + "AS poly_d_d_data " + "FROM (SELECT a.id AS a_id, a.type AS a_type, " + "c.id AS c_id, " + "c.c_data AS c_c_data, d.id AS d_id, " + "d.d_data AS d_d_data, " + "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " + "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " + "LEFT OUTER JOIN e ON c.id = e.id) AS poly " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY poly.a_id", + [{"primary_keys": [1, 2]}], + ), + CompiledSQL( + "SELECT poly.a_type AS poly_a_type, " + "poly.c_id AS poly_c_id, " + "poly.a_id AS poly_a_id, " + "poly.c_c_data AS poly_c_c_data, " + "poly.d_id AS poly_d_id, poly.d_d_data " + 
"AS poly_d_d_data, " + "poly.e_id AS poly_e_id, poly.e_e_data " + "AS poly_e_e_data " + "FROM (SELECT a.id AS a_id, a.type AS a_type, " + "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, " + "d.d_data AS d_d_data, e.id AS e_id, " + "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " + "LEFT OUTER JOIN d ON c.id = d.id " + "LEFT OUTER JOIN e ON c.id = e.id) AS poly " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY poly.a_id", + [{"primary_keys": [1, 2]}], + ), ), + ) + else: + result = self.assert_sql_execution( + testing.db, + q.all, CompiledSQL( - "SELECT poly.a_type AS poly_a_type, " - "poly.c_id AS poly_c_id, " - "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, " - "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data, " - "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data " - "FROM (SELECT a.id AS a_id, a.type AS a_type, " - "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, " - "d.d_data AS d_d_data, e.id AS e_id, " - "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " - "LEFT OUTER JOIN d ON c.id = d.id " - "LEFT OUTER JOIN e ON c.id = e.id) AS poly " - "WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) " - "ORDER BY poly.a_id", - [{"primary_keys": [1, 2]}], + "SELECT a.type AS a_type, a.id AS a_id, " + "a.a_data AS a_a_data FROM a ORDER BY a.id", + {}, ), - ), - ) + AllOf( + CompiledSQL( + "SELECT a.type AS a_type, d.id AS d_id, c.id AS c_id, " + "a.id AS a_id, " + "d.d_data AS d_d_data FROM a " + "JOIN c ON a.id = c.id JOIN d ON c.id = d.id " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", + [{"primary_keys": [1]}], + ), + CompiledSQL( + "SELECT a.type AS a_type, e.id AS e_id, c.id AS c_id, " + "a.id AS a_id, " + "e.e_data AS e_e_data FROM a " + "JOIN c ON a.id = c.id JOIN e ON c.id = e.id " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", + [{"primary_keys": [2]}], + ), + ), + ) + with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) @@ -578,17 +643,11 @@ def insert_data(cls, connection): session.add_all([parent, subclass1, other]) session.commit() - def test_options_dont_pollute_baked(self): - self._test_options_dont_pollute(True) - - def test_options_dont_pollute_unbaked(self): - self._test_options_dont_pollute(False) - - def _test_options_dont_pollute(self, enable_baked): + def test_options_dont_pollute(self): Parent, ChildSubclass1, Other = self.classes( "Parent", "ChildSubclass1", "Other" ) - session = fixture_session(enable_baked_queries=enable_baked) + session = fixture_session() def no_opt(): q = session.query(Parent).options( @@ -622,7 +681,7 @@ def no_opt(): "child.type AS child_type " "FROM child JOIN child_subclass1 " "ON child.id = child_subclass1.id " - "WHERE child.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY child.id", [{"primary_keys": [1]}], ), @@ -672,7 +731,7 @@ def no_opt(): "ON child.id = child_subclass1.id " "LEFT OUTER JOIN other AS other_1 " "ON child_subclass1.id = other_1.child_subclass_id " - "WHERE child.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY child.id", [{"primary_keys": [1]}], ), @@ -686,3 +745,547 @@ def no_opt(): result = no_opt() with self.assert_statement_count(testing.db, 1): eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])]) + + +class IgnoreOptionsOnSubclassAttrLoad(fixtures.DeclarativeMappedTest): + """test #7304 and related cases + + in this case we trigger the subclass attribute load, while at 
the same + time there will be a deferred loader option present in the state's + options that was established by the previous loader. + + test both that the option takes effect (i.e. raiseload) and that a deferred + loader doesn't interfere with the mapper's load of the attribute. + + """ + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Parent(Base): + __tablename__ = "parent" + + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + + entity_id = Column(ForeignKey("entity.id")) + entity = relationship("Entity") + + class Entity(Base): + __tablename__ = "entity" + + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + type = Column(String(32)) + + __mapper_args__ = { + "polymorphic_on": type, + "polymorphic_identity": "entity", + } + + class SubEntity(Entity): + __tablename__ = "sub_entity" + + id = Column(ForeignKey(Entity.id), primary_key=True) + + name = Column(String(32)) + + __mapper_args__ = {"polymorphic_identity": "entity_two"} + + @classmethod + def insert_data(cls, connection): + Parent, SubEntity = cls.classes("Parent", "SubEntity") + + with Session(connection) as session: + session.add(Parent(entity=SubEntity(name="some name"))) + session.commit() + + @testing.combinations( + defaultload, + joinedload, + selectinload, + lazyload, + argnames="first_option", + ) + @testing.combinations( + ("load_only", "id", True), + ("defer", "name", True), + ("undefer", "name", True), + ("raise", "name", False), + (None, None, True), + # these don't seem possible at the moment as the "type" column + # doesn't load and it can't recognize the polymorphic identity. + # we assume load_only() is smart enough to include this column + # ("defer", '*', True), + # ("undefer", '*', True), + # ("raise", '*', False), + argnames="second_option,second_argument,expect_load", + ) + def test_subclass_loadattr( + self, first_option, second_option, second_argument, expect_load + ): + Parent, Entity, SubEntity = self.classes( + "Parent", "Entity", "SubEntity" + ) + + stmt = select(Parent) + + will_lazyload = first_option in (defaultload, lazyload) + + opt = first_option(Parent.entity) + + if second_argument == "name": + second_argument = SubEntity.name + elif second_argument == "id": + second_argument = Entity.id + + if second_option is None: + sub_opt = opt + elif second_option == "raise": + sub_opt = opt.defer(second_argument, raiseload=True) + else: + sub_opt = getattr(opt, second_option)(second_argument) + + stmt = stmt.options(sub_opt) + + session = fixture_session() + result = session.execute(stmt).scalars() + + parent_obj = result.first() + + entity_id = parent_obj.__dict__["entity_id"] + + with assertsql.assert_engine(testing.db) as asserter_: + if expect_load: + eq_(parent_obj.entity.name, "some name") + else: + with expect_raises_message( + exc.InvalidRequestError, + "'SubEntity.name' is not available due to raiseload=True", + ): + parent_obj.entity.name + + expected = [] + + if will_lazyload: + expected.append( + CompiledSQL( + "SELECT entity.id AS entity_id, " + "entity.type AS entity_type FROM entity " + "WHERE entity.id = :pk_1", + [{"pk_1": entity_id}], + ) + ) + + if second_option in ("undefer", "load_only", None): + # load will be a mapper optimized load for the name alone + expected.append( + CompiledSQL( + "SELECT sub_entity.name AS sub_entity_name " + "FROM sub_entity " + "WHERE :param_1 = sub_entity.id", + [{"param_1": entity_id}], + ) + ) + elif second_option == "defer": + # load will be a deferred load. 
this is because the explicit + # call to the deferred load put a deferred loader on the attribute + expected.append( + CompiledSQL( + "SELECT sub_entity.name AS sub_entity_name FROM entity " + "JOIN sub_entity ON entity.id = sub_entity.id " + "WHERE entity.id = :pk_1", + [{"pk_1": entity_id}], + ) + ) + + asserter_.assert_(*expected) + + +class LazyLoaderTransfersOptsTest(fixtures.DeclarativeMappedTest): + """test #7557""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Address(Base): + __tablename__ = "address" + + id = Column(Integer, primary_key=True) + user_id = Column(Integer, ForeignKey("user.id")) + address_type = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "base_address", + "polymorphic_on": address_type, + } + + class EmailAddress(Address): + __tablename__ = "email_address" + email = Column(String(50)) + address_id = Column( + Integer, + ForeignKey(Address.id), + primary_key=True, + ) + + __mapper_args__ = { + "polymorphic_identity": "email", + "polymorphic_load": "selectin", + } + + class User(Base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String(50)) + address = relationship(Address, uselist=False) + + @classmethod + def insert_data(cls, connection): + User, EmailAddress = cls.classes("User", "EmailAddress") + with Session(connection) as sess: + sess.add_all( + [User(name="u1", address=EmailAddress(email="foo", user_id=1))] + ) + + sess.commit() + + @testing.combinations( + None, selectinload, joinedload, lazyload, subqueryload, immediateload + ) + def test_opt_propagates(self, strat): + User, EmailAddress = self.classes("User", "EmailAddress") + sess = fixture_session() + + class AnyOpt(CompileStateOption): + _cache_key_traversal = () + propagate_to_loaders = True + + def _adjust_for_extra_criteria(self, context): + return self + + from sqlalchemy.orm.strategy_options import Load + + with mock.patch.object( + Load, "_adjust_for_extra_criteria", lambda self, ctx: self + ): + any_opt = AnyOpt() + if strat is None: + opts = (any_opt,) + else: + opts = (strat(User.address), any_opt) + + u = sess.execute(select(User).options(*opts)).scalars().one() + address = u.address + eq_(inspect(address).load_options, opts) + + +class NoBaseWPPlusAliasedTest( + testing.AssertsExecutionResults, fixtures.TestBase +): + """test for #7799""" + + @testing.fixture + def mapping_fixture(self, registry, connection): + Base = registry.generate_base() + + class BaseClass(Base): + __tablename__ = "baseclass" + id = Column( + Integer, + primary_key=True, + unique=True, + ) + + class A(BaseClass): + __tablename__ = "a" + + id = Column(ForeignKey(BaseClass.id), primary_key=True) + thing1 = Column(String(50)) + + __mapper_args__ = {"polymorphic_identity": "a"} + + class B(BaseClass): + __tablename__ = "b" + + id = Column(ForeignKey(BaseClass.id), primary_key=True) + thing2 = Column(String(50)) + + __mapper_args__ = {"polymorphic_identity": "b"} + + registry.metadata.create_all(connection) + with Session(connection) as sess: + + sess.add_all( + [ + A(thing1="thing1_1"), + A(thing1="thing1_2"), + B(thing2="thing2_2"), + B(thing2="thing2_3"), + A(thing1="thing1_3"), + A(thing1="thing1_4"), + B(thing2="thing2_1"), + B(thing2="thing2_4"), + ] + ) + + sess.commit() + + return BaseClass, A, B + + def test_wp(self, mapping_fixture, connection): + BaseClass, A, B = mapping_fixture + + stmt = union( + select(A.id, literal("a").label("type")), + select(B.id, literal("b").label("type")), + ).subquery() + + wp = 
with_polymorphic( + BaseClass, + [A, B], + selectable=stmt, + polymorphic_on=stmt.c.type, + ) + + session = Session(connection) + + with self.sql_execution_asserter() as asserter: + result = session.scalars( + select(wp) + .options(selectin_polymorphic(wp, [A, B])) + .order_by(wp.id) + ) + for obj in result: + if isinstance(obj, A): + obj.thing1 + else: + obj.thing2 + + asserter.assert_( + CompiledSQL( + "SELECT anon_1.id, anon_1.type FROM " + "(SELECT a.id AS id, :param_1 AS type FROM baseclass " + "JOIN a ON baseclass.id = a.id " + "UNION SELECT b.id AS id, :param_2 AS type " + "FROM baseclass JOIN b ON baseclass.id = b.id) AS anon_1 " + "ORDER BY anon_1.id", + [{"param_1": "a", "param_2": "b"}], + ), + AllOf( + CompiledSQL( + "SELECT a.id AS a_id, baseclass.id AS baseclass_id, " + "a.thing1 AS a_thing1 FROM baseclass " + "JOIN a ON baseclass.id = a.id " + "WHERE baseclass.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY baseclass.id", + {"primary_keys": [1, 2, 5, 6]}, + ), + CompiledSQL( + "SELECT b.id AS b_id, baseclass.id AS baseclass_id, " + "b.thing2 AS b_thing2 FROM baseclass " + "JOIN b ON baseclass.id = b.id " + "WHERE baseclass.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY baseclass.id", + {"primary_keys": [3, 4, 7, 8]}, + ), + ), + ) + + +class CompositeAttributesTest(fixtures.TestBase): + @testing.fixture + def mapping_fixture(self, registry, connection): + Base = registry.generate_base() + + class BaseCls(Base): + __tablename__ = "base" + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + type = Column(String(50)) + + __mapper_args__ = {"polymorphic_on": type} + + class XYThing: + def __init__(self, x, y): + self.x = x + self.y = y + + def __composite_values__(self): + return (self.x, self.y) + + def __eq__(self, other): + return ( + isinstance(other, XYThing) + and other.x == self.x + and other.y == self.y + ) + + def __ne__(self, other): + return not self.__eq__(other) + + class A(ComparableEntity, BaseCls): + __tablename__ = "a" + id = Column(ForeignKey(BaseCls.id), primary_key=True) + thing1 = Column(String(50)) + comp1 = composite( + XYThing, Column("x1", Integer), Column("y1", Integer) + ) + + __mapper_args__ = { + "polymorphic_identity": "a", + "polymorphic_load": "selectin", + } + + class B(ComparableEntity, BaseCls): + __tablename__ = "b" + id = Column(ForeignKey(BaseCls.id), primary_key=True) + thing2 = Column(String(50)) + comp2 = composite( + XYThing, Column("x2", Integer), Column("y2", Integer) + ) + + __mapper_args__ = { + "polymorphic_identity": "b", + "polymorphic_load": "selectin", + } + + registry.metadata.create_all(connection) + + with Session(connection) as sess: + sess.add_all( + [ + A(id=1, thing1="thing1", comp1=XYThing(1, 2)), + B(id=2, thing2="thing2", comp2=XYThing(3, 4)), + ] + ) + sess.commit() + + return BaseCls, A, B, XYThing + + def test_load_composite(self, mapping_fixture, connection): + BaseCls, A, B, XYThing = mapping_fixture + + with Session(connection) as sess: + rows = sess.scalars(select(BaseCls).order_by(BaseCls.id)).all() + + eq_( + rows, + [ + A(id=1, thing1="thing1", comp1=XYThing(1, 2)), + B(id=2, thing2="thing2", comp2=XYThing(3, 4)), + ], + ) + + +class PolymorphicOnExprTest( + testing.AssertsExecutionResults, fixtures.TestBase +): + """test for #8704""" + + @testing.fixture() + def poly_fixture(self, connection, decl_base): + def fixture(create_prop, use_load): + class TypeTable(decl_base): + __tablename__ = "type" + + id = Column(Integer, primary_key=True) + name = Column(String(30)) + + class 
PolyBase(ComparableEntity, decl_base): + __tablename__ = "base" + + id = Column(Integer, primary_key=True) + type_id = Column(ForeignKey(TypeTable.id)) + + if create_prop == "create_prop": + polymorphic = column_property( + select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + ) + __mapper_args__ = { + "polymorphic_on": polymorphic, + } + elif create_prop == "dont_create_prop": + __mapper_args__ = { + "polymorphic_on": select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + } + elif create_prop == "arg_level_prop": + __mapper_args__ = { + "polymorphic_on": column_property( + select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + ) + } + + class Foo(PolyBase): + __tablename__ = "foo" + + if use_load == "use_polymorphic_load": + __mapper_args__ = { + "polymorphic_identity": "foo", + "polymorphic_load": "selectin", + } + else: + __mapper_args__ = { + "polymorphic_identity": "foo", + } + + id = Column(ForeignKey(PolyBase.id), primary_key=True) + foo_attr = Column(String(30)) + + decl_base.metadata.create_all(connection) + + with Session(connection) as session: + foo_type = TypeTable(name="foo") + session.add(foo_type) + session.flush() + + foo = Foo(type_id=foo_type.id, foo_attr="foo value") + session.add(foo) + + session.commit() + + return PolyBase, Foo, TypeTable + + yield fixture + + @testing.combinations( + "create_prop", + "dont_create_prop", + "arg_level_prop", + argnames="create_prop", + ) + @testing.combinations( + "use_polymorphic_load", + "use_loader_option", + "none", + argnames="use_load", + ) + def test_load_selectin( + self, poly_fixture, connection, create_prop, use_load + ): + PolyBase, Foo, TypeTable = poly_fixture(create_prop, use_load) + + sess = Session(connection) + + foo_type = sess.scalars(select(TypeTable)).one() + + stmt = select(PolyBase) + if use_load == "use_loader_option": + stmt = stmt.options(selectin_polymorphic(PolyBase, [Foo])) + obj = sess.scalars(stmt).all() + + def go(): + eq_(obj, [Foo(type_id=foo_type.id, foo_attr="foo value")]) + + self.assert_sql_count(testing.db, go, 0 if use_load != "none" else 1) diff --git a/test/orm/inheritance/test_polymorphic_rel.py b/test/orm/inheritance/test_polymorphic_rel.py index 60235bd86cd..d30d7a28c48 100644 --- a/test/orm/inheritance/test_polymorphic_rel.py +++ b/test/orm/inheritance/test_polymorphic_rel.py @@ -1,5 +1,6 @@ from sqlalchemy import desc from sqlalchemy import exc as sa_exc +from sqlalchemy import exists from sqlalchemy import func from sqlalchemy import select from sqlalchemy import testing @@ -10,10 +11,10 @@ from sqlalchemy.orm import joinedload from sqlalchemy.orm import selectinload from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import with_parent from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import assert_raises from sqlalchemy.testing import eq_ -from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session from ._poly_fixtures import _Polymorphic @@ -30,7 +31,7 @@ from ._poly_fixtures import Person -class _PolymorphicTestBase(fixtures.NoCache): +class _PolymorphicTestBase(object): __backend__ = True __dialect__ = "default_enhanced" @@ -64,6 +65,44 @@ def insert_data(cls, connection): ) e1, e2, e3, b1, m1 = cls.e1, cls.e2, cls.e3, cls.b1, cls.m1 + @testing.requires.ctes + def test_cte_clone_issue(self): + """test #8357""" + + sess = fixture_session() + + cte = 
select(Engineer.person_id).cte(name="test_cte") + + stmt = ( + select(Engineer) + .where(exists().where(Engineer.person_id == cte.c.person_id)) + .where(exists().where(Engineer.person_id == cte.c.person_id)) + ).order_by(Engineer.person_id) + + self.assert_compile( + stmt, + "WITH test_cte AS (SELECT engineers.person_id AS person_id " + "FROM people JOIN engineers ON people.person_id = " + "engineers.person_id) SELECT engineers.person_id, " + "people.person_id AS person_id_1, people.company_id, " + "people.name, people.type, engineers.status, " + "engineers.engineer_name, engineers.primary_language FROM people " + "JOIN engineers ON people.person_id = engineers.person_id WHERE " + "(EXISTS (SELECT * FROM test_cte WHERE engineers.person_id = " + "test_cte.person_id)) AND (EXISTS (SELECT * FROM test_cte " + "WHERE engineers.person_id = test_cte.person_id)) " + "ORDER BY engineers.person_id", + ) + result = sess.scalars(stmt) + eq_( + result.all(), + [ + Engineer(name="dilbert"), + Engineer(name="wally"), + Engineer(name="vlad"), + ], + ) + def test_loads_at_once(self): """ Test that all objects load from the full query, when @@ -195,6 +234,34 @@ def test_get_three(self): Boss(name="pointy haired boss", golf_swing="fore"), ) + def test_lazyload_related_w_cache_check(self): + sess = fixture_session() + + c1 = sess.get(Company, 1) + c2 = sess.get(Company, 2) + + q1 = ( + sess.query(Person) + .filter(with_parent(c1, Company.employees)) + .order_by(Person.person_id) + ) + eq_( + q1.all(), + [ + Engineer(name="dilbert"), + Engineer(name="wally"), + Boss(name="pointy haired boss"), + Manager(name="dogbert"), + ], + ) + + q2 = ( + sess.query(Person) + .filter(with_parent(c2, Company.employees)) + .order_by(Person.person_id) + ) + eq_(q2.all(), [Engineer(name="vlad")]) + def test_multi_join(self): sess = fixture_session() e = aliased(Person) @@ -436,6 +503,7 @@ def test_join_from_polymorphic_explicit_aliased_two(self): def test_join_from_polymorphic_explicit_aliased_three(self): sess = fixture_session() pa = aliased(Paperwork) + eq_( sess.query(Engineer) .order_by(Person.person_id) @@ -881,7 +949,7 @@ def go(): self.assert_sql_count(testing.db, go, 1) - def test_with_polymorphic_three_future(self): + def test_with_polymorphic_three_future(self, nocache): sess = fixture_session() def go(): @@ -2034,6 +2102,41 @@ def test_correlation_three(self): class PolymorphicTest(_PolymorphicTestBase, _Polymorphic): + def test_joined_aliasing_unrelated_subuqery(self): + """test #8456""" + + inner = select(Engineer).where(Engineer.name == "vlad").subquery() + + crit = select(inner.c.person_id) + + outer = select(Engineer).where(Engineer.person_id.in_(crit)) + + # this query will not work at all for any "polymorphic" case + # as it will adapt the inner query as well. for those cases, + # aliased() has to be used for the inner entity to disambiguate it. 
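# Editor's sketch, not part of the patch: the preceding comment notes that for
# a polymorphic mapping the inner query would be adapted together with the
# outer one, so the inner entity has to be wrapped in aliased() to keep the
# two references distinct. A minimal illustration of that aliased() pattern,
# assuming the Engineer mapping from ._poly_fixtures is configured as in
# these tests:
from sqlalchemy import select
from sqlalchemy.orm import aliased
from test.orm.inheritance._poly_fixtures import Engineer

inner_engineer = aliased(Engineer)  # distinct reference for the inner query
inner = (
    select(inner_engineer.person_id)
    .where(inner_engineer.name == "vlad")
    .subquery()
)
outer = select(Engineer).where(
    Engineer.person_id.in_(select(inner.c.person_id))
)
# End of editor's sketch.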
+ self.assert_compile( + outer, + "SELECT engineers.person_id, people.person_id AS person_id_1, " + "people.company_id, people.name, people.type, engineers.status, " + "engineers.engineer_name, engineers.primary_language " + "FROM people JOIN engineers " + "ON people.person_id = engineers.person_id " + "WHERE engineers.person_id IN " + "(SELECT anon_1.person_id FROM " + "(SELECT engineers.person_id AS person_id, " + "people.person_id AS person_id_1, " + "people.company_id AS company_id, people.name AS name, " + "people.type AS type, engineers.status AS status, " + "engineers.engineer_name AS engineer_name, " + "engineers.primary_language AS primary_language FROM people " + "JOIN engineers ON people.person_id = engineers.person_id " + "WHERE people.name = :name_1) " + "AS anon_1)", + ) + + sess = fixture_session() + eq_(sess.scalars(outer).all(), [Engineer(name="vlad")]) + def test_primary_eager_aliasing_three_dont_reset_selectable(self): """test now related to #7262 @@ -2284,6 +2387,12 @@ def test_flat_aliased_w_select_from(self): class PolymorphicUnionsTest(_PolymorphicTestBase, _PolymorphicUnions): + @testing.skip_if( + lambda: True, "join condition doesn't work w/ this mapping" + ) + def test_lazyload_related_w_cache_check(self): + pass + def test_with_polymorphic_two_future_default_wp(self): """test #7262 @@ -2385,6 +2494,12 @@ def test_subqueryload_on_subclass_uses_path_correctly(self): class PolymorphicAliasedJoinsTest( _PolymorphicTestBase, _PolymorphicAliasedJoins ): + @testing.skip_if( + lambda: True, "join condition doesn't work w/ this mapping" + ) + def test_lazyload_related_w_cache_check(self): + pass + def test_with_polymorphic_two_future_default_wp(self): """test #7262 diff --git a/test/orm/inheritance/test_relationship.py b/test/orm/inheritance/test_relationship.py index eeb3a7ed636..c4e20fefce1 100644 --- a/test/orm/inheritance/test_relationship.py +++ b/test/orm/inheritance/test_relationship.py @@ -2190,7 +2190,7 @@ def test_contains_eager_multi_alias(self): "a_b.kind AS a_b_kind, a_b.a_id AS a_b_a_id, a.id AS a_id_1, " "a.kind AS a_kind, a.a_id AS a_a_id FROM a " "LEFT OUTER JOIN a AS a_b ON a.id = a_b.a_id AND a_b.kind IN " - "([POSTCOMPILE_kind_1]) LEFT OUTER JOIN x AS b_x " + "(__[POSTCOMPILE_kind_1]) LEFT OUTER JOIN x AS b_x " "ON a_b.id = b_x.a_id", ) @@ -2383,7 +2383,7 @@ def _test_poly_single_poly(self, fn): joinedload(cls.links).joinedload(Link.child).joinedload(cls.links) ) if cls is self.classes.Sub1: - extra = " WHERE parent.type IN ([POSTCOMPILE_type_1])" + extra = " WHERE parent.type IN (__[POSTCOMPILE_type_1])" else: extra = "" @@ -2413,7 +2413,7 @@ def _test_single_poly_poly(self, fn): ) if Link.child.property.mapper.class_ is self.classes.Sub1: - extra = "AND parent_1.type IN ([POSTCOMPILE_type_1]) " + extra = "AND parent_1.type IN (__[POSTCOMPILE_type_1]) " else: extra = "" @@ -2512,9 +2512,9 @@ def test_local_wpoly_innerjoins_roundtrip(self): class JoinAcrossJoinedInhMultiPath( fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL ): - """test long join paths with a joined-inh in the middle, where we go multiple - times across the same joined-inh to the same target but with other classes - in the middle. E.g. test [ticket:2908] + """test long join paths with a joined-inh in the middle, where we go + multiple times across the same joined-inh to the same target but with + other classes in the middle. E.g. 
test [ticket:2908] """ run_setup_mappers = "once" @@ -3015,3 +3015,111 @@ def test_load_m2o_use_get(self): is_(obj.child2, None) is_(obj.parent, c1) + + +class JoinedLoadSpliceFromJoinedTest( + testing.AssertsCompiledSQL, fixtures.DeclarativeMappedTest +): + """test #8378""" + + __dialect__ = "default" + run_create_tables = None + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Root(Base): + __tablename__ = "root" + + id = Column(Integer, primary_key=True) + root_elements = relationship("BaseModel") + + class BaseModel(Base): + __tablename__ = "base_model" + + id = Column(Integer, primary_key=True) + root_id = Column(Integer, ForeignKey("root.id"), nullable=False) + type = Column(String, nullable=False) + __mapper_args__ = {"polymorphic_on": type} + + class SubModel(BaseModel): + elements = relationship("SubModelElement") + __mapper_args__ = {"polymorphic_identity": "sub_model"} + + class SubModelElement(Base): + __tablename__ = "sub_model_element" + + id = Column(Integer, primary_key=True) + model_id = Column(ForeignKey("base_model.id"), nullable=False) + + def test_oj_ij(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload(Root.root_elements.of_type(SubModel)).joinedload( + SubModel.elements, innerjoin=True + ) + ) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root LEFT OUTER JOIN (base_model AS base_model_1 " + "JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id) " + "ON root.id = base_model_1.root_id", + ) + + def test_ij_oj(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload( + Root.root_elements.of_type(SubModel), innerjoin=True + ).joinedload(SubModel.elements) + ) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root JOIN base_model AS base_model_1 " + "ON root.id = base_model_1.root_id " + "LEFT OUTER JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id" + "", + ) + + def test_ij_ij(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload( + Root.root_elements.of_type(SubModel), innerjoin=True + ).joinedload(SubModel.elements, innerjoin=True) + ) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root JOIN base_model AS base_model_1 " + "ON root.id = base_model_1.root_id " + "JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id" + "", + ) diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py index 
30d4549c41b..afb0ce95dca 100644 --- a/test/orm/inheritance/test_single.py +++ b/test/orm/inheritance/test_single.py @@ -13,6 +13,8 @@ from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle +from sqlalchemy.orm import column_property +from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload from sqlalchemy.orm import relationship from sqlalchemy.orm import Session @@ -182,9 +184,9 @@ def test_discrim_bound_param_cloned_ok(self): self.assert_compile( select(subq1, subq2), "SELECT (SELECT employees.employee_id FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS foo, " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS foo, " "(SELECT employees.employee_id FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS bar", + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS bar", ) def test_multi_qualification(self): @@ -329,7 +331,7 @@ def test_from_self_legacy(self): "employees.engineer_info AS " "employees_engineer_info, employees.type " "AS employees_type FROM employees WHERE " - "employees.type IN ([POSTCOMPILE_type_1])) AS " + "employees.type IN (__[POSTCOMPILE_type_1])) AS " "anon_1", use_default_dialect=True, ) @@ -370,8 +372,8 @@ def test_from_subq(self): "employees.engineer_info AS " "employees_engineer_info, employees.type " "AS employees_type FROM employees WHERE " - "employees.type IN ([POSTCOMPILE_type_1])) AS " - "anon_1 WHERE anon_1.employees_type IN ([POSTCOMPILE_type_2])", + "employees.type IN (__[POSTCOMPILE_type_1])) AS " + "anon_1 WHERE anon_1.employees_type IN (__[POSTCOMPILE_type_2])", use_default_dialect=True, ) @@ -385,13 +387,183 @@ def test_select_from_aliased_w_subclass(self): sess.query(a1.employee_id).select_from(a1), "SELECT employees_1.employee_id AS employees_1_employee_id " "FROM employees AS employees_1 WHERE employees_1.type " - "IN ([POSTCOMPILE_type_1])", + "IN (__[POSTCOMPILE_type_1])", ) self.assert_compile( sess.query(literal("1")).select_from(a1), "SELECT :param_1 AS anon_1 FROM employees AS employees_1 " - "WHERE employees_1.type IN ([POSTCOMPILE_type_1])", + "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", + ) + + @testing.combinations( + ( + lambda Engineer, Report: select(Report.report_id) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select(Report.report_id).select_from( + orm_join(Engineer, Report, Engineer.reports) + ), + ), + ( + lambda Engineer, Report: select(Report.report_id).join_from( + Engineer, Report, Engineer.reports + ), + ), + ( + lambda Engineer, Report: select(Report.report_id) + .select_from(Engineer) + .join(Report), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_engineer") + def test_select_col_only_from_w_join(self, stmt_fn, alias_engineer): + """test #11412 which seems to have been fixed by #10365""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + + if alias_engineer: + Engineer = aliased(Engineer) + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + if alias_engineer: + self.assert_compile( + stmt, + "SELECT reports.report_id FROM employees AS employees_1 " + "JOIN reports ON employees_1.employee_id = " + "reports.employee_id WHERE employees_1.type " + "IN (__[POSTCOMPILE_type_1])", + ) + else: + self.assert_compile( + stmt, + "SELECT reports.report_id FROM employees JOIN reports " + "ON employees.employee_id = reports.employee_id " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])", + 
) + + @testing.combinations( + ( + lambda Engineer, Report: select(Report) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select(Report).select_from( + orm_join(Engineer, Report, Engineer.reports) + ), + ), + ( + lambda Engineer, Report: select(Report).join_from( + Engineer, Report, Engineer.reports + ), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_engineer") + def test_select_from_w_join_left(self, stmt_fn, alias_engineer): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + + if alias_engineer: + Engineer = aliased(Engineer) + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + if alias_engineer: + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM employees AS employees_1 JOIN reports " + "ON employees_1.employee_id = reports.employee_id " + "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", + ) + else: + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM employees JOIN reports ON employees.employee_id = " + "reports.employee_id " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])", + ) + + @testing.combinations( + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ).select_from(orm_join(Engineer, Report, Engineer.reports)), + ), + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ).join_from(Engineer, Report, Engineer.reports), + ), + ) + def test_select_from_w_join_left_including_entity(self, stmt_fn): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + self.assert_compile( + stmt, + "SELECT reports.report_id, employees.employee_id " + "FROM employees JOIN reports ON employees.employee_id = " + "reports.employee_id " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])", + ) + + @testing.combinations( + ( + lambda Engineer, Report: select(Report).join( + Report.employee.of_type(Engineer) + ), + ), + ( + lambda Engineer, Report: select(Report).select_from( + orm_join(Report, Engineer, Report.employee.of_type(Engineer)) + ) + ), + ( + lambda Engineer, Report: select(Report).join_from( + Report, Engineer, Report.employee.of_type(Engineer) + ), + ), + ) + def test_select_from_w_join_right(self, stmt_fn): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM reports JOIN employees ON employees.employee_id = " + "reports.employee_id AND employees.type " + "IN (__[POSTCOMPILE_type_1])", ) def test_from_statement_select(self): @@ -406,7 +578,7 @@ def test_from_statement_select(self): "SELECT employees.employee_id, employees.name, " "employees.manager_data, employees.engineer_info, " "employees.type FROM employees WHERE employees.type " - "IN ([POSTCOMPILE_type_1])", + "IN (__[POSTCOMPILE_type_1])", ) def test_from_statement_update(self): @@ -427,7 +599,7 @@ def test_from_statement_update(self): self.assert_compile( q, "UPDATE employees SET engineer_info=:engineer_info " - "WHERE employees.type IN 
([POSTCOMPILE_type_1]) " + "WHERE employees.type IN (__[POSTCOMPILE_type_1]) " "RETURNING employees.employee_id", dialect="default_enhanced", ) @@ -452,7 +624,7 @@ def test_union_modifiers(self): "employees.engineer_info AS employees_engineer_info, " "employees.type AS employees_type FROM employees " "WHERE employees.engineer_info = :engineer_info_1 " - "AND employees.type IN ([POSTCOMPILE_type_1]) " + "AND employees.type IN (__[POSTCOMPILE_type_1]) " "%(token)s " "SELECT employees.employee_id AS employees_employee_id, " "employees.name AS employees_name, " @@ -460,7 +632,7 @@ def test_union_modifiers(self): "employees.engineer_info AS employees_engineer_info, " "employees.type AS employees_type FROM employees " "WHERE employees.manager_data = :manager_data_1 " - "AND employees.type IN ([POSTCOMPILE_type_2])) AS anon_1" + "AND employees.type IN (__[POSTCOMPILE_type_2])) AS anon_1" ) for meth, token in [ @@ -496,7 +668,7 @@ def test_having(self): "employees.name AS employees_name, employees.manager_data " "AS employees_manager_data, employees.engineer_info " "AS employees_engineer_info, employees.type AS employees_type " - "FROM employees WHERE employees.type IN ([POSTCOMPILE_type_1]) " + "FROM employees WHERE employees.type IN (__[POSTCOMPILE_type_1]) " "GROUP BY employees.employee_id HAVING employees.name = :name_1", ) @@ -511,7 +683,7 @@ def test_from_self_count(self): "SELECT count(*) AS count_1 " "FROM (SELECT employees.employee_id AS employees_employee_id " "FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS anon_1", + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS anon_1", use_default_dialect=True, ) @@ -649,7 +821,7 @@ def test_exists_standalone(self): ), "SELECT EXISTS (SELECT 1 FROM employees WHERE " "employees.name = :name_1 AND employees.type " - "IN ([POSTCOMPILE_type_1])) AS anon_1", + "IN (__[POSTCOMPILE_type_1])) AS anon_1", ) def test_type_filtering(self): @@ -783,7 +955,8 @@ def test_subquery_load(self): CompiledSQL( "SELECT employee.id AS employee_id, employee.name AS " "employee_name, employee.type AS employee_type " - "FROM employee WHERE employee.type IN ([POSTCOMPILE_type_1])", + "FROM employee WHERE employee.type IN " + "(__[POSTCOMPILE_type_1])", params=[{"type_1": ["manager"]}], ), CompiledSQL( @@ -794,7 +967,7 @@ def test_subquery_load(self): "employee_stuff_name, anon_1.employee_id " "AS anon_1_employee_id FROM (SELECT " "employee.id AS employee_id FROM employee " - "WHERE employee.type IN ([POSTCOMPILE_type_1])) AS anon_1 " + "WHERE employee.type IN (__[POSTCOMPILE_type_1])) AS anon_1 " "JOIN employee_stuff ON anon_1.employee_id " "= employee_stuff.employee_id", params=[{"type_1": ["manager"]}], @@ -979,7 +1152,7 @@ def test_outer_join_prop(self): "employees.name AS employees_name " "FROM companies LEFT OUTER JOIN employees ON companies.company_id " "= employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_prop_alias(self): @@ -1013,7 +1186,7 @@ def test_outer_join_prop_alias(self): "employees_1_name FROM companies LEFT OUTER " "JOIN employees AS employees_1 ON companies.company_id " "= employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_literal_onclause(self): @@ -1051,7 +1224,7 @@ def test_outer_join_literal_onclause(self): "employees.company_id AS employees_company_id FROM companies " "LEFT OUTER JOIN employees ON " "companies.company_id = 
employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_literal_onclause_alias(self): @@ -1090,7 +1263,7 @@ def test_outer_join_literal_onclause_alias(self): "employees_1.company_id AS employees_1_company_id " "FROM companies LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_no_onclause(self): @@ -1126,7 +1299,7 @@ def test_outer_join_no_onclause(self): "employees.company_id AS employees_company_id " "FROM companies LEFT OUTER JOIN employees ON " "companies.company_id = employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_no_onclause_alias(self): @@ -1163,7 +1336,7 @@ def test_outer_join_no_onclause_alias(self): "employees_1.company_id AS employees_1_company_id " "FROM companies LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_correlated_column_select(self): @@ -1200,7 +1373,7 @@ def test_correlated_column_select(self): "(SELECT count(employees.employee_id) AS count_1 " "FROM employees WHERE employees.company_id = " "companies.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])) AS anon_1 " + "AND employees.type IN (__[POSTCOMPILE_type_1])) AS anon_1 " "FROM companies", ) @@ -1282,8 +1455,8 @@ def test_no_aliasing_from_overlap(self): "ON companies.company_id = employees.company_id " "JOIN employees " "ON companies.company_id = employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1]) " - "WHERE employees.type IN ([POSTCOMPILE_type_2])", + "AND employees.type IN (__[POSTCOMPILE_type_1]) " + "WHERE employees.type IN (__[POSTCOMPILE_type_2])", ) def test_relationship_to_subclass(self): @@ -1554,7 +1727,7 @@ def test_assert_join_sql(self): "child.name AS child_name " "FROM parent LEFT OUTER JOIN (m2m AS m2m_1 " "JOIN child ON child.id = m2m_1.child_id " - "AND child.discriminator IN ([POSTCOMPILE_discriminator_1])) " + "AND child.discriminator IN (__[POSTCOMPILE_discriminator_1])) " "ON parent.id = m2m_1.parent_id", ) @@ -1571,7 +1744,7 @@ def test_assert_joinedload_sql(self): "FROM parent LEFT OUTER JOIN " "(m2m AS m2m_1 JOIN child AS child_1 " "ON child_1.id = m2m_1.child_id AND child_1.discriminator " - "IN ([POSTCOMPILE_discriminator_1])) " + "IN (__[POSTCOMPILE_discriminator_1])) " "ON parent.id = m2m_1.parent_id", ) @@ -1780,7 +1953,7 @@ def test_wpoly_single_inh_subclass(self): "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " "AS anon_1 " - "WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) def test_query_wpoly_single_inh_subclass(self): @@ -1809,7 +1982,7 @@ def test_query_wpoly_single_inh_subclass(self): "engineer.engineer_info AS engineer_engineer_info, " "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " - "AS anon_1 WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "AS anon_1 WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) @testing.combinations((True,), (False,), argnames="autoalias") @@ -1836,7 +2009,7 @@ def 
test_single_inh_subclass_join_joined_inh_subclass(self, autoalias): "JOIN (employee AS employee_1 JOIN engineer AS engineer_1 " "ON employee_1.id = engineer_1.id) " "ON engineer_1.manager_id = manager.id " - "WHERE employee.type IN ([POSTCOMPILE_type_1])", + "WHERE employee.type IN (__[POSTCOMPILE_type_1])", ) def test_single_inh_subclass_join_wpoly_joined_inh_subclass(self): @@ -1873,7 +2046,7 @@ def test_single_inh_subclass_join_wpoly_joined_inh_subclass(self): "FROM employee " "JOIN engineer ON employee.id = engineer.id) AS anon_1 " "ON anon_1.manager_id = manager.id " - "WHERE employee.type IN ([POSTCOMPILE_type_1])", + "WHERE employee.type IN (__[POSTCOMPILE_type_1])", ) @testing.combinations((True,), (False,), argnames="autoalias") @@ -1903,7 +2076,7 @@ def test_joined_inh_subclass_join_single_inh_subclass(self, autoalias): "JOIN (employee AS employee_1 JOIN manager AS manager_1 " "ON employee_1.id = manager_1.id) " "ON engineer.manager_id = manager_1.id " - "AND employee_1.type IN ([POSTCOMPILE_type_1])", + "AND employee_1.type IN (__[POSTCOMPILE_type_1])", ) @@ -1978,3 +2151,35 @@ def setup_classes(cls): super(EagerDefaultEvalTestPolymorphic, cls).setup_classes( with_polymorphic="*" ) + + +class ColExprTest(AssertsCompiledSQL, fixtures.TestBase): + def test_discrim_on_column_prop(self, registry): + Base = registry.generate_base() + + class Employee(Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + type = Column(String(20)) + + __mapper_args__ = { + "polymorphic_on": "type", + "polymorphic_identity": "employee", + } + + class Engineer(Employee): + __mapper_args__ = {"polymorphic_identity": "engineer"} + + class Company(Base): + __tablename__ = "company" + id = Column(Integer, primary_key=True) + + max_engineer_id = column_property( + select(func.max(Engineer.id)).scalar_subquery() + ) + + self.assert_compile( + select(Company.max_engineer_id), + "SELECT (SELECT max(employee.id) AS max_1 FROM employee " + "WHERE employee.type IN (__[POSTCOMPILE_type_1])) AS anon_1", + ) diff --git a/test/orm/test_ac_relationships.py b/test/orm/test_ac_relationships.py index 6a050b698c6..57e2b25e927 100644 --- a/test/orm/test_ac_relationships.py +++ b/test/orm/test_ac_relationships.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CompiledSQL @@ -315,7 +316,7 @@ def test_selectinload(self): "SELECT a_1.id AS a_1_id, b.id AS b_id FROM a AS a_1 " "JOIN (b JOIN d ON d.b_id = b.id JOIN c ON c.id = d.c_id) " "ON a_1.b_id = b.id WHERE a_1.id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1]}], ), ) @@ -331,3 +332,65 @@ def test_join(self): "FROM a JOIN (b JOIN d ON d.b_id = b.id " "JOIN c ON c.id = d.c_id) ON a.b_id = b.id", ) + + +class StructuralEagerLoadCycleTest(fixtures.DeclarativeMappedTest): + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + + bs = relationship(lambda: B, back_populates="a") + + class B(Base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + + a = relationship(A, lazy="joined", back_populates="bs") + + partitioned_b = aliased(B) + + A.partitioned_bs = relationship( + partitioned_b, 
lazy="selectin", viewonly=True + ) + + @classmethod + def insert_data(cls, connection): + A, B = cls.classes("A", "B") + + s = Session(connection) + a = A() + a.bs = [B() for _ in range(5)] + s.add(a) + + s.commit() + + @testing.variation("ensure_no_warning", [True, False]) + def test_no_endless_loop(self, ensure_no_warning): + """test #9590""" + + A = self.classes.A + + sess = fixture_session() + + results = sess.scalars(select(A)) + + # the correct behavior is 1. no warnings and 2. no endless loop. + # however when the failure mode is occurring, it correctly warns, + # but then we don't get to see the endless loop happen. + # so test it both ways even though when things are "working", there's + # no problem + if ensure_no_warning: + + a = results.first() + else: + with expect_warnings( + "Loader depth for query is excessively deep", assert_=False + ): + a = results.first() + + a.bs diff --git a/test/orm/test_assorted_eager.py b/test/orm/test_assorted_eager.py index f6d10d8e652..86921ff9fc4 100644 --- a/test/orm/test_assorted_eager.py +++ b/test/orm/test_assorted_eager.py @@ -415,7 +415,8 @@ def setup_mappers(cls): ), def test_eager_terminate(self): - """Eager query generation does not include the same mapper's table twice. + """Eager query generation does not include the same mapper's table + twice. Or, that bi-directional eager loads don't include each other in eager query generation. diff --git a/test/orm/test_attributes.py b/test/orm/test_attributes.py index 130e9807839..70dc0a1295d 100644 --- a/test/orm/test_attributes.py +++ b/test/orm/test_attributes.py @@ -17,6 +17,7 @@ from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing import not_in +from sqlalchemy.testing.assertions import assert_warns from sqlalchemy.testing.mock import call from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.util import all_partial_orderings @@ -3769,7 +3770,7 @@ def test_expired(self): a1.bs.append(B()) state = attributes.instance_state(a1) state._expire(state.dict, set()) - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) def test_replaced(self): A, B = self.A, self.B @@ -3790,7 +3791,7 @@ def test_pop_existing(self): a1.bs.append(B()) state = attributes.instance_state(a1) state._reset(state.dict, "bs") - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) def test_ad_hoc_lazy(self): A, B = self.A, self.B @@ -3799,4 +3800,4 @@ def test_ad_hoc_lazy(self): a1.bs.append(B()) state = attributes.instance_state(a1) _set_callable(state, state.dict, "bs", lambda: B()) - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) diff --git a/test/orm/test_bind.py b/test/orm/test_bind.py index e1cd8fdd8a2..802de996969 100644 --- a/test/orm/test_bind.py +++ b/test/orm/test_bind.py @@ -290,6 +290,28 @@ def test_get_bind(self, testcase, expected): sess.close() + @testing.combinations(True, False) + def test_dont_mutate_binds(self, empty_dict): + users, User = ( + self.tables.users, + self.classes.User, + ) + + mp = self.mapper_registry.map_imperatively(User, users) + + sess = fixture_session() + + if empty_dict: + bind_arguments = {} + else: + bind_arguments = {"mapper": mp} + sess.execute(select(1), bind_arguments=bind_arguments) + + if empty_dict: + eq_(bind_arguments, {}) + else: + eq_(bind_arguments, {"mapper": mp}) + @testing.combinations( ( lambda session, Address: session.query(Address).statement, @@ -323,6 +345,21 @@ def test_get_bind(self, testcase, expected): 
lambda User: {"clause": mock.ANY, "mapper": inspect(User)}, "e1", ), + ( + lambda User: update(User) + .values(name="not ed") + .where(User.name == "ed"), + lambda User: {"clause": mock.ANY, "mapper": inspect(User)}, + "e1", + ), + ( + lambda User: insert(User).values(name="not ed"), + lambda User: { + "clause": mock.ANY, + "mapper": inspect(User), + }, + "e1", + ), ) def test_bind_through_execute( self, statement, expected_get_bind_args, expected_engine_name diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index 7fb232b0b87..93d980e00a5 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -2,16 +2,22 @@ import sqlalchemy as sa from sqlalchemy import Column +from sqlalchemy import column from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import true from sqlalchemy import update +from sqlalchemy import util +from sqlalchemy.ext.declarative import ConcreteBase +from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle from sqlalchemy.orm import defaultload @@ -26,6 +32,7 @@ from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import synonym from sqlalchemy.orm import with_expression from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm import with_polymorphic @@ -35,12 +42,16 @@ from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import int_within_variance from sqlalchemy.testing import ne_ +from sqlalchemy.testing.fixtures import DeclarativeMappedTest from sqlalchemy.testing.fixtures import fixture_session +from sqlalchemy.testing.util import count_cache_key_tuples +from sqlalchemy.testing.util import total_size from test.orm import _fixtures from .inheritance import _poly_fixtures +from .test_events import _RemoveListeners from .test_query import QueryTest -from ..sql.test_compare import CacheKeyFixture def stmt_20(*elements): @@ -50,7 +61,7 @@ def stmt_20(*elements): ) -class CacheKeyTest(CacheKeyFixture, _fixtures.FixtureTest): +class CacheKeyTest(fixtures.CacheKeyFixture, _fixtures.FixtureTest): run_setup_mappers = "once" run_inserts = None run_deletes = None @@ -62,8 +73,19 @@ def setup_mappers(cls): def test_mapper_and_aliased(self): User, Address, Keyword = self.classes("User", "Address", "Keyword") + addresses_table = self.tables.addresses + self._run_cache_key_fixture( - lambda: (inspect(User), inspect(Address), inspect(aliased(User))), + lambda: ( + inspect(User), + inspect(Address), + inspect(aliased(User)), + inspect(aliased(aliased(User, addresses_table))), + inspect(aliased(aliased(User), addresses_table.select())), + inspect(aliased(Address)), + inspect(aliased(Address, addresses_table.select())), + inspect(aliased(User, addresses_table.select())), + ), compare_values=True, ) @@ -387,6 +409,35 @@ def test_orm_query_using_with_entities(self): compare_values=True, ) + def test_synonyms(self, registry): + """test for issue discovered in #7394""" + + @registry.mapped + class User2(object): + __table__ = self.tables.users + + name_syn = synonym("name") + + @registry.mapped + class Address2(object): + __table__ = 
self.tables.addresses + + name_syn = synonym("email_address") + + self._run_cache_key_fixture( + lambda: ( + User2.id, + User2.name, + User2.name_syn, + Address2.name_syn, + Address2.email_address, + aliased(User2).name_syn, + aliased(User2, name="foo").name_syn, + aliased(User2, name="bar").name_syn, + ), + compare_values=True, + ) + def test_more_with_entities_sanity_checks(self): """test issue #6503""" User, Address, Keyword, Order, Item = self.classes( @@ -544,7 +595,7 @@ class MyOpt(CacheableOptions): ) -class PolyCacheKeyTest(CacheKeyFixture, _poly_fixtures._Polymorphic): +class PolyCacheKeyTest(fixtures.CacheKeyFixture, _poly_fixtures._Polymorphic): run_setup_mappers = "once" run_inserts = None run_deletes = None @@ -576,6 +627,94 @@ def test_wp_objects(self): compare_values=True, ) + def test_wpoly_cache_keys(self): + Person, Manager, Engineer, Boss = self.classes( + "Person", "Manager", "Engineer", "Boss" + ) + + meb_stmt = inspect( + with_polymorphic(Person, [Manager, Engineer, Boss]) + ).selectable + me_stmt = inspect( + with_polymorphic(Person, [Manager, Engineer]) + ).selectable + + self._run_cache_key_fixture( + lambda: ( + inspect(Person), + inspect( + aliased(Person, me_stmt), + ), + inspect( + aliased(Person, meb_stmt), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer]), + ), + # aliased=True is the same as flat=True for default selectable + inspect( + with_polymorphic( + Person, [Manager, Engineer], aliased=True + ), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer], flat=True), + ), + inspect( + with_polymorphic( + Person, [Manager, Engineer], flat=True, innerjoin=True + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + flat=True, + _use_mapper_path=True, + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + flat=True, + adapt_on_names=True, + ), + ), + inspect( + with_polymorphic( + Person, [Manager, Engineer], selectable=meb_stmt + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + selectable=meb_stmt, + aliased=True, + ), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer, Boss]), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer, Boss], + polymorphic_on=literal_column("foo"), + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer, Boss], + polymorphic_on=literal_column("bar"), + ), + ), + ), + compare_values=True, + ) + def test_wp_queries(self): Person, Manager, Engineer, Boss = self.classes( "Person", "Manager", "Engineer", "Boss" @@ -648,6 +787,55 @@ def three(): compare_values=True, ) + @testing.variation( + "exprtype", ["plain_column", "self_standing_case", "case_w_columns"] + ) + def test_hybrid_w_case_ac(self, decl_base, exprtype): + """test #9728""" + + class Employees(decl_base): + __tablename__ = "employees" + id = Column(String(128), primary_key=True) + first_name = Column(String(length=64)) + + @hybrid_property + def name(self): + return self.first_name + + @name.expression + def name( + cls, + ): + if exprtype.plain_column: + return cls.first_name + elif exprtype.self_standing_case: + return case( + (column("x") == 1, column("q")), + else_=column("q"), + ) + elif exprtype.case_w_columns: + return case( + (column("x") == 1, column("q")), + else_=cls.first_name, + ) + else: + exprtype.fail() + + def go1(): + employees_2 = aliased(Employees, name="employees_2") + stmt = select(employees_2.name) + return stmt + + def go2(): + employees_2 = aliased(Employees, name="employees_2") + stmt = select(employees_2) + return stmt 
+ + self._run_cache_key_fixture( + lambda: stmt_20(go1(), go2()), + compare_values=True, + ) + class RoundTripTest(QueryTest, AssertsCompiledSQL): __dialect__ = "default" @@ -903,3 +1091,84 @@ def test_bulk_update_cache_key(self): ) eq_(stmt._generate_cache_key(), stmt2._generate_cache_key()) + + +class EmbeddedSubqTest(_RemoveListeners, DeclarativeMappedTest): + """test #8790. + + it's expected that cache key structures will change, this test is here + testing something fairly similar to the issue we had (though vastly + smaller scale) so we mostly want to look for surprise jumps here. + + """ + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Employee(ConcreteBase, Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + __mapper_args__ = { + "polymorphic_identity": "employee", + "concrete": True, + } + + class Manager(Employee): + __tablename__ = "manager" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + manager_data = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "manager", + "concrete": True, + } + + class Engineer(Employee): + __tablename__ = "engineer" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + engineer_info = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "engineer", + "concrete": True, + } + + Base.registry.configure() + + @testing.combinations( + "tuples", ("memory", testing.requires.is64bit), argnames="assert_on" + ) + def test_cache_key_gen(self, assert_on): + Employee = self.classes.Employee + + e1 = aliased(Employee) + e2 = aliased(Employee) + + subq = select(e1).union_all(select(e2)).subquery() + + anno = aliased(Employee, subq) + + stmt = select(anno) + + ck = stmt._generate_cache_key() + + if assert_on == "tuples": + # before the fix for #8790 this was 700 + int_within_variance(142, count_cache_key_tuples(ck), 0.05) + + elif assert_on == "memory": + # before the fix for #8790 this was 55154 + + if util.py312: + testing.skip_test("python platform not available") + elif util.py311: + int_within_variance(39996, total_size(ck), 0.05) + elif util.py310: + int_within_variance(29796, total_size(ck), 0.05) + else: + testing.skip_test("python platform not available") diff --git a/test/orm/test_cascade.py b/test/orm/test_cascade.py index cd7e7c111a3..c32eb00cdb7 100644 --- a/test/orm/test_cascade.py +++ b/test/orm/test_cascade.py @@ -24,6 +24,7 @@ from sqlalchemy.orm.decl_api import declarative_base from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ @@ -94,7 +95,7 @@ def test_delete_with_passive_deletes_all(self): def test_delete_orphan_without_delete(self): Address = self.classes.Address - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "The 'delete-orphan' cascade option requires 'delete'.", relationship, @@ -1221,7 +1222,7 @@ def test_o2m_only_child_transient(self): sess.add(u1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_only_child_persistent(self): User, Address = self.classes.User, self.classes.Address @@ -1239,7 +1240,7 @@ def test_o2m_only_child_persistent(self): sess.add(u1) assert u1 in sess assert a1 not in sess - 
assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address @@ -1265,7 +1266,7 @@ def test_o2m_backref_child_transient(self): sess.add(u1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_transient_nochange(self): User, Address = self.classes.User, self.classes.Address @@ -1301,7 +1302,7 @@ def test_o2m_backref_child_expunged(self): sess.expunge(a1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_expunged_nochange(self): User, Address = self.classes.User, self.classes.Address @@ -1350,7 +1351,7 @@ def test_m2o_only_child_transient(self): sess.add(a1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_only_child_expunged(self): User, Address = self.classes.User, self.classes.Address @@ -1367,7 +1368,7 @@ def test_m2o_only_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address @@ -1393,7 +1394,7 @@ def test_m2o_backref_child_transient(self): sess.add(a1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_backref_child_expunged(self): User, Address = self.classes.User, self.classes.Address @@ -1420,7 +1421,7 @@ def test_m2o_backref_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "not in session", sess.flush ) @@ -1440,7 +1441,7 @@ def test_m2o_backref_future_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "not in session", sess.flush ) @@ -1544,7 +1545,7 @@ def test_m2m_only_child_transient(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_only_child_persistent(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -1562,7 +1563,7 @@ def test_m2m_only_child_persistent(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_pending(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -1588,7 +1589,7 @@ def test_m2m_backref_child_transient(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_transient_nochange(self): Item, Keyword = 
self.classes.Item, self.classes.Keyword @@ -1624,7 +1625,7 @@ def test_m2m_backref_child_expunged(self): sess.expunge(k1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_expunged_nochange(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -2310,7 +2311,7 @@ def test_preserves_orphans_onelevel(self): eq_(sess.query(T2).all(), [T2()]) eq_(sess.query(T3).all(), [T3()]) - @testing.future + @testing.future() def test_preserves_orphans_onelevel_postremove(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) @@ -2808,7 +2809,7 @@ def test_o2m_commit_warns(self): a1 = Address(email_address="a1") a1.user = u1 - assert_raises_message(sa_exc.SAWarning, "not in session", sess.commit) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit) assert a1 not in sess @@ -2871,7 +2872,7 @@ def test_m2o_commit_warns(self): a1.dingalings.append(d1) assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.commit) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit) class PendingOrphanTestSingleLevel(fixtures.MappedTest): @@ -4295,7 +4296,7 @@ class Order(cls.Comparable): ({"delete"}, {"delete"}), ( {"all, delete-orphan"}, - {"delete", "delete-orphan", "merge", "save-update"}, + {"delete", "delete-orphan", "save-update"}, ), ({"save-update, expunge"}, {"save-update"}), ) @@ -4402,7 +4403,10 @@ def test_default_none_cascade(self): not_in(o1, sess) not_in(o2, sess) - def test_default_merge_cascade(self): + @testing.combinations( + "persistent", "pending", argnames="collection_status" + ) + def test_default_merge_cascade(self, collection_status): User, Order, orders, users = ( self.classes.User, self.classes.Order, @@ -4434,12 +4438,31 @@ def test_default_merge_cascade(self): Order(id=2, user_id=1, description="someotherorder"), ) - u1.orders.append(o1) - u1.orders.append(o2) + if collection_status == "pending": + # technically this is pointless, one should not be appending + # to this collection + u1.orders.append(o1) + u1.orders.append(o2) + elif collection_status == "persistent": + sess.add(u1) + sess.flush() + sess.add_all([o1, o2]) + sess.flush() + u1.orders + else: + assert False u1 = sess.merge(u1) - assert not u1.orders + # in 1.4, as of #4993 this was asserting that u1.orders would + # not be present in the new object. However, as observed during + # #8862, this defeats schemes that seek to restore fully loaded + # objects from caches which may even have lazy="raise", but + # in any case would want to not emit new SQL on those collections. 
+ # so we assert here that u1.orders is in fact present + assert "orders" in u1.__dict__ + assert u1.__dict__["orders"] + assert u1.orders def test_default_cascade(self): User, Order, orders, users = ( @@ -4465,7 +4488,7 @@ def test_default_cascade(self): }, ) - eq_(umapper.attrs["orders"].cascade, set()) + eq_(umapper.attrs["orders"].cascade, {"merge"}) def test_write_cascade_disallowed_w_viewonly(self): @@ -4473,7 +4496,7 @@ def test_write_cascade_disallowed_w_viewonly(self): assert_raises_message( sa_exc.ArgumentError, - 'Cascade settings "delete, delete-orphan, merge, save-update" ' + 'Cascade settings "delete, delete-orphan, save-update" ' "apply to persistence operations", relationship, Order, diff --git a/test/orm/test_collection.py b/test/orm/test_collection.py index 3473dcf50bf..c1ef16c6535 100644 --- a/test/orm/test_collection.py +++ b/test/orm/test_collection.py @@ -2504,6 +2504,19 @@ class Child(object): assert control == p.children assert control == list(p.children) + # test #7389 + if hasattr(p.children, "__iadd__"): + control += control + p.children += p.children + assert control == list(p.children) + + control[:] = [o] + p.children[:] = [o] + if hasattr(p.children, "extend"): + control.extend(control) + p.children.extend(p.children) + assert control == list(p.children) + def test_custom(self): someothertable, sometable = ( self.tables.someothertable, diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 5d66e339ab1..c5a76f04f7b 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -1,15 +1,22 @@ from sqlalchemy import bindparam +from sqlalchemy import Column +from sqlalchemy import delete from sqlalchemy import exc +from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import insert from sqlalchemy import inspect +from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import or_ from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import column_property @@ -31,12 +38,17 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import Variation from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.util import resolve_lambda +from sqlalchemy.util.langhelpers import hybridproperty from .inheritance import _poly_fixtures from .test_query import QueryTest +from ..sql import test_compiler +from ..sql.test_compiler import CorrelateTest as _CoreCorrelateTest # TODO: # composites / unions, etc. 
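A minimal, self-contained sketch of the merge() behavior asserted in the test_cascade.py hunk above (#8862): merging a detached parent whose collection is already loaded now keeps that collection populated on the merged instance rather than resetting it, so schemes that restore fully loaded objects from caches do not have to emit new SQL for those collections. The declarative classes, tables, and in-memory SQLite engine below are illustrative assumptions, not the fixtures used by the test suite.

from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    # default cascade now includes "merge", per the test_cascade.py changes
    orders = relationship("Order")


class Order(Base):
    __tablename__ = "orders"
    id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey("users.id"))


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as sess:
    sess.add(User(id=1, orders=[Order(id=1), Order(id=2)]))
    sess.commit()

with Session(engine) as sess:
    detached = sess.get(User, 1)
    detached.orders  # load the collection before detaching
    sess.expunge(detached)

    merged = sess.merge(detached)
    # the already-loaded collection is carried over to the merged instance,
    # so accessing it does not require a new lazy load
    assert "orders" in merged.__dict__
    assert merged.orders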
@@ -107,6 +119,18 @@ def test_froms_join(self): } ], ), + ( + lambda user_alias: (user_alias,), + lambda User, user_alias: [ + { + "name": None, + "type": User, + "aliased": True, + "expr": user_alias, + "entity": user_alias, + } + ], + ), ( lambda User: (User.id,), lambda User: [ @@ -157,17 +181,131 @@ def test_froms_join(self): }, ], ), + ( + lambda user_table: (user_table,), + lambda user_table: [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.Integer), + "expr": user_table.c.id, + }, + { + "name": "name", + "type": testing.eq_type_affinity(sqltypes.String), + "expr": user_table.c.name, + }, + ], + ), ) def test_column_descriptions(self, cols, expected): User, Address = self.classes("User", "Address") + ua = aliased(User) - cols = testing.resolve_lambda(cols, User=User, Address=Address) - expected = testing.resolve_lambda(expected, User=User, Address=Address) + cols = testing.resolve_lambda( + cols, + User=User, + Address=Address, + user_alias=ua, + user_table=inspect(User).local_table, + ) + expected = testing.resolve_lambda( + expected, + User=User, + Address=Address, + user_alias=ua, + user_table=inspect(User).local_table, + ) stmt = select(*cols) - eq_(stmt.column_descriptions, expected) + @testing.combinations(insert, update, delete, argnames="dml_construct") + @testing.combinations( + ( + lambda User: User, + lambda User: (User.id, User.name), + lambda User, user_table: { + "name": "User", + "type": User, + "expr": User, + "entity": User, + "table": user_table, + }, + lambda User: [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.Integer), + "aliased": False, + "expr": User.id, + "entity": User, + }, + { + "name": "name", + "type": testing.eq_type_affinity(sqltypes.String), + "aliased": False, + "expr": User.name, + "entity": User, + }, + ], + ), + argnames="entity, cols, expected_entity, expected_returning", + ) + def test_dml_descriptions( + self, dml_construct, entity, cols, expected_entity, expected_returning + ): + User, Address = self.classes("User", "Address") + + lambda_args = dict( + User=User, + Address=Address, + user_table=inspect(User).local_table, + ) + entity = testing.resolve_lambda(entity, **lambda_args) + cols = testing.resolve_lambda(cols, **lambda_args) + expected_entity = testing.resolve_lambda( + expected_entity, **lambda_args + ) + expected_returning = testing.resolve_lambda( + expected_returning, **lambda_args + ) + + stmt = dml_construct(entity) + if cols: + stmt = stmt.returning(*cols) + + eq_(stmt.entity_description, expected_entity) + eq_(stmt.returning_column_descriptions, expected_returning) + + def test_limit_offset_select(self): + User = self.classes.User + + stmt = select(User.id).limit(5).offset(6) + self.assert_compile( + stmt, + "SELECT users.id FROM users LIMIT :param_1 OFFSET :param_2", + checkparams={"param_1": 5, "param_2": 6}, + ) + + @testing.combinations( + (None, "ROWS ONLY"), + ({"percent": True}, "PERCENT ROWS ONLY"), + ({"percent": True, "with_ties": True}, "PERCENT ROWS WITH TIES"), + ) + def test_fetch_offset_select(self, options, fetch_clause): + User = self.classes.User + + if options is None: + stmt = select(User.id).fetch(5).offset(6) + else: + stmt = select(User.id).fetch(5, **options).offset(6) + + self.assert_compile( + stmt, + "SELECT users.id FROM users OFFSET :param_1 " + "ROWS FETCH FIRST :param_2 %s" % (fetch_clause,), + checkparams={"param_1": 6, "param_2": 5}, + ) + class ColumnsClauseFromsTest(QueryTest, AssertsCompiledSQL): __dialect__ = "default" @@ -798,6 +936,10 @@ def 
query_expression_fixture(self): properties=util.OrderedDict( [ ("value", query_expression()), + ( + "value_w_default", + query_expression(default_expr=literal(15)), + ), ] ), ) @@ -805,6 +947,24 @@ def query_expression_fixture(self): return User + @testing.fixture + def deferred_fixture(self): + User = self.classes.User + users = self.tables.users + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "name": deferred(users.c.name), + "name_upper": column_property( + func.upper(users.c.name), deferred=True + ), + }, + ) + + return User + @testing.fixture def query_expression_w_joinedload_fixture(self): users, User = ( @@ -890,20 +1050,35 @@ def plain_fixture(self): self.mapper_registry.map_imperatively( User, users, + properties={ + "addresses": relationship(Address, back_populates="user") + }, ) self.mapper_registry.map_imperatively( Address, addresses, properties={ - "user": relationship( - User, - ) + "user": relationship(User, back_populates="addresses") }, ) return User, Address + @testing.fixture + def hard_labeled_self_ref_fixture(self, decl_base): + class A(decl_base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + data = Column(String) + data_lower = column_property(func.lower(data).label("hardcoded")) + + as_ = relationship("A") + + return A + def test_no_joinedload_embedded(self, plain_fixture): User, Address = plain_fixture @@ -930,10 +1105,71 @@ def test_with_expr_one(self, query_expression_fixture): self.assert_compile( stmt, - "SELECT users.name || :name_1 AS anon_1, users.id, " + "SELECT users.name || :name_1 AS anon_1, :param_1 AS anon_2, " + "users.id, " "users.name FROM users", ) + def test_exported_columns_query_expression(self, query_expression_fixture): + """test behaviors related to #8881""" + User = query_expression_fixture + + stmt = select(User) + + eq_( + stmt.selected_columns.keys(), + ["value_w_default", "id", "name"], + ) + + stmt = select(User).options( + with_expression(User.value, User.name + "foo") + ) + + # bigger problem. we still don't include 'value', because we dont + # run query options here. not "correct", but is at least consistent + # with deferred + eq_( + stmt.selected_columns.keys(), + ["value_w_default", "id", "name"], + ) + + def test_exported_columns_colprop(self, column_property_fixture): + """test behaviors related to #8881""" + User, _ = column_property_fixture + + stmt = select(User) + + # we get all the cols because they are not deferred and have a value + eq_( + stmt.selected_columns.keys(), + ["concat", "count", "id", "name"], + ) + + def test_exported_columns_deferred(self, deferred_fixture): + """test behaviors related to #8881""" + User = deferred_fixture + + stmt = select(User) + + # don't include 'name_upper' as it's deferred and readonly. 
+ # "name" however is a column on the table, so even though it is + # deferred, it gets special treatment (related to #6661) + eq_( + stmt.selected_columns.keys(), + ["name", "id"], + ) + + stmt = select(User).options( + undefer(User.name), undefer(User.name_upper) + ) + + # undefer doesn't affect the readonly col because we dont look + # at options when we do selected_columns + eq_( + stmt.selected_columns.keys(), + ["name", "id"], + ) + def test_with_expr_two(self, query_expression_fixture): User = query_expression_fixture @@ -946,7 +1182,8 @@ def test_with_expr_two(self, query_expression_fixture): self.assert_compile( stmt, - "SELECT anon_1.foo, anon_1.id, anon_1.name FROM " + "SELECT anon_1.foo, :param_1 AS anon_2, anon_1.id, " + "anon_1.name FROM " "(SELECT users.id AS id, users.name AS name, " "users.name || :name_1 AS foo FROM users) AS anon_1", ) @@ -1012,22 +1249,84 @@ def test_joinedload_outermost(self, plain_fixture): "ON users_1.id = addresses.user_id", ) - def test_contains_eager_outermost(self, plain_fixture): + def test_joinedload_outermost_w_wrapping_elements(self, plain_fixture): User, Address = plain_fixture stmt = ( - select(Address) - .join(Address.user) - .options(contains_eager(Address.user)) + select(User) + .options(joinedload(User.addresses)) + .limit(10) + .distinct() ) - # render joined eager loads with stringify self.assert_compile( stmt, - "SELECT users.id, users.name, addresses.id AS id_1, " - "addresses.user_id, " - "addresses.email_address " - "FROM addresses JOIN users ON users.id = addresses.user_id", + "SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, " + "addresses_1.user_id, addresses_1.email_address FROM " + "(SELECT DISTINCT users.id AS id, users.name AS name FROM users " + "LIMIT :param_1) " + "AS anon_1 LEFT OUTER JOIN addresses AS addresses_1 " + "ON anon_1.id = addresses_1.user_id", + ) + + def test_contains_eager_outermost_w_wrapping_elements(self, plain_fixture): + """test #8569""" + + User, Address = plain_fixture + + stmt = ( + select(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .limit(10) + .distinct() + ) + + self.assert_compile( + stmt, + "SELECT DISTINCT addresses.id, addresses.user_id, " + "addresses.email_address, users.id AS id_1, users.name " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "LIMIT :param_1", + ) + + def test_joinedload_hard_labeled_selfref( + self, hard_labeled_self_ref_fixture + ): + """test #8569""" + + A = hard_labeled_self_ref_fixture + + stmt = select(A).options(joinedload(A.as_)).distinct() + self.assert_compile( + stmt, + "SELECT anon_1.hardcoded, anon_1.id, anon_1.a_id, anon_1.data, " + "lower(a_1.data) AS lower_1, a_1.id AS id_1, a_1.a_id AS a_id_1, " + "a_1.data AS data_1 FROM (SELECT DISTINCT lower(a.data) AS " + "hardcoded, a.id AS id, a.a_id AS a_id, a.data AS data FROM a) " + "AS anon_1 LEFT OUTER JOIN a AS a_1 ON anon_1.id = a_1.a_id", + ) + + def test_contains_eager_hard_labeled_selfref( + self, hard_labeled_self_ref_fixture + ): + """test #8569""" + + A = hard_labeled_self_ref_fixture + + a1 = aliased(A) + stmt = ( + select(A) + .join(A.as_.of_type(a1)) + .options(contains_eager(A.as_.of_type(a1))) + .distinct() + ) + self.assert_compile( + stmt, + "SELECT DISTINCT lower(a.data) AS hardcoded, " + "lower(a_1.data) AS hardcoded, a_1.id, a_1.a_id, a_1.data, " + "a.id AS id_1, a.a_id AS a_id_1, a.data AS data_1 " + "FROM a JOIN a AS a_1 ON a.id = a_1.a_id", ) def test_column_properties(self, column_property_fixture): @@ -2320,3 +2619,55 @@ class 
Foo(object): ) self.assert_compile(stmt1, expected) self.assert_compile(stmt2, expected) + + +class CorrelateTest(fixtures.DeclarativeMappedTest, _CoreCorrelateTest): + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class T1(Base): + __tablename__ = "t1" + a = Column(Integer, primary_key=True) + + @hybridproperty + def c(self): + return self + + class T2(Base): + __tablename__ = "t2" + a = Column(Integer, primary_key=True) + + @hybridproperty + def c(self): + return self + + def _fixture(self): + t1, t2 = self.classes("T1", "T2") + return t1, t2, select(t1).where(t1.c.a == t2.c.a) + + +class CrudParamOverlapTest(test_compiler.CrudParamOverlapTest): + @testing.fixture( + params=Variation.generate_cases("type_", ["orm"]), + ids=["orm"], + ) + def crud_table_fixture(self, request): + type_ = request.param + + if type_.orm: + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + + class Foo(Base): + __tablename__ = "mytable" + myid = Column(Integer, primary_key=True) + name = Column(String) + description = Column(String) + + table1 = Foo + else: + type_.fail() + + yield table1 diff --git a/test/orm/test_cycles.py b/test/orm/test_cycles.py index 9d0369191e0..1d749cac975 100644 --- a/test/orm/test_cycles.py +++ b/test/orm/test_cycles.py @@ -918,7 +918,6 @@ def test_post_update_m2o(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, post_update=True, _legacy_inactive_history_style=( self._legacy_inactive_history_style @@ -1036,7 +1035,6 @@ def test_post_update_backref(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, _legacy_inactive_history_style=( self._legacy_inactive_history_style ), @@ -1096,7 +1094,6 @@ def test_post_update_o2m(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, _legacy_inactive_history_style=( self._legacy_inactive_history_style ), diff --git a/test/orm/test_default_strategies.py b/test/orm/test_default_strategies.py index 9b228bbaa25..9162d63ecda 100644 --- a/test/orm/test_default_strategies.py +++ b/test/orm/test_default_strategies.py @@ -437,7 +437,7 @@ def test_joined_path_wildcards(self): def go(): users[:] = ( sess.query(User) - .options(joinedload(".*")) + .options(joinedload("*")) .options(defaultload(User.addresses).joinedload("*")) .options(defaultload(User.orders).joinedload("*")) .options( @@ -548,7 +548,7 @@ def test_subquery_path_wildcards(self): def go(): users[:] = ( sess.query(User) - .options(subqueryload(".*")) + .options(subqueryload("*")) .options(defaultload(User.addresses).subqueryload("*")) .options(defaultload(User.orders).subqueryload("*")) .options( diff --git a/test/orm/test_deferred.py b/test/orm/test_deferred.py index bfdfb00b7fd..dcf0d683400 100644 --- a/test/orm/test_deferred.py +++ b/test/orm/test_deferred.py @@ -6,6 +6,7 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union_all from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import attributes @@ -1167,7 +1168,7 @@ def _test_load_only_propagate(self, use_load): expected = [ ( "SELECT users.id AS users_id, users.name AS users_name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])", + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])", {"id_1": [7, 8]}, ), ( @@ -1563,14 +1564,11 @@ def 
test_load_only_subclass_from_relationship_bound(self): def test_defer_on_wildcard_subclass(self): # pretty much the same as load_only except doesn't # exclude the primary key - - # TODO: what is ".*"? this is not documented anywhere, how did this - # get implemented without docs ? see #4390 s = fixture_session() q = ( s.query(Manager) .order_by(Person.person_id) - .options(defer(".*"), undefer(Manager.status)) + .options(defer("*"), undefer(Manager.status)) ) self.assert_compile( q, @@ -1749,6 +1747,14 @@ class A(fixtures.ComparableEntity, Base): bs = relationship("B", order_by="B.id") + class A_default(fixtures.ComparableEntity, Base): + __tablename__ = "a_default" + id = Column(Integer, primary_key=True) + x = Column(Integer) + y = Column(Integer) + + my_expr = query_expression(default_expr=literal(15)) + class B(fixtures.ComparableEntity, Base): __tablename__ = "b" id = Column(Integer, primary_key=True) @@ -1767,7 +1773,7 @@ class C(fixtures.ComparableEntity, Base): @classmethod def insert_data(cls, connection): - A, B, C = cls.classes("A", "B", "C") + A, A_default, B, C = cls.classes("A", "A_default", "B", "C") s = Session(connection) s.add_all( @@ -1778,6 +1784,8 @@ def insert_data(cls, connection): A(id=4, x=2, y=10, bs=[B(id=4, p=19, q=8), B(id=5, p=5, q=5)]), C(id=1, x=1), C(id=2, x=2), + A_default(id=1, x=1, y=2), + A_default(id=2, x=2, y=3), ] ) @@ -1952,6 +1960,149 @@ def test_dont_explode_on_expire_whole(self): q.first() eq_(a1.my_expr, 5) + @testing.combinations("core", "orm", argnames="use_core") + @testing.combinations( + "from_statement", "aliased", argnames="use_from_statement" + ) + @testing.combinations( + "same_name", "different_name", argnames="use_same_labelname" + ) + @testing.combinations( + "has_default", "no_default", argnames="attr_has_default" + ) + def test_expr_from_subq_plain( + self, + use_core, + use_from_statement, + use_same_labelname, + attr_has_default, + ): + """test #8881""" + + if attr_has_default == "has_default": + A = self.classes.A_default + else: + A = self.classes.A + + s = fixture_session() + + if use_same_labelname == "same_name": + labelname = "my_expr" + else: + labelname = "hi" + + if use_core == "core": + stmt = select(A.__table__, literal(12).label(labelname)) + else: + stmt = select(A, literal(12).label(labelname)) + + if use_from_statement == "aliased": + subq = stmt.subquery() + a1 = aliased(A, subq) + stmt = select(a1).options( + with_expression(a1.my_expr, subq.c[labelname]) + ) + else: + subq = stmt + stmt = ( + select(A) + .options( + with_expression( + A.my_expr, subq.selected_columns[labelname] + ) + ) + .from_statement(subq) + ) + + a_obj = s.scalars(stmt).first() + + if ( + use_same_labelname == "same_name" + and attr_has_default == "has_default" + and use_core == "orm" + ): + eq_(a_obj.my_expr, 15) + else: + eq_(a_obj.my_expr, 12) + + @testing.combinations("core", "orm", argnames="use_core") + @testing.combinations( + "from_statement", "aliased", argnames="use_from_statement" + ) + @testing.combinations( + "same_name", "different_name", argnames="use_same_labelname" + ) + @testing.combinations( + "has_default", "no_default", argnames="attr_has_default" + ) + def test_expr_from_subq_union( + self, + use_core, + use_from_statement, + use_same_labelname, + attr_has_default, + ): + """test #8881""" + + if attr_has_default == "has_default": + A = self.classes.A_default + else: + A = self.classes.A + + s = fixture_session() + + if use_same_labelname == "same_name": + labelname = "my_expr" + else: + labelname = "hi" + + if use_core 
== "core": + stmt = union_all( + select(A.__table__, literal(12).label(labelname)).where( + A.__table__.c.id == 1 + ), + select(A.__table__, literal(18).label(labelname)).where( + A.__table__.c.id == 2 + ), + ) + + else: + stmt = union_all( + select(A, literal(12).label(labelname)).where(A.id == 1), + select(A, literal(18).label(labelname)).where(A.id == 2), + ) + + if use_from_statement == "aliased": + subq = stmt.subquery() + a1 = aliased(A, subq) + stmt = select(a1).options( + with_expression(a1.my_expr, subq.c[labelname]) + ) + else: + subq = stmt + stmt = ( + select(A) + .options( + with_expression( + A.my_expr, subq.selected_columns[labelname] + ) + ) + .from_statement(subq) + ) + + a_objs = s.scalars(stmt).all() + + if ( + use_same_labelname == "same_name" + and attr_has_default == "has_default" + and use_core == "orm" + ): + eq_(a_objs[0].my_expr, 15) + eq_(a_objs[1].my_expr, 15) + else: + eq_(a_objs[0].my_expr, 12) + eq_(a_objs[1].my_expr, 18) + class RaiseLoadTest(fixtures.DeclarativeMappedTest): @classmethod diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 692a29b3069..bbfcd0cfd39 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -77,6 +77,7 @@ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.fixtures import CacheKeyFixture from sqlalchemy.testing.fixtures import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.mock import call @@ -90,8 +91,14 @@ from .inheritance._poly_fixtures import _Polymorphic from .inheritance._poly_fixtures import Company from .inheritance._poly_fixtures import Engineer +from .inheritance._poly_fixtures import Manager +from .inheritance._poly_fixtures import Person from .test_ac_relationships import PartitionByFixture from .test_bind import GetBindTest as _GetBindTest +from .test_default_strategies import ( + DefaultStrategyOptionsTest as _DefaultStrategyOptionsTest, +) +from .test_deferred import InheritanceTest as _deferred_InheritanceTest from .test_dynamic import _DynamicFixture from .test_events import _RemoveListeners from .test_options import PathTest as OptionsPathTest @@ -99,7 +106,6 @@ from .test_options import QueryTest as OptionsQueryTest from .test_query import QueryTest from .test_transaction import _LocalFixture -from ..sql.test_compare import CacheKeyFixture join_aliased_dep = ( @@ -166,6 +172,13 @@ r"The merge_result\(\) function is considered legacy as of the 1.x series" ) +dep_exc_wildcard = ( + r"The undocumented `.{WILDCARD}` format is deprecated and will be removed " + r"in a future version as it is believed to be unused. If you have been " + r"using this functionality, please comment on Issue #4390 on the " + r"SQLAlchemy project tracker." +) + def _aliased_join_warning(arg=None): return testing.expect_warnings( @@ -1870,8 +1883,8 @@ def test_clause_onclause(self): ) def test_from_self_resets_joinpaths(self): - """test a join from from_self() doesn't confuse joins inside the subquery - with the outside. + """test a join from from_self() doesn't confuse joins inside the + subquery with the outside. 
""" Item, Keyword = self.classes.Item, self.classes.Keyword @@ -3138,7 +3151,7 @@ def test_multiple_adaption(self): mach_alias = machines.select() # note python 2 does not allow parens here; reformat in py3 only - with DeprecatedQueryTest._expect_implicit_subquery(), _aliased_join_warning( # noqa E501 + with DeprecatedQueryTest._expect_implicit_subquery(), _aliased_join_warning( # noqa: E501 "Person->people" ): self.assert_compile( @@ -3871,7 +3884,7 @@ def test_illegal_non_primary(self): with testing.expect_deprecated( "The mapper.non_primary parameter is deprecated" ): - m = self.mapper_registry.map_imperatively( # noqa F841 + m = self.mapper_registry.map_imperatively( # noqa: F841 User, users, non_primary=True, @@ -3934,7 +3947,7 @@ def test_illegal_non_primary_legacy(self): with testing.expect_deprecated( "The mapper.non_primary parameter is deprecated" ): - m = mapper( # noqa F841 + m = mapper( # noqa: F841 User, users, non_primary=True, @@ -6403,6 +6416,39 @@ class InheritedJoinTest( run_setup_mappers = "once" __dialect__ = "default" + def test_join_w_subq_adapt(self): + """test #8162""" + + Company, Manager, Engineer = self.classes( + "Company", "Manager", "Engineer" + ) + + sess = fixture_session() + + with _aliased_join_warning(): + self.assert_compile( + sess.query(Engineer) + .join(Company, Company.company_id == Engineer.company_id) + .outerjoin(Manager, Company.company_id == Manager.company_id) + .filter(~Engineer.company.has()), + "SELECT engineers.person_id AS engineers_person_id, " + "people.person_id AS people_person_id, " + "people.company_id AS people_company_id, " + "people.name AS people_name, people.type AS people_type, " + "engineers.status AS engineers_status, " + "engineers.engineer_name AS engineers_engineer_name, " + "engineers.primary_language AS engineers_primary_language " + "FROM people JOIN engineers " + "ON people.person_id = engineers.person_id " + "JOIN companies ON companies.company_id = people.company_id " + "LEFT OUTER JOIN (people AS people_1 JOIN managers AS " + "managers_1 ON people_1.person_id = managers_1.person_id) " + "ON companies.company_id = people_1.company_id " + "WHERE NOT (EXISTS (SELECT 1 FROM companies " + "WHERE companies.company_id = people.company_id))", + use_default_dialect=True, + ) + def test_load_only_alias_subclass(self): Manager = self.classes.Manager @@ -8140,7 +8186,7 @@ def test_aliased_class_vs_nonaliased(self): lambda users: users.select().where(users.c.id.in_([7, 8])), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.name = :name_1", ), ( @@ -8150,14 +8196,14 @@ def test_aliased_class_vs_nonaliased(self): "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name " "AS anon_1_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.users_name = :name_1", ), ( lambda User, sess: sess.query(User).where(User.id.in_([7, 8])), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.name = :name_1", ), ) @@ -8671,7 +8717,7 @@ def 
test_differentiate_self_external(self): "users_1.name AS users_1_name " "FROM users AS users_1, (" "SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8684,7 +8730,8 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, " "users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN " + "(__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8697,7 +8744,7 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, " "users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " - "users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8712,7 +8759,7 @@ def test_differentiate_self_external(self): "FROM " "(SELECT users.id AS id, users.name AS name " "FROM users WHERE users.id " - "IN ([POSTCOMPILE_id_1])) AS anon_1 " + "IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -9578,3 +9625,83 @@ def kt(*x): [(x and x.id or None, y and y.id or None) for x, y in it], [(u1.id, u2.id), (u1.id, None), (u2.id, u3.id)], ) + + +class DefaultStrategyOptionsTest(_DefaultStrategyOptionsTest): + def test_joined_path_wildcards(self): + sess = self._upgrade_fixture() + users = [] + + User, Order, Item = self.classes("User", "Order", "Item") + + # test upgrade all to joined: 1 sql + def go(): + users[:] = ( + sess.query(User) + .options(joinedload(".*")) + .options(defaultload(User.addresses).joinedload("*")) + .options(defaultload(User.orders).joinedload("*")) + .options( + defaultload(User.orders) + .defaultload(Order.items) + .joinedload("*") + ) + .order_by(self.classes.User.id) + .all() + ) + + with assertions.expect_deprecated(dep_exc_wildcard): + self.assert_sql_count(testing.db, go, 1) + self._assert_fully_loaded(users) + + def test_subquery_path_wildcards(self): + sess = self._upgrade_fixture() + users = [] + + User, Order = self.classes("User", "Order") + + # test upgrade all to subquery: 1 sql + 4 relationships = 5 + def go(): + users[:] = ( + sess.query(User) + .options(subqueryload(".*")) + .options(defaultload(User.addresses).subqueryload("*")) + .options(defaultload(User.orders).subqueryload("*")) + .options( + defaultload(User.orders) + .defaultload(Order.items) + .subqueryload("*") + ) + .order_by(User.id) + .all() + ) + + with assertions.expect_deprecated(dep_exc_wildcard): + self.assert_sql_count(testing.db, go, 5) + + # verify everything loaded, with no additional sql needed + self._assert_fully_loaded(users) + + +class Deferred_InheritanceTest(_deferred_InheritanceTest): + def test_defer_on_wildcard_subclass(self): + # pretty much the same as load_only except doesn't + # exclude the primary key + + # what is ".*"? this is not documented anywhere, how did this + # get implemented without docs ? 
see #4390 + s = fixture_session() + with assertions.expect_deprecated(dep_exc_wildcard): + q = ( + s.query(Manager) + .order_by(Person.person_id) + .options(defer(".*"), undefer(Manager.status)) + ) + self.assert_compile( + q, + "SELECT managers.status AS managers_status " + "FROM people JOIN managers ON " + "people.person_id = managers.person_id ORDER BY people.person_id", + ) + # note this doesn't apply to "bound" loaders since they don't seem + # to have this ".*" featue. diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py index 8efd4523820..3997aa9710c 100644 --- a/test/orm/test_dynamic.py +++ b/test/orm/test_dynamic.py @@ -1,10 +1,13 @@ from sqlalchemy import cast +from sqlalchemy import Column from sqlalchemy import desc from sqlalchemy import exc +from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.orm import attributes from sqlalchemy.orm import backref @@ -13,8 +16,10 @@ from sqlalchemy.orm import noload from sqlalchemy.orm import Query from sqlalchemy.orm import relationship +from sqlalchemy.orm.session import make_transient_to_detached from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises_message @@ -124,6 +129,8 @@ def _user_order_item_fixture(self): class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL): + __dialect__ = "default" + def test_basic(self): User, Address = self._user_address_fixture() sess = fixture_session() @@ -231,6 +238,31 @@ def my_filter(self, arg): use_default_dialect=True, ) + @testing.combinations( + ("all", []), + ("one", exc.NoResultFound), + ("one_or_none", None), + argnames="method, expected", + ) + @testing.variation("add_to_session", [True, False]) + def test_transient_raise(self, method, expected, add_to_session): + """test 11562""" + User, Address = self._user_address_fixture() + + u1 = User(name="u1") + if add_to_session: + sess = fixture_session() + sess.add(u1) + + meth = getattr(u1.addresses, method) + if expected is exc.NoResultFound: + with expect_raises_message( + exc.NoResultFound, "No row was found when one was required" + ): + meth() + else: + eq_(meth(), expected) + def test_detached_raise(self): """so filtering on a detached dynamic list raises an error...""" @@ -321,7 +353,7 @@ def test_no_m2o_w_uselist(self): }, ) self.mapper_registry.map_imperatively(User, users) - assert_raises_message( + assert_warns_message( exc.SAWarning, "On relationship Address.user, 'dynamic' loaders cannot be " "used with many-to-one/one-to-one relationships and/or " @@ -597,11 +629,17 @@ def test_secondary_as_join(self): ) }, ) - self.mapper_registry.map_imperatively(Item, items) + item_mapper = self.mapper_registry.map_imperatively(Item, items) sess = fixture_session() + u1 = sess.query(User).first() + dyn = u1.items + + # test for #7868 + eq_(dyn._from_obj[0]._annotations["parententity"], item_mapper) + self.assert_compile( u1.items, "SELECT items.id AS items_id, " @@ -613,6 +651,62 @@ def test_secondary_as_join(self): use_default_dialect=True, ) + def test_secondary_as_join_complex_entity(self, registry): + """integration test for #7868""" + Base = registry.generate_base() + + class GrandParent(Base): + __tablename__ = 
"grandparent" + id = Column(Integer, primary_key=True) + + grand_children = relationship( + "Child", secondary="parent", lazy="dynamic", viewonly=True + ) + + class Parent(Base): + __tablename__ = "parent" + id = Column(Integer, primary_key=True) + grand_parent_id = Column( + Integer, ForeignKey("grandparent.id"), nullable=False + ) + + class Child(Base): + __tablename__ = "child" + id = Column(Integer, primary_key=True) + type = Column(String) + parent_id = Column( + Integer, ForeignKey("parent.id"), nullable=False + ) + + __mapper_args__ = { + "polymorphic_on": type, + "polymorphic_identity": "unknown", + "with_polymorphic": "*", + } + + class SubChild(Child): + __tablename__ = "subchild" + id = Column(Integer, ForeignKey("child.id"), primary_key=True) + + __mapper_args__ = { + "polymorphic_identity": "sub", + } + + gp = GrandParent(id=1) + make_transient_to_detached(gp) + sess = fixture_session() + sess.add(gp) + self.assert_compile( + gp.grand_children.filter_by(id=1), + "SELECT child.id AS child_id, child.type AS child_type, " + "child.parent_id AS child_parent_id, subchild.id AS subchild_id " + "FROM parent, child LEFT OUTER JOIN subchild " + "ON child.id = subchild.id " + "WHERE :param_1 = parent.grand_parent_id " + "AND parent.id = child.parent_id AND child.id = :id_1", + {"id_1": 1}, + ) + def test_secondary_doesnt_interfere_w_join_to_fromlist(self): # tests that the "secondary" being added to the FROM # as part of [ticket:4349] does not prevent a subsequent join to diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index 32abc3b31c4..fb7550e0ea3 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -31,8 +31,8 @@ from sqlalchemy.orm import undefer from sqlalchemy.sql import operators from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures @@ -86,6 +86,130 @@ def test_basic(self): ) eq_(self.static.user_address_result, q.order_by(User.id).all()) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + def go(): + if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(joinedload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(joinedload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + # joinedload can't be applied here so this necessarily + # has to lazy load the addresses + self.assert_sql_count(testing.db, go, 2) + + @testing.combinations(True, False) + def test_from_statement_contains_eager(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + 
order_by=Address.id, + ) + }, + ) + + sess = fixture_session() + + # for contains_eager, Address.id is enough for it to be picked up + stmt = ( + select(User, Address.id).where(User.id == 7).join(User.addresses) + ) + + def go(): + if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(contains_eager(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(contains_eager(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + # joinedload can't be applied here so this necessarily + # has to lazy load the addresses + self.assert_sql_count(testing.db, go, 1) + + def test_column_property_adaptation(self, decl_base): + """test #2316 in support of #8064""" + + class A(decl_base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + type = Column(String(40), nullable=False) + __mapper_args__ = {"polymorphic_on": type} + + A.anything = column_property(A.id + 1000) + + class B(A): + __tablename__ = "b" + account_id = Column(Integer, ForeignKey("a.id"), primary_key=True) + x_id = Column(Integer, ForeignKey("x.id"), nullable=False) + __mapper_args__ = {"polymorphic_identity": "named"} + + class X(decl_base): + __tablename__ = "x" + id = Column(Integer, primary_key=True) + b = relationship("B") + + self.assert_compile( + select(X).options(joinedload(X.b)), + "SELECT x.id, a_1.id AS id_1, a_1.type, a_1.id + :id_2 AS anon_1, " + "b_1.account_id, b_1.x_id FROM x " + "LEFT OUTER JOIN " + "(a AS a_1 JOIN b AS b_1 ON a_1.id = b_1.account_id) " + "ON x.id = b_1.x_id", + ) + def test_no_render_in_subquery(self): """test #6378""" @@ -2049,7 +2173,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all ) @@ -3011,10 +3135,14 @@ def test_many_to_one(self): eq_(result.scalars().all(), self.static.address_user_result) - def test_unique_error(self): + @testing.combinations(joinedload, contains_eager) + def test_unique_error(self, opt): User = self.classes.User - stmt = select(User).options(joinedload(User.addresses)) + stmt = select(User).options(opt(User.addresses)) + if opt is contains_eager: + stmt = stmt.join(User.addresses) + s = fixture_session() result = s.execute(stmt) @@ -6068,153 +6196,179 @@ def go(): def test_lazyload_aliased_abs_bcs_one(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(joinedload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(joinedload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_aliased_abs_bcs_two(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(defaultload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(defaultload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_pathed_lazyload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = 
fixture_session() - aa = aliased(A) - opt = Load(A).joinedload(A.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 3) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(A).joinedload(A.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 3) def test_pathed_lazyload_plus_joined_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - opt = Load(aa).defaultload(aa.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 2) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(aa).defaultload(aa.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 2) def test_pathed_joinedload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - opt = Load(aa).joinedload(aa.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 1) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(aa).joinedload(aa.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 1) def test_lazyload_plus_joined_aliased_abs_bcs(self): + """by running the test twice, this test includes a test + for #7447 to ensure cached queries apply the cached option objects + to the InstanceState which line up with the cached current_path.""" + A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(defaultload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 2) + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(defaultload(aa.bs).joinedload(B.cs)) + ) + + self._run_tests(q, 2) def test_joinedload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(joinedload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 1) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(joinedload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 1) def test_lazyload_unaliased_abs_bcs_one(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(joinedload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(joinedload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_unaliased_abs_bcs_two(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( 
- s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(defaultload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(defaultload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_plus_joined_unaliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(defaultload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 2) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(defaultload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 2) def test_joinedload_unaliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(joinedload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 1) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(joinedload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 1) class EntityViaMultiplePathTestThree(fixtures.DeclarativeMappedTest): diff --git a/test/orm/test_evaluator.py b/test/orm/test_evaluator.py index 62acca58270..5902264e36e 100644 --- a/test/orm/test_evaluator.py +++ b/test/orm/test_evaluator.py @@ -5,15 +5,19 @@ from sqlalchemy import ForeignKey from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import JSON from sqlalchemy import not_ from sqlalchemy import or_ from sqlalchemy import String +from sqlalchemy import testing from sqlalchemy import tuple_ from sqlalchemy.orm import evaluator from sqlalchemy.orm import exc as orm_exc from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -50,6 +54,7 @@ def define_tables(cls, metadata): Column("id", Integer, primary_key=True), Column("name", String(64)), Column("othername", String(64)), + Column("json", JSON), ) @classmethod @@ -200,6 +205,24 @@ def test_boolean_ops(self): ], ) + @testing.combinations( + lambda User: User.name + "_foo" == "named_foo", + # not implemented in 1.4 + # lambda User: User.name.startswith("nam"), + # lambda User: User.name.endswith("named"), + ) + def test_string_ops(self, expr): + User = self.classes.User + + test_expr = testing.resolve_lambda(expr, User=User) + eval_eq( + test_expr, + testcases=[ + (User(name="named"), True), + (User(name="othername"), False), + ], + ) + def test_in(self): User = self.classes.User @@ -268,6 +291,66 @@ def test_null_propagation(self): ], ) + @testing.combinations( + (lambda User: User.id + 5, "id", 10, 15, None), + ( + lambda User: User.name + " name", + "name", + "some value", + "some value name", + None, + ), + ( + lambda User: User.id + "name", + "id", + 10, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"add\" for " + r"datatypes INTEGER, VARCHAR", + ), + ( + lambda 
User: User.json + 12, + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"add\" for " + r"datatypes JSON, INTEGER", + ), + ( + lambda User: User.json - 12, + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"sub\" for " + r"datatypes JSON, INTEGER", + ), + ( + lambda User: User.json - "foo", + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"sub\" for " + r"datatypes JSON, VARCHAR", + ), + ) + def test_math_op_type_exclusions( + self, expr, attrname, initial_value, expected, message + ): + """test #8507""" + + User = self.classes.User + + expr = testing.resolve_lambda(expr, User=User) + + if expected is evaluator.UnevaluatableError: + with expect_raises_message(evaluator.UnevaluatableError, message): + compiler.process(expr) + else: + obj = User(**{attrname: initial_value}) + + new_value = compiler.process(expr)(obj) + eq_(new_value, expected) + class M2OEvaluateTest(fixtures.DeclarativeMappedTest): @classmethod diff --git a/test/orm/test_events.py b/test/orm/test_events.py index 4dfea6a6de8..052b9e01637 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -8,6 +8,7 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import text from sqlalchemy import update from sqlalchemy.orm import attributes from sqlalchemy.orm import class_mapper @@ -32,8 +33,10 @@ from sqlalchemy.sql.traversals import NO_CACHE from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_not @@ -199,7 +202,7 @@ def _gen_cache_key(self, anon_map, bindparams): def go(context): for elem in context.user_defined_options: if isinstance(elem, SetShardOption): - m1.update_execution_options(_sa_shard_id=elem.payload) + m1.do_some_mock_thing(_sa_shard_id=elem.payload) stmt = select(User).options( loader_opt(User.addresses).options(loader_opt(Address.dingaling)), @@ -215,21 +218,15 @@ def go(context): loader_opt(User.addresses).options(loader_opt(Address.dingaling)), SetShardOption("some_other_shard"), ) + for u in s.execute(stmt).unique().scalars(): for a in u.addresses: a.dingaling eq_( m1.mock_calls, - ( - [call.update_execution_options(_sa_shard_id="some_shard")] - * num_opts - ) + ([call.do_some_mock_thing(_sa_shard_id="some_shard")] * num_opts) + ( - [ - call.update_execution_options( - _sa_shard_id="some_other_shard" - ) - ] + [call.do_some_mock_thing(_sa_shard_id="some_other_shard")] * num_opts ), ) @@ -296,6 +293,37 @@ def do_orm_execute(ctx): return canary + @testing.combinations( + (lambda: select(1), True), + (lambda User: select(User).union(select(User)), True), + (lambda: text("select * from users"), False), + ) + def test_non_orm_statements(self, stmt, is_select): + sess = Session(testing.db, future=True) + + canary = self._flag_fixture(sess) + + User, Address = self.classes("User", "Address") + stmt = testing.resolve_lambda(stmt, User=User) + sess.execute(stmt).all() + + eq_( + canary.mock_calls, + [ + call.options( + bind_mapper=None, + all_mappers=[], + is_select=is_select, + is_update=False, + is_delete=False, + is_orm_statement=False, + is_relationship_load=False, + 
is_column_load=False, + lazy_loaded_from=None, + ) + ], + ) + def test_all_mappers_accessor_one(self): User, Address = self.classes("User", "Address") @@ -1061,7 +1089,7 @@ def test_before_after_configured_warn_on_non_mapper(self): m1 = Mock() self.mapper_registry.map_imperatively(User, users) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"before_configured' and 'after_configured' ORM events only " r"invoke with the mapper\(\) function or Mapper class as " @@ -1072,7 +1100,7 @@ def test_before_after_configured_warn_on_non_mapper(self): m1, ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"before_configured' and 'after_configured' ORM events only " r"invoke with the mapper\(\) function or Mapper class as " @@ -1213,6 +1241,7 @@ class Animal(AnotherBase): # not been loaded yet (Employer), and therefore cannot be configured: class Mammal(Animal): nonexistent = relationship("Nonexistent") + __mapper_args__ = {"polymorphic_identity": "mammal"} # These new classes should not be configured at this point: unconfigured = list(mapperlib._unconfigured_mappers()) @@ -2082,6 +2111,35 @@ def my_listener(*arg, **kw): s = fixture_session() assert my_listener in s.dispatch.before_flush + @testing.combinations(True, False, argnames="m1") + @testing.combinations(True, False, argnames="m2") + @testing.combinations(True, False, argnames="m3") + @testing.combinations(True, False, argnames="use_insert") + def test_sessionmaker_gen_after_session_listen( + self, m1, m2, m3, use_insert + ): + m1 = Mock() if m1 else None + m2 = Mock() if m2 else None + m3 = Mock() if m3 else None + + if m1: + event.listen(Session, "before_flush", m1, insert=use_insert) + + factory = sessionmaker() + + if m2: + event.listen(factory, "before_flush", m2, insert=use_insert) + + if m3: + event.listen(factory, "before_flush", m3, insert=use_insert) + + st = factory() + st.dispatch.before_flush() + + for m in m1, m2, m3: + if m: + eq_(m.mock_calls, [call()]) + def test_sessionmaker_listen(self): """test that listen can be applied to individual scoped_session() classes.""" @@ -2199,7 +2257,12 @@ def test_rollback_hook(self): u2 = User(name="u1", id=1) sess.add(u2) - assert_raises(sa.exc.SAWarning, sess.commit) + + with expect_raises(sa.exc.IntegrityError), expect_warnings( + "New instance" + ): + sess.commit() + sess.rollback() eq_( canary, @@ -2251,7 +2314,11 @@ def do_something(session, previous_transaction): u2 = User(name="u1", id=1) sess.add(u2) - assert_raises(sa.exc.SAWarning, sess.commit) + with expect_raises(sa.exc.IntegrityError), expect_warnings( + "New instance" + ): + sess.commit() + sess.rollback() eq_(assertions, [True, True]) diff --git a/test/orm/test_expire.py b/test/orm/test_expire.py index 5a12a7da4bf..5c958b15096 100644 --- a/test/orm/test_expire.py +++ b/test/orm/test_expire.py @@ -25,6 +25,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CountStatements from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -846,6 +847,58 @@ def test_relationship_changes_preserved(self): assert "name" in u.__dict__ assert len(u.addresses) == 2 + @testing.combinations( + (True, False), + (False, False), + (False, True), + ) + def test_skip_options_that_dont_match(self, test_control_case, do_expire): + """test #7318""" + + User, Address, Order = 
self.classes("User", "Address", "Order") + users, addresses, orders = self.tables("users", "addresses", "orders") + + self.mapper_registry.map_imperatively(Order, orders) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, backref="user", lazy="joined" + ), + "orders": relationship(Order), + }, + ) + self.mapper_registry.map_imperatively(Address, addresses) + sess = fixture_session() + + if test_control_case: + # this would be the error we are skipping, make sure it happens + # for up front + with expect_raises_message( + sa.exc.ArgumentError, + 'Mapped attribute "User.addresses" does not apply to ' + "any of the root entities in this query", + ): + row = sess.execute( + select(Order).options(joinedload(User.addresses)) + ).first() + else: + stmt = ( + select(User, Order) + .join_from(User, Order) + .options(joinedload(User.addresses)) + .order_by(User.id, Order.id) + ) + + row = sess.execute(stmt).first() + + u1, o1 = row + if do_expire: + sess.expire(o1) + eq_(o1.description, "order 1") + def test_mapper_joinedload_props_load(self): users, Address, addresses, User = ( self.tables.users, @@ -1026,7 +1079,11 @@ def test_partial_expire_lazy(self): self.mapper_registry.map_imperatively( User, users, - properties={"addresses": relationship(Address, backref="user")}, + properties={ + "addresses": relationship( + Address, backref="user", order_by=addresses.c.id + ) + }, ) self.mapper_registry.map_imperatively(Address, addresses) diff --git a/test/orm/test_froms.py b/test/orm/test_froms.py index af3dd8a60f3..9585da125b4 100644 --- a/test/orm/test_froms.py +++ b/test/orm/test_froms.py @@ -573,6 +573,16 @@ def test_aliases(self): q = s.query(uq1.name, uq2.name).order_by(uq1.name, uq2.name) + self.assert_compile( + q, + "SELECT anon_1.name AS anon_1_name, anon_1.name_1 AS " + "anon_1_name_1 FROM " + "(SELECT users.id AS id, users.name AS name, users_1.id AS id_1, " + "users_1.name AS name_1 FROM users, users AS users_1 " + "WHERE users.id > users_1.id) AS anon_1 " + "ORDER BY anon_1.name, anon_1.name_1", + ) + eq_( q.all(), [ @@ -613,6 +623,158 @@ def test_aliases(self): ], ) + def test_nested_aliases_none_to_none(self): + """test #7576""" + + User = self.classes.User + + u1 = aliased(User) + u2 = aliased(u1) + + self.assert_compile( + select(u2), "SELECT users_1.id, users_1.name FROM users AS users_1" + ) + + def test_nested_alias_none_to_subquery(self): + """test #7576""" + + User = self.classes.User + + subq = select(User.id, User.name).subquery() + + u1 = aliased(User, subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + u2 = aliased(u1) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + def test_nested_alias_subquery_to_subquery_w_replace(self): + """test #7576""" + + User = self.classes.User + + subq = select(User.id, User.name).subquery() + + u1 = aliased(User, subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + u2 = aliased(u1, subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + def test_nested_alias_subquery_to_subquery_w_adaption(self): + """test #7576""" + + User = self.classes.User + + 
inner_subq = select(User.id, User.name).subquery() + + u1 = aliased(User, inner_subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + outer_subq = select(u1.id, u1.name).subquery() + + u2 = aliased(u1, outer_subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT anon_2.id AS id, anon_2.name AS name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) " + "AS anon_2) AS anon_1", + ) + + outer_subq = ( + select(u1.id, u1.name, User.id, User.name) + .where(u1.id > User.id) + .subquery() + ) + u2 = aliased(u1, outer_subq) + + # query here is: + # SELECT derived_from_inner_subq.id, derived_from_inner_subq.name + # FROM ( + # SELECT ... FROM inner_subq, users WHERE inner_subq.id > users.id + # ) as outer_subq + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT anon_2.id AS id, anon_2.name AS name, users.id AS id_1, " + "users.name AS name_1 FROM " + "(SELECT users.id AS id, users.name AS name FROM users) " + "AS anon_2, users " + "WHERE anon_2.id > users.id) AS anon_1", + ) + + def test_nested_alias_subquery_w_alias_to_none(self): + """test #7576""" + + User = self.classes.User + + u1 = aliased(User) + + self.assert_compile( + select(u1), "SELECT users_1.id, users_1.name FROM users AS users_1" + ) + + subq = ( + select(User.id, User.name, u1.id, u1.name) + .where(User.id > u1.id) + .subquery() + ) + + # aliased against aliased w/ subquery means, look for u1 inside the + # given subquery. adapt that. + u2 = aliased(u1, subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id_1, anon_1.name_1 FROM " + "(SELECT users.id AS id, users.name AS name, " + "users_1.id AS id_1, users_1.name AS name_1 " + "FROM users, users AS users_1 " + "WHERE users.id > users_1.id) AS anon_1", + ) + + subq = select(User.id, User.name).subquery() + u2 = aliased(u1, subq) + + # given that, it makes sense that if we remove "u1" from the subquery, + # we get a second FROM element like below. + # this is actually a form of the "wrong" query that was + # reported in #7576, but this is the case where we have a subquery, + # so yes, we need to adapt the "inner" alias to it. 
+ + self.assert_compile( + select(u2), + "SELECT users_1.id, users_1.name FROM users AS users_1, " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + def test_multiple_entities(self): User, Address = self.classes.User, self.classes.Address @@ -2779,7 +2941,7 @@ def test_differentiate_self_external(self): sess.query(User).join(sel, User.id > sel.c.id), "SELECT users.id AS users_id, users.name AS users_name FROM " "users JOIN (SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) " "AS anon_1 ON users.id > anon_1.id", ) @@ -2788,7 +2950,7 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users AS users_1, (" "SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2797,7 +2959,7 @@ def test_differentiate_self_external(self): sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2806,7 +2968,7 @@ def test_differentiate_self_external(self): sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " - "users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2816,7 +2978,7 @@ def test_differentiate_self_external(self): sess.query(salias).join(ualias, ualias.id > salias.id), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM " "(SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) diff --git a/test/orm/test_hasparent.py b/test/orm/test_hasparent.py index 425dd947d4c..8f61c11970d 100644 --- a/test/orm/test_hasparent.py +++ b/test/orm/test_hasparent.py @@ -1,5 +1,4 @@ """test the current state of the hasparent() flag.""" - from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import testing @@ -26,6 +25,10 @@ class ParentRemovalTest(fixtures.MappedTest): run_inserts = None + # trying to push GC to do a better job + run_setup_classes = "each" + run_setup_mappers = "each" + @classmethod def define_tables(cls, metadata): if testing.against("oracle"): @@ -173,12 +176,24 @@ def test_stale_state_negative_child_expired(self): """ User = self.classes.User s, u1, a1 = self._fixture() + gc_collect() u2 = User(addresses=[a1]) # noqa s.expire(a1) u1.addresses.remove(a1) + u2_is = u2._sa_instance_state + del u2 + + for i in range(5): + gc_collect() + # heisenberg the GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + o = u2_is.obj() + assert o is None + # controversy here. 
The action is # to expire one object, not the other, and remove; # this is pretty abusive in any case. for now @@ -192,13 +207,23 @@ def test_stale_state_negative_child_expired(self): def test_stale_state_negative(self): User = self.classes.User s, u1, a1 = self._fixture() + gc_collect() u2 = User(addresses=[a1]) s.add(u2) s.flush() s._expunge_states([attributes.instance_state(u2)]) + + u2_is = u2._sa_instance_state del u2 - gc_collect() + + for i in range(5): + gc_collect() + # heisenberg the GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + o = u2_is.obj() + assert o is None assert_raises_message( orm_exc.StaleDataError, diff --git a/test/orm/test_inspect.py b/test/orm/test_inspect.py index 3cc7640cf03..0ffc9e86e01 100644 --- a/test/orm/test_inspect.py +++ b/test/orm/test_inspect.py @@ -440,14 +440,16 @@ def _random_names(self): import random import keyword - names = { - "".join( - random.choice("abcdegfghijklmnopqrstuvwxyz") - for i in range(random.randint(3, 15)) - ) - for j in range(random.randint(4, 12)) - } - return list(names.difference(keyword.kwlist)) + def _random_name(): + while True: + name = "".join( + random.choice("abcdegfghijklmnopqrstuvwxyz") + for i in range(random.randint(5, 15)) + ) + if name not in keyword.kwlist: + return name + + return [_random_name() for i in range(random.randint(8, 15))] def _ordered_name_fixture(self, glbls, clsname, base, supercls): import random diff --git a/test/orm/test_instrumentation.py b/test/orm/test_instrumentation.py index a2d4aa9cac2..73aefe1f05a 100644 --- a/test/orm/test_instrumentation.py +++ b/test/orm/test_instrumentation.py @@ -10,7 +10,7 @@ from sqlalchemy.orm import instrumentation from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises -from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import ne_ @@ -525,7 +525,7 @@ class A(object): def __del__(self): pass - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"__del__\(\) method on class " r" will cause " diff --git a/test/orm/test_lambdas.py b/test/orm/test_lambdas.py index 5274271d9fa..6de702ad4b0 100644 --- a/test/orm/test_lambdas.py +++ b/test/orm/test_lambdas.py @@ -219,7 +219,7 @@ def test_lambdas_rejected_in_options(self, plain_fixture): assert_raises_message( exc.ArgumentError, - "Cacheable Core or ORM object expected, got", + "ExecutionOption Core or ORM object expected, got", select(lambda: User).options, lambda: subqueryload(User.addresses), ) diff --git a/test/orm/test_lazy_relations.py b/test/orm/test_lazy_relations.py index 3ebff5f43bc..ee578ff50d1 100644 --- a/test/orm/test_lazy_relations.py +++ b/test/orm/test_lazy_relations.py @@ -25,6 +25,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import with_parent from sqlalchemy.testing import assert_raises +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -369,7 +370,7 @@ def test_uselist_false_warning(self): self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() u1 = s.query(User).filter(User.id == 7).one() - assert_raises(sa.exc.SAWarning, getattr, u1, "order") + assert_warns(sa.exc.SAWarning, getattr, u1, "order") def test_callable_bind(self): Address, addresses, users, User = ( diff --git 
a/test/orm/test_loading.py b/test/orm/test_loading.py index 88a160b5a83..d0b5c9d8f9c 100644 --- a/test/orm/test_loading.py +++ b/test/orm/test_loading.py @@ -1,12 +1,17 @@ from sqlalchemy import exc +from sqlalchemy import literal +from sqlalchemy import literal_column from sqlalchemy import select from sqlalchemy import testing +from sqlalchemy import text from sqlalchemy.orm import loading from sqlalchemy.orm import relationship +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from . import _fixtures @@ -14,6 +19,90 @@ # class LoadOnIdentTest(_fixtures.FixtureTest): +class SelectStarTest(_fixtures.FixtureTest): + run_setup_mappers = "once" + run_inserts = "once" + run_deletes = None + + @classmethod + def setup_mappers(cls): + cls._setup_stock_mapping() + + @testing.combinations( + "plain", "text", "literal_column", argnames="exprtype" + ) + @testing.combinations("core", "orm", argnames="coreorm") + def test_single_star(self, exprtype, coreorm): + """test for #8235""" + User, Address = self.classes("User", "Address") + + if exprtype == "plain": + star = "*" + elif exprtype == "text": + star = text("*") + elif exprtype == "literal_column": + star = literal_column("*") + else: + assert False + + stmt = ( + select(star) + .select_from(User) + .join(Address) + .where(User.id == 7) + .order_by(User.id, Address.id) + ) + + s = fixture_session() + + if coreorm == "core": + result = s.connection().execute(stmt) + elif coreorm == "orm": + result = s.execute(stmt) + else: + assert False + + eq_(result.all(), [(7, "jack", 1, 7, "jack@bean.com")]) + + @testing.combinations( + "plain", "text", "literal_column", argnames="exprtype" + ) + @testing.combinations( + lambda User, star: (star, User.id), + lambda User, star: (star, User), + lambda User, star: (User.id, star), + lambda User, star: (User, star), + lambda User, star: (literal("some text"), star), + lambda User, star: (star, star), + lambda User, star: (star, text("some text")), + argnames="testcase", + ) + def test_no_star_orm_combinations(self, exprtype, testcase): + """test for #8235""" + User = self.classes.User + + if exprtype == "plain": + star = "*" + elif exprtype == "text": + star = text("*") + elif exprtype == "literal_column": + star = literal_column("*") + else: + assert False + + args = testing.resolve_lambda(testcase, User=User, star=star) + stmt = select(*args).select_from(User) + + s = fixture_session() + + with expect_raises_message( + exc.CompileError, + r"Can't generate ORM query that includes multiple expressions " + r"at the same time as '\*';", + ): + s.execute(stmt) + + class InstanceProcessorTest(_fixtures.FixtureTest): def test_state_no_load_path_comparison(self): # test issue #5110 @@ -64,6 +153,24 @@ class InstancesTest(_fixtures.FixtureTest): def setup_mappers(cls): cls._setup_stock_mapping() + def test_cursor_close_exception_raised_in_iteration(self): + """test #8710""" + + User = self.classes.User + s = fixture_session() + + stmt = select(User).execution_options(yield_per=1) + + result = s.execute(stmt) + raw_cursor = result.raw + + for row in result: + with expect_raises_message(Exception, "whoops"): + for row in result: + raise Exception("whoops") + + is_true(raw_cursor._soft_closed) + def 
test_cursor_close_w_failed_rowproc(self): User = self.classes.User s = fixture_session() diff --git a/test/orm/test_lockmode.py b/test/orm/test_lockmode.py index e073754848e..b296f22409d 100644 --- a/test/orm/test_lockmode.py +++ b/test/orm/test_lockmode.py @@ -345,7 +345,8 @@ def test_for_update_on_inner_w_joinedload_no_render_oracle(self): "FROM (SELECT anon_2.users_id AS users_id, " "anon_2.users_name AS users_name FROM " "(SELECT users.id AS users_id, users.name AS users_name " - "FROM users) anon_2 WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 " + "FROM users) anon_2 WHERE ROWNUM <= " + "__[POSTCOMPILE_param_1]) anon_1 " "LEFT OUTER JOIN addresses addresses_1 " "ON anon_1.users_id = addresses_1.user_id FOR UPDATE", dialect="oracle", diff --git a/test/orm/test_manytomany.py b/test/orm/test_manytomany.py index 1abf5551a5e..1155096a446 100644 --- a/test/orm/test_manytomany.py +++ b/test/orm/test_manytomany.py @@ -226,8 +226,9 @@ def test_self_referential_bidirectional_mutation(self): assert p2 in p1.parent_places def test_joinedload_on_double(self): - """test that a mapper can have two eager relationships to the same table, via - two different association tables. aliases are required.""" + """test that a mapper can have two eager relationships to the same + table, via two different association tables. aliases are required. + """ ( place_input, diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index 0f84923ac85..1c46f316931 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -5,6 +5,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String @@ -23,6 +24,8 @@ from sqlalchemy.orm import dynamic_loader from sqlalchemy.orm import Load from sqlalchemy.orm import load_only +from sqlalchemy.orm import Mapper +from sqlalchemy.orm import mapper from sqlalchemy.orm import reconstructor from sqlalchemy.orm import registry from sqlalchemy.orm import relationship @@ -31,14 +34,17 @@ from sqlalchemy.orm.persistence import _sort_states from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_deprecated_20 from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import ne_ +from sqlalchemy.testing.fixtures import ComparableEntity from sqlalchemy.testing.fixtures import ComparableMixin from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -114,6 +120,38 @@ class Plain(ComparableMixin): foobar="x", ) + def test_class_already_mapped(self): + users, User = ( + self.tables.users, + self.classes.User, + ) + + self.mapper(User, users) + + with expect_raises_message( + sa.exc.ArgumentError, + "Class .*User.* already has a primary mapper defined", + ): + self.mapper(User, users) + + @testing.combinations(mapper, Mapper) + def test_class_already_mapped_legacy(self, fn): + users, User = ( + self.tables.users, + self.classes.User, + ) + + with expect_deprecated_20( + r"Calling the mapper\(\) function directly outside" + ): + fn(User, users) + + with expect_raises_message( + 
sa.exc.ArgumentError, + "Class .*User.* already has a primary mapper defined", + ): + fn(User, users) + def test_prop_shadow(self): """A backref name may not shadow an existing property name.""" @@ -797,7 +835,7 @@ def test_replace_rel_prop_with_rel_warns(self): ) self.mapper(Address, addresses) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Property User.addresses on Mapper|User|users being replaced " "with new property User.addresses; the old property will " @@ -808,7 +846,8 @@ def test_replace_rel_prop_with_rel_warns(self): ) @testing.combinations((True,), (False,)) - def test_add_column_prop_deannotate(self, autoalias): + def test_add_column_prop_adaption(self, autoalias): + """test ultimately from #2316 revised for #8064""" User, users = self.classes.User, self.tables.users Address, addresses = self.classes.Address, self.tables.addresses @@ -869,9 +908,13 @@ class SubUser(User): "users_1.id = addresses.user_id", ) - def test_column_prop_deannotate(self): - """test that column property deannotates, - bringing expressions down to the original mapped columns. + def test_column_prop_stays_annotated(self): + """test ultimately from #2316 revised for #8064. + + previously column_property() would deannotate the given expression, + however this interfered with some compilation sceanrios. + + """ User, users = self.classes.User, self.tables.users m = self.mapper(User, users) @@ -883,14 +926,18 @@ def test_column_prop_deannotate(self): m.add_property("y", column_property(expr2.scalar_subquery())) assert User.x.property.columns[0] is not expr - assert User.x.property.columns[0].element.left is users.c.name - # a deannotate needs to clone the base, in case - # the original one referenced annotated elements. - assert User.x.property.columns[0].element.right is not expr.right + + assert ( + User.x.property.columns[0].element.left + is User.name.comparator.expr + ) + + assert User.x.property.columns[0].element.right is expr.right assert User.y.property.columns[0] is not expr2 assert ( - User.y.property.columns[0].element._raw_columns[0] is users.c.name + User.y.property.columns[0].element._raw_columns[0] + is User.name.expression ) assert User.y.property.columns[0].element._raw_columns[1] is users.c.id @@ -980,7 +1027,7 @@ class MyUser(User): polymorphic_on=users.c.name, polymorphic_identity="user", ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Reassigning polymorphic association for identity 'user'", self.mapper_registry.map_imperatively, @@ -1380,6 +1427,65 @@ def test_mapping_to_outerjoin_no_partial_pks(self): ], ) + @testing.requires.ctes + def test_mapping_to_union_dont_overlimit_pk(self, registry, connection): + """test #7842""" + Base = registry.generate_base() + + class Node(Base): + __tablename__ = "cte_nodes" + + id = Column(Integer, primary_key=True) + parent = Column(Integer, ForeignKey("cte_nodes.id")) + + # so we dont have to deal with NULLS FIRST + sort_key = Column(Integer) + + class NodeRel(ComparableEntity, Base): + table = select( + Node.id, Node.parent, Node.sort_key, literal(0).label("depth") + ).cte(recursive=True) + __table__ = table.union_all( + select( + Node.id, + table.c.parent, + table.c.sort_key, + table.c.depth + literal(1), + ) + .select_from(Node) + .join(table, Node.parent == table.c.id) + ) + + __mapper_args__ = { + "primary_key": (__table__.c.id, __table__.c.parent) + } + + nt = NodeRel.__table__ + + eq_(NodeRel.__mapper__.primary_key, (nt.c.id, nt.c.parent)) + + registry.metadata.create_all(connection) + with 
Session(connection) as session: + n1, n2, n3, n4 = ( + Node(id=1, sort_key=1), + Node(id=2, parent=1, sort_key=2), + Node(id=3, parent=2, sort_key=3), + Node(id=4, parent=3, sort_key=4), + ) + session.add_all([n1, n2, n3, n4]) + session.commit() + + q_rel = select(NodeRel).filter_by(id=4).order_by(NodeRel.sort_key) + eq_( + session.scalars(q_rel).all(), + [ + NodeRel(id=4, parent=None), + NodeRel(id=4, parent=1), + NodeRel(id=4, parent=2), + NodeRel(id=4, parent=3), + ], + ) + def test_scalar_pk_arg(self): users, Keyword, items, Item, User, keywords = ( self.tables.users, @@ -1688,12 +1794,12 @@ def _x(self): ) # object gracefully handles this condition - assert not hasattr(User.x, "__name__") + assert not hasattr(User.x, "foobar") assert not hasattr(User.x, "comparator") m.add_property("some_attr", column_property(users.c.name)) - assert not hasattr(User.x, "__name__") + assert not hasattr(User.x, "foobar") assert hasattr(User.x, "comparator") def test_synonym_of_non_property_raises(self): diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py index d2eade0ea1a..4f3b4e49561 100644 --- a/test/orm/test_merge.py +++ b/test/orm/test_merge.py @@ -7,6 +7,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import PickleType +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import Text @@ -16,6 +17,7 @@ from sqlalchemy.orm import defer from sqlalchemy.orm import deferred from sqlalchemy.orm import foreign +from sqlalchemy.orm import joinedload from sqlalchemy.orm import relationship from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session @@ -28,6 +30,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import not_in +from sqlalchemy.testing.assertsql import CountStatements from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table @@ -1396,6 +1399,127 @@ def test_no_load_preserves_parents(self): except sa.exc.InvalidRequestError as e: assert "load=False option does not support" in str(e) + @testing.variation("viewonly", ["viewonly", "normal"]) + @testing.variation("load", ["load", "noload"]) + @testing.variation("lazy", ["select", "raise", "raise_on_sql"]) + @testing.variation( + "merge_persistent", ["merge_persistent", "merge_detached"] + ) + @testing.variation("detach_original", ["detach", "persistent"]) + @testing.variation("direction", ["o2m", "m2o"]) + def test_relationship_population_maintained( + self, + viewonly, + load, + lazy, + merge_persistent, + direction, + detach_original, + ): + """test #8862""" + + User, Address = self.classes("User", "Address") + users, addresses = self.tables("users", "addresses") + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, + viewonly=viewonly.viewonly, + lazy=lazy.name, + back_populates="user", + order_by=addresses.c.id, + ) + }, + ) + + self.mapper_registry.map_imperatively( + Address, + addresses, + properties={ + "user": relationship( + User, + viewonly=viewonly.viewonly, + lazy=lazy.name, + back_populates="addresses", + ) + }, + ) + + s = fixture_session() + + u1 = User(id=1, name="u1") + s.add(u1) + s.flush() + s.add_all( + [Address(user_id=1, email_address="e%d" % i) for i in range(1, 4)] + ) + s.commit() + + if direction.o2m: + cls_to_merge = User + obj_to_merge = ( + s.scalars(select(User).options(joinedload(User.addresses))) + 
.unique() + .one() + ) + attrname = "addresses" + + elif direction.m2o: + cls_to_merge = Address + obj_to_merge = ( + s.scalars( + select(Address) + .filter_by(email_address="e1") + .options(joinedload(Address.user)) + ) + .unique() + .one() + ) + attrname = "user" + else: + assert False + + assert attrname in obj_to_merge.__dict__ + + s2 = Session(testing.db) + + if merge_persistent.merge_persistent: + target_persistent = s2.get(cls_to_merge, obj_to_merge.id) # noqa + + if detach_original.detach: + s.expunge(obj_to_merge) + + with self.sql_execution_asserter(testing.db) as assert_: + merged_object = s2.merge(obj_to_merge, load=load.load) + + assert_.assert_( + CountStatements( + 0 + if load.noload + else 1 + if merge_persistent.merge_persistent + else 2 + ) + ) + + assert attrname in merged_object.__dict__ + + with self.sql_execution_asserter(testing.db) as assert_: + if direction.o2m: + eq_( + merged_object.addresses, + [ + Address(user_id=1, email_address="e%d" % i) + for i in range(1, 4) + ], + ) + elif direction.m2o: + eq_(merged_object.user, User(id=1, name="u1")) + assert_.assert_(CountStatements(0)) + def test_synonym(self): users = self.tables.users @@ -1567,7 +1691,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set()) + eq_(ustate.load_options, ()) for u in s2_users: sess.merge(u) @@ -1575,7 +1699,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt2])) + eq_(ustate.load_options, (opt2,)) # test 2. present options are replaced by merge options sess = fixture_session() @@ -1583,7 +1707,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt1])) + eq_(ustate.load_options, (opt1,)) for u in s2_users: sess.merge(u) @@ -1591,7 +1715,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt2])) + eq_(ustate.load_options, (opt2,)) def test_resolve_conflicts_pending_doesnt_interfere_no_ident(self): User, Address, Order = ( diff --git a/test/orm/test_of_type.py b/test/orm/test_of_type.py index bdf7ab85923..09b3cf51c00 100644 --- a/test/orm/test_of_type.py +++ b/test/orm/test_of_type.py @@ -1151,10 +1151,10 @@ class C1(_C): "c.id AS c_id, c.type AS c_type, c.b_id AS c_b_id, a.id AS a_id, " "a.type AS a_type " "FROM a LEFT OUTER JOIN b ON " - "a.id = b.a_id AND b.type IN ([POSTCOMPILE_type_1]) " + "a.id = b.a_id AND b.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c ON " - "b.id = c.b_id AND c.type IN ([POSTCOMPILE_type_2]) " - "WHERE a.type IN ([POSTCOMPILE_type_3])" + "b.id = c.b_id AND c.type IN (__[POSTCOMPILE_type_2]) " + "WHERE a.type IN (__[POSTCOMPILE_type_3])" ) _query2 = ( @@ -1162,10 +1162,10 @@ class C1(_C): "ccc.id AS ccc_id, ccc.type AS ccc_type, ccc.b_id AS ccc_b_id, " "aaa.id AS aaa_id, aaa.type AS aaa_type " "FROM a AS aaa LEFT OUTER JOIN b AS bbb " - "ON aaa.id = bbb.a_id AND bbb.type IN ([POSTCOMPILE_type_1]) " + "ON aaa.id = bbb.a_id AND bbb.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c AS ccc ON " - "bbb.id = ccc.b_id AND ccc.type IN ([POSTCOMPILE_type_2]) " - "WHERE aaa.type IN ([POSTCOMPILE_type_3])" + "bbb.id = ccc.b_id AND ccc.type IN (__[POSTCOMPILE_type_2]) " + "WHERE aaa.type IN (__[POSTCOMPILE_type_3])" ) _query3 = ( @@ 
-1173,10 +1173,10 @@ class C1(_C): "c.id AS c_id, c.type AS c_type, c.b_id AS c_b_id, " "aaa.id AS aaa_id, aaa.type AS aaa_type " "FROM a AS aaa LEFT OUTER JOIN b AS bbb " - "ON aaa.id = bbb.a_id AND bbb.type IN ([POSTCOMPILE_type_1]) " + "ON aaa.id = bbb.a_id AND bbb.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c ON " - "bbb.id = c.b_id AND c.type IN ([POSTCOMPILE_type_2]) " - "WHERE aaa.type IN ([POSTCOMPILE_type_3])" + "bbb.id = c.b_id AND c.type IN (__[POSTCOMPILE_type_2]) " + "WHERE aaa.type IN (__[POSTCOMPILE_type_3])" ) def _test(self, join_of_type, of_type_for_c1, aliased_): diff --git a/test/orm/test_options.py b/test/orm/test_options.py index 1a2a5ba70f9..840b3dc2148 100644 --- a/test/orm/test_options.py +++ b/test/orm/test_options.py @@ -23,6 +23,7 @@ from sqlalchemy.orm import util as orm_util from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import AssertsCompiledSQL from sqlalchemy.testing.assertions import emits_warning @@ -2050,12 +2051,16 @@ def test_option_propagate(self): oalias = aliased(Order) opt1 = sa.orm.joinedload(User.orders, Order.items) opt2 = sa.orm.contains_eager(User.orders, Order.items, alias=oalias) - u1 = ( - sess.query(User) - .join(oalias, User.orders) - .options(opt1, opt2) - .first() - ) - ustate = attributes.instance_state(u1) - assert opt1 in ustate.load_options - assert opt2 not in ustate.load_options + + with mock.patch.object( + Load, "_adjust_for_extra_criteria", lambda self, ctx: self + ): + u1 = ( + sess.query(User) + .join(oalias, User.orders) + .options(opt1, opt2) + .first() + ) + ustate = attributes.instance_state(u1) + assert opt1 in ustate.load_options + assert opt2 not in ustate.load_options diff --git a/test/orm/test_pickled.py b/test/orm/test_pickled.py index 11d90bd5907..fe7ac7b7028 100644 --- a/test/orm/test_pickled.py +++ b/test/orm/test_pickled.py @@ -15,6 +15,7 @@ from sqlalchemy.orm import relationship from sqlalchemy.orm import state as sa_state from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm import with_polymorphic from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.orm.collections import column_mapped_collection @@ -23,10 +24,12 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.pickleable import Address +from sqlalchemy.testing.pickleable import AddressWMixin from sqlalchemy.testing.pickleable import Child1 from sqlalchemy.testing.pickleable import Child2 from sqlalchemy.testing.pickleable import Dingaling from sqlalchemy.testing.pickleable import EmailUser +from sqlalchemy.testing.pickleable import Mixin from sqlalchemy.testing.pickleable import Order from sqlalchemy.testing.pickleable import Parent from sqlalchemy.testing.pickleable import Screen @@ -43,6 +46,10 @@ from .inheritance._poly_fixtures import Person +def no_ed_foo(cls): + return cls.email_address != "ed@foo.com" + + class PickleTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): @@ -324,6 +331,51 @@ def test_invalidated_flag_deepcopy(self): u2.addresses.append(Address()) eq_(len(u2.addresses), 2) + @testing.requires.python3 + @testing.combinations(True, False, argnames="pickle_it") + @testing.combinations(True, False, argnames="use_mixin") + def test_loader_criteria(self, pickle_it, use_mixin): + """test 
#8109""" + + users, addresses = (self.tables.users, self.tables.addresses) + + AddressCls = AddressWMixin if use_mixin else Address + + self.mapper_registry.map_imperatively( + User, + users, + properties={"addresses": relationship(AddressCls)}, + ) + + self.mapper_registry.map_imperatively(AddressCls, addresses) + + with fixture_session(expire_on_commit=False) as sess: + u1 = User(name="ed") + u1.addresses = [ + AddressCls(email_address="ed@bar.com"), + AddressCls(email_address="ed@foo.com"), + ] + sess.add(u1) + sess.commit() + + with fixture_session(expire_on_commit=False) as sess: + # note that non-lambda is not picklable right now as + # SQL expressions usually can't be pickled. + opt = with_loader_criteria( + Mixin if use_mixin else Address, + no_ed_foo, + include_aliases=True, + ) + + u1 = sess.query(User).options(opt).first() + + if pickle_it: + u1 = pickle.loads(pickle.dumps(u1)) + sess.close() + sess.add(u1) + + eq_([ad.email_address for ad in u1.addresses], ["ed@bar.com"]) + @testing.requires.non_broken_pickle def test_instance_deferred_cols(self): users, addresses = (self.tables.users, self.tables.addresses) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index c567cf1d16b..203b7e7e450 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -38,13 +38,13 @@ from sqlalchemy import Unicode from sqlalchemy import union from sqlalchemy import util +from sqlalchemy.engine import cursor as _cursor from sqlalchemy.engine import default from sqlalchemy.ext.compiler import compiles from sqlalchemy.orm import aliased from sqlalchemy.orm import attributes from sqlalchemy.orm import backref from sqlalchemy.orm import Bundle -from sqlalchemy.orm import clear_mappers from sqlalchemy.orm import column_property from sqlalchemy.orm import contains_eager from sqlalchemy.orm import defer @@ -72,13 +72,16 @@ from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message +from sqlalchemy.testing.assertions import assert_warns_message from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_not_none from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.testing.util import gc_collect from sqlalchemy.types import NullType from sqlalchemy.types import TypeDecorator from sqlalchemy.util import collections_abc @@ -859,6 +862,18 @@ def test_explicit_cols( assert_row_keys(stmt, expected, coreorm_exec) + def test_with_only_columns(self, assert_row_keys): + """test #8001""" + + User, Address = self.classes("User", "Address") + + stmt = select(User.id, Address.email_address).join_from(User, Address) + stmt = stmt.with_only_columns( + stmt.selected_columns.id, stmt.selected_columns.email_address + ) + + assert_row_keys(stmt, ["id", "email_address"], "orm") + def test_explicit_cols_legacy(self): User = self.classes.User @@ -1003,34 +1018,14 @@ def test_explicit_ambiguous_orm_cols_legacy(self): eq_(row._mapping.keys(), ["id", "name", "id", "name"]) @testing.fixture - def uname_fixture(self): + def uname_fixture(self, registry): class Foo(object): pass - if False: - # this conditional creates the table each time which would - # eliminate cross-test memoization issues. 
if the tests - # are failing without this then there's a memoization issue. - # check AnnotatedColumn memoized keys - m = MetaData() - users = Table( - "users", - m, - Column("id", Integer, primary_key=True), - Column( - "name", - String, - ), - ) - self.mapper_registry.map_imperatively( - Foo, users, properties={"uname": users.c.name} - ) - else: - users = self.tables.users - clear_mappers() - self.mapper_registry.map_imperatively( - Foo, users, properties={"uname": users.c.name} - ) + users = self.tables.users + registry.map_imperatively( + Foo, users, properties={"uname": users.c.name} + ) return Foo @@ -1216,6 +1211,31 @@ def test_get(self): u2 = s.get(User, 7) assert u is not u2 + def test_get_synonym_direct_name(self, decl_base): + """test #8753""" + + class MyUser(decl_base): + __table__ = self.tables.users + + syn_id = synonym("id") + + s = fixture_session() + u = s.get(MyUser, {"syn_id": 7}) + eq_(u.id, 7) + + def test_get_synonym_indirect(self, decl_base): + """test #8753""" + + class MyUser(decl_base): + __table__ = self.tables.users + + uid = __table__.c.id + syn_id = synonym("uid") + + s = fixture_session() + u = s.get(MyUser, {"syn_id": 7}) + eq_(u.uid, 7) + def test_get_composite_pk_no_result(self): CompositePk = self.classes.CompositePk @@ -1314,7 +1334,7 @@ def test_get_fully_null_pk(self): User = self.classes.User s = fixture_session() - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"fully NULL primary key identity cannot load any object. " "This condition may raise an error in a future release.", @@ -1328,7 +1348,7 @@ def test_get_fully_null_composite_pk(self, outerjoin_mapping): s = fixture_session() - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"fully NULL primary key identity cannot load any object. 
" "This condition may raise an error in a future release.", @@ -2043,7 +2063,9 @@ def test_op(self): def test_in(self): User = self.classes.User - self._test(User.id.in_(["a", "b"]), "users.id IN ([POSTCOMPILE_id_1])") + self._test( + User.id.in_(["a", "b"]), "users.id IN (__[POSTCOMPILE_id_1])" + ) def test_in_on_relationship_not_supported(self): User, Address = self.classes.User, self.classes.Address @@ -2110,6 +2132,7 @@ def test_function_element_column_labels(self): class max_(expression.FunctionElement): name = "max" + inherit_cache = True @compiles(max_) def visit_max(element, compiler, **kw): @@ -2124,6 +2147,7 @@ def test_truly_unlabeled_sql_expressions(self): class not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): @@ -5271,6 +5295,8 @@ class YieldTest(_fixtures.FixtureTest): run_setup_mappers = "each" run_inserts = "each" + __backend__ = True + def _eagerload_mappings(self, addresses_lazy=True, user_lazy=True): User, Address = self.classes("User", "Address") users, addresses = self.tables("users", "addresses") @@ -5313,6 +5339,136 @@ def test_basic(self): except StopIteration: pass + def test_we_can_close_cursor(self): + """test new usecase close() added along with #7274""" + self._eagerload_mappings() + + User = self.classes.User + + sess = fixture_session() + + stmt = select(User).execution_options(yield_per=15) + result = sess.execute(stmt) + + with mock.patch.object(result.raw, "_soft_close") as mock_close: + two_results = result.fetchmany(2) + eq_(len(two_results), 2) + + eq_(mock_close.mock_calls, []) + + result.close() + + eq_(mock_close.mock_calls, [mock.call(hard=True)]) + + with expect_raises(sa.exc.ResourceClosedError): + result.fetchmany(10) + + with expect_raises(sa.exc.ResourceClosedError): + result.fetchone() + + with expect_raises(sa.exc.ResourceClosedError): + result.all() + + result.close() + + @testing.combinations("fetchmany", "fetchone", "fetchall") + def test_cursor_is_closed_on_exhausted(self, fetch_method): + """test #7274""" + self._eagerload_mappings() + + User = self.classes.User + + sess = fixture_session() + + stmt = select(User).execution_options(yield_per=15) + result = sess.execute(stmt) + + with mock.patch.object(result.raw, "_soft_close") as mock_close: + # call assertions are implementation specific. 
+ # test needs that _soft_close called at least once and without + # the hard=True flag + if fetch_method == "fetchmany": + while True: + buf = result.fetchmany(2) + if not buf: + break + eq_(mock_close.mock_calls, [mock.call()]) + elif fetch_method == "fetchall": + eq_(len(result.all()), 4) + eq_( + mock_close.mock_calls, [mock.call(), mock.call(hard=False)] + ) + elif fetch_method == "fetchone": + while True: + row = result.fetchone() + if row is None: + break + eq_( + mock_close.mock_calls, [mock.call(), mock.call(hard=False)] + ) + else: + assert False + + # soft closed, we can still get an empty result + eq_(result.all(), []) + + # real closed + result.close() + assert_raises(sa.exc.ResourceClosedError, result.all) + + def test_yield_per_close_on_interrupted_iteration_legacy(self): + """test #8710""" + + self._eagerload_mappings() + + User = self.classes.User + + asserted_result = [None] + + class _Query(Query): + def _iter(self): + asserted_result[0] = super(_Query, self)._iter() + return asserted_result[0] + + sess = fixture_session(query_cls=_Query) + + with expect_raises_message(Exception, "hi"): + for i, row in enumerate(sess.query(User).yield_per(1)): + assert not asserted_result[0]._soft_closed + assert not asserted_result[0].closed + + if i > 1: + raise Exception("hi") + + gc_collect() # needed for pypy, #8762 + assert asserted_result[0]._soft_closed + assert not asserted_result[0].closed + + def test_yield_per_close_on_interrupted_iteration(self): + """test #8710""" + + self._eagerload_mappings() + + User = self.classes.User + + sess = fixture_session() + + with expect_raises_message(Exception, "hi"): + result = sess.execute(select(User).execution_options(yield_per=1)) + for i, row in enumerate(result): + assert not result._soft_closed + assert not result.closed + + if i > 1: + raise Exception("hi") + + gc_collect() # not apparently needed, but defensive for pypy re: #8762 + assert not result._soft_closed + assert not result.closed + result.close() + assert result._soft_closed + assert result.closed + def test_yield_per_and_execution_options_legacy(self): self._eagerload_mappings() @@ -5330,8 +5486,7 @@ def check(ctx): if not k.startswith("_") }, { - "max_row_buffer": 15, - "stream_results": True, + "yield_per": 15, "foo": "bar", "future_result": True, }, @@ -5359,8 +5514,6 @@ def check(ctx): if not k.startswith("_") }, { - "max_row_buffer": 15, - "stream_results": True, "yield_per": 15, "future_result": True, }, @@ -5368,6 +5521,12 @@ def check(ctx): stmt = select(User).execution_options(yield_per=15) result = sess.execute(stmt) + + assert isinstance( + result.raw.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + eq_(result.raw.cursor_strategy._max_row_buffer, 15) + eq_(len(result.all()), 4) def test_no_joinedload_opt(self): @@ -5383,6 +5542,24 @@ def test_no_joinedload_opt(self): q.all, ) + def test_no_contains_eager_opt(self): + self._eagerload_mappings() + + User = self.classes.User + sess = fixture_session() + q = ( + sess.query(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .yield_per(1) + ) + assert_raises_message( + sa_exc.InvalidRequestError, + "Can't use yield_per with eager loaders that require " + "uniquing or row buffering", + q.all, + ) + def test_no_subqueryload_opt(self): self._eagerload_mappings() @@ -5892,12 +6069,11 @@ def test_textual_select_orm_columns(self): ( False, subqueryload, - # sqlite seems happy to interpret the broken SQL and give you the - # correct result somehow, this is a bug in SQLite so don't rely - 
# upon it doing that - testing.fails("not working yet") + testing.skip_if("sqlite"), ), - (True, subqueryload, testing.fails("not sure about implementation")), + ( + True, + subqueryload, + ), (False, selectinload), (True, selectinload), ) @@ -5925,7 +6101,12 @@ def test_related_eagerload_against_text(self, add_columns, loader_option): def go(): eq_(set(q.all()), set(self.static.user_address_result)) - self.assert_sql_count(testing.db, go, 2) + if loader_option is subqueryload: + # subqueryload necessarily degrades to lazy loads for a text + # statement. + self.assert_sql_count(testing.db, go, 5) + else: + self.assert_sql_count(testing.db, go, 2) def test_whereclause(self): User = self.classes.User @@ -7167,7 +7348,7 @@ def test_one_or_none(self): .one_or_none, ) - @testing.future + @testing.future() def test_getslice(self): assert False diff --git a/test/orm/test_rel_fn.py b/test/orm/test_rel_fn.py index 6f6b0d56dfe..a4e769d445d 100644 --- a/test/orm/test_rel_fn.py +++ b/test/orm/test_rel_fn.py @@ -17,11 +17,13 @@ from sqlalchemy.orm.interfaces import MANYTOONE from sqlalchemy.orm.interfaces import ONETOMANY from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_raises_message class _JoinFixtures(object): @@ -573,7 +575,7 @@ def _join_fixture_inh_selfref_w_entity(self, **kw): ) def _assert_non_simple_warning(self, fn): - assert_raises_message( + assert_warns_message( exc.SAWarning, "Non-simple column elements in " "primary join condition for property " @@ -818,9 +820,12 @@ def test_determine_local_remote_pairs_o2m_composite_selfref_func_rs(self): self._join_fixture_o2m_composite_selfref_func_remote_side() def test_determine_local_remote_pairs_o2m_overlap_func_warning(self): - self._assert_non_simple_warning( - self._join_fixture_m2o_sub_to_joined_sub_func - ) + with expect_raises_message( + exc.ArgumentError, "Could not locate any relevant" + ): + self._assert_non_simple_warning( + self._join_fixture_m2o_sub_to_joined_sub_func + ) def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated( self, @@ -1238,6 +1243,28 @@ def test_lazy_clause_remote_local_multiple_ref(self): class DeannotateCorrectlyTest(fixtures.TestBase): + def test_annotate_orm_join(self): + """test for #10223""" + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + bs = relationship("B") + + class B(Base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey(A.id)) + + stmt = select(A).join(A.bs) + + from sqlalchemy.sql import util + + util._deep_annotate(stmt, {"foo": "bar"}) + def test_pj_deannotates(self): from sqlalchemy.orm import declarative_base diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 86f7e9fc919..82ad752c44d 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -1,9 +1,11 @@ import datetime import random +from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import event +from sqlalchemy import exc as sa_exc from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Integer @@ -12,9 
+14,13 @@ from sqlalchemy import select from sqlalchemy import sql from sqlalchemy import String +from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy.orm import aliased +from sqlalchemy.orm import column_property +from sqlalchemy.orm import contains_eager from sqlalchemy.orm import defer +from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload from sqlalchemy.orm import lazyload from sqlalchemy.orm import registry @@ -25,6 +31,9 @@ from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm.decl_api import declared_attr from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message +from sqlalchemy.testing import fixtures +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.util import resolve_lambda @@ -53,6 +62,62 @@ def user_address_fixture(self): ) return User, Address + @testing.fixture + def user_address_col_property_fixture(self): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively(Address, addresses) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, + order_by=Address.id, + ), + "num_addresses": column_property( + select(func.count(Address.id)) + .where(Address.user_id == users.c.id) + .correlate_except(Address) + .scalar_subquery() + ), + }, + ) + return User, Address + + @testing.fixture + def user_address_custom_strat_fixture(self): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + def go(strat): + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively( + Address, addresses + ), + lazy=strat, + order_by=Address.id, + ) + }, + ) + return User, Address + + return go + @testing.fixture def order_item_fixture(self): Order, Item = self.classes("Order", "Item") @@ -202,6 +267,22 @@ def test_select_mapper_mapper_criteria(self, user_address_fixture): "FROM users WHERE users.name != :name_1", ) + def test_err_given_in_pathed(self, user_address_fixture): + User, Address = user_address_fixture + + with expect_raises_message( + sa_exc.ArgumentError, + r"Loader option <.*LoaderCriteriaOption.*> is not compatible " + r"with the Load.options\(\) method.", + ): + select(User).options( + selectinload(User.addresses).options( + with_loader_criteria( + Address, Address.email_address != "foo" + ) + ) + ) + def test_criteria_post_replace(self, user_address_fixture): User, Address = user_address_fixture @@ -218,6 +299,201 @@ def test_criteria_post_replace(self, user_address_fixture): "WHERE users.name != :name_1", ) + @testing.combinations( + ( + lambda User, Address: select(Address) + .select_from(User) + .join(User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + # issue #10365 + lambda User, Address: select(Address) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address) + .select_from(orm_join(User, Address, User.addresses)) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address) + .join_from(User, Address, User.addresses) 
+ .options(with_loader_criteria(User, User.name != "name")), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_user") + def test_criteria_select_from_w_join_left( + self, user_address_fixture, stmt_fn, alias_user + ): + """test #8721""" + User, Address = user_address_fixture + + if alias_user: + User = aliased(User) + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + + if alias_user: + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, " + "addresses.email_address FROM users AS users_1 " + "JOIN addresses ON users_1.id = addresses.user_id " + "WHERE users_1.name != :name_1", + ) + else: + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, " + "addresses.email_address " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "WHERE users.name != :name_1", + ) + + @testing.combinations( + ( + lambda User, Address: select(Address.id, User.id) + .select_from(User) + .join(User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + # issue #10365 - this seems to have already worked + lambda User, Address: select(Address.id, User.id) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address.id, User.id) + .select_from(orm_join(User, Address, User.addresses)) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address.id, User.id) + .join_from(User, Address, User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_user") + def test_criteria_select_from_w_join_left_including_entity( + self, user_address_fixture, stmt_fn, alias_user + ): + """test #8721""" + User, Address = user_address_fixture + + if alias_user: + User = aliased(User) + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + + if alias_user: + self.assert_compile( + stmt, + "SELECT addresses.id, users_1.id AS id_1 " + "FROM users AS users_1 JOIN addresses " + "ON users_1.id = addresses.user_id " + "WHERE users_1.name != :name_1", + ) + else: + self.assert_compile( + stmt, + "SELECT addresses.id, users.id AS id_1 " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "WHERE users.name != :name_1", + ) + + @testing.combinations( + ( + lambda User, Address: select(Address) + .select_from(User) + .join(User.addresses) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), + ( + # issue #10365 + lambda User, Address: select(Address) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), + ( + # for orm_join(), this is set up before we have the context + # available that allows with_loader_criteria to be set up + # correctly + lambda User, Address: select(Address) + .select_from(orm_join(User, Address, User.addresses)) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + testing.fails("not implemented right now"), + ), + ( + lambda User, Address: select(Address) + .join_from(User, Address, User.addresses) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), + argnames="stmt_fn", + ) + def test_criteria_select_from_w_join_right( + self, user_address_fixture, stmt_fn + ): + """test #8721""" + User, Address = 
user_address_fixture + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, addresses.email_address " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "AND addresses.email_address != :email_address_1", + ) + + @testing.combinations( + "select", + "joined", + "subquery", + "selectin", + "immediate", + argnames="loader_strategy", + ) + def test_loader_strategy_on_refresh( + self, loader_strategy, user_address_custom_strat_fixture + ): + User, Address = user_address_custom_strat_fixture(loader_strategy) + + sess = fixture_session() + + @event.listens_for(sess, "do_orm_execute") + def add_criteria(orm_context): + orm_context.statement = orm_context.statement.options( + with_loader_criteria( + Address, + ~Address.id.in_([5, 3]), + ) + ) + + u1 = sess.get(User, 7) + u2 = sess.get(User, 8) + eq_(u1.addresses, [Address(id=1)]) + eq_(u2.addresses, [Address(id=2), Address(id=4)]) + + for i in range(3): + sess.expire_all() + eq_(u1.addresses, [Address(id=1)]) + eq_(u2.addresses, [Address(id=2), Address(id=4)]) + def test_criteria_post_replace_legacy(self, user_address_fixture): User, Address = user_address_fixture @@ -235,6 +511,38 @@ def test_criteria_post_replace_legacy(self, user_address_fixture): "WHERE users.name != :name_1", ) + def test_criteria_applies_to_column_property( + self, user_address_col_property_fixture + ): + """test related to #8064, added after discussion #9091 which + requested this behavior for with_loader_criteria() where it was + found to be working as of this issue, just not tested""" + + User, Address = user_address_col_property_fixture + + stmt = select(User) + + self.assert_compile( + stmt, + "SELECT (SELECT count(addresses.id) AS count_1 FROM addresses " + "WHERE addresses.user_id = users.id) AS anon_1, " + "users.id, users.name FROM users", + ) + + stmt = select(User).options( + with_loader_criteria( + Address, Address.email_address != "email_address" + ) + ) + + self.assert_compile( + stmt, + "SELECT (SELECT count(addresses.id) AS count_1 FROM addresses " + "WHERE addresses.user_id = users.id AND " + "addresses.email_address != :email_address_1) AS anon_1, " + "users.id, users.name FROM users", + ) + def test_select_from_mapper_mapper_criteria(self, user_address_fixture): User, Address = user_address_fixture @@ -250,6 +558,50 @@ def test_select_from_mapper_mapper_criteria(self, user_address_fixture): "WHERE users.name != :name_1", ) + def test_with_loader_criteria_recursion_check_scalar_subq( + self, user_address_fixture + ): + """test #7491""" + + User, Address = user_address_fixture + subq = select(Address).where(Address.id == 8).scalar_subquery() + stmt = ( + select(User) + .join(Address) + .options(with_loader_criteria(Address, Address.id == subq)) + ) + self.assert_compile( + stmt, + "SELECT users.id, users.name FROM users JOIN addresses " + "ON users.id = addresses.user_id AND addresses.id = " + "(SELECT addresses.id, addresses.user_id, " + "addresses.email_address FROM addresses " + "WHERE addresses.id = :id_1)", + ) + + def test_with_loader_criteria_recursion_check_from_subq( + self, user_address_fixture + ): + """test #7491""" + + User, Address = user_address_fixture + subq = select(Address).where(Address.id == 8).subquery() + stmt = ( + select(User) + .join(Address) + .options(with_loader_criteria(Address, Address.id == subq.c.id)) + ) + # note this query is incorrect SQL right now. 
This is a current + # artifact of how with_loader_criteria() is used and may be considered + # a bug at some point, in which case if fixed this query can be + # changed. the main thing we are testing at the moment is that + # there is not a recursion overflow. + self.assert_compile( + stmt, + "SELECT users.id, users.name FROM users JOIN addresses " + "ON users.id = addresses.user_id AND addresses.id = anon_1.id", + ) + def test_select_mapper_columns_mapper_criteria(self, user_address_fixture): User, Address = user_address_fixture @@ -367,13 +719,70 @@ def test_select_selectinload_mapper_mapper_criteria( "SELECT addresses.user_id AS addresses_user_id, addresses.id " "AS addresses_id, addresses.email_address " "AS addresses_email_address FROM addresses " - "WHERE addresses.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " "AND addresses.email_address != :email_address_1 " "ORDER BY addresses.id", [{"primary_keys": [7, 8, 9, 10], "email_address_1": "name"}], ), ) + def test_select_selectinload_mapper_mapper_closure_criteria( + self, user_address_fixture + ): + User, Address = user_address_fixture + + def get_statement(closure="name"): + + stmt = select(User).options( + selectinload(User.addresses), + with_loader_criteria( + Address, lambda cls: cls.email_address != closure + ), + ) + return stmt + + s = Session(testing.db, future=True) + + stmt = get_statement(closure="name") + with self.sql_execution_asserter() as asserter: + s.execute(stmt).all() + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users", + [], + ), + CompiledSQL( + "SELECT addresses.user_id AS addresses_user_id, addresses.id " + "AS addresses_id, addresses.email_address " + "AS addresses_email_address FROM addresses " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"primary_keys": [7, 8, 9, 10], "closure_1": "name"}], + ), + ) + + stmt = get_statement(closure="new name") + with self.sql_execution_asserter() as asserter: + s.execute(stmt).all() + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users", + [], + ), + CompiledSQL( + "SELECT addresses.user_id AS addresses_user_id, addresses.id " + "AS addresses_id, addresses.email_address " + "AS addresses_email_address FROM addresses " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"primary_keys": [7, 8, 9, 10], "closure_1": "new name"}], + ), + ) + def test_select_lazyload_mapper_mapper_criteria( self, user_address_fixture ): @@ -436,6 +845,125 @@ def test_select_lazyload_mapper_mapper_criteria( ), ) + def test_select_lazyload_mapper_mapper_closure_criteria( + self, user_address_fixture + ): + User, Address = user_address_fixture + + def get_statement(closure="name"): + + stmt = ( + select(User) + .options( + lazyload(User.addresses), + with_loader_criteria( + Address, lambda cls: cls.email_address != closure + ), + ) + .order_by(User.id) + ) + return stmt + + s = Session(testing.db, future=True) + + stmt = get_statement(closure="name") + with self.sql_execution_asserter() as asserter: + for obj in s.scalars(stmt).all(): + obj.addresses + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users ORDER BY users.id", + [], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS 
addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 7, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 8, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 9, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 10, "closure_1": "name"}], + ), + ) + + stmt = get_statement(closure="new name") + with self.sql_execution_asserter() as asserter: + for obj in s.scalars( + stmt, execution_options={"populate_existing": True} + ).all(): + obj.addresses + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users ORDER BY users.id", + [], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 7, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 8, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 9, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 10, "closure_1": "new name"}], + ), + ) + def test_select_aliased_inclaliased_criteria(self, user_address_fixture): User, Address = user_address_fixture @@ -1227,7 +1755,8 @@ def go(value): "SELECT addresses.user_id AS addresses_user_id, " "addresses.id AS addresses_id, addresses.email_address " "AS addresses_email_address FROM addresses " - "WHERE addresses.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE addresses.user_id IN " + "(__[POSTCOMPILE_primary_keys]) " "AND addresses.email_address != :email_address_1 " "ORDER BY addresses.id", [ @@ -1239,6 +1768,138 @@ def go(value): ), ) + def test_selectinload_local_criteria_subquery(self, user_address_fixture): + """test #7489""" + User, Address = 
user_address_fixture
+
+        s = Session(testing.db, future=True)
+
+        def go(value):
+            a1 = aliased(Address)
+            subq = select(a1.id).where(a1.email_address != value).subquery()
+            stmt = (
+                select(User)
+                .options(
+                    selectinload(User.addresses.and_(Address.id == subq.c.id)),
+                )
+                .order_by(User.id)
+            )
+            result = s.execute(stmt)
+            return result
+
+        for value in (
+            "ed@wood.com",
+            "ed@lala.com",
+            "ed@wood.com",
+            "ed@lala.com",
+        ):
+            s.close()
+            with self.sql_execution_asserter() as asserter:
+                result = go(value)
+
+            eq_(
+                result.scalars().unique().all(),
+                self._user_minus_edwood(*user_address_fixture)
+                if value == "ed@wood.com"
+                else self._user_minus_edlala(*user_address_fixture),
+            )
+
+            asserter.assert_(
+                CompiledSQL(
+                    "SELECT users.id, users.name FROM users ORDER BY users.id"
+                ),
+                CompiledSQL(
+                    "SELECT addresses.user_id AS addresses_user_id, "
+                    "addresses.id AS addresses_id, "
+                    "addresses.email_address AS addresses_email_address "
+                    # note the comma-separated FROM clause
+                    "FROM addresses, (SELECT addresses_1.id AS id FROM "
+                    "addresses AS addresses_1 "
+                    "WHERE addresses_1.email_address != :email_address_1) "
+                    "AS anon_1 WHERE addresses.user_id "
+                    "IN (__[POSTCOMPILE_primary_keys]) "
+                    "AND addresses.id = anon_1.id ORDER BY addresses.id",
+                    [
+                        {
+                            "primary_keys": [7, 8, 9, 10],
+                            "email_address_1": value,
+                        }
+                    ],
+                ),
+            )
+
+    @testing.combinations(
+        (joinedload, False),
+        (lazyload, True),
+        (subqueryload, False),
+        (selectinload, True),
+        argnames="opt,results_supported",
+    )
+    def test_loader_criteria_subquery_w_same_entity(
+        self, user_address_fixture, opt, results_supported
+    ):
+        """test #7491.
+
+        note this test also uses the not-quite-supported form of subquery
+        criteria introduced by #7489, where we also have to clone
+        the subquery linked only from a column criteria. this required
+        additional changes to the _annotate() method that is also
+        tested here, which is why two of the loader strategies still fail;
+        we're just testing that there's no recursion overflow with this
+        very particular form.
+
+        """
+        User, Address = user_address_fixture
+
+        s = Session(testing.db, future=True)
+
+        def go(value):
+            subq = (
+                select(Address.id)
+                .where(Address.email_address != value)
+                .subquery()
+            )
+            stmt = (
+                select(User)
+                .options(
+                    # subquery here would need to be added to the FROM
+                    # clause. this isn't quite supported and won't work
+                    # right now with joinedload() or subqueryload().
+                    opt(User.addresses.and_(Address.id == subq.c.id)),
+                )
+                .order_by(User.id)
+            )
+            result = s.execute(stmt)
+            return result
+
+        for value in (
+            "ed@wood.com",
+            "ed@lala.com",
+            "ed@wood.com",
+            "ed@lala.com",
+        ):
+            s.close()
+
+            if not results_supported:
+                # for joinedload and subqueryload, the query generated here
+                # is invalid right now; this is because it's already not
+                # quite a supported pattern to refer to a subquery-bound
+                # column in loader criteria. However, the main thing we want
+                # to prevent here is the recursion overflow, so make sure
+                # we get a DBAPI error at least indicating compilation
+                # succeeded.
+ with expect_raises(sa_exc.DBAPIError): + go(value).scalars().unique().all() + else: + result = go(value).scalars().unique().all() + + eq_( + result, + self._user_minus_edwood(*user_address_fixture) + if value == "ed@wood.com" + else self._user_minus_edlala(*user_address_fixture), + ) + @testing.combinations((True,), (False,), argnames="use_compiled_cache") def test_selectinload_nested_criteria( self, user_order_item_fixture, use_compiled_cache @@ -1304,7 +1965,7 @@ def go(order_description, item_description): "ON items_1.id = order_items_1.item_id " "AND items_1.description = :description_1) " "ON orders.id = order_items_1.order_id " - "WHERE orders.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE orders.user_id IN (__[POSTCOMPILE_primary_keys]) " "AND orders.description = :description_2 " "ORDER BY orders.id, items_1.id", [ @@ -1503,3 +2164,175 @@ def test_select_joinm2m_aliased_local_criteria(self, order_item_fixture): "JOIN items AS items_1 ON items_1.id = order_items_1.item_id " "AND items_1.description != :description_1", ) + + +class SubqueryCriteriaTest(fixtures.DeclarativeMappedTest): + """test #10223""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Temperature(Base): + __tablename__ = "temperature" + id = Column(Integer, primary_key=True) + pointless_flag = Column(Boolean) + + class Color(Base): + __tablename__ = "color" + id = Column(Integer, primary_key=True) + name = Column(String(10)) + temperature_id = Column(ForeignKey("temperature.id")) + temperature = relationship("Temperature") + + room_connections = Table( + "room_connections", + Base.metadata, + Column( + "room_a_id", + Integer, + # mariadb does not like this FK constraint + # ForeignKey("room.id"), + primary_key=True, + ), + Column( + "room_b_id", + Integer, + # mariadb does not like this FK constraint + # ForeignKey("room.id"), + primary_key=True, + ), + ) + + class Room(Base): + __tablename__ = "room" + id = Column(Integer, primary_key=True) + token = Column(String(10)) + color_id = Column(ForeignKey("color.id")) + color = relationship("Color") + connected_rooms = relationship( + "Room", + secondary=room_connections, + primaryjoin=id == room_connections.c.room_a_id, + secondaryjoin=id == room_connections.c.room_b_id, + ) + + @classmethod + def insert_data(cls, connection): + Room, Temperature, Color = cls.classes("Room", "Temperature", "Color") + with Session(connection) as session: + warm = Temperature(pointless_flag=True) + cool = Temperature(pointless_flag=True) + session.add_all([warm, cool]) + + red = Color(name="red", temperature=warm) + orange = Color(name="orange", temperature=warm) + blue = Color(name="blue", temperature=cool) + green = Color(name="green", temperature=cool) + session.add_all([red, orange, blue, green]) + + red1 = Room(token="Red-1", color=red) + red2 = Room(token="Red-2", color=red) + orange2 = Room(token="Orange-2", color=orange) + blue1 = Room(token="Blue-1", color=blue) + blue2 = Room(token="Blue-2", color=blue) + green1 = Room(token="Green-1", color=green) + red1.connected_rooms = [red2, blue1, green1] + red2.connected_rooms = [red1, blue2, orange2] + blue1.connected_rooms = [red1, blue2, green1] + blue2.connected_rooms = [red2, blue1, orange2] + session.add_all([red1, red2, blue1, blue2, green1, orange2]) + + session.commit() + + @testing.variation( + "join_on_relationship", ["alone", "with_and", "no", "omit"] + ) + def test_selectinload(self, join_on_relationship): + Room, Temperature, Color = self.classes("Room", "Temperature", "Color") + 
similar_color = aliased(Color) + subquery = ( + select(Color.id) + .join( + similar_color, + similar_color.temperature_id == Color.temperature_id, + ) + .where(similar_color.name == "red") + ) + + if join_on_relationship.alone: + subquery = subquery.join(Color.temperature).where( + Temperature.pointless_flag == True + ) + elif join_on_relationship.with_and: + subquery = subquery.join( + Color.temperature.and_(Temperature.pointless_flag == True) + ) + elif join_on_relationship.no: + subquery = subquery.join( + Temperature, Color.temperature_id == Temperature.id + ).where(Temperature.pointless_flag == True) + elif join_on_relationship.omit: + pass + else: + join_on_relationship.fail() + + session = fixture_session() + room_result = session.scalars( + select(Room) + .order_by(Room.id) + .join(Room.color.and_(Color.name == "red")) + .options( + selectinload( + Room.connected_rooms.and_(Room.color_id.in_(subquery)) + ) + ) + ).unique() + + self._assert_result(room_result) + + def test_contains_eager(self): + Room, Temperature, Color = self.classes("Room", "Temperature", "Color") + similar_color = aliased(Color) + subquery = ( + select(Color.id) + .join( + similar_color, + similar_color.temperature_id == Color.temperature_id, + ) + .join(Color.temperature.and_(Temperature.pointless_flag == True)) + .where(similar_color.name == "red") + ) + + room_alias = aliased(Room) + session = fixture_session() + + room_result = session.scalars( + select(Room) + .order_by(Room.id, room_alias.id) + .join(Room.color.and_(Color.name == "red")) + .join( + room_alias, + Room.connected_rooms.of_type(room_alias).and_( + room_alias.color_id.in_(subquery) + ), + ) + .options(contains_eager(Room.connected_rooms.of_type(room_alias))) + ).unique() + + self._assert_result(room_result) + + def _assert_result(self, room_result): + eq_( + [ + ( + each_room.token, + [room.token for room in each_room.connected_rooms], + ) + for each_room in room_result + ], + [ + ("Red-1", ["Red-2"]), + ("Red-2", ["Red-1", "Orange-2"]), + ], + ) diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py index 94b30f3d01a..e98068eddb6 100644 --- a/test/orm/test_relationships.py +++ b/test/orm/test_relationships.py @@ -33,12 +33,13 @@ from sqlalchemy.orm.interfaces import ONETOMANY from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import is_ -from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertsql import assert_engine from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session @@ -872,7 +873,7 @@ def _test_fixture_one_run(self, **kw): @testing.provide_metadata def test_simple_warn(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:Child.parent|Parent.children)' will copy " r"column parent.id to column child.parent_id, which conflicts " @@ -963,7 +964,7 @@ def test_simple_overlaps_works(self): @testing.provide_metadata def test_double_rel_same_mapper_warns(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship 'Parent.child[12]' will copy column parent.id to " r"column child.parent_id, which conflicts with relationship\(s\): " @@ -983,7 +984,7 @@ def 
test_double_rel_aliased_mapper_works(self): @testing.provide_metadata def test_warn_one(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|BSub2.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -994,7 +995,7 @@ def test_warn_one(self): @testing.provide_metadata def test_warn_two(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|B.a_member)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1005,7 +1006,7 @@ def test_warn_two(self): @testing.provide_metadata def test_warn_three(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|B.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1017,7 +1018,7 @@ def test_warn_three(self): @testing.provide_metadata def test_warn_four(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:B.a|BSub2.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1301,7 +1302,7 @@ def test_overlapping_warning(self): }, ) - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship .* will copy column .* to column " r"employee_t.company_id, which conflicts with relationship\(s\)", @@ -2587,6 +2588,55 @@ class C2(object): registry.map_imperatively(C2, t3) assert C1.c2.property.primaryjoin.compare(t1.c.id == t3.c.t1id) + @testing.combinations( + "annotation", "local_remote", argnames="remote_anno_type" + ) + @testing.combinations("orm_col", "core_col", argnames="use_col_from") + def test_no_remote_on_local_only_cols( + self, decl_base, remote_anno_type, use_col_from + ): + """test #7094. + + a warning should be emitted for an inappropriate remote_side argument + + """ + + class A(decl_base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + data = Column(String) + + if remote_anno_type == "annotation": + if use_col_from == "core_col": + bs = relationship( + "B", + primaryjoin=lambda: remote(A.__table__.c.id) + == B.__table__.c.a_id, + ) + elif use_col_from == "orm_col": + bs = relationship( + "B", primaryjoin="remote(A.id) == B.a_id" + ) + elif remote_anno_type == "local_remote": + if use_col_from == "core_col": + bs = relationship( + "B", remote_side=lambda: A.__table__.c.id + ) + elif use_col_from == "orm_col": + bs = relationship("B", remote_side="A.id") + + class B(decl_base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + + with expect_warnings( + r"Expression a.id is marked as 'remote', but these column\(s\) " + r"are local to the local side. 
" + ): + decl_base.registry.configure() + def test_join_error_raised(self, registry): m = MetaData() t1 = Table("t1", m, Column("id", Integer, primary_key=True)) @@ -2905,7 +2955,7 @@ def define_tables(cls, metadata): Column("foo", String(50)), ) - def test_join_on_custom_op(self): + def test_join_on_custom_op_legacy_is_comparison(self): class A(fixtures.BasicEntity): pass @@ -2932,6 +2982,33 @@ class B(fixtures.BasicEntity): "FROM a JOIN b ON a.foo &* b.foo", ) + def test_join_on_custom_bool_op(self): + class A(fixtures.BasicEntity): + pass + + class B(fixtures.BasicEntity): + pass + + self.mapper_registry.map_imperatively( + A, + self.tables.a, + properties={ + "bs": relationship( + B, + primaryjoin=self.tables.a.c.foo.bool_op("&*")( + foreign(self.tables.b.c.foo) + ), + viewonly=True, + ) + }, + ) + self.mapper_registry.map_imperatively(B, self.tables.b) + self.assert_compile( + fixture_session().query(A).join(A.bs), + "SELECT a.id AS a_id, a.foo AS a_foo " + "FROM a JOIN b ON a.foo &* b.foo", + ) + class ViewOnlyHistoryTest(fixtures.MappedTest): @classmethod @@ -6513,7 +6590,7 @@ def test_eager_selectin(self): "(SELECT a.id AS aid, b.id AS id FROM a JOIN b ON a.b_ids " "LIKE :id_1 || b.id || :param_1) AS anon_1 " "ON a_1.id = anon_1.aid JOIN b ON b.id = anon_1.id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", params=[{"id_1": "%", "param_1": "%", "primary_keys": [2]}], ), ) diff --git a/test/orm/test_scoping.py b/test/orm/test_scoping.py index 87f0a2aae89..b2389ced308 100644 --- a/test/orm/test_scoping.py +++ b/test/orm/test_scoping.py @@ -9,6 +9,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import sessionmaker from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -101,7 +102,7 @@ def test_config_errors(self): bind=testing.db, ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "At least one scoped session is already present. 
", Session.configure, @@ -156,6 +157,7 @@ def test_methods_etc(self): populate_existing=False, with_for_update=None, identity_token=None, + execution_options=None, ), ], ) diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index f01060aab50..3e44abe88f5 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -18,8 +18,8 @@ from sqlalchemy.orm import subqueryload from sqlalchemy.orm import undefer from sqlalchemy.orm import with_polymorphic -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -89,6 +89,48 @@ def go(): self.assert_sql_count(testing.db, go, 2) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + def go(): + if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(selectinload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(selectinload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + self.assert_sql_count(testing.db, go, 2) + def user_dingaling_fixture(self): users, Dingaling, User, dingalings, Address, addresses = ( self.tables.users, @@ -1416,7 +1458,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(selectinload(User.order)).all, ) @@ -1840,7 +1882,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1890,7 +1932,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1936,7 +1978,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1990,7 +2032,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -2038,7 +2080,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN 
(__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -2259,7 +2301,7 @@ def go(): CompiledSQL( "SELECT b.a_id1 AS b_a_id1, b.a_id2 AS b_a_id2, b.id AS b_id " "FROM b WHERE (b.a_id1, b.a_id2) IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", [{"primary_keys": [(i, i + 2) for i in range(1, 20)]}], ), ) @@ -2290,7 +2332,7 @@ def go(): ), CompiledSQL( "SELECT a.id1 AS a_id1, a.id2 AS a_id2 FROM a " - "WHERE (a.id1, a.id2) IN ([POSTCOMPILE_primary_keys])", + "WHERE (a.id1, a.id2) IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [(i, i + 2) for i in range(1, 20)]}], ), ) @@ -2364,19 +2406,19 @@ def go(): CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(1, 48))}, ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(48, 95))}, ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(95, 101))}, ), ) @@ -2440,19 +2482,19 @@ def go(): # chunk size is 47. so first chunk are a 1->47... CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(1, 48))}, ), # second chunk is a 48-94 CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(48, 95))}, ), # third and final chunk 95-100. 
CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(95, 101))}, ), ) @@ -2983,13 +3025,13 @@ def test_twolevel_selectin_w_polymorphic(self): "SELECT foo_1.id AS foo_1_id, " "foo_1.type AS foo_1_type, foo_1.foo_id AS foo_1_foo_id " "FROM foo AS foo_1 " - "WHERE foo_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE foo_1.id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [3]}, ), CompiledSQL( "SELECT foo.id AS foo_id_1, foo.type AS foo_type, " "foo.foo_id AS foo_foo_id FROM foo " - "WHERE foo.id IN ([POSTCOMPILE_primary_keys])", + "WHERE foo.id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1]}, ), ) @@ -3153,13 +3195,13 @@ def test_load(self): q.all, CompiledSQL( 'SELECT "user".id AS user_id, "user".type AS user_type ' - 'FROM "user" WHERE "user".type IN ([POSTCOMPILE_type_1])', + 'FROM "user" WHERE "user".type IN (__[POSTCOMPILE_type_1])', {"type_1": ["employer"]}, ), CompiledSQL( "SELECT role.user_id AS role_user_id, role.id AS role_id " "FROM role WHERE role.user_id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1]}, ), ) @@ -3277,12 +3319,12 @@ def test_use_join_parent_criteria(self): q.all, CompiledSQL( "SELECT a.id AS a_id, a.b_id AS a_b_id, a.q AS a_q " - "FROM a WHERE a.id IN ([POSTCOMPILE_id_1]) ORDER BY a.id", + "FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id", [{"id_1": [1, 3]}], ), CompiledSQL( "SELECT b.id AS b_id, b.x AS b_x, b.y AS b_y " - "FROM b WHERE b.id IN ([POSTCOMPILE_primary_keys])", + "FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2]}], ), ) @@ -3306,7 +3348,7 @@ def test_use_join_parent_criteria_degrade_on_defer(self): q.all, CompiledSQL( "SELECT a.id AS a_id, a.q AS a_q " - "FROM a WHERE a.id IN ([POSTCOMPILE_id_1]) ORDER BY a.id", + "FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id", [{"id_1": [1, 3]}], ), # in the very unlikely case that the the FK col on parent is @@ -3317,7 +3359,7 @@ def test_use_join_parent_criteria_degrade_on_defer(self): "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y " "FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 3]}], ), ) @@ -3341,7 +3383,7 @@ def test_use_join(self): ), CompiledSQL( "SELECT b.id AS b_id, b.x AS b_x, b.y AS b_y " - "FROM b WHERE b.id IN ([POSTCOMPILE_primary_keys])", + "FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2]}], ), ) @@ -3373,7 +3415,7 @@ def test_use_join_omit_join_false(self): CompiledSQL( "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2, 3, 4, 5]}], ), ) @@ -3408,7 +3450,7 @@ def test_use_join_parent_degrade_on_defer(self): "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y " "FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2, 3, 4, 5]}], ), ) @@ -3520,13 +3562,15 @@ def test_load_both_wpoly(self): CompiledSQL( "SELECT child_a.parent_id AS child_a_parent_id, " "child_a.id AS child_a_id FROM child_a " - "WHERE child_a.parent_id IN ([POSTCOMPILE_primary_keys])", + "WHERE child_a.parent_id IN " + 
"(__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1]}], ), CompiledSQL( "SELECT child_b.parent_id AS child_b_parent_id, " "child_b.id AS child_b_id FROM child_b " - "WHERE child_b.parent_id IN ([POSTCOMPILE_primary_keys])", + "WHERE child_b.parent_id IN " + "(__[POSTCOMPILE_primary_keys])", [{"primary_keys": [2]}], ), ), diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 4ee71fd5ba5..83ce629700c 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -1,8 +1,10 @@ import inspect as _py_inspect import sqlalchemy as sa +from sqlalchemy import delete from sqlalchemy import event from sqlalchemy import ForeignKey +from sqlalchemy import insert from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import select @@ -10,6 +12,7 @@ from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text +from sqlalchemy import update from sqlalchemy.orm import attributes from sqlalchemy.orm import backref from sqlalchemy.orm import close_all_sessions @@ -24,6 +27,7 @@ from sqlalchemy.orm import was_deleted from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import assertions from sqlalchemy.testing import config from sqlalchemy.testing import engines @@ -461,6 +465,23 @@ def test_make_transient_to_detached_no_key_allowed(self): u1, ) + def test_get_execution_option(self): + users, User = self.tables.users, self.classes.User + + self.mapper_registry.map_imperatively(User, users) + sess = fixture_session() + called = [False] + + @event.listens_for(sess, "do_orm_execute") + def check(ctx): + called[0] = True + eq_(ctx.execution_options["foo"], "bar") + + sess.get(User, 42, execution_options={"foo": "bar"}) + sess.close() + + is_true(called[0]) + class SessionStateTest(_fixtures.FixtureTest): run_inserts = None @@ -1015,7 +1036,7 @@ def e(mapper, conn, target): def test_extra_dirty_state_post_flush_warning(self): s, a1, a2 = self._test_extra_dirty_state() - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Attribute history events accumulated on 1 previously " "clean instances", @@ -1516,14 +1537,22 @@ def test_weakref(self): s = fixture_session() self.mapper_registry.map_imperatively(User, users) + gc_collect() s.add(User(name="ed")) s.flush() assert not s.dirty user = s.query(User).one() + + # heisenberg the GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + user_is = user._sa_instance_state del user gc_collect() + assert user_is.obj() is None + assert len(s.identity_map) == 0 user = s.query(User).one() @@ -1548,6 +1577,7 @@ def test_weakref_pickled(self): s = fixture_session() self.mapper_registry.map_imperatively(User, users) + gc_collect() s.add(User(name="ed")) s.flush() @@ -1590,6 +1620,8 @@ def test_weakref_with_cycles_o2m(self): properties={"addresses": relationship(Address, backref="user")}, ) self.mapper_registry.map_imperatively(Address, addresses) + gc_collect() + s.add(User(name="ed", addresses=[Address(email_address="ed1")])) s.commit() @@ -1630,6 +1662,8 @@ def test_weakref_with_cycles_o2o(self): }, ) self.mapper_registry.map_imperatively(Address, addresses) + gc_collect() + s.add(User(name="ed", address=Address(email_address="ed1"))) s.commit() @@ -1657,6 +1691,7 @@ def test_auto_detach_on_gc_session(self): users, User = self.tables.users, self.classes.User 
self.mapper_registry.map_imperatively(User, users) + gc_collect() sess = Session(testing.db) @@ -1688,6 +1723,7 @@ def test_fast_discard_race(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) + gc_collect() sess = fixture_session() @@ -2148,6 +2184,32 @@ def test_unbuffered_result_session_is_closed(self, meth): ): result.all() + @testing.combinations("insert", "update", "delete", argnames="dml_expr") + @testing.combinations("core", "orm", argnames="coreorm") + def test_dml_execute(self, dml_expr, coreorm): + User = self.classes.User + users = self.tables.users + + sess = fixture_session() + + if coreorm == "orm": + if dml_expr == "insert": + stmt = insert(User).values(id=12, name="some user") + elif dml_expr == "update": + stmt = update(User).values(name="sone name").filter_by(id=15) + else: + stmt = delete(User).filter_by(id=15) + else: + if dml_expr == "insert": + stmt = insert(users).values(id=12, name="some user") + elif dml_expr == "update": + stmt = update(users).values(name="sone name").filter_by(id=15) + else: + stmt = delete(users).filter_by(id=15) + + result = sess.execute(stmt) + result.close() + @testing.combinations((True,), (False,), argnames="prebuffered") @testing.combinations(("close",), ("expunge_all",), argnames="meth") def test_unbuffered_result_before_session_is_closed( @@ -2258,7 +2320,8 @@ def test_m2o_cascade_add(self): def evt(mapper, conn, instance): instance.addresses[0].user = User(name="u2") - self._test(evt, "related attribute set") + with expect_raises_message(orm_exc.FlushError, ".*Over 100"): + self._test(evt, "related attribute set") def test_m2o_cascade_remove(self): def evt(mapper, conn, instance): @@ -2289,7 +2352,10 @@ def test_plain_delete(self): def evt(mapper, conn, instance): object_session(instance).delete(Address(email="x1")) - self._test(evt, r"Session.delete\(\)") + with expect_raises_message( + sa.exc.InvalidRequestError, ".*is not persisted" + ): + self._test(evt, r"Session.delete\(\)") def _test(self, fn, method): User = self.classes.User @@ -2300,6 +2366,6 @@ def _test(self, fn, method): u1 = User(name="u1", addresses=[Address(name="a1")]) s.add(u1) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Usage of the '%s'" % method, s.commit ) diff --git a/test/orm/test_subquery_relations.py b/test/orm/test_subquery_relations.py index 5be0042b0da..7b0b4dc9dc4 100644 --- a/test/orm/test_subquery_relations.py +++ b/test/orm/test_subquery_relations.py @@ -20,14 +20,15 @@ from sqlalchemy.orm import subqueryload from sqlalchemy.orm import undefer from sqlalchemy.orm import with_polymorphic -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.assertsql import Or from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -89,6 +90,71 @@ def go(): self.assert_sql_count(testing.db, go, 2) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + 
self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + with self.sql_execution_asserter(testing.db) as asserter: + if legacy: + ret = ( + sess.query(User) + # .where(User.id == 7) + .from_statement(stmt) + .options(subqueryload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(subqueryload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + asserter.assert_( + Or( + CompiledSQL( + "SELECT users.id AS users_id, users.name AS users_name " + "FROM users WHERE users.id = :id_1", + [{"id_1": 7}], + ), + CompiledSQL( + "SELECT users.id, users.name " + "FROM users WHERE users.id = :id_1", + [{"id_1": 7}], + ), + ), + # issue 7505 + # subqueryload degrades for a from_statement. this is a lazyload + CompiledSQL( + "SELECT addresses.id AS addresses_id, addresses.user_id AS " + "addresses_user_id, addresses.email_address AS " + "addresses_email_address FROM addresses " + "WHERE :param_1 = addresses.user_id ORDER BY addresses.id", + [{"param_1": 7}], + ), + ) + def test_params_arent_cached(self): users, Address, addresses, User = ( self.tables.users, @@ -1444,7 +1510,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(subqueryload(User.order)).all, ) diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py index f0ef37230cb..9d81e95b22d 100644 --- a/test/orm/test_transaction.py +++ b/test/orm/test_transaction.py @@ -513,6 +513,46 @@ def do_begin(conn, name): assert conn.closed assert not fairy.is_valid + @testing.requires.independent_connections + def test_no_rollback_in_committed_state(self): + """test #7388 + + Prior to the fix, using the session.begin() context manager + would produce the error "This session is in 'committed' state; no + further SQL can be emitted ", when it attempted to call .rollback() + if the connection.close() operation failed inside of session.commit(). + + While the real exception was chained inside, this still proved to + be misleading so we now skip the rollback() in this specific case + and allow the original error to be raised. + + """ + + sess = fixture_session() + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object( + testing.db.dialect, "do_rollback", side_effect=fail + ) as fail_mock, mock.patch.object( + testing.db.dialect, + "do_commit", + side_effect=testing.db.dialect.do_commit, + ) as succeed_mock: + + # sess.begin() -> commit(). why would do_rollback() be called? + # because of connection pool finalize_fairy *after* the commit. + # this will cause the conn.close() in session.commit() to fail, + # but after the DB commit succeeded. 
+ with expect_raises_message(BaseException, "some base exception"): + with sess.begin(): + conn = sess.connection() + fairy_conn = conn.connection + + eq_(succeed_mock.mock_calls, [mock.call(fairy_conn)]) + eq_(fail_mock.mock_calls, [mock.call(fairy_conn)]) + def test_continue_flushing_on_commit(self): """test that post-flush actions get flushed also if we're in commit()""" @@ -1305,43 +1345,6 @@ def test_update_deleted_on_rollback(self): assert u1 in s assert u1 not in s.deleted - @testing.requires.predictable_gc - def test_gced_delete_on_rollback(self): - User, users = self.classes.User, self.tables.users - - s = fixture_session() - u1 = User(name="ed") - s.add(u1) - s.commit() - - s.delete(u1) - u1_state = attributes.instance_state(u1) - assert u1_state in s.identity_map.all_states() - assert u1_state in s._deleted - s.flush() - assert u1_state not in s.identity_map.all_states() - assert u1_state not in s._deleted - del u1 - gc_collect() - assert u1_state.obj() is None - - s.rollback() - # new in 1.1, not in identity map if the object was - # gc'ed and we restore snapshot; we've changed update_impl - # to just skip this object - assert u1_state not in s.identity_map.all_states() - - # in any version, the state is replaced by the query - # because the identity map would switch it - u1 = s.query(User).filter_by(name="ed").one() - assert u1_state not in s.identity_map.all_states() - - eq_(s.scalar(select(func.count("*")).select_from(users)), 1) - s.delete(u1) - s.flush() - eq_(s.scalar(select(func.count("*")).select_from(users)), 0) - s.commit() - def test_trans_deleted_cleared_on_rollback(self): User = self.classes.User s = fixture_session() @@ -2486,10 +2489,10 @@ def test_key_replaced_by_oob_insert(self): class JoinIntoAnExternalTransactionFixture(object): """Test the "join into an external transaction" examples""" - __leave_connections_for_teardown__ = True - def setup_test(self): - self.engine = testing.db + self.engine = engines.testing_engine( + options={"use_reaper": False, "sqlite_savepoint": True} + ) self.connection = self.engine.connect() self.metadata = MetaData() @@ -2550,7 +2553,7 @@ class A(object): # bind an individual Session to the connection self.session = Session(bind=self.connection, future=True) - if testing.requires.savepoints.enabled: + if testing.requires.compat_savepoints.enabled: self.nested = self.connection.begin_nested() @event.listens_for(self.session, "after_transaction_end") @@ -2567,7 +2570,7 @@ def teardown_session(self): if self.trans.is_active: self.trans.rollback() - @testing.requires.savepoints + @testing.requires.compat_savepoints def test_something_with_context_managers(self): A = self.A @@ -2633,7 +2636,7 @@ class A(object): # bind an individual Session to the connection self.session = Session(bind=self.connection) - if testing.requires.savepoints.enabled: + if testing.requires.compat_savepoints.enabled: # start the session in a SAVEPOINT... 
self.session.begin_nested() diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py index 7b7cb0db6fb..77eaa1a8c20 100644 --- a/test/orm/test_unitofwork.py +++ b/test/orm/test_unitofwork.py @@ -25,6 +25,8 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_true +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import Conditional @@ -3499,6 +3501,103 @@ def test_dont_complain_if_no_update(self): s.commit() +class NoRowInsertedTest(fixtures.TestBase): + """test #7594. + + failure modes when INSERT doesn't actually insert a row. + """ + + __backend__ = True + __requires__ = ("returning",) + + @testing.fixture + @testing.skip_if( + "+asyncpg", + "1.4's asyncpg architecture doesn't let us change parameters", + ) + def null_server_default_fixture(self, registry, connection): + @registry.mapped + class MyClass(object): + __tablename__ = "my_table" + + id = Column(Integer, primary_key=True) + data = Column(String(50)) + + registry.metadata.create_all(connection) + + @event.listens_for(connection, "before_cursor_execute", retval=True) + def revert_insert( + conn, cursor, statement, parameters, context, executemany + ): + if statement.startswith("INSERT"): + if statement.endswith("RETURNING my_table.id"): + if executemany: + # remove some rows, so the count is wrong + parameters = parameters[0:1] + else: + # statement should return no rows + statement = ( + "UPDATE my_table SET id=NULL WHERE 1!=1 " + "RETURNING my_table.id" + ) + parameters = {} + else: + assert not testing.against( + "postgresql" + ), "this test has to at least run on PostgreSQL" + testing.config.skip_test( + "backend doesn't support the expected form of " + "RETURNING for this test to work" + ) + return statement, parameters + + return MyClass + + def test_insert_single_no_pk_correct_exception( + self, null_server_default_fixture, connection + ): + MyClass = null_server_default_fixture + + sess = fixture_session(bind=connection) + + m1 = MyClass(data="data") + sess.add(m1) + + with expect_raises_message( + orm_exc.FlushError, + "Single-row INSERT statement for .*MyClass.* did not produce", + ): + sess.flush() + + is_true(inspect(m1).transient) + sess.rollback() + is_true(inspect(m1).transient) + + def test_insert_multi_no_pk_correct_exception( + self, null_server_default_fixture, connection + ): + MyClass = null_server_default_fixture + + sess = fixture_session(bind=connection) + + m1, m2, m3 = MyClass(data="d1"), MyClass(data="d2"), MyClass(data="d3") + sess.add_all([m1, m2, m3]) + + is_multi_row = connection.dialect.insert_executemany_returning + with expect_raises_message( + orm_exc.FlushError, + "%s INSERT statement for .*MyClass.* did not produce" + % ("Multi-row" if is_multi_row else "Single-row"), + ): + sess.flush() + + for m in m1, m2, m3: + is_true(inspect(m).transient) + sess.rollback() + for m in m1, m2, m3: + is_true(inspect(m).transient) + + class EnsurePKSortableTest(fixtures.MappedTest): class SomeEnum(object): # Implements PEP 435 in the minimal fashion needed by SQLAlchemy diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py index af38a4bab00..4546145396a 100644 --- a/test/orm/test_unitofworkv2.py +++ b/test/orm/test_unitofworkv2.py @@ -25,6 +25,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import
unitofwork from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import config from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -1919,7 +1920,7 @@ def test_delete_twice(self): sess.delete(p1) - assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 1 row\(s\); 0 were matched.", @@ -1939,7 +1940,7 @@ def test_delete_multi_missing_warning(self): sess.delete(p1) sess.delete(p2) - assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 2 row\(s\); 0 were matched.", @@ -2004,7 +2005,7 @@ def test_delete_single_broken_multi_rowcount_still_warns(self): with patch.object( config.db.dialect, "supports_sane_multi_rowcount", False ): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 1 row\(s\); 0 were matched.", diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py index 21863c57a52..9eaf1765a31 100644 --- a/test/orm/test_update_delete.py +++ b/test/orm/test_update_delete.py @@ -22,6 +22,9 @@ from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import synonym from sqlalchemy.orm import with_loader_criteria +from sqlalchemy.sql.dml import Delete +from sqlalchemy.sql.dml import Update +from sqlalchemy.sql.selectable import Select from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ @@ -96,6 +99,39 @@ def setup_mappers(cls): ) cls.mapper_registry.map_imperatively(Address, addresses) + @testing.combinations("table", "mapper", "both", argnames="bind_type") + @testing.combinations( + "update", "insert", "delete", argnames="statement_type" + ) + def test_get_bind_scenarios(self, connection, bind_type, statement_type): + """test for #7936""" + + User = self.classes.User + + if statement_type == "insert": + stmt = insert(User).values( + {User.id: 5, User.age: 25, User.name: "spongebob"} + ) + elif statement_type == "update": + stmt = ( + update(User) + .where(User.id == 2) + .values({User.name: "spongebob"}) + ) + elif statement_type == "delete": + stmt = delete(User) + + binds = {} + if bind_type == "both": + binds = {User: connection, User.__table__: connection} + elif bind_type == "mapper": + binds = {User: connection} + elif bind_type == "table": + binds = {User.__table__: connection} + + with Session(binds=binds) as sess: + sess.execute(stmt) + def test_illegal_eval(self): User = self.classes.User s = fixture_session() @@ -665,7 +701,8 @@ def test_update_future(self): list(zip([15, 27, 19, 27])), ) - def test_update_future_lambda(self): + @testing.variation("values_first", [True, False]) + def test_update_future_lambda(self, values_first): User, users = self.classes.User, self.tables.users sess = Session(testing.db, future=True) @@ -674,14 +711,22 @@ def test_update_future_lambda(self): sess.execute(select(User).order_by(User.id)).scalars().all() ) - sess.execute( - lambda_stmt( + new_value = 10 + + if values_first: + stmt = lambda_stmt(lambda: update(User)) + stmt += lambda s: s.values({"age": User.age - new_value}) + stmt += lambda s: s.where(User.age > 29).execution_options( + synchronize_session="evaluate" + ) + else: + stmt = lambda_stmt( lambda: update(User) .where(User.age > 29) - .values({"age": User.age - 10}) + .values({"age": User.age - new_value}) 
.execution_options(synchronize_session="evaluate") - ), - ) + ) + sess.execute(stmt) eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27]) eq_( @@ -689,14 +734,21 @@ def test_update_future_lambda(self): list(zip([25, 37, 29, 27])), ) - sess.execute( - lambda_stmt( + if values_first: + stmt = lambda_stmt(lambda: update(User)) + stmt += lambda s: s.values({"age": User.age - new_value}) + stmt += lambda s: s.where(User.age > 29).execution_options( + synchronize_session="evaluate" + ) + else: + stmt = lambda_stmt( lambda: update(User) .where(User.age > 29) .values({User.age: User.age - 10}) .execution_options(synchronize_session="evaluate") ) - ) + + sess.execute(stmt) eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 29, 27]) eq_( sess.query(User.age).order_by(User.id).all(), @@ -1427,6 +1479,42 @@ def test_update_preserve_parameter_order_future(self): ] eq_(["name", "age_int"], cols) + @testing.combinations(("update",), ("delete",), argnames="stmt_type") + @testing.combinations( + ("evaluate",), ("fetch",), (None,), argnames="sync_type" + ) + def test_routing_session(self, stmt_type, sync_type, connection): + User = self.classes.User + + if stmt_type == "update": + stmt = update(User).values(age=123) + expected = [Update] + elif stmt_type == "delete": + stmt = delete(User) + expected = [Delete] + else: + assert False + + received = [] + + class RoutingSession(Session): + def get_bind(self, **kw): + received.append(type(kw["clause"])) + return super(RoutingSession, self).get_bind(**kw) + + stmt = stmt.execution_options(synchronize_session=sync_type) + + if sync_type == "fetch": + expected.insert(0, Select) + + if not connection.dialect.full_returning: + expected.insert(0, Select) + + with RoutingSession(bind=connection) as sess: + sess.execute(stmt) + + eq_(received, expected) + class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest): @classmethod @@ -2184,21 +2272,45 @@ def test_load_from_update(self, connection): [User(name="jack", age=52), User(name="jill", age=34)], ) - def test_load_from_insert(self, connection): + @testing.combinations( + ("single",), + ("multiple", testing.requires.multivalues_inserts), + argnames="params", + ) + def test_load_from_insert(self, connection, params): User = self.classes.User - stmt = ( - insert(User) - .values({User.id: 5, User.age: 25, User.name: "spongebob"}) - .returning(User) - ) + if params == "multiple": + values = [ + {User.id: 5, User.age: 25, User.name: "spongebob"}, + {User.id: 6, User.age: 30, User.name: "patrick"}, + {User.id: 7, User.age: 35, User.name: "squidward"}, + ] + elif params == "single": + values = {User.id: 5, User.age: 25, User.name: "spongebob"} + else: + assert False + + stmt = insert(User).values(values).returning(User) stmt = select(User).from_statement(stmt) with Session(connection) as sess: rows = sess.execute(stmt).scalars().all() - eq_( - rows, - [User(name="spongebob", age=25)], - ) + if params == "multiple": + eq_( + rows, + [ + User(name="spongebob", age=25), + User(name="patrick", age=30), + User(name="squidward", age=35), + ], + ) + elif params == "single": + eq_( + rows, + [User(name="spongebob", age=25)], + ) + else: + assert False diff --git a/test/orm/test_versioning.py b/test/orm/test_versioning.py index 45fad9ab741..30730122410 100644 --- a/test/orm/test_versioning.py +++ b/test/orm/test_versioning.py @@ -20,6 +20,8 @@ from sqlalchemy.orm import Session from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns 
+from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import config from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -193,7 +195,7 @@ def test_notsane_warning(self): s1.commit() f1.value = "f1rev2" - assert_raises(sa.exc.SAWarning, s1.commit) + assert_warns(sa.exc.SAWarning, s1.commit) finally: testing.db.dialect.supports_sane_rowcount = save @@ -1328,7 +1330,7 @@ def test_mismatch_version_col_warning(self): Base, base, version_id_col=base.c.version_id ) - assert_raises_message( + assert_warns_message( exc.SAWarning, "Inheriting version_id_col 'version_id' does not " "match inherited version_id_col 'version_id' and will not " @@ -2004,3 +2006,55 @@ def test_explicit_assign_from_expired(self): f1.value = "f2" f1.version_id = 2 s1.flush() + + +class QuotedBindVersioningTest(fixtures.MappedTest): + """test for #8056""" + + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + "version_table", + metadata, + Column( + "id", Integer, primary_key=True, test_needs_autoincrement=True + ), + # will need parameter quoting for Oracle and PostgreSQL + # don't use 'key' to make sure the awkward name is definitely + # in the params + Column("_version%id", Integer, nullable=False), + Column("value", String(40), nullable=False), + ) + + @classmethod + def setup_classes(cls): + class Foo(cls.Basic): + pass + + @classmethod + def setup_mappers(cls): + Foo = cls.classes.Foo + vt = cls.tables.version_table + cls.mapper_registry.map_imperatively( + Foo, + vt, + version_id_col=vt.c["_version%id"], + properties={"version": vt.c["_version%id"]}, + ) + + def test_round_trip(self, fixture_session): + Foo = self.classes.Foo + + f1 = Foo(value="v1") + fixture_session.add(f1) + fixture_session.commit() + + f1.value = "v2" + with conditional_sane_rowcount_warnings( + update=True, only_returning=True + ): + fixture_session.commit() + + eq_(f1.version, 2) diff --git a/test/profiles.txt b/test/profiles.txt index b99be234662..5c285f4f453 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -27,8 +27,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.
test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 66 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 68 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 68 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 68 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 68 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 67 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 67 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 72 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 72 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 73 @@ -37,8 +39,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3. test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 73 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 73 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 73 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 72 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 72 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_select @@ -50,8 +52,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2. 
test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_mssql_pyodbc_dbapiunicode_nocextensions 181 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 181 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 181 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 181 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 181 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 180 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 180 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 195 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 195 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 196 @@ -60,12 +64,15 @@ test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3. test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 196 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 196 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 196 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 195 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 195 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_select_labels -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 205 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 204 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 204 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 219 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 219 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels 
x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 212 @@ -74,8 +81,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpy test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 212 -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 212 -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 212 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 219 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 219 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_update @@ -93,8 +100,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2. test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 79 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 79 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 79 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 79 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 79 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 78 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 78 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 81 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 81 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 82 @@ -103,8 +112,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3. 
test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 82 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 82 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 82 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 81 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 81 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause @@ -118,6 +127,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linu test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 169 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 169 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 169 +test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 175 +test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 175 @@ -131,11 +142,15 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linu # TEST: test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 303 +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 303 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 303 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 303 # TEST: test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5003 +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5003 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 5403 
test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 5403 @@ -143,76 +158,98 @@ test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_6 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1328 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1328 +test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 929 +test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 929 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 929 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 929 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 49105 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 60305 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 52805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 64905 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 57245 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61445 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 61445 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 59005 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 51505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 63605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45835 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 49435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 60545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 49435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 60545 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 60405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 54805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 64405 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51135 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58845 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53935 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 62545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53935 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 62545 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 59505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 63505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53035 
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61645 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 61645 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45205 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 48905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 47705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 52305 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 43035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 45745 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 45535 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 49145 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 45535 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 49145 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47305 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56005 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 50405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 60005 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45235 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52945 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 48335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56945 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 48335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 56945 # TEST: 
test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 46405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 55105 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 49505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 59105 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 44435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52145 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 47435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 47435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 56045 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 33705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 36605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 31805 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 34405 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 34605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 37505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 34605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 37505 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30005 
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 32605 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 32805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 35705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 31005 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 33705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 36605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 33705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 36605 # TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 3358 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 3358 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 3479 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 3479 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 3479 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 3479 @@ -220,6 +257,8 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linu test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5327 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 5327 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5529 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5529 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 5529 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 5529 @@ -227,34 +266,44 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 68 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 68 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 73 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 73 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 73 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 73 # TEST: test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 387 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 387 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 392 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 392 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 383 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 383 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 388 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15236 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26249 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 15264 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27281 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15246 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26259 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15190 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27207 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 15190 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27207 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21341 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26354 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 21382 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27399 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21291 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26304 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 21344 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27361 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 21344 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27361 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 9853 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 10003 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 9953 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 10153 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 10304 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 10454 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 10304 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 10454 @@ -262,104 +311,134 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_c test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1103 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1103 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1104 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1104 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1104 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1104 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 4053 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 4203 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 4153 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 4353 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 4054 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 4204 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 4054 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 4204 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 95938 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 96088 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103539 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 99338 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 99738 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103839 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103839 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 93988 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94138 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased 
x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 101889 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 97288 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 97688 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 102189 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 102189 # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 496829 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 498671 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 528695 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 530537 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 497722 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 499549 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 527563 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 529405 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 527563 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 529405 # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425805 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 443405 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 431505 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 450605 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425305 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 442905 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 430805 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 449905 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 430205 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 450505 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 21984 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 22984 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 22984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 22984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 22984 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 102029 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 106786 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 106348 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 112356 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 104575 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 109332 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 107759 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 113767 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 107759 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 113767 # TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 19799 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 20301 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 20739 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 21307 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 20043 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 20497 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 20731 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 21299 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 20731 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 21299 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1339 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1373 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1399 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1444 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1414 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1454 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1460 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1511 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1460 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1511 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load -test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 101,17 -test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 101,17 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 107,18 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 107,18 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103,18 
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103,18 # TEST: test.aaa_profiling.test_orm.QueryTest.test_query_cols -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5844 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6604 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 6152 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6942 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5936 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6696 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 6150 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6940 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 6150 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6940 # TEST: test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 251605 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 269605 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 262205 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 281705 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 264805 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 282905 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 263605 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 283105 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 263605 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 283105 # TEST: test.aaa_profiling.test_orm.SessionTest.test_expire_lots -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1149 
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1144 -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1269 -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1258 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1158 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1146 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1252 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1256 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1255 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1264 # TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 90 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 90 +test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 74 +test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 74 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 74 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 74 @@ -367,6 +446,8 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpyth test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 33 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33 +test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 24 +test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 24 @@ -388,6 +469,8 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 53 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 53 
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 55 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 55 @@ -417,6 +500,8 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 92 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 94 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 94 @@ -446,6 +531,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 17 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 17 @@ -473,8 +560,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy 
x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 43564 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 1551 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 13553 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1489 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 13508 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1488 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 13490 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1507 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 13511 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 1584 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 13588 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 88324 @@ -483,8 +572,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 13581 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 1572 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 13576 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1525 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 13529 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1507 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 13511 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings @@ -502,8 +591,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_6 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 45571 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 2554 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 15556 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 2492 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 15511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 2491 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 15493 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 2511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 15515 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 2588 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 15592 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 89328 @@ -512,8 +603,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_6 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 15585 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 2576 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 15580 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 2529 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 15533 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 2511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 15515 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] @@ -533,6 +624,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 14 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] 
x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -562,6 +655,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 25 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -591,6 +686,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 25 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -620,6 +717,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 19 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 19 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 18 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 20 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 28 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 30 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 25 @@ -649,6 +748,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpy test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6283 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 233 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6253 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 227 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6227 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87009 @@ -678,6 +779,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cp test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6283 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 233 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6253 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 227 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6227 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87009 @@ -705,8 +808,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 36570 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 549 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6551 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 487 
-test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 486 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6488 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6510 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 583 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6587 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87323 @@ -715,8 +820,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 6580 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 571 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 6575 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 524 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6528 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6510 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode @@ -734,8 +839,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpytho test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 36570 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 549 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6551 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 487 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 486 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6488 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6510 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 
x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 583 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6587 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87323 @@ -744,5 +851,5 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpytho test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 6580 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 571 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 6575 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 524 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6528 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6510 diff --git a/test/requirements.py b/test/requirements.py index 687dadfd1aa..47f5c49eb34 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -198,9 +198,8 @@ def non_native_boolean_unconstrained(self): @property def standalone_binds(self): - """target database/driver supports bound parameters as column expressions - without being in the context of a typed column. - + """target database/driver supports bound parameters as column + expressions without being in the context of a typed column. """ return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver") @@ -364,6 +363,17 @@ def cursor_works_post_rollback(self): return skip_if(["+pyodbc"], "no driver support") + @property + def select_star_mixed(self): + r"""target supports expressions like "SELECT x, y, \*, z FROM table" + + apparently MySQL / MariaDB, Oracle doesn't handle this. + + We only need a few backends so just cover SQLite / PG + + """ + return only_on(["sqlite", "postgresql"]) + @property def independent_connections(self): """ @@ -399,6 +409,7 @@ def memory_process_intensive(self): [ no_support("oracle", "Oracle XE usually can't handle these"), no_support("mssql+pyodbc", "MS ODBC drivers struggle"), + no_support("+aiosqlite", "very unreliable driver"), self._running_on_windows(), ] ) @@ -558,6 +569,16 @@ def savepoints(self): "savepoints not supported", ) + @property + def compat_savepoints(self): + """Target database must support savepoints, or a compat + recipe e.g. 
for sqlite will be used""" + + return skip_if( + ["sybase", ("mysql", "<", (5, 0, 3))], + "savepoints not supported", + ) + @property def savepoints_w_release(self): return self.savepoints + skip_if( @@ -588,6 +609,18 @@ def implicit_default_schema(self): """ return only_on(["postgresql"]) + @property + def has_temp_table(self): + """target dialect supports checking a single temp table name + + unfortunately this is not the same as temp_table_names + + """ + + return only_on(["sqlite", "oracle", "postgresql", "mssql"]) + skip_if( + self._sqlite_file_db + ) + @property def default_schema_name_switch(self): return only_on(["postgresql", "oracle"]) @@ -711,7 +744,7 @@ def intersect(self): """Target database must support INTERSECT or equivalent.""" return fails_if( - ["firebird", self._mysql_not_mariadb_103, "sybase"], + ["firebird", self._mysql_not_mariadb_103_not_mysql8031, "sybase"], "no support for INTERSECT", ) @@ -719,7 +752,7 @@ def intersect(self): def except_(self): """Target database must support EXCEPT or equivalent (i.e. MINUS).""" return fails_if( - ["firebird", self._mysql_not_mariadb_103, "sybase"], + ["firebird", self._mysql_not_mariadb_103_not_mysql8031, "sybase"], "no support for EXCEPT", ) @@ -1095,6 +1128,30 @@ def _sqlite_json(self, config): def sqlite_memory(self): return only_on(self._sqlite_memory_db) + def _sqlite_partial_idx(self, config): + if not against(config, "sqlite"): + return False + else: + with config.db.connect() as conn: + connection = conn.connection + cursor = connection.cursor() + try: + cursor.execute("SELECT * FROM pragma_index_info('idx52')") + except: + return False + else: + return ( + cursor.description is not None + and len(cursor.description) >= 3 + ) + finally: + cursor.close() + + @property + def sqlite_partial_indexes(self): + + return only_on(self._sqlite_partial_idx) + @property def reflects_json_type(self): return only_on( @@ -1126,6 +1183,36 @@ def datetime(self): return exclusions.open() + @property + def datetime_implicit_bound(self): + """target dialect when given a datetime object will bind it such + that the database server knows the object is a datetime, and not + a plain string. + + """ + # pg8000 works in main / 2.0, support in 1.4 is not fully + # present. + return exclusions.skip_if("postgresql+pg8000") + exclusions.fails_on( + # mariadbconnector works. pyodbc we dont know, not supported in + # testing. + [ + "+mysqldb", + "+pymysql", + "+asyncmy", + "+mysqlconnector", + "+cymysql", + "+aiomysql", + ] + ) + + @property + def datetime_timezone(self): + return exclusions.only_on("postgresql") + + @property + def time_timezone(self): + return exclusions.only_on("postgresql") + exclusions.skip_if("+pg8000") + @property def datetime_microseconds(self): """target dialect supports representation of Python @@ -1143,6 +1230,10 @@ def timestamp_microseconds(self): return only_on(["oracle"]) + @property + def timestamp_microseconds_implicit_bound(self): + return self.timestamp_microseconds + exclusions.fails_on(["oracle"]) + @property def datetime_historic(self): """target dialect supports representation of Python @@ -1269,6 +1360,18 @@ def precision_numerics_retains_significant_digits(self): ] ) + @property + def literal_float_coercion(self): + return skip_if("+asyncmy") + + @property + def infinity_floats(self): + return fails_on_everything_except( + "sqlite", "postgresql+psycopg2", "postgresql+asyncpg" + ) + skip_if( + "postgresql+pg8000", "seems to work on pg14 only, not earlier?" 
+ ) + @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at @@ -1387,7 +1490,7 @@ def check_range_types(config): def async_dialect(self): """dialect makes use of await_() to invoke operations on the DBAPI.""" - return only_on( + return self.asyncio + only_on( LambdaPredicate( lambda config: config.db.dialect.is_async, "Async dialect required", @@ -1670,12 +1773,38 @@ def _mysql_not_mariadb_103(self, config): or config.db.dialect._mariadb_normalized_version_info < (10, 3) ) + def _mysql_not_mariadb_103_not_mysql8031(self, config): + return (against(config, ["mysql", "mariadb"])) and ( + ( + config.db.dialect._is_mariadb + and config.db.dialect._mariadb_normalized_version_info + < (10, 3) + ) + or ( + not config.db.dialect._is_mariadb + and config.db.dialect.server_version_info < (8, 0, 31) + ) + ) + def _mysql_not_mariadb_104(self, config): return (against(config, ["mysql", "mariadb"])) and ( not config.db.dialect._is_mariadb or config.db.dialect._mariadb_normalized_version_info < (10, 4) ) + def _mysql_not_mariadb_104_not_mysql8031(self, config): + return (against(config, ["mysql", "mariadb"])) and ( + ( + config.db.dialect._is_mariadb + and config.db.dialect._mariadb_normalized_version_info + < (10, 4) + ) + or ( + not config.db.dialect._is_mariadb + and config.db.dialect.server_version_info < (8, 0, 31) + ) + ) + def _has_mysql_on_windows(self, config): with config.db.connect() as conn: return ( @@ -1828,3 +1957,31 @@ def autoincrement_without_sequence(self): def reflect_tables_no_columns(self): # so far sqlite, mariadb, mysql don't support this return only_on(["postgresql"]) + + @property + def mssql_filestream(self): + "returns if mssql supports filestream" + + def check(config): + with config.db.connect() as conn: + res = conn.exec_driver_sql( + "SELECT [type] FROM sys.master_files WHERE " + "database_id = DB_ID() AND [type] = 2" + ).scalar() + return res is not None + + return only_on(["mssql"]) + only_if(check) + + @property + def has_json_each(self): + def go(config): + try: + with config.db.connect() as conn: + conn.exec_driver_sql( + """SELECT x.value FROM json_each('["b", "a"]') as x""" + ) + return True + except exc.DBAPIError: + return False + + return only_if(go, "json_each is required") diff --git a/test/sql/test_case_statement.py b/test/sql/test_case_statement.py index c6d5f0185ba..c676315b2c5 100644 --- a/test/sql/test_case_statement.py +++ b/test/sql/test_case_statement.py @@ -2,7 +2,7 @@ from sqlalchemy import case from sqlalchemy import cast from sqlalchemy import Column -from sqlalchemy import exc +from sqlalchemy import func from sqlalchemy import Integer from sqlalchemy import literal_column from sqlalchemy import MetaData @@ -13,7 +13,6 @@ from sqlalchemy import text from sqlalchemy.sql import column from sqlalchemy.sql import table -from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -126,23 +125,62 @@ def test_case(self, connection): ], ) - def test_literal_interpretation_ambiguous(self): - assert_raises_message( - exc.ArgumentError, - r"Column expression expected, got 'x'", - case, - ("x", "y"), + def test_literal_interpretation_one(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. 
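# --- editor's note (illustrative sketch, not part of the patch): per #7287 as
# described in the docstring above, a plain literal in the WHEN position of
# case() is now coerced to a bound value instead of raising ArgumentError.
# The names below are invented for the demo; output comments are approximate.
from sqlalchemy import case, column

print(case(("x", "y")))                  # CASE WHEN :param_1 THEN :param_2 END
print(case((column("flag") == 1, "y")))  # CASE WHEN flag = :flag_1 THEN :param_1 END
# --- end editor's note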
+ + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case(("x", "y")), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": "x", "param_2": "y"}, ) - def test_literal_interpretation_ambiguous_tuple(self): - assert_raises_message( - exc.ArgumentError, - r"Column expression expected, got \('x', 'y'\)", - case, - (("x", "y"), "z"), + def test_literal_interpretation_two(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. + + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case( + (("x", "y"), "z"), + ), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": ("x", "y"), "param_2": "z"}, ) - def test_literal_interpretation(self): + def test_literal_interpretation_two_point_five(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. + + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case( + (12, "z"), + ), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": 12, "param_2": "z"}, + ) + + def test_literal_interpretation_three(self): t = table("test", column("col1")) self.assert_compile( @@ -221,6 +259,16 @@ def test_text_doesnt_explode(self, connection): [("no",), ("no",), ("no",), ("yes",), ("no",), ("no",)], ) + def test_text_doenst_explode_even_in_whenlist(self): + """test #7287""" + self.assert_compile( + case( + (text(":case = 'upper'"), func.upper(literal_column("q"))), + else_=func.lower(literal_column("q")), + ), + "CASE WHEN :case = 'upper' THEN upper(q) ELSE lower(q) END", + ) + def testcase_with_dict(self): query = select( case( diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index 2db7a574464..3e13174f790 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -13,9 +13,11 @@ from sqlalchemy import extract from sqlalchemy import Float from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import or_ +from sqlalchemy import PickleType from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table @@ -67,6 +69,7 @@ from sqlalchemy.sql.selectable import Select from sqlalchemy.sql.selectable import Selectable from sqlalchemy.sql.selectable import SelectStatementGrouping +from sqlalchemy.sql.type_api import UserDefinedType from sqlalchemy.sql.visitors import InternalTraversal from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -202,11 +205,32 @@ class CoreFixtures(object): bindparam("bar", type_=String) ), ), + lambda: ( + # test #11471 + text("select * from table") + .columns(a=Integer()) + .add_cte(table_b.select().cte()), + text("select * from table") + .columns(a=Integer()) + .add_cte(table_b.select().where(table_b.c.a > 5).cte()), + ), + lambda: ( + literal(1).op("+")(literal(1)), + literal(1).op("-")(literal(1)), + column("q").op("-")(literal(1)), + UnaryExpression(table_a.c.b, modifier=operators.neg), + UnaryExpression(table_a.c.b, modifier=operators.desc_op), + UnaryExpression(table_a.c.b, modifier=operators.custom_op("!")), + UnaryExpression(table_a.c.b, modifier=operators.custom_op("~")), + ), lambda: ( column("q") == 
column("x"), column("q") == column("y"), column("z") == column("x"), column("z") + column("x"), + column("z").op("foo")(column("x")), + column("z").op("foo")(literal(1)), + column("z").op("bar")(column("x")), column("z") - column("x"), column("x") - column("z"), column("z") > column("x"), @@ -222,6 +246,14 @@ class CoreFixtures(object): column("q").like("somstr", escape="\\"), column("q").like("somstr", escape="X"), ), + lambda: ( + column("q").regexp_match("y", flags="ig"), + column("q").regexp_match("y", flags="q"), + column("q").regexp_match("y"), + column("q").regexp_replace("y", "z", flags="ig"), + column("q").regexp_replace("y", "z", flags="q"), + column("q").regexp_replace("y", "z"), + ), lambda: ( column("q", ARRAY(Integer))[3] == 5, column("q", ARRAY(Integer))[3:5] == 5, @@ -264,6 +296,8 @@ class CoreFixtures(object): ), lambda: ( table("a", column("x"), column("y")), + table("a", column("x"), column("y"), schema="q"), + table("a", column("x"), column("y"), schema="y"), table("a", column("x"), column("y"))._annotate({"orm": True}), table("b", column("x"), column("y"))._annotate({"orm": True}), ), @@ -283,6 +317,7 @@ class CoreFixtures(object): ), lambda: ( bindparam("x"), + bindparam("x", literal_execute=True), bindparam("y"), bindparam("x", type_=Integer), bindparam("x", type_=String), @@ -437,6 +472,21 @@ class CoreFixtures(object): select(table_a.c.a) .where(table_a.c.b == 5) .with_for_update(nowait=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, skip_locked=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, read=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(of=table_a.c.a), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(of=table_a.c.b), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, key_share=True), select(table_a.c.a).where(table_a.c.b == 5).correlate(table_b), select(table_a.c.a) .where(table_a.c.b == 5) @@ -1048,110 +1098,7 @@ def eight(): ] -class CacheKeyFixture(object): - def _compare_equal(self, a, b, compare_values): - a_key = a._generate_cache_key() - b_key = b._generate_cache_key() - - if a_key is None: - assert a._annotations.get("nocache") - - assert b_key is None - else: - - eq_(a_key.key, b_key.key) - eq_(hash(a_key.key), hash(b_key.key)) - - for a_param, b_param in zip(a_key.bindparams, b_key.bindparams): - assert a_param.compare(b_param, compare_values=compare_values) - return a_key, b_key - - def _run_cache_key_fixture(self, fixture, compare_values): - case_a = fixture() - case_b = fixture() - - for a, b in itertools.combinations_with_replacement( - range(len(case_a)), 2 - ): - if a == b: - a_key, b_key = self._compare_equal( - case_a[a], case_b[b], compare_values - ) - if a_key is None: - continue - else: - a_key = case_a[a]._generate_cache_key() - b_key = case_b[b]._generate_cache_key() - - if a_key is None or b_key is None: - if a_key is None: - assert case_a[a]._annotations.get("nocache") - if b_key is None: - assert case_b[b]._annotations.get("nocache") - continue - - if a_key.key == b_key.key: - for a_param, b_param in zip( - a_key.bindparams, b_key.bindparams - ): - if not a_param.compare( - b_param, compare_values=compare_values - ): - break - else: - # this fails unconditionally since we could not - # find bound parameter values that differed. - # Usually we intended to get two distinct keys here - # so the failure will be more descriptive using the - # ne_() assertion. 
- ne_(a_key.key, b_key.key) - else: - ne_(a_key.key, b_key.key) - - # ClauseElement-specific test to ensure the cache key - # collected all the bound parameters that aren't marked - # as "literal execute" - if isinstance(case_a[a], ClauseElement) and isinstance( - case_b[b], ClauseElement - ): - assert_a_params = [] - assert_b_params = [] - - for elem in visitors.iterate(case_a[a]): - if elem.__visit_name__ == "bindparam": - assert_a_params.append(elem) - - for elem in visitors.iterate(case_b[b]): - if elem.__visit_name__ == "bindparam": - assert_b_params.append(elem) - - # note we're asserting the order of the params as well as - # if there are dupes or not. ordering has to be - # deterministic and matches what a traversal would provide. - eq_( - sorted(a_key.bindparams, key=lambda b: b.key), - sorted( - util.unique_list(assert_a_params), key=lambda b: b.key - ), - ) - eq_( - sorted(b_key.bindparams, key=lambda b: b.key), - sorted( - util.unique_list(assert_b_params), key=lambda b: b.key - ), - ) - - def _run_cache_key_equal_fixture(self, fixture, compare_values): - case_a = fixture() - case_b = fixture() - - for a, b in itertools.combinations_with_replacement( - range(len(case_a)), 2 - ): - self._compare_equal(case_a[a], case_b[b], compare_values) - - -class CacheKeyTest(CacheKeyFixture, CoreFixtures, fixtures.TestBase): +class CacheKeyTest(fixtures.CacheKeyFixture, CoreFixtures, fixtures.TestBase): # we are slightly breaking the policy of not having external dialect # stuff in here, but use pg/mysql as test cases to ensure that these # objects don't report an inaccurate cache key, which is dependent @@ -1193,6 +1140,27 @@ def test_values_doesnt_caches_right_now(self): is_(large_v1._generate_cache_key(), None) + @testing.combinations( + (lambda: column("x"), lambda: column("x"), lambda: column("y")), + ( + lambda: func.foo_bar(1, 2, 3), + lambda: func.foo_bar(4, 5, 6), + lambda: func.foo_bar_bat(1, 2, 3), + ), + ) + def test_cache_key_object_comparators(self, lc1, lc2, lc3): + """test ne issue detected as part of #10042""" + c1 = lc1() + c2 = lc2() + c3 = lc3() + + eq_(c1._generate_cache_key(), c2._generate_cache_key()) + ne_(c1._generate_cache_key(), c3._generate_cache_key()) + is_true(c1._generate_cache_key() == c2._generate_cache_key()) + is_false(c1._generate_cache_key() != c2._generate_cache_key()) + is_true(c1._generate_cache_key() != c3._generate_cache_key()) + is_false(c1._generate_cache_key() == c3._generate_cache_key()) + def test_cache_key(self): for fixtures_, compare_values in [ (self.fixtures, True), @@ -1264,13 +1232,20 @@ class Foobar2(ColumnElement): # the None for cache key will prevent objects # which contain these elements from being cached. f1 = Foobar1() - eq_(f1._generate_cache_key(), None) + with expect_warnings( + "Class Foobar1 will not make use of SQL compilation caching" + ): + eq_(f1._generate_cache_key(), None) f2 = Foobar2() - eq_(f2._generate_cache_key(), None) + with expect_warnings( + "Class Foobar2 will not make use of SQL compilation caching" + ): + eq_(f2._generate_cache_key(), None) s1 = select(column("q"), Foobar2()) + # warning is memoized, won't happen the second time eq_(s1._generate_cache_key(), None) def test_get_children_no_method(self): @@ -1348,6 +1323,10 @@ def setup_test_class(cls): ] def test_all_present(self): + """test for elements that are in SQLAlchemy Core, that they are + also included in the fixtures above. 
+ + """ need = set( cls for cls in class_hierarchy(ClauseElement) @@ -1357,6 +1336,7 @@ def test_all_present(self): or issubclass(cls, AliasedReturnsRows) ) and not issubclass(cls, (Annotated)) + and cls.__module__.startswith("sqlalchemy.") and "orm" not in cls.__module__ and "compiler" not in cls.__module__ and "crud" not in cls.__module__ @@ -1634,6 +1614,7 @@ def test_compare_labels(self): def test_compare_binds(self): b1 = bindparam("foo", type_=Integer()) + b1l = bindparam("foo", type_=Integer(), literal_execute=True) b2 = bindparam("foo", type_=Integer()) b3 = bindparam("foo", type_=String()) @@ -1644,6 +1625,9 @@ def c2(): return 6 b4 = bindparam("foo", type_=Integer(), callable_=c1) + b4l = bindparam( + "foo", type_=Integer(), callable_=c1, literal_execute=True + ) b5 = bindparam("foo", type_=Integer(), callable_=c2) b6 = bindparam("foo", type_=Integer(), callable_=c1) @@ -1664,6 +1648,22 @@ def c2(): is_false(b7.compare(b8)) is_true(b7.compare(b7)) + # cache key + def compare_key(left, right, expected): + lk = left._generate_cache_key().key + rk = right._generate_cache_key().key + is_(lk == rk, expected) + + compare_key(b1, b4, True) + compare_key(b1, b5, True) + compare_key(b8, b5, True) + compare_key(b8, b7, True) + compare_key(b8, b3, False) + compare_key(b1, b1l, False) + compare_key(b1, b4l, False) + compare_key(b4, b4l, False) + compare_key(b7, b4l, False) + def test_compare_tables(self): is_true(table_a.compare(table_a_2)) @@ -1744,19 +1744,21 @@ def test_is_select(self, case): class TypesTest(fixtures.TestBase): - def test_typedec_no_cache(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) + def test_thirdparty_no_cache(self, base): + class MyType(base): impl = String expr = column("q", MyType()) == 1 with expect_warnings( - r"TypeDecorator MyType\(\) will not produce a cache key" + r"%s MyType\(\) will not produce a cache key" % base.__name__ ): is_(expr._generate_cache_key(), None) - def test_typedec_cache_false(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) + def test_thirdparty_cache_false(self, base): + class MyType(base): impl = String cache_ok = False @@ -1765,8 +1767,9 @@ class MyType(TypeDecorator): is_(expr._generate_cache_key(), None) - def test_typedec_cache_ok(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) + def test_thirdparty_cache_ok(self, base): + class MyType(base): impl = String cache_ok = True @@ -1816,3 +1819,69 @@ def go3(): eq_(c1, c2) ne_(c1, c3) eq_(c1, c4) + + def test_thirdparty_sub_subclass_no_cache(self): + class MyType(PickleType): + pass + + expr = column("q", MyType()) == 1 + + with expect_warnings( + r"TypeDecorator MyType\(\) will not produce a cache key" + ): + is_(expr._generate_cache_key(), None) + + def test_userdefined_sub_subclass_no_cache(self): + class MyType(UserDefinedType): + cache_ok = True + + class MySubType(MyType): + pass + + expr = column("q", MySubType()) == 1 + + with expect_warnings( + r"UserDefinedType MySubType\(\) will not produce a cache key" + ): + is_(expr._generate_cache_key(), None) + + def test_userdefined_sub_subclass_cache_ok(self): + class MyType(UserDefinedType): + cache_ok = True + + class MySubType(MyType): + cache_ok = True + + def go1(): + expr = column("q", MySubType()) == 1 + return expr + + def go2(): + expr = column("p", MySubType()) == 1 + return expr + + c1 = go1()._generate_cache_key()[0] + c2 = go1()._generate_cache_key()[0] + c3 = 
go2()._generate_cache_key()[0] + + eq_(c1, c2) + ne_(c1, c3) + + def test_thirdparty_sub_subclass_cache_ok(self): + class MyType(PickleType): + cache_ok = True + + def go1(): + expr = column("q", MyType()) == 1 + return expr + + def go2(): + expr = column("p", MyType()) == 1 + return expr + + c1 = go1()._generate_cache_key()[0] + c2 = go1()._generate_cache_key()[0] + c3 = go2()._generate_cache_key()[0] + + eq_(c1, c2) + ne_(c1, c3) diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 419d14ce7c6..11946513753 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -25,6 +25,7 @@ from sqlalchemy import Date from sqlalchemy import desc from sqlalchemy import distinct +from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import except_ from sqlalchemy import exists @@ -32,6 +33,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Index +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import intersect from sqlalchemy import join @@ -60,6 +62,7 @@ from sqlalchemy import types from sqlalchemy import union from sqlalchemy import union_all +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.dialects import mysql from sqlalchemy.dialects import oracle @@ -96,6 +99,9 @@ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing import ne_ +from sqlalchemy.testing import Variation +from sqlalchemy.testing.schema import pep435_enum +from sqlalchemy.types import UserDefinedType from sqlalchemy.util import u table1 = table( @@ -1222,6 +1228,14 @@ def test_exists(self): "SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1", ) + self.assert_compile( + exists(42) + .select_from(table1) + .where(table1.c.name == "foo", table1.c.description == "bar"), + "EXISTS (SELECT 42 FROM mytable WHERE mytable.name = :name_1 " + "AND mytable.description = :description_1)", + ) + def test_exists_method(self): subq = ( select(func.count(table2.c.otherid)) @@ -3198,7 +3212,7 @@ def test_naming(self): (exprs[1], "hoho", "hoho(mytable.myid)", "hoho_1"), ( exprs[2], - "_no_label", + "name", "CAST(mytable.name AS NUMERIC)", "name", # due to [ticket:4449] ), @@ -3222,6 +3236,7 @@ def test_naming(self): t = table1 s1 = select(col).select_from(t) + eq_(col._proxy_key, key if key != "_no_label" else None) eq_(list(s1.subquery().c.keys()), [key]) if lbl: @@ -3647,6 +3662,97 @@ def test_binds(self): s, ) + def test_bind_param_escaping(self): + """general bind param escape unit tests added as a result of + #8053. + + The final application of an escaped param name + was moved out of compiler and into DefaultExecutionContext in + related issue #8056. + + However in #8113 we made this conditional to suit usage recipes + posted in the FAQ. 
+ + + """ + + SomeEnum = pep435_enum("SomeEnum") + one = SomeEnum("one", 1) + SomeEnum("two", 2) + + t = Table( + "t", + MetaData(), + Column("_id", Integer, primary_key=True), + Column("_data", Enum(SomeEnum)), + ) + + class MyCompiler(compiler.SQLCompiler): + def bindparam_string(self, name, **kw): + kw["escaped_from"] = name + return super(MyCompiler, self).bindparam_string( + '"%s"' % name, **kw + ) + + dialect = default.DefaultDialect() + dialect.statement_compiler = MyCompiler + + self.assert_compile( + t.insert(), + 'INSERT INTO t (_id, _data) VALUES (:"_id", :"_data")', + dialect=dialect, + ) + + compiled = t.insert().compile( + dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) + ) + + # not escaped + params = compiled.construct_params( + {"_id": 1, "_data": one}, escape_names=False + ) + eq_(params, {"_id": 1, "_data": one}) + + # escaped by default + params = compiled.construct_params({"_id": 1, "_data": one}) + eq_(params, {'"_id"': 1, '"_data"': one}) + + # escaped here as well + eq_(compiled.params, {'"_data"': None, '"_id"': None}) + + # bind processors aren't part of this + eq_(compiled._bind_processors, {"_data": mock.ANY}) + + dialect.paramstyle = "pyformat" + compiled = t.insert().compile( + dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) + ) + + # FAQ recipe works + eq_( + compiled.string % compiled.params, + "INSERT INTO t (_id, _data) VALUES (None, None)", + ) + + def test_expanding_non_expanding_conflict(self): + """test #8018""" + + s = select( + literal("x").in_(bindparam("q")), + bindparam("q"), + ) + + with expect_raises_message( + exc.CompileError, + r"Can't reuse bound parameter name 'q' in both 'expanding' " + r"\(e.g. within an IN expression\) and non-expanding contexts. " + "If this parameter is to " + "receive a list/array value, set 'expanding=True' on " + "it for expressions that aren't IN, otherwise use " + "a different parameter name.", + ): + str(s) + def test_unique_binds_no_clone_collision(self): """test #6824""" bp = bindparam("foo", unique=True) @@ -3930,7 +4036,8 @@ def test_construct_params_combine_extracted( extracted_parameters=s1_cache_key[1], ) - def test_construct_params_w_bind_clones_post(self): + @testing.combinations(True, False, argnames="adapt_before_key") + def test_construct_params_w_bind_clones_post(self, adapt_before_key): """test that a BindParameter that has been cloned after the cache key was generated still matches up when construct_params() is called with an extracted parameter collection. @@ -3954,6 +4061,11 @@ def test_construct_params_w_bind_clones_post(self): # it's anonymous so unique=True is_true(original_bind.unique) + # test #7903 - adapt the statement *before* we make the cache + # key also + if adapt_before_key: + stmt = sql_util.ClauseAdapter(table1).traverse(stmt) + # cache key against the original param cache_key = stmt._generate_cache_key() @@ -4006,7 +4118,8 @@ def test_construct_params_w_bind_clones_post(self): {"myid_1": 10}, ) - def test_construct_duped_params_w_bind_clones_post(self): + @testing.combinations(True, False, argnames="adapt_before_key") + def test_construct_duped_params_w_bind_clones_post(self, adapt_before_key): """same as previous test_construct_params_w_bind_clones_post but where the binds have been used repeatedly, and the adaption occurs on a per-subquery basis. 
@@ -4029,6 +4142,10 @@ def test_construct_duped_params_w_bind_clones_post(self): # it's anonymous so unique=True is_true(original_bind.unique) + # variant that exercises #7903 + if adapt_before_key: + stmt = sql_util.ClauseAdapter(table1).traverse(stmt) + # cache key against the original param cache_key = stmt._generate_cache_key() @@ -4110,7 +4227,7 @@ def test_construct_params_w_bind_clones_pre(self): be unique, still matches up when construct_params() is called with an extracted parameter collection. - other ORM feaures like optimized_compare() end up doing something + other ORM features like optimized_compare() end up doing something like this, such as if there are multiple "has()" or "any()" which would have cloned the join condition and changed the values of bound parameters. @@ -4168,13 +4285,28 @@ def test_construct_params_w_bind_clones_pre(self): {"myid_1": 20, "myid_2": 18}, ) + @testing.combinations("default", "default_qmark", argnames="dialect") + def test_literal_execute_combinations(self, dialect): + """test #10142""" + + a = bindparam("a", value="abc", literal_execute=True) + b = bindparam("b", value="def", literal_execute=True) + c = bindparam("c", value="ghi", literal_execute=True) + self.assert_compile( + select(a, b, a, c), + "SELECT 'abc' AS anon_1, 'def' AS anon_2, 'abc' AS anon__1, " + "'ghi' AS anon_3", + render_postcompile=True, + dialect=dialect, + ) + def test_tuple_expanding_in_no_values(self): expr = tuple_(table1.c.myid, table1.c.name).in_( [(1, "foo"), (5, "bar")] ) self.assert_compile( expr, - "(mytable.myid, mytable.name) IN " "([POSTCOMPILE_param_1])", + "(mytable.myid, mytable.name) IN " "(__[POSTCOMPILE_param_1])", checkparams={"param_1": [(1, "foo"), (5, "bar")]}, check_post_param={"param_1": [(1, "foo"), (5, "bar")]}, check_literal_execute={}, @@ -4209,7 +4341,7 @@ def test_tuple_expanding_in_values(self): dialect.tuple_in_values = True self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]), - "(mytable.myid, mytable.name) IN " "([POSTCOMPILE_param_1])", + "(mytable.myid, mytable.name) IN " "(__[POSTCOMPILE_param_1])", dialect=dialect, checkparams={"param_1": [(1, "foo"), (5, "bar")]}, check_post_param={"param_1": [(1, "foo"), (5, "bar")]}, @@ -4345,7 +4477,7 @@ def test_expanding_parameter(self): tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), - "(mytable.myid, mytable.name) IN ([POSTCOMPILE_foo])", + "(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])", ) dialect = default.DefaultDialect() @@ -4354,13 +4486,13 @@ def test_expanding_parameter(self): tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), - "(mytable.myid, mytable.name) IN ([POSTCOMPILE_foo])", + "(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])", dialect=dialect, ) self.assert_compile( table1.c.myid.in_(bindparam("foo", expanding=True)), - "mytable.myid IN ([POSTCOMPILE_foo])", + "mytable.myid IN (__[POSTCOMPILE_foo])", ) def test_limit_offset_select_literal_binds(self): @@ -4406,6 +4538,51 @@ def test_multiple_col_binds(self): "OR mytable.myid = :myid_2 OR mytable.myid = :myid_3", ) + @testing.combinations("plain", "expanding", argnames="exprtype") + def test_literal_bind_typeerror(self, exprtype): + """test #8800""" + + if exprtype == "expanding": + stmt = select(table1).where( + table1.c.myid.in_([("tuple",), ("tuple",)]) + ) + elif exprtype == "plain": + stmt = select(table1).where(table1.c.myid == ("tuple",)) + else: + assert False + + with expect_raises_message( + exc.CompileError, + 
r"Could not render literal value \"\(\'tuple\',\)\" " + r"with datatype INTEGER; see parent " + r"stack trace for more detail.", + ): + stmt.compile(compile_kwargs={"literal_binds": True}) + + @testing.combinations("plain", "expanding", argnames="exprtype") + def test_literal_bind_dont_know_how_to_quote(self, exprtype): + """test #8800""" + + class MyType(UserDefinedType): + def get_col_spec(self, **kw): + return "MYTYPE" + + col = column("x", MyType()) + + if exprtype == "expanding": + stmt = select(table1).where(col.in_([("tuple",), ("tuple",)])) + elif exprtype == "plain": + stmt = select(table1).where(col == ("tuple",)) + else: + assert False + + with expect_raises_message( + exc.CompileError, + r"No literal value renderer is available for literal " + r"value \"\('tuple',\)\" with datatype MYTYPE", + ): + stmt.compile(compile_kwargs={"literal_binds": True}) + @testing.fixture def ansi_compiler_fixture(self): dialect = default.DefaultDialect() @@ -4421,7 +4598,7 @@ class Compiler(compiler.StrSQLCompiler): ( "one", select(literal("someliteral")), - "SELECT [POSTCOMPILE_param_1] AS anon_1", + "SELECT __[POSTCOMPILE_param_1] AS anon_1", dict( check_literal_execute={"param_1": "someliteral"}, check_post_param={}, @@ -4430,14 +4607,14 @@ class Compiler(compiler.StrSQLCompiler): ( "two", select(table1.c.myid + 3), - "SELECT mytable.myid + [POSTCOMPILE_myid_1] " + "SELECT mytable.myid + __[POSTCOMPILE_myid_1] " "AS anon_1 FROM mytable", dict(check_literal_execute={"myid_1": 3}, check_post_param={}), ), ( "three", select(table1.c.myid.in_([4, 5, 6])), - "SELECT mytable.myid IN ([POSTCOMPILE_myid_1]) " + "SELECT mytable.myid IN (__[POSTCOMPILE_myid_1]) " "AS anon_1 FROM mytable", dict( check_literal_execute={"myid_1": [4, 5, 6]}, @@ -4447,14 +4624,14 @@ class Compiler(compiler.StrSQLCompiler): ( "four", select(func.mod(table1.c.myid, 5)), - "SELECT mod(mytable.myid, [POSTCOMPILE_mod_2]) " + "SELECT mod(mytable.myid, __[POSTCOMPILE_mod_2]) " "AS mod_1 FROM mytable", dict(check_literal_execute={"mod_2": 5}, check_post_param={}), ), ( "five", select(literal("foo").in_([])), - "SELECT [POSTCOMPILE_param_1] IN ([POSTCOMPILE_param_2]) " + "SELECT __[POSTCOMPILE_param_1] IN (__[POSTCOMPILE_param_2]) " "AS anon_1", dict( check_literal_execute={"param_1": "foo", "param_2": []}, @@ -4464,7 +4641,7 @@ class Compiler(compiler.StrSQLCompiler): ( "six", select(literal(util.b("foo"))), - "SELECT [POSTCOMPILE_param_1] AS anon_1", + "SELECT __[POSTCOMPILE_param_1] AS anon_1", dict( check_literal_execute={"param_1": util.b("foo")}, check_post_param={}, @@ -4473,7 +4650,7 @@ class Compiler(compiler.StrSQLCompiler): ( "seven", select(table1.c.myid == bindparam("foo", callable_=lambda: 5)), - "SELECT mytable.myid = [POSTCOMPILE_foo] AS anon_1 FROM mytable", + "SELECT mytable.myid = __[POSTCOMPILE_foo] AS anon_1 FROM mytable", dict(check_literal_execute={"foo": 5}, check_post_param={}), ), argnames="stmt, expected, kw", @@ -4495,7 +4672,7 @@ def test_render_literal_execute_parameter(self): table1.c.myid == bindparam("foo", 5, literal_execute=True) ), "SELECT mytable.myid FROM mytable " - "WHERE mytable.myid = [POSTCOMPILE_foo]", + "WHERE mytable.myid = __[POSTCOMPILE_foo]", ) def test_render_literal_execute_parameter_literal_binds(self): @@ -4540,7 +4717,7 @@ def test_render_expanding_parameter(self): table1.c.myid.in_(bindparam("foo", expanding=True)) ), "SELECT mytable.myid FROM mytable " - "WHERE mytable.myid IN ([POSTCOMPILE_foo])", + "WHERE mytable.myid IN (__[POSTCOMPILE_foo])", ) def 
test_render_expanding_parameter_literal_binds(self): @@ -4635,6 +4812,291 @@ def test_render_nulls_literal_binds(self, stmt, expected, warns, params): stmt, expected, literal_binds=True, params=params ) + standalone_escape = testing.combinations( + ("normalname", "normalname"), + ("_name", "_name"), + ("[BracketsAndCase]", "_BracketsAndCase_"), + ("has spaces", "has_spaces"), + argnames="paramname, expected", + ) + + @standalone_escape + @testing.variation("use_positional", [True, False]) + def test_standalone_bindparam_escape( + self, paramname, expected, use_positional + ): + stmt = select(table1.c.myid).where( + table1.c.name == bindparam(paramname, value="x") + ) + if use_positional: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name = ?", + params={paramname: "y"}, + checkpositional=("y",), + dialect="sqlite", + ) + else: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name = :%s" + % (expected,), + params={paramname: "y"}, + checkparams={expected: "y"}, + dialect="default", + ) + + @standalone_escape + @testing.variation("use_assert_compile", [True, False]) + @testing.variation("use_positional", [True, False]) + def test_standalone_bindparam_escape_expanding( + self, paramname, expected, use_assert_compile, use_positional + ): + stmt = select(table1.c.myid).where( + table1.c.name.in_(bindparam(paramname, value=["a", "b"])) + ) + if use_assert_compile: + if use_positional: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable " + "WHERE mytable.name IN (?, ?)", + params={paramname: ["y", "z"]}, + # NOTE: this is what render_postcompile will do right now + # if you run construct_params(). render_postcompile mode + # is not actually used by the execution internals, it's for + # user-facing compilation code. So this is likely a + # current limitation of construct_params() which is not + # doing the full blown postcompile; just assert that's + # what it does for now. it likely should be corrected + # to make more sense. + checkpositional=(["y", "z"], ["y", "z"]), + dialect="sqlite", + render_postcompile=True, + ) + else: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name IN " + "(:%s_1, :%s_2)" % (expected, expected), + params={paramname: ["y", "z"]}, + # NOTE: this is what render_postcompile will do right now + # if you run construct_params(). render_postcompile mode + # is not actually used by the execution internals, it's for + # user-facing compilation code. So this is likely a + # current limitation of construct_params() which is not + # doing the full blown postcompile; just assert that's + # what it does for now. it likely should be corrected + # to make more sense. + checkparams={ + "%s_1" % expected: ["y", "z"], + "%s_2" % expected: ["y", "z"], + }, + dialect="default", + render_postcompile=True, + ) + else: + # this is what DefaultDialect actually does. + # this should be matched to DefaultDialect._init_compiled() + if use_positional: + compiled = stmt.compile( + dialect=default.DefaultDialect(paramstyle="qmark") + ) + else: + compiled = stmt.compile(dialect=default.DefaultDialect()) + checkparams = compiled.construct_params( + {paramname: ["y", "z"]}, escape_names=False + ) + # nothing actually happened. 
if the compiler had + # render_postcompile set, the + # above weird param thing happens + eq_(checkparams, {paramname: ["y", "z"]}) + expanded_state = compiled._process_parameters_for_postcompile( + checkparams + ) + eq_( + expanded_state.additional_parameters, + {"%s_1" % (expected,): "y", "%s_2" % (expected,): "z"}, + ) + if use_positional: + eq_( + expanded_state.positiontup, + ["%s_1" % (expected,), "%s_2" % (expected,)], + ) + + +class CrudParamOverlapTest(AssertsCompiledSQL, fixtures.TestBase): + """tests for #9075. + + we apparently allow same-column-named bindparams in values(), even though + we do *not* allow same-column-named bindparams in other parts of the + statement, but only if the bindparam is associated with that column in the + VALUES / SET clause. If you use a name that matches that of a column in + values() but associate it with a different column, you also get the error. + + This is supported, see + test_insert.py::InsertTest::test_binds_that_match_columns and + test_update.py::UpdateTest::test_binds_that_match_columns. The use + case makes sense because the "overlapping binds" issue is that using + a column name in bindparam() will conflict with the bindparam() + that crud.py is going to make for that column in VALUES / SET; but if we + are replacing the actual expression that would be in VALUES / SET, then + it's fine, there is no conflict. + + The test suite is extended in + test/orm/test_core_compilation.py with ORM mappings that caused + the failure that was fixed by #9075. + + + """ + + __dialect__ = "default" + + @testing.fixture( + params=Variation.generate_cases("type_", ["lowercase", "uppercase"]), + ids=["lowercase", "uppercase"], + ) + def crud_table_fixture(self, request): + type_ = request.param + + if type_.lowercase: + table1 = table( + "mytable", + column("myid", Integer), + column("name", String), + column("description", String), + ) + elif type_.uppercase: + table1 = Table( + "mytable", + MetaData(), + Column("myid", Integer), + Column("name", String), + Column("description", String), + ) + else: + type_.fail() + + yield table1 + + def test_same_named_binds_insert_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = insert(table1).values( + myid=bindparam("myid"), + description=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, description) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + ) + + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, description) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + params={"myid": 5, "description": "foo"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + }, + ) + + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, name, description) VALUES " + "(:myid, :name, coalesce(:description, :coalesce_1))", + params={"myid": 5, "description": "foo", "name": "bar"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + "name": "bar", + }, + ) + + def test_same_named_binds_update_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = update(table1).values( + myid=bindparam("myid"), + description=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "description=coalesce(:description, :coalesce_1)", + ) + + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "description=coalesce(:description, :coalesce_1)", + params={"myid": 5, 
"description": "foo"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + }, + ) + + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, name=:name, " + "description=coalesce(:description, :coalesce_1)", + params={"myid": 5, "description": "foo", "name": "bar"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + "name": "bar", + }, + ) + + def test_different_named_binds_insert_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = insert(table1).values( + myid=bindparam("myid"), + name=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, name) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + ) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description"]) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description", "name"]) + + def test_different_named_binds_update_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = update(table1).values( + myid=bindparam("myid"), + name=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "name=coalesce(:description, :coalesce_1)", + ) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description"]) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description", "name"]) + class UnsupportedTest(fixtures.TestBase): def test_unsupported_element_str_visit_name(self): @@ -5122,7 +5584,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 (q INTEGER)", + "CREATE TABLE __[SCHEMA__none].t1 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5134,7 +5596,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA_foo].t2 (q INTEGER)", + "CREATE TABLE __[SCHEMA_foo].t2 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5146,7 +5608,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA_bar].t3 (q INTEGER)", + "CREATE TABLE __[SCHEMA_bar].t3 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5167,7 +5629,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 (q INTEGER)", + "CREATE TABLE __[SCHEMA__none].t1 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5179,7 +5641,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA_foo % ^ #].t2 (q INTEGER)", + "CREATE TABLE __[SCHEMA_foo % ^ #].t2 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5191,7 +5653,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA_bar {}].t3 (q INTEGER)", + "CREATE TABLE __[SCHEMA_bar {}].t3 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5236,39 +5698,39 @@ def 
test_schema_translate_map_sequence(self): self.assert_compile( schema.CreateSequence(s1), - "CREATE SEQUENCE [SCHEMA__none].s1 START WITH 1", + "CREATE SEQUENCE __[SCHEMA__none].s1 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s1.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateSequence(s2), - "CREATE SEQUENCE [SCHEMA_foo].s2 START WITH 1", + "CREATE SEQUENCE __[SCHEMA_foo].s2 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s2.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateSequence(s3), - "CREATE SEQUENCE [SCHEMA_bar].s3 START WITH 1", + "CREATE SEQUENCE __[SCHEMA_bar].s3 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s3.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) @@ -5306,24 +5768,24 @@ def test_schema_translate_map_sequence_server_default(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t1 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA__none].t2 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t2 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA__none].t3 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t3 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", @@ -5517,12 +5979,12 @@ def test_schema_translate_aliases(self): self.assert_compile( stmt, - "SELECT [SCHEMA__none].myothertable.otherid, " - "[SCHEMA__none].myothertable.othername, " + "SELECT __[SCHEMA__none].myothertable.otherid, " + "__[SCHEMA__none].myothertable.othername, " "mytable_1.myid, mytable_1.name, mytable_1.description " - "FROM [SCHEMA__none].myothertable JOIN " - "[SCHEMA__none].mytable AS mytable_1 " - "ON [SCHEMA__none].myothertable.otherid = mytable_1.myid " + "FROM __[SCHEMA__none].myothertable JOIN " + "__[SCHEMA__none].mytable AS mytable_1 " + "ON __[SCHEMA__none].myothertable.otherid = mytable_1.myid " "WHERE mytable_1.name = :name_1", schema_translate_map=schema_translate_map, ) @@ -5894,6 +6356,14 @@ def test_correlate_except_none(self, value): ) ) + def test_correlate_except_empty(self): + t1, t2, s1 = self._fixture() + self._assert_where_all_correlated( + select(t1, t2).where( + t2.c.a == s1.correlate_except().scalar_subquery() + ) + ) + def test_correlate_except_having(self): t1, t2, s1 = self._fixture() self._assert_having_correlated( diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index 22107eeee51..40f92e41d01 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -486,20 +486,38 @@ def test_recursive_union_alias_four(self): "SELECT cs1.x, cs2.x AS x_1 FROM bar AS cs1, cte AS cs2", ) - def test_conflicting_names(self): + @testing.combinations(True, False, argnames="identical") + @testing.combinations(True, False, argnames="use_clone") + def test_conflicting_names(self, identical, use_clone): """test a flat out name conflict.""" s1 = select(1) c1 = s1.cte(name="cte1", 
recursive=True) - s2 = select(1) - c2 = s2.cte(name="cte1", recursive=True) + if use_clone: + c2 = c1._clone() + if not identical: + c2 = c2.union(select(2)) + else: + if identical: + s2 = select(1) + else: + s2 = select(column("q")) + c2 = s2.cte(name="cte1", recursive=True) s = select(c1, c2) - assert_raises_message( - CompileError, - "Multiple, unrelated CTEs found " "with the same name: 'cte1'", - s.compile, - ) + + if use_clone and identical: + self.assert_compile( + s, + 'WITH RECURSIVE cte1("1") AS (SELECT 1) SELECT cte1.1, ' + 'cte1.1 AS "1_1" FROM cte1', + ) + else: + assert_raises_message( + CompileError, + "Multiple, unrelated CTEs found " "with the same name: 'cte1'", + s.compile, + ) def test_with_recursive_no_name_currently_buggy(self): s1 = select(1) @@ -551,6 +569,54 @@ def test_wrecur_dupe_col_names(self): "SELECT cte.id, cte.manager_id, cte.id_1 FROM cte", ) + @testing.combinations(True, False, argnames="use_object") + @testing.combinations("order_by", "group_by", argnames="order_by") + def test_order_by_group_by_label_w_scalar_subquery( + self, use_object, order_by + ): + """test issue #7269""" + t = table("test", column("a")) + + b = t.c.a.label("b") + + if use_object: + arg = b + else: + arg = "b" + + if order_by == "order_by": + cte = select(b).order_by(arg).cte() + elif order_by == "group_by": + cte = select(b).group_by(arg).cte() + else: + assert False + + stmt = select(select(cte.c.b).label("c")) + + if use_object and order_by == "group_by": + # group_by(b) is de-references the label, due a difference in + # handling between coercions.GroupByImpl and coercions.OrderByImpl. + # "order by" makes use of the ClauseElement._order_by_label_element + # feature but group_by() doesn't. it's not clear if group_by() + # could do the same thing order_by() does. 
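# --- editor's note (illustrative sketch, not part of the patch): the
# order_by()/group_by() difference described in the comment above, shown
# outside the test harness with made-up table/column names; compiled output
# is approximate and assumes the #7269 behavior this test exercises.
from sqlalchemy import column, select, table

t = table("test", column("a"))
b = t.c.a.label("b")

print(select(select(b).order_by(b).cte().c.b))
# roughly: WITH anon_1 AS (SELECT test.a AS b FROM test ORDER BY b)
#          SELECT anon_1.b FROM anon_1
print(select(select(b).group_by(b).cte().c.b))
# roughly: WITH anon_1 AS (SELECT test.a AS b FROM test GROUP BY test.a)
#          SELECT anon_1.b FROM anon_1
# --- end editor's note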
+ self.assert_compile( + stmt, + "WITH anon_1 AS " + "(SELECT test.a AS b FROM test GROUP BY test.a) " + "SELECT (SELECT anon_1.b FROM anon_1) AS c", + ) + else: + self.assert_compile( + stmt, + "WITH anon_1 AS (SELECT test.a AS b FROM test %s b) " + "SELECT (SELECT anon_1.b FROM anon_1) AS c" + % ("ORDER BY" if order_by == "order_by" else "GROUP BY") + # prior to the fix, the use_object version came out as: + # "WITH anon_1 AS (SELECT test.a AS b FROM test " + # "ORDER BY test.a) " + # "SELECT (SELECT anon_1.b FROM anon_1) AS c" + ) + def test_wrecur_dupe_col_names_w_grouping(self): """test #6710 @@ -1565,6 +1631,72 @@ def test_compound_select_uses_independent_cte(self): }, ) + def test_textual_select_uses_independent_cte_one(self): + """test #7760""" + products = table("products", column("id"), column("price")) + + upd_cte = ( + products.update().values(price=10).where(products.c.price > 50) + ).cte() + + stmt = ( + text( + "SELECT products.id, products.price " + "FROM products WHERE products.price < :price_2" + ) + .columns(products.c.id, products.c.price) + .bindparams(price_2=45) + .add_cte(upd_cte) + ) + + self.assert_compile( + stmt, + "WITH anon_1 AS (UPDATE products SET price=:param_1 " + "WHERE products.price > :price_1) " + "SELECT products.id, products.price " + "FROM products WHERE products.price < :price_2", + checkparams={"param_1": 10, "price_1": 50, "price_2": 45}, + ) + + def test_textual_select_uses_independent_cte_two(self): + + foo = table("foo", column("id")) + bar = table("bar", column("id"), column("attr"), column("foo_id")) + s1 = select(foo.c.id) + s2 = text( + "SELECT bar.id, bar.attr FROM bar " + "WHERE bar.foo_id IN (SELECT id FROM baz)" + ).columns(bar.c.id, bar.c.attr) + s3 = s2.add_cte(s1.cte(name="baz")) + + self.assert_compile( + s3, + "WITH baz AS (SELECT foo.id AS id FROM foo) " + "SELECT bar.id, bar.attr FROM bar WHERE bar.foo_id IN " + "(SELECT id FROM baz)", + ) + + def test_textual_select_stack_correction(self): + """test #7798 , regression from #7760""" + + foo = table("foo", column("id")) + bar = table("bar", column("id"), column("attr"), column("foo_id")) + + s1 = text("SELECT id FROM foo").columns(foo.c.id) + s2 = text( + "SELECT bar.id, bar.attr FROM bar WHERE br.id IN " + "(SELECT id FROM baz)" + ).columns(bar.c.id, bar.c.attr) + s3 = bar.insert().from_select(list(s2.selected_columns), s2) + s4 = s3.add_cte(s1.cte(name="baz")) + + self.assert_compile( + s4, + "WITH baz AS (SELECT id FROM foo) INSERT INTO bar (id, attr) " + "SELECT bar.id, bar.attr FROM bar WHERE br.id IN " + "(SELECT id FROM baz)", + ) + def test_insert_uses_independent_cte(self): products = table("products", column("id"), column("price")) @@ -1721,6 +1853,53 @@ def test_no_alias_construct(self): "foo", ) + def test_recursive_cte_with_multiple_union(self): + root_query = select(literal(1).label("val")).cte( + "increasing", recursive=True + ) + rec_part_1 = select((root_query.c.val + 3).label("val")).where( + root_query.c.val < 15 + ) + rec_part_2 = select((root_query.c.val + 5).label("val")).where( + root_query.c.val < 15 + ) + union_rec_query = root_query.union(rec_part_1, rec_part_2) + union_stmt = select(union_rec_query) + self.assert_compile( + union_stmt, + "WITH RECURSIVE increasing(val) AS " + "(SELECT :param_1 AS val " + "UNION SELECT increasing.val + :val_1 AS val FROM increasing " + "WHERE increasing.val < :val_2 " + "UNION SELECT increasing.val + :val_3 AS val FROM increasing " + "WHERE increasing.val < :val_4) " + "SELECT increasing.val FROM increasing", + ) + + def 
test_recursive_cte_with_multiple_union_all(self): + root_query = select(literal(1).label("val")).cte( + "increasing", recursive=True + ) + rec_part_1 = select((root_query.c.val + 3).label("val")).where( + root_query.c.val < 15 + ) + rec_part_2 = select((root_query.c.val + 5).label("val")).where( + root_query.c.val < 15 + ) + + union_all_rec_query = root_query.union_all(rec_part_1, rec_part_2) + union_all_stmt = select(union_all_rec_query) + self.assert_compile( + union_all_stmt, + "WITH RECURSIVE increasing(val) AS " + "(SELECT :param_1 AS val " + "UNION ALL SELECT increasing.val + :val_1 AS val FROM increasing " + "WHERE increasing.val < :val_2 " + "UNION ALL SELECT increasing.val + :val_3 AS val FROM increasing " + "WHERE increasing.val < :val_4) " + "SELECT increasing.val FROM increasing", + ) + class NestingCTETest(fixtures.TestBase, AssertsCompiledSQL): @@ -1916,7 +2095,8 @@ def test_compound_select_with_nesting_cte_in_cte(self): ") SELECT cte.outer_cte FROM cte", ) - def test_nesting_cte_in_recursive_cte(self): + @testing.fixture + def nesting_cte_in_recursive_cte(self): nesting_cte = select(literal(1).label("inner_cte")).cte( "nesting", nesting=True ) @@ -1925,20 +2105,85 @@ def test_nesting_cte_in_recursive_cte(self): "rec_cte", recursive=True ) rec_part = select(rec_cte.c.outer_cte).where( - rec_cte.c.outer_cte == literal(1) + rec_cte.c.outer_cte == literal(42) ) rec_cte = rec_cte.union(rec_part) stmt = select(rec_cte) + return stmt + + def test_nesting_cte_in_recursive_cte_positional( + self, nesting_cte_in_recursive_cte + ): self.assert_compile( - stmt, + nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT ? AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) " + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 42), + dialect="default_qmark", + ) + + def test_nesting_cte_in_recursive_cte(self, nesting_cte_in_recursive_cte): + self.assert_compile( + nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT :param_1 AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = :param_2) " + "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 42}, + ) + + @testing.fixture + def nesting_cte_in_recursive_cte_w_add_cte(self): + nesting_cte = select(literal(1).label("inner_cte")).cte( + "nesting", nesting=True + ) + + rec_cte = select(nesting_cte.c.inner_cte.label("outer_cte")).cte( + "rec_cte", recursive=True + ) + rec_part = select(rec_cte.c.outer_cte).where( + rec_cte.c.outer_cte == literal(42) + ) + rec_cte = rec_cte.union(rec_part) + + stmt = select(rec_cte) + return stmt + + def test_nesting_cte_in_recursive_cte_w_add_cte_positional( + self, nesting_cte_in_recursive_cte_w_add_cte + ): + self.assert_compile( + nesting_cte_in_recursive_cte_w_add_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT ? AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) 
" + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 42), + dialect="default_qmark", + ) + + def test_nesting_cte_in_recursive_cte_w_add_cte( + self, nesting_cte_in_recursive_cte_w_add_cte + ): + self.assert_compile( + nesting_cte_in_recursive_cte_w_add_cte, "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " "(SELECT :param_1 AS inner_cte) " "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " "WHERE rec_cte.outer_cte = :param_2) " "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 42}, ) def test_recursive_nesting_cte_in_cte(self): @@ -2040,18 +2285,19 @@ def test_aliased_recursive_nesting_cte_in_cte(self): "SELECT cte.outer_cte FROM cte", ) - def test_same_nested_cte_is_not_generated_twice(self): + @testing.fixture + def same_nested_cte_is_not_generated_twice(self): # Same = name and query nesting_cte_used_twice = select(literal(1).label("inner_cte_1")).cte( "nesting_cte", nesting=True ) select_add_cte = select( - (nesting_cte_used_twice.c.inner_cte_1 + 1).label("next_value") + (nesting_cte_used_twice.c.inner_cte_1 + 2).label("next_value") ).cte("nesting_2", nesting=True) union_cte = ( select( - (nesting_cte_used_twice.c.inner_cte_1 - 1).label("next_value") + (nesting_cte_used_twice.c.inner_cte_1 - 3).label("next_value") ) .union(select(select_add_cte)) .cte("wrapper", nesting=True) @@ -2062,9 +2308,36 @@ def test_same_nested_cte_is_not_generated_twice(self): .add_cte(nesting_cte_used_twice) .union(select(nesting_cte_used_twice)) ) + return stmt + def test_same_nested_cte_is_not_generated_twice_positional( + self, same_nested_cte_is_not_generated_twice + ): self.assert_compile( - stmt, + same_nested_cte_is_not_generated_twice, + "WITH nesting_cte AS " + "(SELECT ? AS inner_cte_1)" + ", wrapper AS " + "(WITH nesting_2 AS " + "(SELECT nesting_cte.inner_cte_1 + ? " + "AS next_value " + "FROM nesting_cte)" + " SELECT nesting_cte.inner_cte_1 - ? 
" + "AS next_value " + "FROM nesting_cte UNION SELECT nesting_2.next_value " + "AS next_value FROM nesting_2)" + " SELECT wrapper.next_value " + "FROM wrapper UNION SELECT nesting_cte.inner_cte_1 " + "FROM nesting_cte", + checkpositional=(1, 2, 3), + dialect="default_qmark", + ) + + def test_same_nested_cte_is_not_generated_twice( + self, same_nested_cte_is_not_generated_twice + ): + self.assert_compile( + same_nested_cte_is_not_generated_twice, "WITH nesting_cte AS " "(SELECT :param_1 AS inner_cte_1)" ", wrapper AS " @@ -2074,19 +2347,25 @@ def test_same_nested_cte_is_not_generated_twice(self): "FROM nesting_cte)" " SELECT nesting_cte.inner_cte_1 - :inner_cte_1_1 " "AS next_value " - "FROM nesting_cte UNION SELECT nesting_2.next_value AS next_value " - "FROM nesting_2)" + "FROM nesting_cte UNION SELECT nesting_2.next_value " + "AS next_value FROM nesting_2)" " SELECT wrapper.next_value " "FROM wrapper UNION SELECT nesting_cte.inner_cte_1 " "FROM nesting_cte", + checkparams={ + "param_1": 1, + "inner_cte_1_2": 2, + "inner_cte_1_1": 3, + }, ) - def test_recursive_nesting_cte_in_recursive_cte(self): + @testing.fixture + def recursive_nesting_cte_in_recursive_cte(self): nesting_cte = select(literal(1).label("inner_cte")).cte( "nesting", nesting=True, recursive=True ) nesting_rec_part = select(nesting_cte.c.inner_cte).where( - nesting_cte.c.inner_cte == literal(1) + nesting_cte.c.inner_cte == literal(2) ) nesting_cte = nesting_cte.union(nesting_rec_part) @@ -2094,14 +2373,37 @@ def test_recursive_nesting_cte_in_recursive_cte(self): "rec_cte", recursive=True ) rec_part = select(rec_cte.c.outer_cte).where( - rec_cte.c.outer_cte == literal(1) + rec_cte.c.outer_cte == literal(3) ) rec_cte = rec_cte.union(rec_part) stmt = select(rec_cte) + return stmt + + def test_recursive_nesting_cte_in_recursive_cte_positional( + self, recursive_nesting_cte_in_recursive_cte + ): self.assert_compile( - stmt, + recursive_nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (" + "WITH RECURSIVE nesting(inner_cte) AS " + "(SELECT ? AS inner_cte UNION " + "SELECT nesting.inner_cte AS inner_cte FROM nesting " + "WHERE nesting.inner_cte = ?) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) 
" + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 2, 3), + dialect="default_qmark", + ) + + def test_recursive_nesting_cte_in_recursive_cte( + self, recursive_nesting_cte_in_recursive_cte + ): + self.assert_compile( + recursive_nesting_cte_in_recursive_cte, "WITH RECURSIVE rec_cte(outer_cte) AS (" "WITH RECURSIVE nesting(inner_cte) AS " "(SELECT :param_1 AS inner_cte UNION " @@ -2111,6 +2413,7 @@ def test_recursive_nesting_cte_in_recursive_cte(self): "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " "WHERE rec_cte.outer_cte = :param_3) " "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 2, "param_3": 3}, ) def test_select_from_insert_cte_with_nesting(self): @@ -2239,7 +2542,43 @@ def test_compound_select_with_nesting_cte_in_custom_order(self): ") SELECT cte.outer_cte FROM cte", ) - def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): + @testing.fixture + def cte_in_compound_select(self): + upper = select(literal(1).label("z")) + + lower_a_cte = select(literal(2).label("x")).cte("xx", nesting=True) + lower_a = select(literal(3).label("y")).add_cte(lower_a_cte) + lower_b = select(literal(4).label("w")) + + stmt = upper.union_all(lower_a.union_all(lower_b)) + return stmt + + def test_cte_in_compound_select_positional(self, cte_in_compound_select): + self.assert_compile( + cte_in_compound_select, + "SELECT ? AS z UNION ALL (WITH xx AS " + "(SELECT ? AS x) " + "SELECT ? AS y UNION ALL SELECT ? AS w)", + checkpositional=(1, 2, 3, 4), + dialect="default_qmark", + ) + + def test_cte_in_compound_select(self, cte_in_compound_select): + self.assert_compile( + cte_in_compound_select, + "SELECT :param_1 AS z UNION ALL (WITH xx AS " + "(SELECT :param_2 AS x) " + "SELECT :param_3 AS y UNION ALL SELECT :param_4 AS w)", + checkparams={ + "param_1": 1, + "param_2": 2, + "param_3": 3, + "param_4": 4, + }, + ) + + @testing.fixture + def recursive_cte_referenced_multiple_times_with_nesting_cte(self): rec_root = select(literal(1).label("the_value")).cte( "recursive_cte", recursive=True ) @@ -2252,7 +2591,7 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): exists( select(rec_root_ref.c.the_value) .where(rec_root_ref.c.the_value < 10) - .limit(1) + .limit(5) ).label("val") ).cte("should_continue", nesting=True) @@ -2268,13 +2607,43 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): rec_cte = rec_root.union_all(rec_part) stmt = rec_cte.select() + return stmt + def test_recursive_cte_referenced_multiple_times_with_nesting_cte_pos( + self, recursive_cte_referenced_multiple_times_with_nesting_cte + ): self.assert_compile( - stmt, + recursive_cte_referenced_multiple_times_with_nesting_cte, + "WITH RECURSIVE recursive_cte(the_value) AS (" + "SELECT ? AS the_value UNION ALL (" + "WITH allow_multiple_ref AS (" + "SELECT recursive_cte.the_value AS the_value " + "FROM recursive_cte)" + ", should_continue AS (SELECT EXISTS (" + "SELECT allow_multiple_ref.the_value FROM allow_multiple_ref" + " WHERE allow_multiple_ref.the_value < ?" + " LIMIT ?) AS val) " + "SELECT allow_multiple_ref.the_value * ? AS anon_1" + " FROM allow_multiple_ref, should_continue " + "WHERE should_continue.val != 1" + " UNION ALL SELECT allow_multiple_ref.the_value * ?" 
+ " AS anon_2 FROM allow_multiple_ref, should_continue" + " WHERE should_continue.val != 1))" + " SELECT recursive_cte.the_value FROM recursive_cte", + checkpositional=(1, 10, 5, 2, 3), + dialect="default_qmark", + ) + + def test_recursive_cte_referenced_multiple_times_with_nesting_cte( + self, recursive_cte_referenced_multiple_times_with_nesting_cte + ): + self.assert_compile( + recursive_cte_referenced_multiple_times_with_nesting_cte, "WITH RECURSIVE recursive_cte(the_value) AS (" "SELECT :param_1 AS the_value UNION ALL (" "WITH allow_multiple_ref AS (" - "SELECT recursive_cte.the_value AS the_value FROM recursive_cte)" + "SELECT recursive_cte.the_value AS the_value " + "FROM recursive_cte)" ", should_continue AS (SELECT EXISTS (" "SELECT allow_multiple_ref.the_value FROM allow_multiple_ref" " WHERE allow_multiple_ref.the_value < :the_value_2" @@ -2286,4 +2655,11 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): " AS anon_2 FROM allow_multiple_ref, should_continue" " WHERE should_continue.val != true))" " SELECT recursive_cte.the_value FROM recursive_cte", + checkparams={ + "param_1": 1, + "param_2": 5, + "the_value_2": 10, + "the_value_1": 2, + "the_value_3": 3, + }, ) diff --git a/test/sql/test_defaults.py b/test/sql/test_defaults.py index ef924e06819..d967db6aaf9 100644 --- a/test/sql/test_defaults.py +++ b/test/sql/test_defaults.py @@ -19,6 +19,7 @@ from sqlalchemy.sql import select from sqlalchemy.sql import text from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -1447,7 +1448,7 @@ def test_unicode_default(self): def test_nonunicode_default(self): default = b("foo") - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Unicode column 'foobar' has non-unicode " "default value b?'foo' specified.", diff --git a/test/sql/test_deprecations.py b/test/sql/test_deprecations.py index 9b74ab1fa65..426eb16aed2 100644 --- a/test/sql/test_deprecations.py +++ b/test/sql/test_deprecations.py @@ -2465,7 +2465,7 @@ def test_issue_5429_compile(self): self.assert_compile( column("x").notin_(["foo", "bar"]), - "(x NOT IN ([POSTCOMPILE_x_1]))", + "(x NOT IN (__[POSTCOMPILE_x_1]))", ) def test_issue_5429_operators(self): diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py index 0d43448d5ed..7a058bfcdae 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -193,9 +193,12 @@ def visit_grouping(self, elem): ("name with~~tildes~~",), argnames="name", ) - def test_bindparam_key_proc_for_copies(self, meth, name): + @testing.combinations(True, False, argnames="positional") + def test_bindparam_key_proc_for_copies(self, meth, name, positional): r"""test :ticket:`6249`. + Revised for :ticket:`8056`. + The key of the bindparam needs spaces and other characters escaped out for the POSTCOMPILE regex to work correctly. @@ -206,7 +209,7 @@ def test_bindparam_key_proc_for_copies(self, meth, name): and the compiler postcompile reg is:: - re.sub(r"\[POSTCOMPILE_(\S+)\]", process_expanding, self.string) + re.sub(r"\__[POSTCOMPILE_(\S+)\]", process_expanding, self.string) Interestingly, brackets in the name seems to work out. 
@@ -223,13 +226,25 @@ def test_bindparam_key_proc_for_copies(self, meth, name): token = re.sub(r"[%\(\) \$\[\]]", "_", name) - self.assert_compile( - expr, - '"%(name)s" IN (:%(token)s_1_1, ' - ":%(token)s_1_2, :%(token)s_1_3)" % {"name": name, "token": token}, - render_postcompile=True, - dialect="default", - ) + if positional: + self.assert_compile( + expr, + '"%(name)s" IN (?, ?, ?)' % {"name": name}, + checkpositional=(1, 2, 3), + render_postcompile=True, + dialect="default_qmark", + ) + else: + tokens = ["%s_1_%s" % (token, i) for i in range(1, 4)] + self.assert_compile( + expr, + '"%(name)s" IN (:%(token)s_1_1, ' + ":%(token)s_1_2, :%(token)s_1_3)" + % {"name": name, "token": token}, + checkparams=dict(zip(tokens, [1, 2, 3])), + render_postcompile=True, + dialect="default", + ) def test_expanding_in_bindparam_safe_to_clone(self): expr = column("x").in_([1, 2, 3]) @@ -241,7 +256,7 @@ def test_expanding_in_bindparam_safe_to_clone(self): stmt = and_(expr, expr2) self.assert_compile( - stmt, "x IN ([POSTCOMPILE_x_1]) AND x IN ([POSTCOMPILE_x_1])" + stmt, "x IN (__[POSTCOMPILE_x_1]) AND x IN (__[POSTCOMPILE_x_1])" ) self.assert_compile( stmt, "x IN (1, 2, 3) AND x IN (1, 2, 3)", literal_binds=True @@ -827,6 +842,48 @@ def test_params_elements_in_setup_joins(self): sel._generate_cache_key()[1], ) + def test_params_on_expr_against_subquery(self): + """test #7489""" + + meta = MetaData() + + b = Table("b", meta, Column("id", Integer), Column("data", String)) + + subq = select(b.c.id).where(b.c.data == "some data").subquery() + criteria = b.c.id == subq.c.id + + stmt = select(b).where(criteria) + param_key = stmt._generate_cache_key()[1][0].key + + self.assert_compile( + stmt, + "SELECT b.id, b.data FROM b, (SELECT b.id AS id " + "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id", + checkparams={"data_1": "some data"}, + ) + eq_( + [ + eq_clause_element(bindparam(param_key, value="some data")), + ], + stmt._generate_cache_key()[1], + ) + + stmt = select(b).where(criteria.params({param_key: "some other data"})) + self.assert_compile( + stmt, + "SELECT b.id, b.data FROM b, (SELECT b.id AS id " + "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id", + checkparams={"data_1": "some other data"}, + ) + eq_( + [ + eq_clause_element( + bindparam(param_key, value="some other data") + ), + ], + stmt._generate_cache_key()[1], + ) + def test_params_subqueries_in_joins_one(self): """test #7055""" @@ -2636,7 +2693,7 @@ class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL): """Tests the generative capability of Insert, Update""" - __dialect__ = "default" + __dialect__ = "default_enhanced" # fixme: consolidate converage from elsewhere here and expand @@ -2878,3 +2935,41 @@ def test_update_no_support_multi_constructor(self): "UPDATE construct does not support multiple parameter sets.", stmt.compile, ) + + @testing.variation("stmt_type", ["update", "delete"]) + def test_whereclause_returning_adapted(self, stmt_type): + """test #9033""" + + if stmt_type.update: + stmt = ( + t1.update() + .where(t1.c.col1 == 10) + .values(col1=15) + .returning(t1.c.col1) + ) + elif stmt_type.delete: + stmt = t1.delete().where(t1.c.col1 == 10).returning(t1.c.col1) + else: + stmt_type.fail() + + stmt = visitors.replacement_traverse(stmt, {}, lambda elem: None) + + assert isinstance(stmt._where_criteria, tuple) + assert isinstance(stmt._returning, tuple) + + stmt = stmt.where(t1.c.col2 == 5).returning(t1.c.col2) + + if stmt_type.update: + self.assert_compile( + stmt, + "UPDATE table1 SET col1=:col1 
WHERE table1.col1 = :col1_1 " + "AND table1.col2 = :col2_1 RETURNING table1.col1, table1.col2", + ) + elif stmt_type.delete: + self.assert_compile( + stmt, + "DELETE FROM table1 WHERE table1.col1 = :col1_1 " + "AND table1.col2 = :col2_1 RETURNING table1.col1, table1.col2", + ) + else: + stmt_type.fail() diff --git a/test/sql/test_from_linter.py b/test/sql/test_from_linter.py index a2291386852..49370b1e67e 100644 --- a/test/sql/test_from_linter.py +++ b/test/sql/test_from_linter.py @@ -1,6 +1,10 @@ +from sqlalchemy import column +from sqlalchemy import func from sqlalchemy import Integer +from sqlalchemy import JSON from sqlalchemy import select from sqlalchemy import sql +from sqlalchemy import table from sqlalchemy import testing from sqlalchemy import true from sqlalchemy.testing import config @@ -161,6 +165,64 @@ def test_lateral_subqueries_ok_do_we_still_find_cartesians(self): assert start is p3 assert froms == {p1} + @testing.variation("additional_transformation", ["alias", "none"]) + @testing.variation("joins_implicitly", [True, False]) + @testing.variation( + "type_", ["table_valued", "table_valued_derived", "column_valued"] + ) + def test_fn_valued( + self, joins_implicitly, additional_transformation, type_ + ): + """test #7845, #9009""" + + my_table = table( + "tbl", + column("id", Integer), + column("data", JSON()), + ) + + sub_dict = my_table.c.data["d"] + + if type_.table_valued or type_.table_valued_derived: + tv = func.json_each(sub_dict) + + tv = tv.table_valued("key", joins_implicitly=joins_implicitly) + + if type_.table_valued_derived: + tv = tv.render_derived(name="tv", with_types=True) + + if additional_transformation.alias: + tv = tv.alias() + + has_key = tv.c.key == "f" + stmt = select(my_table.c.id).where(has_key) + elif type_.column_valued: + tv = func.json_array_elements(sub_dict) + + if additional_transformation.alias: + tv = tv.alias(joins_implicitly=joins_implicitly).column + else: + tv = tv.column_valued("key", joins_implicitly=joins_implicitly) + + stmt = select(my_table.c.id, tv) + else: + type_.fail() + + froms, start = find_unmatching_froms(stmt, my_table) + + if joins_implicitly: + is_(start, None) + is_(froms, None) + elif type_.column_valued: + assert start == my_table + assert froms == {tv.scalar_alias} + + elif type_.table_valued or type_.table_valued_derived: + assert start == my_table + assert froms == {tv} + else: + type_.fail() + def test_count_non_eq_comparison_operators(self): query = select(self.a).where(self.a.c.col_a > self.b.c.col_b) froms, start = find_unmatching_froms(query, self.a) diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py index f3fb724c073..908fd9faaf0 100644 --- a/test/sql/test_functions.py +++ b/test/sql/test_functions.py @@ -25,7 +25,6 @@ from sqlalchemy import testing from sqlalchemy import Text from sqlalchemy import true -from sqlalchemy import types as sqltypes from sqlalchemy import util from sqlalchemy.dialects import mysql from sqlalchemy.dialects import oracle @@ -37,6 +36,7 @@ from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.sql import operators from sqlalchemy.sql import quoted_name +from sqlalchemy.sql import sqltypes from sqlalchemy.sql import table from sqlalchemy.sql.compiler import BIND_TEMPLATES from sqlalchemy.sql.functions import FunctionElement @@ -86,6 +86,7 @@ def test_compile(self): # test generic function compile class fake_func(GenericFunction): + inherit_cache = True __return_type__ = sqltypes.Integer def __init__(self, arg, **kwargs): @@ -112,6 +113,7 @@ def 
test_operators_custom(self, op, other, expected, use_custom): if use_custom: class MyFunc(FunctionElement): + inherit_cache = True name = "myfunc" type = Integer() @@ -140,6 +142,7 @@ def test_use_labels(self): def test_use_labels_function_element(self): class max_(FunctionElement): name = "max" + inherit_cache = True @compiles(max_) def visit_max(element, compiler, **kw): @@ -265,7 +268,7 @@ def test_annotation_dialect_specific(self): def test_custom_default_namespace(self): class myfunc(GenericFunction): - pass + inherit_cache = True assert isinstance(func.myfunc(), myfunc) self.assert_compile(func.myfunc(), "myfunc()") @@ -273,6 +276,7 @@ class myfunc(GenericFunction): def test_custom_type(self): class myfunc(GenericFunction): type = DateTime + inherit_cache = True assert isinstance(func.myfunc().type, DateTime) self.assert_compile(func.myfunc(), "myfunc()") @@ -280,12 +284,14 @@ class myfunc(GenericFunction): def test_custom_legacy_type(self): # in case someone was using this system class myfunc(GenericFunction): + inherit_cache = True __return_type__ = DateTime assert isinstance(func.myfunc().type, DateTime) def test_case_sensitive(self): class MYFUNC(GenericFunction): + inherit_cache = True type = DateTime assert isinstance(func.MYFUNC().type, DateTime) @@ -341,6 +347,7 @@ class replaceable_func_override(GenericFunction): def test_custom_w_custom_name(self): class myfunc(GenericFunction): + inherit_cache = True name = "notmyfunc" assert isinstance(func.notmyfunc(), myfunc) @@ -348,6 +355,7 @@ class myfunc(GenericFunction): def test_custom_w_quoted_name(self): class myfunc(GenericFunction): + inherit_cache = True name = quoted_name("NotMyFunc", quote=True) identifier = "myfunc" @@ -355,6 +363,7 @@ class myfunc(GenericFunction): def test_custom_w_quoted_name_no_identifier(self): class myfunc(GenericFunction): + inherit_cache = True name = quoted_name("NotMyFunc", quote=True) # note this requires that the quoted name be lower cased for @@ -364,6 +373,7 @@ class myfunc(GenericFunction): def test_custom_package_namespace(self): def cls1(pk_name): class myfunc(GenericFunction): + inherit_cache = True package = pk_name return myfunc @@ -377,6 +387,7 @@ class myfunc(GenericFunction): def test_custom_name(self): class MyFunction(GenericFunction): name = "my_func" + inherit_cache = True def __init__(self, *args): args = args + (3,) @@ -392,20 +403,24 @@ class GeoBuffer(GenericFunction): package = "geo" name = "BufferOne" identifier = "buf1" + inherit_cache = True class GeoBuffer2(GenericFunction): type = Integer name = "BufferTwo" identifier = "buf2" + inherit_cache = True class BufferThree(GenericFunction): type = Integer identifier = "buf3" + inherit_cache = True class GeoBufferFour(GenericFunction): type = Integer name = "BufferFour" identifier = "Buf4" + inherit_cache = True self.assert_compile(func.geo.buf1(), "BufferOne()") self.assert_compile(func.buf2(), "BufferTwo()") @@ -418,7 +433,7 @@ class GeoBufferFour(GenericFunction): def test_custom_args(self): class myfunc(GenericFunction): - pass + inherit_cache = True self.assert_compile( myfunc(1, 2, 3), "myfunc(:myfunc_1, :myfunc_2, :myfunc_3)" @@ -762,6 +777,22 @@ def test_funcfilter_windowing_range(self): "OVER (PARTITION BY mytable.description RANGE BETWEEN :param_1 " "FOLLOWING AND :param_2 FOLLOWING) " "AS anon_1 FROM mytable", + checkparams={"name_1": "foo", "param_1": 1, "param_2": 5}, + ) + + def test_funcfilter_windowing_range_positional(self): + self.assert_compile( + select( + func.rank() + .filter(table1.c.name > "foo") + 
.over(range_=(1, 5), partition_by=["description"]) + ), + "SELECT rank() FILTER (WHERE mytable.name > ?) " + "OVER (PARTITION BY mytable.description RANGE BETWEEN ? " + "FOLLOWING AND ? FOLLOWING) " + "AS anon_1 FROM mytable", + checkpositional=("foo", 1, 5), + dialect="default_qmark", ) def test_funcfilter_windowing_rows(self): @@ -1015,6 +1046,7 @@ def test_conn_execute(self, connection): from sqlalchemy.ext.compiler import compiles class myfunc(FunctionElement): + inherit_cache = True type = Date() @compiles(myfunc) @@ -1383,7 +1415,7 @@ def test_named_with_ordinality(self): `WITH ORDINALITY AS unnested(unnested, ordinality) ON true LEFT OUTER JOIN b ON unnested.unnested = b.ref - """ # noqa 501 + """ # noqa: 501 a = table("a", column("id"), column("refs")) b = table("b", column("id"), column("ref")) @@ -1414,6 +1446,30 @@ def test_named_with_ordinality(self): "LEFT OUTER JOIN b ON unnested.unnested = b.ref", ) + def test_render_derived_maintains_tableval_type(self): + fn = func.json_something() + + tv = fn.table_valued(column("x", String)) + + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + tv = tv.render_derived() + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + def test_alias_maintains_tableval_type(self): + fn = func.json_something() + + tv = fn.table_valued(column("x", String)) + + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + tv = tv.alias() + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + def test_star_with_ordinality(self): """ SELECT * FROM generate_series(4,1,-1) WITH ORDINALITY; diff --git a/test/sql/test_identity_column.py b/test/sql/test_identity_column.py index 00404dae791..a93c5e6c507 100644 --- a/test/sql/test_identity_column.py +++ b/test/sql/test_identity_column.py @@ -55,14 +55,10 @@ class _IdentityDDLFixture(testing.AssertsCompiledSQL): "ALWAYS AS IDENTITY (START WITH 1 MAXVALUE 10 CYCLE)", ), ( - dict(always=False, cache=1000, order=True), - "BY DEFAULT AS IDENTITY (CACHE 1000 ORDER)", - ), - (dict(order=True, cycle=True), "BY DEFAULT AS IDENTITY (ORDER CYCLE)"), - ( - dict(order=False, cycle=False), - "BY DEFAULT AS IDENTITY (NO ORDER NO CYCLE)", + dict(always=False, cache=1000, cycle=False), + "BY DEFAULT AS IDENTITY (CACHE 1000 NO CYCLE)", ), + (dict(cycle=True), "BY DEFAULT AS IDENTITY (CYCLE)"), ) def test_create_ddl(self, identity_args, text): @@ -72,7 +68,6 @@ def test_create_ddl(self, identity_args, text): text = text.replace("NO MINVALUE", "NOMINVALUE") text = text.replace("NO MAXVALUE", "NOMAXVALUE") text = text.replace("NO CYCLE", "NOCYCLE") - text = text.replace("NO ORDER", "NOORDER") t = Table( "foo_table", @@ -170,7 +165,7 @@ def test_on_null(self): Column( "foo", Integer(), - Identity(always=False, on_null=True, start=42, order=True), + Identity(always=False, on_null=True, start=42, cycle=True), ), ) text = " ON NULL" if testing.against("oracle") else "" @@ -179,7 +174,7 @@ def test_on_null(self): ( "CREATE TABLE foo_table (foo INTEGER GENERATED BY DEFAULT" + text - + " AS IDENTITY (START WITH 42 ORDER))" + + " AS IDENTITY (START WITH 42 CYCLE))" ), ) @@ -268,7 +263,7 @@ def fn(**kwargs): assert_raises_message(ArgumentError, text, fn, 
server_onupdate="42") def test_to_metadata(self): - identity1 = Identity("by default", on_null=True, start=123) + identity1 = Identity("by default", cycle=True, start=123) m = MetaData() t = Table( "t", m, Column("x", Integer), Column("y", Integer, identity1) diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py index 51045daac22..c052ac5da43 100644 --- a/test/sql/test_insert.py +++ b/test/sql/test_insert.py @@ -24,6 +24,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures @@ -67,7 +68,11 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): def test_binds_that_match_columns(self): """test bind params named after column names - replace the normal SET/VALUES generation.""" + replace the normal SET/VALUES generation. + + See also test_compiler.py::CrudParamOverlapTest + + """ t = table("foo", column("x"), column("y")) @@ -662,6 +667,75 @@ def foo(ctx): checkparams={"name_1": "foo", "foo": None}, ) + def test_insert_from_select_fn_defaults_compound(self): + """test #8073""" + + metadata = MetaData() + + table = Table( + "sometable", + metadata, + Column("id", Integer, primary_key=True), + Column("foo", Integer, default="foo"), + Column("bar", Integer, default="bar"), + ) + table1 = self.tables.mytable + sel = ( + select(table1.c.myid) + .where(table1.c.name == "foo") + .union(select(table1.c.myid).where(table1.c.name == "foo")) + ) + ins = table.insert().from_select(["id"], sel) + with expect_raises_message( + exc.CompileError, + r"Can't extend statement for INSERT..FROM SELECT to include " + r"additional default-holding column\(s\) 'foo', 'bar'. 
" + r"Convert the selectable to a subquery\(\) first, or pass " + r"include_defaults=False to Insert.from_select\(\) to skip these " + r"columns.", + ): + ins.compile() + + def test_insert_from_select_fn_defaults_compound_subquery(self): + """test #8073""" + + metadata = MetaData() + + def foo(ctx): + return 12 + + table = Table( + "sometable", + metadata, + Column("id", Integer, primary_key=True), + Column("foo", Integer, default="foo"), + Column("bar", Integer, default="bar"), + ) + table1 = self.tables.mytable + sel = ( + select(table1.c.myid) + .where(table1.c.name == "foo") + .union(select(table1.c.myid).where(table1.c.name == "foo")) + .subquery() + ) + + ins = table.insert().from_select(["id"], sel) + self.assert_compile( + ins, + "INSERT INTO sometable (id, foo, bar) SELECT anon_1.myid, " + ":foo AS anon_2, :bar AS anon_3 FROM " + "(SELECT mytable.myid AS myid FROM mytable " + "WHERE mytable.name = :name_1 UNION " + "SELECT mytable.myid AS myid FROM mytable " + "WHERE mytable.name = :name_2) AS anon_1", + checkparams={ + "foo": None, + "bar": None, + "name_1": "foo", + "name_2": "foo", + }, + ) + def test_insert_from_select_dont_mutate_raw_columns(self): # test [ticket:3603] from sqlalchemy import table diff --git a/test/sql/test_insert_exec.py b/test/sql/test_insert_exec.py index 76b4ba01ea8..334df9575e9 100644 --- a/test/sql/test_insert_exec.py +++ b/test/sql/test_insert_exec.py @@ -20,6 +20,14 @@ from sqlalchemy.testing.schema import Table +class ExpectExpr: + def __init__(self, element): + self.element = element + + def __clause_element__(self): + return self.element + + class InsertExecTest(fixtures.TablesTest): __backend__ = True @@ -36,13 +44,27 @@ def define_tables(cls, metadata): ) @testing.requires.multivalues_inserts - def test_multivalues_insert(self, connection): + @testing.combinations("string", "column", "expect", argnames="keytype") + def test_multivalues_insert(self, connection, keytype): + users = self.tables.users + + if keytype == "string": + user_id, user_name = "user_id", "user_name" + elif keytype == "column": + user_id, user_name = users.c.user_id, users.c.user_name + elif keytype == "expect": + user_id, user_name = ExpectExpr(users.c.user_id), ExpectExpr( + users.c.user_name + ) + else: + assert False + connection.execute( users.insert().values( [ - {"user_id": 7, "user_name": "jack"}, - {"user_id": 8, "user_name": "ed"}, + {user_id: 7, user_name: "jack"}, + {user_id: 8, user_name: "ed"}, ] ) ) diff --git a/test/sql/test_labels.py b/test/sql/test_labels.py index 535d4dd0be8..869134f9c0c 100644 --- a/test/sql/test_labels.py +++ b/test/sql/test_labels.py @@ -2,7 +2,10 @@ from sqlalchemy import Boolean from sqlalchemy import cast from sqlalchemy import exc as exceptions +from sqlalchemy import func +from sqlalchemy import insert from sqlalchemy import Integer +from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import or_ from sqlalchemy import select @@ -16,6 +19,7 @@ from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.sql import roles from sqlalchemy.sql import table +from sqlalchemy.sql.base import prefix_anon_map from sqlalchemy.sql.elements import _truncated_label from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import WrapsColumnExpression @@ -26,9 +30,11 @@ from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_ from sqlalchemy.testing import mock from 
sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.types import TypeEngine IDENT_LENGTH = 29 @@ -799,12 +805,14 @@ class ColExprLabelTest(fixtures.TestBase, AssertsCompiledSQL): """ - __dialect__ = "default" + __dialect__ = "default_enhanced" table1 = table("some_table", column("name"), column("value")) def _fixture(self): class SomeColThing(WrapsColumnExpression, ColumnElement): + inherit_cache = False + def __init__(self, expression): self.clause = coercions.expect( roles.ExpressionElementRole, expression @@ -822,6 +830,101 @@ def process(element, compiler, **kw): return SomeColThing + @testing.fixture + def compiler_column_fixture(self): + return self._fixture() + + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + def test_plain_select_compiler_expression(self, compiler_column_fixture): + expr = compiler_column_fixture + table1 = self.table1 + + self.assert_compile( + select( + table1.c.name, + expr(table1.c.value), + ), + "SELECT some_table.name, SOME_COL_THING(some_table.value) " + "AS value FROM some_table", + ) + + def test_plain_select_column_expression(self, column_expression_fixture): + table1 = column_expression_fixture + + self.assert_compile( + select(table1), + "SELECT some_table.name, lower(some_table.value) AS value " + "FROM some_table", + ) + + def test_plain_returning_compiler_expression( + self, compiler_column_fixture + ): + expr = compiler_column_fixture + table1 = self.table1 + + self.assert_compile( + insert(table1).returning( + table1.c.name, + expr(table1.c.value), + ), + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, " + "SOME_COL_THING(some_table.value) AS value", + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) AS value", + ) + + def test_select_dupes_column_expression(self, column_expression_fixture): + table1 = column_expression_fixture + + self.assert_compile( + select(table1.c.name, table1.c.value, table1.c.value), + "SELECT some_table.name, lower(some_table.value) AS value, " + "lower(some_table.value) AS value__1 FROM some_table", + ) + + def test_returning_dupes_column_expression( + self, column_expression_fixture + ): + table1 = column_expression_fixture + + stmt = insert(table1).returning( + table1.c.name, table1.c.value, table1.c.value + ) + + # 1.4 behavior only; limited support for labels in RETURNING + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) AS value, " + "lower(some_table.value) AS value", + ) + def test_column_auto_label_dupes_label_style_none(self): expr = self._fixture() table1 = self.table1 @@ -936,6 +1039,33 @@ def test_type_coerce_auto_label_label_style_none(self): "some_table.name FROM some_table", ) + @testing.combinations("inside", "outside") + def test_wraps_col_expr_label_propagate(self, 
cast_location): + """test #8084""" + + table1 = self.table1 + + if cast_location == "inside": + expr = cast(table1.c.name, Integer).label("foo") + elif cast_location == "outside": + expr = cast(table1.c.name.label("foo"), Integer) + else: + assert False + + self.assert_compile( + select(expr), + "SELECT CAST(some_table.name AS INTEGER) AS foo FROM some_table", + ) + is_(select(expr).selected_columns.foo, expr) + + subq = select(expr).subquery() + self.assert_compile( + select(subq).where(subq.c.foo == 10), + "SELECT anon_1.foo FROM (SELECT CAST(some_table.name AS INTEGER) " + "AS foo FROM some_table) AS anon_1 WHERE anon_1.foo = :foo_1", + checkparams={"foo_1": 10}, + ) + def test_type_coerce_auto_label_label_style_disambiguate(self): table1 = self.table1 @@ -959,6 +1089,7 @@ def test_boolean_auto_label(self): # not sure if this SQL is right but this is what it was # before the new labeling, just different label name "SELECT value = 0 AS value, value", + dialect="default", ) def test_label_auto_label_use_labels(self): @@ -1008,3 +1139,35 @@ def test_column_auto_label_use_labels(self): "SOME_COL_THING(some_table.value) " "AS some_table_value FROM some_table", ) + + @testing.combinations( + # the resulting strings are completely arbitrary and are not + # exposed in SQL with current implementations. we want to + # only assert that the operation doesn't fail. It's safe to + # change the assertion cases for this test if the label escaping + # format changes + (literal_column("'(1,2]'"), "'_1,2]'_1"), + (literal_column("))"), "__1"), + (literal_column("'%('"), "'_'_1"), + ) + def test_labels_w_strformat_chars_in_isolation(self, test_case, expected): + """test #8724""" + + pa = prefix_anon_map() + eq_(test_case._anon_key_label % pa, expected) + + @testing.combinations( + ( + select(literal_column("'(1,2]'"), literal_column("'(1,2]'")), + "SELECT '(1,2]', '(1,2]'", + ), + (select(literal_column("))"), literal_column("))")), "SELECT )), ))"), + ( + select(literal_column("'%('"), literal_column("'%('")), + "SELECT '%(', '%('", + ), + ) + def test_labels_w_strformat_chars_in_statements(self, test_case, expected): + """test #8724""" + + self.assert_compile(test_case, expected) diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index 2e794d7bcf9..ede9702010b 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -8,6 +8,7 @@ from sqlalchemy.sql import bindparam from sqlalchemy.sql import coercions from sqlalchemy.sql import column +from sqlalchemy.sql import func from sqlalchemy.sql import join from sqlalchemy.sql import lambda_stmt from sqlalchemy.sql import lambdas @@ -17,6 +18,7 @@ from sqlalchemy.sql import select from sqlalchemy.sql import table from sqlalchemy.sql import util as sql_util +from sqlalchemy.sql.base import ExecutableOption from sqlalchemy.sql.traversals import HasCacheKey from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL @@ -26,8 +28,10 @@ from sqlalchemy.testing import ne_ from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.types import ARRAY from sqlalchemy.types import Boolean from sqlalchemy.types import Integer +from sqlalchemy.types import JSON from sqlalchemy.types import String @@ -36,6 +40,26 @@ class LambdaElementTest( ): __dialect__ = "default" + def test_reject_methods(self): + """test #7032""" + + t1 = table("t1", column("q"), column("p")) + + subq = select(t1).subquery + + with expect_raises_message( + 
exc.ArgumentError, + "Method 0) else False + _kwargs = { + "extend_existing": _extend_existing, + attrib: _attrib_value, + } + table_a = Table( + "a", + metadata, + Column("foo", String, primary_key=True), + **_kwargs + ) + eq_(getattr(table_a, attrib), _attrib_value) + eq_(getattr(metadata.tables["a"], attrib), _attrib_value) + class PKAutoIncrementTest(fixtures.TestBase): def test_multi_integer_no_autoinc(self): @@ -2500,6 +2595,52 @@ def test_default_schema_metadata_fk_alt_local(self): t2 = Table("t2", m, Column("x", Integer, ForeignKey("bar.t1.x"))) assert t2.c.x.references(t1.c.x) + @testing.combinations( + (schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"), + (schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema"), + # note we don't yet support lower-case table() or + # lower-case column() for this + # ( + # schema.CreateTable(table("t", column("q", Integer))), + # "CREATE TABLE t (q INTEGER)", + # ), + ( + schema.CreateTable(Table("t", MetaData(), Column("q", Integer))), + "CREATE TABLE t (q INTEGER)", + ), + ( + schema.DropTable(Table("t", MetaData(), Column("q", Integer))), + "DROP TABLE t", + ), + ( + schema.CreateIndex( + Index( + "foo", + "x", + _table=Table("t", MetaData(), Column("x", Integer)), + ) + ), + "CREATE INDEX foo ON t (x)", + ), + ( + schema.DropIndex( + Index( + "foo", + "x", + _table=Table("t", MetaData(), Column("x", Integer)), + ) + ), + "DROP INDEX foo", + ), + ( + schema.CreateSequence(Sequence("my_seq")), + "CREATE SEQUENCE my_seq START WITH 1", + ), + (schema.DropSequence(Sequence("my_seq")), "DROP SEQUENCE my_seq"), + ) + def test_stringify_schema_elements(self, element, expected): + eq_ignore_whitespace(str(element), expected) + def test_create_drop_schema(self): self.assert_compile( @@ -5252,6 +5393,29 @@ def test_fk_attrs(self): a1.append_constraint(fk) eq_(fk.name, "fk_address_user_id_user_id") + @testing.combinations(True, False, argnames="col_has_type") + def test_fk_ref_local_referent_has_no_type(self, col_has_type): + """test #7958""" + + metadata = MetaData( + naming_convention={ + "fk": "fk_%(referred_column_0_name)s", + } + ) + Table("a", metadata, Column("id", Integer, primary_key=True)) + b = Table( + "b", + metadata, + Column("id", Integer, primary_key=True), + Column("aid", ForeignKey("a.id")) + if not col_has_type + else Column("aid", Integer, ForeignKey("a.id")), + ) + fks = list( + c for c in b.constraints if isinstance(c, ForeignKeyConstraint) + ) + eq_(fks[0].name, "fk_id") + def test_custom(self): def key_hash(const, table): return "HASH_%s" % table.name diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index 79aa4d79452..fb0ecddb382 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -1,8 +1,10 @@ import datetime import operator +import pickle from sqlalchemy import and_ from sqlalchemy import between +from sqlalchemy import bindparam from sqlalchemy import exc from sqlalchemy import Integer from sqlalchemy import join @@ -59,6 +61,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not +from sqlalchemy.testing.assertions import expect_deprecated from sqlalchemy.types import ARRAY from sqlalchemy.types import Boolean from sqlalchemy.types import Concatenable @@ -66,6 +69,7 @@ from sqlalchemy.types import Indexable from sqlalchemy.types import JSON from sqlalchemy.types import MatchType +from sqlalchemy.types import NullType from sqlalchemy.types import TypeDecorator from sqlalchemy.types import TypeEngine from 
sqlalchemy.types import UserDefinedType @@ -656,6 +660,8 @@ class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): def test_contains(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def contains(self, other, **kw): return self.op("->")(other) @@ -664,6 +670,8 @@ def contains(self, other, **kw): def test_getitem(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __getitem__(self, index): return self.op("->")(index) @@ -682,6 +690,8 @@ def __getitem__(self, index): def test_lshift(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __lshift__(self, other): return self.op("->")(other) @@ -690,6 +700,8 @@ def __lshift__(self, other): def test_rshift(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __rshift__(self, other): return self.op("->")(other) @@ -1212,15 +1224,14 @@ def test_empty_clauses(self, op, str_op, str_continue): # these warning classes will change to ArgumentError when the # deprecated behavior is disabled - assert_raises_message( - exc.SADeprecationWarning, + with expect_deprecated( r"Invoking %(str_op)s\(\) without arguments is deprecated, and " r"will be disallowed in a future release. For an empty " r"%(str_op)s\(\) construct, use " r"%(str_op)s\(%(str_continue)s, \*args\)\." - % {"str_op": str_op, "str_continue": str_continue}, - op, - ) + % {"str_op": str_op, "str_continue": str_continue} + ): + op() def test_empty_and_raw(self): self.assert_compile( @@ -1415,14 +1426,14 @@ def test_operator_precedence_5(self): self.assert_compile( self.table2.select().where(5 + self.table2.c.field.in_([5, 6])), "SELECT op.field FROM op WHERE :param_1 + " - "(op.field IN ([POSTCOMPILE_field_1]))", + "(op.field IN (__[POSTCOMPILE_field_1]))", ) def test_operator_precedence_6(self): self.assert_compile( self.table2.select().where((5 + self.table2.c.field).in_([5, 6])), "SELECT op.field FROM op WHERE :field_1 + op.field " - "IN ([POSTCOMPILE_param_1])", + "IN (__[POSTCOMPILE_param_1])", ) def test_operator_precedence_7(self): @@ -1766,28 +1777,28 @@ class InTest(fixtures.TestBase, testing.AssertsCompiledSQL): def test_in_1(self): self.assert_compile( self.table1.c.myid.in_(["a"]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a"]}, ) def test_in_2(self): self.assert_compile( ~self.table1.c.myid.in_(["a"]), - "(mytable.myid NOT IN ([POSTCOMPILE_myid_1]))", + "(mytable.myid NOT IN (__[POSTCOMPILE_myid_1]))", checkparams={"myid_1": ["a"]}, ) def test_in_3(self): self.assert_compile( self.table1.c.myid.in_(["a", "b"]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a", "b"]}, ) def test_in_4(self): self.assert_compile( self.table1.c.myid.in_(iter(["a", "b"])), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a", "b"]}, ) @@ -1882,7 +1893,7 @@ def test_in_18(self): def test_in_19(self): self.assert_compile( self.table1.c.myid.in_([1, 2, 3]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": [1, 2, 3]}, ) @@ -1989,7 +2000,7 @@ def test_in_29(self, is_in): if is_in: self.assert_compile( expr, - "(a, b, c) %s ([POSTCOMPILE_param_1])" + "(a, b, c) %s 
(__[POSTCOMPILE_param_1])" % ("IN" if is_in else "NOT IN"), checkparams={"param_1": [(3, "hi", b"there"), (4, "Q", b"P")]}, ) @@ -2002,7 +2013,7 @@ def test_in_29(self, is_in): else: self.assert_compile( expr, - "((a, b, c) NOT IN ([POSTCOMPILE_param_1]))", + "((a, b, c) NOT IN (__[POSTCOMPILE_param_1]))", checkparams={"param_1": [(3, "hi", b"there"), (4, "Q", b"P")]}, ) self.assert_compile( @@ -2029,7 +2040,7 @@ def test_in_empty_tuple(self, is_in, negate): if is_in: self.assert_compile( expr, - "(a, b, c) IN ([POSTCOMPILE_param_1])", + "(a, b, c) IN (__[POSTCOMPILE_param_1])", checkparams={"param_1": []}, ) self.assert_compile( @@ -2041,7 +2052,7 @@ def test_in_empty_tuple(self, is_in, negate): else: self.assert_compile( expr, - "((a, b, c) NOT IN ([POSTCOMPILE_param_1]))", + "((a, b, c) NOT IN (__[POSTCOMPILE_param_1]))", checkparams={"param_1": []}, ) self.assert_compile( @@ -2064,7 +2075,7 @@ def test_in_empty_single(self, is_in, negate): if is_in: self.assert_compile( expr, - "a IN ([POSTCOMPILE_a_1])", + "a IN (__[POSTCOMPILE_a_1])", checkparams={"a_1": []}, ) self.assert_compile( @@ -2076,7 +2087,7 @@ def test_in_empty_single(self, is_in, negate): else: self.assert_compile( expr, - "(a NOT IN ([POSTCOMPILE_a_1]))", + "(a NOT IN (__[POSTCOMPILE_a_1]))", checkparams={"a_1": []}, ) self.assert_compile( @@ -2094,7 +2105,8 @@ def test_in_self_plus_negated(self): stmt = and_(expr1, expr2) self.assert_compile( - stmt, "a IN ([POSTCOMPILE_a_1]) AND (a NOT IN ([POSTCOMPILE_a_2]))" + stmt, + "a IN (__[POSTCOMPILE_a_1]) AND (a NOT IN (__[POSTCOMPILE_a_2]))", ) self.assert_compile( stmt, "a IN (5) AND (a NOT IN (5))", literal_binds=True @@ -2108,7 +2120,8 @@ def test_in_self_plus_negated_empty(self): stmt = and_(expr1, expr2) self.assert_compile( - stmt, "a IN ([POSTCOMPILE_a_1]) AND (a NOT IN ([POSTCOMPILE_a_2]))" + stmt, + "a IN (__[POSTCOMPILE_a_1]) AND (a NOT IN (__[POSTCOMPILE_a_2]))", ) self.assert_compile( stmt, @@ -2120,7 +2133,7 @@ def test_in_set(self): s = {1, 2, 3} self.assert_compile( self.table1.c.myid.in_(s), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": list(s)}, ) @@ -2138,7 +2151,7 @@ def __iter__(self): seq = MySeq([1, 2, 3]) self.assert_compile( self.table1.c.myid.in_(seq), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": [1, 2, 3]}, ) @@ -2239,6 +2252,22 @@ def test_pickle_operators_two(self): clause = tuple_(1, 2, 3) eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause)))) + @testing.combinations(Integer(), String(), JSON(), argnames="typ") + @testing.variation("eval_first", [True, False]) + def test_pickle_comparator(self, typ, eval_first): + """test #10213""" + + table1 = Table("t", MetaData(), Column("x", typ)) + t1 = table1.c.x + + if eval_first: + t1.comparator + + t1p = pickle.loads(pickle.dumps(table1.c.x)) + + is_not(t1p.comparator.__class__, NullType.Comparator) + is_(t1.comparator.__class__, t1p.comparator.__class__) + @testing.combinations( (operator.lt, "<", ">"), (operator.gt, ">", "<"), @@ -2475,6 +2504,12 @@ def test_like_3(self): "mytable.myid LIKE :myid_1 ESCAPE '\\'", ) + def test_like_quote_escape(self): + self.assert_compile( + self.table1.c.myid.like("somstr", escape="'"), + "mytable.myid LIKE :myid_1 ESCAPE ''''", + ) + def test_like_4(self): self.assert_compile( ~self.table1.c.myid.like("somstr", escape="\\"), @@ -2830,6 +2865,36 @@ def test_contains(self): checkparams={"x_1": "y"}, ) + def 
test_contains_encoded(self): + self.assert_compile( + column("x").contains(b"y"), + "x LIKE '%' || :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_not_contains_encoded(self): + self.assert_compile( + ~column("x").contains(b"y"), + "x NOT LIKE '%' || :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_contains_encoded_mysql(self): + self.assert_compile( + column("x").contains(b"y"), + "x LIKE concat(concat('%%', %s), '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_contains_encoded_mysql(self): + self.assert_compile( + ~column("x").contains(b"y"), + "x NOT LIKE concat(concat('%%', %s), '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + def test_contains_escape(self): self.assert_compile( column("x").contains("a%b_c", escape="\\"), @@ -2993,6 +3058,36 @@ def test_startswith_autoescape_custom_escape(self): checkparams={"x_1": "a^%b^_c/d^^e"}, ) + def test_startswith_encoded(self): + self.assert_compile( + column("x").startswith(b"y"), + "x LIKE :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_startswith_encoded_mysql(self): + self.assert_compile( + column("x").startswith(b"y"), + "x LIKE concat(%s, '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_startswith_encoded(self): + self.assert_compile( + ~column("x").startswith(b"y"), + "x NOT LIKE :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_not_startswith_encoded_mysql(self): + self.assert_compile( + ~column("x").startswith(b"y"), + "x NOT LIKE concat(%s, '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + def test_not_startswith(self): self.assert_compile( ~column("x").startswith("y"), @@ -3083,6 +3178,28 @@ def test_endswith(self): checkparams={"x_1": "y"}, ) + def test_endswith_encoded(self): + self.assert_compile( + column("x").endswith(b"y"), + "x LIKE '%' || :x_1", + checkparams={"x_1": b"y"}, + ) + + def test_endswith_encoded_mysql(self): + self.assert_compile( + column("x").endswith(b"y"), + "x LIKE concat('%%', %s)", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_endswith_encoded(self): + self.assert_compile( + ~column("x").endswith(b"y"), + "x NOT LIKE '%' || :x_1", + checkparams={"x_1": b"y"}, + ) + def test_endswith_escape(self): self.assert_compile( column("x").endswith("a%b_c", escape="\\"), @@ -3529,13 +3646,31 @@ def test_illegal_ops(self, t_fixture): t.c.data + all_(t.c.arrval), "tab1.data + ALL (tab1.arrval)" ) + @testing.combinations("all", "any", argnames="op") + def test_any_all_bindparam_coercion(self, t_fixture, op): + """test #7979""" + t = t_fixture + + if op == "all": + expr = t.c.arrval.all(bindparam("param")) + expected = "%(param)s = ALL (tab1.arrval)" + elif op == "any": + expr = t.c.arrval.any(bindparam("param")) + expected = "%(param)s = ANY (tab1.arrval)" + else: + assert False + + is_(expr.left.type._type_affinity, Integer) + + self.assert_compile(expr, expected, dialect="postgresql") + def test_any_array_comparator_accessor(self, t_fixture): t = t_fixture self.assert_compile( t.c.arrval.any(5, operator.gt), - ":param_1 > ANY (tab1.arrval)", - checkparams={"param_1": 5}, + ":arrval_1 > ANY (tab1.arrval)", + checkparams={"arrval_1": 5}, ) def test_any_array_comparator_negate_accessor(self, t_fixture): @@ -3543,8 +3678,8 @@ def test_any_array_comparator_negate_accessor(self, t_fixture): self.assert_compile( ~t.c.arrval.any(5, operator.gt), - "NOT (:param_1 > ANY (tab1.arrval))", - checkparams={"param_1": 5}, + "NOT (:arrval_1 > ANY (tab1.arrval))", + checkparams={"arrval_1": 
5}, ) def test_all_array_comparator_accessor(self, t_fixture): @@ -3552,8 +3687,8 @@ def test_all_array_comparator_accessor(self, t_fixture): self.assert_compile( t.c.arrval.all(5, operator.gt), - ":param_1 > ALL (tab1.arrval)", - checkparams={"param_1": 5}, + ":arrval_1 > ALL (tab1.arrval)", + checkparams={"arrval_1": 5}, ) def test_all_array_comparator_negate_accessor(self, t_fixture): @@ -3561,8 +3696,8 @@ def test_all_array_comparator_negate_accessor(self, t_fixture): self.assert_compile( ~t.c.arrval.all(5, operator.gt), - "NOT (:param_1 > ALL (tab1.arrval))", - checkparams={"param_1": 5}, + "NOT (:arrval_1 > ALL (tab1.arrval))", + checkparams={"arrval_1": 5}, ) def test_any_array_expression(self, t_fixture): diff --git a/test/sql/test_query.py b/test/sql/test_query.py index 0d817011329..2c0cf152762 100644 --- a/test/sql/test_query.py +++ b/test/sql/test_query.py @@ -1241,7 +1241,7 @@ def test_union_ordered_alias(self, connection): "has trouble extracting anonymous column from union subquery", ) @testing.fails_on( - testing.requires._mysql_not_mariadb_104, "FIXME: unknown" + testing.requires._mysql_not_mariadb_104_not_mysql8031, "FIXME: unknown" ) @testing.fails_on("sqlite", "FIXME: unknown") def test_union_all(self, connection): @@ -1362,7 +1362,7 @@ def test_except_style2(self, connection): eq_(found2, wanted) @testing.fails_on( - ["sqlite", testing.requires._mysql_not_mariadb_104], + ["sqlite", testing.requires._mysql_not_mariadb_104_not_mysql8031], "Can't handle this style of nesting", ) @testing.requires.except_ @@ -1400,7 +1400,7 @@ def test_except_style4(self, connection): @testing.requires.intersect @testing.fails_on( - ["sqlite", testing.requires._mysql_not_mariadb_104], + ["sqlite", testing.requires._mysql_not_mariadb_104_not_mysql8031], "sqlite can't handle leading parenthesis", ) def test_intersect_unions(self, connection): diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index bf912bd2553..e1a414bd1b0 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -21,6 +21,7 @@ from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import true +from sqlalchemy import tuple_ from sqlalchemy import type_coerce from sqlalchemy import TypeDecorator from sqlalchemy import util @@ -49,6 +50,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import le_ from sqlalchemy.testing import mock @@ -96,6 +98,58 @@ def define_tables(cls, metadata): Column("user_name", VARCHAR(20)), test_needs_acid=True, ) + Table( + "test", + metadata, + Column("x", Integer, primary_key=True), + Column("y", String(50)), + ) + + @testing.variation( + "type_", ["text", "driversql", "core", "textstar", "driverstar"] + ) + def test_freeze(self, type_, connection): + """test #8963""" + + users = self.tables.users + connection.execute( + users.insert(), + [ + dict(user_id=1, user_name="john"), + dict(user_id=2, user_name="jack"), + ], + ) + + if type_.core: + stmt = select(users).order_by(users.c.user_id) + else: + if "star" in type_.name: + stmt = "select * from users order by user_id" + else: + stmt = "select user_id, user_name from users order by user_id" + + if "text" in type_.name: + stmt = text(stmt) + + if "driver" in type_.name: + result = connection.exec_driver_sql(stmt) + else: + result = connection.execute(stmt) + + frozen = result.freeze() + + unfrozen = frozen() + 
eq_(unfrozen.keys(), ["user_id", "user_name"]) + eq_(unfrozen.all(), [(1, "john"), (2, "jack")]) + + unfrozen = frozen() + eq_( + unfrozen.mappings().all(), + [ + {"user_id": 1, "user_name": "john"}, + {"user_id": 2, "user_name": "jack"}, + ], + ) def test_row_iteration(self, connection): users = self.tables.users @@ -636,6 +690,80 @@ def test_column_accessor_unary(self, connection): eq_(r._mapping[users.c.user_name], "john") eq_(r.user_name, "john") + @testing.fixture + def _ab_row_fixture(self, connection): + r = connection.execute( + select(literal(1).label("a"), literal(2).label("b")) + ).first() + return r + + def test_named_tuple_access(self, _ab_row_fixture): + r = _ab_row_fixture + eq_(r.a, 1) + eq_(r.b, 2) + + def test_named_tuple_missing_attr(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message( + AttributeError, "Could not locate column in row for column 'c'" + ): + r.c + + def test_named_tuple_no_delete_present(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message(AttributeError, "can't delete attribute"): + del r.a + + def test_named_tuple_no_delete_missing(self, _ab_row_fixture): + r = _ab_row_fixture + # including for non-existent attributes + with expect_raises_message(AttributeError, "can't delete attribute"): + del r.c + + def test_named_tuple_no_assign_present(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message(AttributeError, "can't set attribute"): + r.a = 5 + + with expect_raises_message(AttributeError, "can't set attribute"): + r.a += 5 + + def test_named_tuple_no_assign_missing(self, _ab_row_fixture): + r = _ab_row_fixture + # including for non-existent attributes + with expect_raises_message(AttributeError, "can't set attribute"): + r.c = 5 + + def test_named_tuple_no_self_assign_missing(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message( + AttributeError, "Could not locate column in row for column 'c'" + ): + r.c += 5 + + def test_mapping_tuple_readonly_errors(self, connection): + r = connection.execute( + select(literal(1).label("a"), literal(2).label("b")) + ).first() + r = r._mapping + eq_(r["a"], 1) + eq_(r["b"], 2) + + with expect_raises_message( + KeyError, "Could not locate column in row for column 'c'" + ): + r["c"] + + with expect_raises_message( + TypeError, "'RowMapping' object does not support item assignment" + ): + r["a"] = 5 + + with expect_raises_message( + TypeError, "'RowMapping' object does not support item assignment" + ): + r["a"] += 5 + def test_column_accessor_err(self, connection): r = connection.execute(select(1)).first() assert_raises_message( @@ -651,6 +779,7 @@ def test_column_accessor_err(self, connection): lambda: r._mapping["foo"], ) + @testing.skip_if("+aiosqlite", "unknown issue") @testing.combinations( (True,), (False,), @@ -775,6 +904,37 @@ def test_row_as_args(self, connection): connection.execute(users.insert(), r._mapping) eq_(connection.execute(users.select()).fetchall(), [(1, "john")]) + @testing.requires.tuple_in + def test_row_tuple_interpretation(self, connection): + """test #7292""" + users = self.tables.users + + connection.execute( + users.insert(), + [ + dict(user_id=1, user_name="u1"), + dict(user_id=2, user_name="u2"), + dict(user_id=3, user_name="u3"), + ], + ) + rows = connection.execute( + select(users.c.user_id, users.c.user_name) + ).all() + + # was previously needed + # rows = [(x, y) for x, y in rows] + + new_stmt = ( + select(users) + .where(tuple_(users.c.user_id, users.c.user_name).in_(rows)) + 
.order_by(users.c.user_id) + ) + + eq_( + connection.execute(new_stmt).all(), + [(1, "u1"), (2, "u2"), (3, "u3")], + ) + def test_result_as_args(self, connection): users = self.tables.users users2 = self.tables.users2 @@ -907,6 +1067,50 @@ def test_ambiguous_column_contains(self, connection): set([True]), ) + @testing.combinations( + (("name_label", "*"), False), + (("*", "name_label"), False), + (("user_id", "name_label", "user_name"), False), + (("user_id", "name_label", "*", "user_name"), True), + argnames="cols,other_cols_are_ambiguous", + ) + @testing.requires.select_star_mixed + def test_label_against_star( + self, connection, cols, other_cols_are_ambiguous + ): + """test #8536""" + users = self.tables.users + + connection.execute(users.insert(), dict(user_id=1, user_name="john")) + + stmt = select( + *[ + text("*") + if colname == "*" + else users.c.user_name.label("name_label") + if colname == "name_label" + else users.c[colname] + for colname in cols + ] + ) + + row = connection.execute(stmt).first() + + eq_(row._mapping["name_label"], "john") + + if other_cols_are_ambiguous: + with expect_raises_message( + exc.InvalidRequestError, "Ambiguous column name" + ): + row._mapping["user_id"] + with expect_raises_message( + exc.InvalidRequestError, "Ambiguous column name" + ): + row._mapping["user_name"] + else: + eq_(row._mapping["user_id"], 1) + eq_(row._mapping["user_name"], "john") + def test_loose_matching_one(self, connection): users = self.tables.users addresses = self.tables.addresses @@ -1660,6 +1864,195 @@ def __getattr__(self, name): with expect_raises_message(Exception, "canary"): r.lastrowid + @testing.combinations("plain", "mapping", "scalar", argnames="result_type") + @testing.combinations( + "stream_results", "yield_per", "yield_per_meth", argnames="optname" + ) + @testing.combinations(10, 50, argnames="value") + @testing.combinations("meth", "stmt", argnames="send_opts_how") + def test_stream_options( + self, + connection, + optname, + value, + send_opts_how, + result_type, + close_result_when_finished, + ): + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 3000)], + ) + + if optname == "stream_results": + opts = {"stream_results": True, "max_row_buffer": value} + elif optname == "yield_per": + opts = {"yield_per": value} + elif optname == "yield_per_meth": + opts = {"stream_results": True} + else: + assert False + + if send_opts_how == "meth": + result = connection.execution_options(**opts).execute( + table.select() + ) + elif send_opts_how == "stmt": + result = connection.execute( + table.select().execution_options(**opts) + ) + else: + assert False + + if result_type == "mapping": + result = result.mappings() + real_result = result._real_result + elif result_type == "scalar": + result = result.scalars() + real_result = result._real_result + else: + real_result = result + + if optname == "yield_per_meth": + result = result.yield_per(value) + + if result_type == "mapping" or result_type == "scalar": + real_result = result._real_result + else: + real_result = result + + close_result_when_finished(result, consume=True) + + if optname == "yield_per" and value is not None: + expected_opt = { + "stream_results": True, + "max_row_buffer": value, + "yield_per": value, + } + elif optname == "stream_results" and value is not None: + expected_opt = { + "stream_results": True, + "max_row_buffer": value, + } + else: + expected_opt = None + + if expected_opt is not None: + eq_(real_result.context.execution_options, 
expected_opt) + + if value is None: + assert isinstance( + real_result.cursor_strategy, _cursor.CursorFetchStrategy + ) + return + + assert isinstance( + real_result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + eq_(real_result.cursor_strategy._max_row_buffer, value) + + if optname == "yield_per" or optname == "yield_per_meth": + eq_(real_result.cursor_strategy._bufsize, value) + else: + eq_(real_result.cursor_strategy._bufsize, min(value, 5)) + eq_(len(real_result.cursor_strategy._rowbuffer), 1) + + next(result) + next(result) + + if optname == "yield_per" or optname == "yield_per_meth": + eq_(len(real_result.cursor_strategy._rowbuffer), value - 1) + else: + # based on default growth of 5 + eq_(len(real_result.cursor_strategy._rowbuffer), 4) + + for i, row in enumerate(result): + if i == 186: + break + + if optname == "yield_per" or optname == "yield_per_meth": + eq_( + len(real_result.cursor_strategy._rowbuffer), + value - (188 % value), + ) + else: + # based on default growth of 5 + eq_( + len(real_result.cursor_strategy._rowbuffer), + 7 if value == 10 else 42, + ) + + if optname == "yield_per" or optname == "yield_per_meth": + # ensure partition is set up to same size + partition = next(result.partitions()) + eq_(len(partition), value) + + @testing.fixture + def autoclose_row_fixture(self, connection): + users = self.tables.users + connection.execute( + users.insert(), + [ + {"user_id": 1, "name": "u1"}, + {"user_id": 2, "name": "u2"}, + {"user_id": 3, "name": "u3"}, + {"user_id": 4, "name": "u4"}, + {"user_id": 5, "name": "u5"}, + ], + ) + + @testing.fixture(params=["plain", "scalars", "mapping"]) + def result_fixture(self, request, connection): + users = self.tables.users + + result_type = request.param + + if result_type == "plain": + result = connection.execute(select(users)) + elif result_type == "scalars": + result = connection.scalars(select(users)) + elif result_type == "mapping": + result = connection.execute(select(users)).mappings() + else: + assert False + + return result + + def test_results_can_close(self, autoclose_row_fixture, result_fixture): + """test #8710""" + + r1 = result_fixture + + is_false(r1.closed) + is_false(r1._soft_closed) + + r1._soft_close() + is_false(r1.closed) + is_true(r1._soft_closed) + + r1.close() + is_true(r1.closed) + is_true(r1._soft_closed) + + def test_autoclose_rows_exhausted_plain( + self, connection, autoclose_row_fixture, result_fixture + ): + result = result_fixture + + assert not result._soft_closed + assert not result.closed + + read_iterator = list(result) + eq_(len(read_iterator), 5) + + assert result._soft_closed + assert not result.closed + + result.close() + assert result.closed + class KeyTargetingTest(fixtures.TablesTest): run_inserts = "once" @@ -1766,6 +2159,7 @@ def _test_keyed_targeting_no_label_at_all(self, expression, conn): def test_keyed_targeting_no_label_at_all_one(self, connection): class not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): @@ -1783,6 +2177,7 @@ def visit_max(element, compiler, **kw): def test_keyed_targeting_no_label_at_all_two(self, connection): class not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): @@ -2682,6 +3077,109 @@ def test_buffered_fetchmany_yield_per(self, connection): # buffer of 98, plus buffer of 99 - 89, 10 rows eq_(len(result.cursor_strategy._rowbuffer), 10) + for i, row 
in enumerate(result): + if i == 206: + break + + eq_(i, 206) + + def test_iterator_remains_unbroken(self, connection): + """test related to #8710. + + demonstrate that we can't close the cursor by catching + GeneratorExit inside of our iteration. Leaving the iterable + block using break, then picking up again, would be directly + impacted by this. So this provides a clear rationale for + providing context manager support for result objects. + + """ + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 250)], + ) + + result = connection.execute(table.select()) + result = result.yield_per(100) + for i, row in enumerate(result): + if i == 188: + # this will raise GeneratorExit inside the iterator. + # so we can't close the DBAPI cursor here, we have plenty + # more rows to yield + break + + eq_(i, 188) + + # demonstrate getting more rows + for i, row in enumerate(result, 188): + if i == 206: + break + + eq_(i, 206) + + @testing.combinations(True, False, argnames="close_on_init") + @testing.combinations( + "fetchone", "fetchmany", "fetchall", argnames="fetch_style" + ) + def test_buffered_fetch_auto_soft_close( + self, connection, close_on_init, fetch_style + ): + """test #7274""" + + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 30)], + ) + + result = connection.execute(table.select().limit(15)) + assert isinstance(result.cursor_strategy, _cursor.CursorFetchStrategy) + + if close_on_init: + # close_on_init - the initial buffering will exhaust the cursor, + # should soft close immediately + result = result.yield_per(30) + else: + # not close_on_init - soft close will occur after fetching an + # empty buffer + result = result.yield_per(5) + assert isinstance( + result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + + with mock.patch.object(result, "_soft_close") as soft_close: + if fetch_style == "fetchone": + while True: + row = result.fetchone() + + if row: + eq_(soft_close.mock_calls, []) + else: + # fetchone() is also used by first(), scalar() + # and one() which want to embed a hard close in one + # step + eq_(soft_close.mock_calls, [mock.call(hard=False)]) + break + elif fetch_style == "fetchmany": + while True: + rows = result.fetchmany(5) + + if rows: + eq_(soft_close.mock_calls, []) + else: + eq_(soft_close.mock_calls, [mock.call()]) + break + elif fetch_style == "fetchall": + rows = result.fetchall() + + eq_(soft_close.mock_calls, [mock.call()]) + else: + assert False + + result.close() + def test_buffered_fetchmany_yield_per_all(self, connection): table = self.tables.test diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py index 10bf3beb6fe..2db9b8bc9de 100644 --- a/test/sql/test_returning.py +++ b/test/sql/test_returning.py @@ -350,6 +350,50 @@ def test_no_ipk_on_returning(self, connection): "inserted_primary_key", ) + @testing.fixture + def column_expression_fixture(self, metadata, connection): + class MyString(TypeDecorator): + cache_ok = True + impl = String(50) + + def column_expression(self, column): + return func.lower(column) + + t1 = Table( + "some_table", + metadata, + Column("name", String(50)), + Column("value", MyString(50)), + ) + metadata.create_all(connection) + return t1 + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns, connection + ): + """test #8770""" + table1 = column_expression_fixture + + 
if use_columns == "columns": + stmt = ( + insert(table1) + .values(name="n1", value="ValUE1") + .returning(table1) + ) + else: + stmt = ( + insert(table1) + .values(name="n1", value="ValUE1") + .returning(table1.c.name, table1.c.value) + ) + + result = connection.execute(stmt) + row = result.first() + + eq_(row._mapping["name"], "n1") + eq_(row._mapping["value"], "value1") + @testing.fails_on_everything_except("postgresql", "firebird") def test_literal_returning(self, connection): if testing.against("postgresql"): diff --git a/test/sql/test_select.py b/test/sql/test_select.py index 17b47d96de7..c9abb7fb8b4 100644 --- a/test/sql/test_select.py +++ b/test/sql/test_select.py @@ -8,15 +8,16 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table +from sqlalchemy import testing from sqlalchemy import tuple_ from sqlalchemy import union from sqlalchemy.sql import column +from sqlalchemy.sql import literal from sqlalchemy.sql import table from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import fixtures - table1 = table( "mytable", column("myid", Integer), @@ -412,3 +413,23 @@ def test_select_tuple_subquery(self): "SELECT anon_1.name FROM (SELECT mytable.name AS name, " "(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1", ) + + @testing.combinations( + ("union_all", "UNION ALL"), + ("union", "UNION"), + ("intersect_all", "INTERSECT ALL"), + ("intersect", "INTERSECT"), + ("except_all", "EXCEPT ALL"), + ("except_", "EXCEPT"), + ) + def test_select_multiple_compound_elements(self, methname, joiner): + stmt = select(literal(1)) + meth = getattr(stmt, methname) + stmt = meth(select(literal(2)), select(literal(3))) + + self.assert_compile( + stmt, + "SELECT :param_1 AS anon_1" + " %(joiner)s SELECT :param_2 AS anon_2" + " %(joiner)s SELECT :param_3 AS anon_3" % {"joiner": joiner}, + ) diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index e68e98a3ccb..c29d9d5a50a 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -4,11 +4,13 @@ from sqlalchemy import Boolean from sqlalchemy import cast from sqlalchemy import Column +from sqlalchemy import delete from sqlalchemy import exc from sqlalchemy import exists from sqlalchemy import false from sqlalchemy import ForeignKey from sqlalchemy import func +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import join from sqlalchemy import literal_column @@ -27,6 +29,7 @@ from sqlalchemy import type_coerce from sqlalchemy import TypeDecorator from sqlalchemy import union +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.sql import Alias from sqlalchemy.sql import annotation @@ -86,6 +89,135 @@ class SelectableTest( ): __dialect__ = "default" + @testing.combinations( + ( + (table1.c.col1, table1.c.col2), + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + ], + ), + ( + (table1,), + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + { + "name": "col3", + "type": table1.c.col3.type, + "expr": table1.c.col3, + }, + { + "name": "colx", + "type": table1.c.colx.type, + "expr": table1.c.colx, + }, + ], + ), + ( + (func.count(table1.c.col1),), + [ + { + "name": "count", + "type": testing.eq_type_affinity(Integer), + 
"expr": testing.eq_clause_element( + func.count(table1.c.col1) + ), + } + ], + ), + ( + (func.count(table1.c.col1), func.count(table1.c.col2)), + [ + { + "name": "count", + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col1) + ), + }, + { + "name": "count_1", + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col2) + ), + }, + ], + ), + ) + def test_core_column_descriptions(self, cols, expected): + stmt = select(*cols) + # reverse eq_ is so eq_clause_element works + eq_(expected, stmt.column_descriptions) + + @testing.combinations(insert, update, delete, argnames="dml_construct") + @testing.combinations( + ( + table1, + (table1.c.col1, table1.c.col2), + {"name": "table1", "table": table1}, + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + ], + ), + ( + table1, + (func.count(table1.c.col1),), + {"name": "table1", "table": table1}, + [ + { + "name": None, + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col1) + ), + }, + ], + ), + ( + table1, + None, + {"name": "table1", "table": table1}, + [], + ), + argnames="entity, cols, expected_entity, expected_returning", + ) + def test_dml_descriptions( + self, dml_construct, entity, cols, expected_entity, expected_returning + ): + stmt = dml_construct(entity) + if cols: + stmt = stmt.returning(*cols) + + eq_(stmt.entity_description, expected_entity) + eq_(expected_returning, stmt.returning_column_descriptions) + def test_indirect_correspondence_on_labels(self): # this test depends upon 'distance' to # get the right result @@ -252,8 +384,10 @@ def test_labels_anon_generate_binds_subquery(self): @testing.combinations((True,), (False,)) def test_broken_select_same_named_explicit_cols(self, use_anon): - # this is issue #6090. the query is "wrong" and we dont know how + """test for #6090. the query is "wrong" and we dont know how # to render this right now. + + """ stmt = select( table1.c.col1, table1.c.col2, @@ -280,6 +414,24 @@ def test_broken_select_same_named_explicit_cols(self, use_anon): ): select(stmt.subquery()).compile() + def test_same_anon_named_explicit_cols(self): + """test for #8569. This adjusts the change in #6090 to not apply + to anonymous labels. 
+ + """ + lc = literal_column("col2").label(None) + + subq1 = select(lc).subquery() + + stmt2 = select(subq1, lc).subquery() + + self.assert_compile( + select(stmt2), + "SELECT anon_1.col2_1, anon_1.col2_1_1 FROM " + "(SELECT anon_2.col2_1 AS col2_1, col2 AS col2_1 FROM " + "(SELECT col2 AS col2_1) AS anon_2) AS anon_1", + ) + def test_select_label_grouped_still_corresponds(self): label = select(table1.c.col1).label("foo") label2 = label.self_group() @@ -1275,21 +1427,67 @@ def test_table_joined_to_select_of_table(self): assert j4.corresponding_column(j2.c.aid) is j4.c.aid assert j4.corresponding_column(a.c.id) is j4.c.id - def test_two_metadata_join_raises(self): + @testing.combinations(True, False) + def test_two_metadata_join_raises(self, include_a_joining_table): + """test case from 2008 enhanced as of #8101, more specific failure + modes for non-resolvable FKs + + """ m = MetaData() m2 = MetaData() t1 = Table("t1", m, Column("id", Integer), Column("id2", Integer)) - t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id"))) + + if include_a_joining_table: + t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id"))) + t3 = Table("t3", m2, Column("id", Integer, ForeignKey("t1.id2"))) - s = ( - select(t2, t3) - .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) - .subquery() - ) + with expect_raises_message( + exc.NoReferencedTableError, + "Foreign key associated with column 't3.id'", + ): + t3.join(t1) - assert_raises(exc.NoReferencedTableError, s.join, t1) + if include_a_joining_table: + s = ( + select(t2, t3) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .subquery() + ) + else: + s = ( + select(t3) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .subquery() + ) + + with expect_raises_message( + exc.NoReferencedTableError, + "Foreign key associated with column 'anon_1.t3_id' could not " + "find table 't1' with which to generate a foreign key to target " + "column 'id2'", + ): + select(s.join(t1)), + + # manual join is OK. using select().join() here is also exercising + # that join() does not need to resolve FKs if we provided the + # ON clause + if include_a_joining_table: + self.assert_compile( + select(s).join( + t1, and_(s.c.t2_id == t1.c.id, s.c.t3_id == t1.c.id) + ), + "SELECT anon_1.t2_id, anon_1.t3_id FROM (SELECT " + "t2.id AS t2_id, t3.id AS t3_id FROM t2, t3) AS anon_1 " + "JOIN t1 ON anon_1.t2_id = t1.id AND anon_1.t3_id = t1.id", + ) + else: + self.assert_compile( + select(s).join(t1, s.c.t3_id == t1.c.id), + "SELECT anon_1.t3_id FROM (SELECT t3.id AS t3_id FROM t3) " + "AS anon_1 JOIN t1 ON anon_1.t3_id = t1.id", + ) def test_multi_label_chain_naming_col(self): # See [ticket:2167] for this one. 
@@ -2777,7 +2975,7 @@ def test_proxy_set_iteration_includes_annotated(self): # proxy_set, as corresponding_column iterates through proxy_set # in this way d = {} - for col in p2._uncached_proxy_set(): + for col in p2._uncached_proxy_list(): d.update(col._annotations) eq_(d, {"weight": 10}) @@ -2793,7 +2991,7 @@ def test_proxy_set_iteration_includes_annotated_two(self): proxy._proxies = [c1._annotate({"weight": 10})] d = {} - for col in proxy._uncached_proxy_set(): + for col in proxy._uncached_proxy_list(): d.update(col._annotations) eq_(d, {"weight": 10}) @@ -2948,7 +3146,7 @@ def test_annotate_expressions(self): (table1.c.col1 == 5, "table1.col1 = :col1_1"), ( table1.c.col1.in_([2, 3, 4]), - "table1.col1 IN ([POSTCOMPILE_col1_1])", + "table1.col1 IN (__[POSTCOMPILE_col1_1])", ), ]: eq_(str(expr), expected) diff --git a/test/sql/test_sequences.py b/test/sql/test_sequences.py index a0fef99be31..5766b724e54 100644 --- a/test/sql/test_sequences.py +++ b/test/sql/test_sequences.py @@ -84,13 +84,21 @@ def test_create_drop_ddl(self): ) self.assert_compile( - CreateSequence(Sequence("foo_seq", cache=1000, order=True)), - "CREATE SEQUENCE foo_seq START WITH 1 CACHE 1000 ORDER", + CreateSequence(Sequence("foo_seq", cache=1000)), + "CREATE SEQUENCE foo_seq START WITH 1 CACHE 1000", ) + # remove this when the `order` parameter is removed + # issue #10207 - ensure ORDER does not render + self.assert_compile( + CreateSequence(Sequence("foo_seq", order=True)), + "CREATE SEQUENCE foo_seq START WITH 1", + ) + # only renders for Oracle self.assert_compile( CreateSequence(Sequence("foo_seq", order=True)), "CREATE SEQUENCE foo_seq START WITH 1 ORDER", + dialect="oracle", ) self.assert_compile( diff --git a/test/sql/test_text.py b/test/sql/test_text.py index 15f6f604861..4fff0ed7ef3 100644 --- a/test/sql/test_text.py +++ b/test/sql/test_text.py @@ -6,6 +6,7 @@ from sqlalchemy import Column from sqlalchemy import desc from sqlalchemy import exc +from sqlalchemy import extract from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import Integer @@ -182,6 +183,14 @@ def test_select_composition_eight(self): "(select f from bar where lala=heyhey) foo WHERE foo.f = t.id", ) + def test_expression_element_role(self): + """test #7287""" + + self.assert_compile( + extract("year", text("some_date + :param")), + "EXTRACT(year FROM some_date + :param)", + ) + @testing.combinations( ( None, diff --git a/test/sql/test_type_expressions.py b/test/sql/test_type_expressions.py index adcaef39cb4..7c219262079 100644 --- a/test/sql/test_type_expressions.py +++ b/test/sql/test_type_expressions.py @@ -182,28 +182,40 @@ def test_select_binds(self): "test_table WHERE test_table.y = lower(:y_1)", ) - def test_in_binds(self): + @testing.variation( + "compile_opt", ["plain", "postcompile", "literal_binds"] + ) + def test_in_binds(self, compile_opt): table = self._fixture() - self.assert_compile( - select(table).where( - table.c.y.in_(["hi", "there", "some", "expr"]) - ), - "SELECT test_table.x, lower(test_table.y) AS y FROM " - "test_table WHERE test_table.y IN " - "([POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", - render_postcompile=False, + stmt = select(table).where( + table.c.y.in_(["hi", "there", "some", "expr"]) ) - self.assert_compile( - select(table).where( - table.c.y.in_(["hi", "there", "some", "expr"]) - ), - "SELECT test_table.x, lower(test_table.y) AS y FROM " - "test_table WHERE test_table.y IN " - "(lower(:y_1_1), lower(:y_1_2), lower(:y_1_3), lower(:y_1_4))", - render_postcompile=True, - ) + if 
compile_opt.plain: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(__[POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", + render_postcompile=False, + ) + elif compile_opt.postcompile: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(lower(:y_1_1), lower(:y_1_2), lower(:y_1_3), lower(:y_1_4))", + render_postcompile=True, + ) + elif compile_opt.literal_binds: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(lower('hi'), lower('there'), lower('some'), lower('expr'))", + literal_binds=True, + ) def test_dialect(self): table = self._fixture() diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 01266d15b88..4fdbcf95116 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -75,12 +75,14 @@ from sqlalchemy.sql.sqltypes import TypeEngine from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import AssertsExecutionResults from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated_20 from sqlalchemy.testing import expect_raises +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -782,6 +784,136 @@ def test_expanding_in_typedec_of_typedec(self, connection): eq_(result.fetchall(), [(3, 1500), (4, 900)]) +class TypeDecoratorSpecialCasesTest(AssertsCompiledSQL, fixtures.TestBase): + __backend__ = True + + @testing.requires.array_type + def test_typedec_of_array_modified(self, metadata, connection): + """test #7249""" + + class SkipsFirst(TypeDecorator): # , Indexable): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def process_bind_param(self, value, dialect): + return value[1:] + + def copy(self, **kw): + return SkipsFirst(**kw) + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", SkipsFirst), + ) + t.create(connection) + + connection.execute(t.insert(), {"data": [1, 2, 3]}) + val = connection.scalar(select(t.c.data)) + eq_(val, [2, 3]) + + val = connection.scalar(select(t.c.data[0])) + eq_(val, 2) + + def test_typedec_of_array_ops(self): + class ArrayDec(TypeDecorator): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + expr1 = column("q", ArrayDec)[0] + expr2 = column("q", ARRAY(Integer, zero_indexes=True))[0] + + eq_(expr1.right.type._type_affinity, Integer) + eq_(expr2.right.type._type_affinity, Integer) + + self.assert_compile( + column("q", ArrayDec).any(7, operator=operators.lt), + "%(q_1)s < ANY (q)", + dialect="postgresql", + ) + + self.assert_compile( + column("q", ArrayDec)[5], "q[%(q_1)s]", dialect="postgresql" + ) + + def test_typedec_of_json_ops(self): + class JsonDec(TypeDecorator): + impl = JSON() + + cache_ok = True + + self.assert_compile( + column("q", JsonDec)["q"], "q -> %(q_1)s", dialect="postgresql" + ) + + self.assert_compile( + column("q", JsonDec)["q"].as_integer(), + "CAST(q ->> %(q_1)s AS INTEGER)", + 
dialect="postgresql", + ) + + @testing.requires.array_type + def test_typedec_of_array(self, metadata, connection): + """test #7249""" + + class ArrayDec(TypeDecorator): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", ArrayDec), + ) + + t.create(connection) + + connection.execute(t.insert(), {"data": [1, 2, 3]}) + val = connection.scalar(select(t.c.data)) + eq_(val, [1, 2, 3]) + + val = connection.scalar(select(t.c.data[0])) + eq_(val, 1) + + @testing.requires.json_type + def test_typedec_of_json(self, metadata, connection): + """test #7249""" + + class JsonDec(TypeDecorator): + impl = JSON() + + cache_ok = True + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", JsonDec), + ) + t.create(connection) + + connection.execute(t.insert(), {"data": {"key": "value"}}) + val = connection.scalar(select(t.c.data)) + eq_(val, {"key": "value"}) + + val = connection.scalar(select(t.c.data["key"].as_string())) + eq_(val, "value") + + class BindProcessorInsertValuesTest(UserDefinedRoundTripTest): """related to #6770, test that insert().values() applies to bound parameter handlers including the None value.""" @@ -1464,6 +1596,8 @@ class Foo(TypeDecorator): def test_type_decorator_compile_variant_two(self): class UTypeOne(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPEONE" @@ -1474,6 +1608,8 @@ def process(value): return process class UTypeTwo(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETWO" @@ -1522,6 +1658,8 @@ class Foo(TypeDecorator): class VariantTest(fixtures.TestBase, AssertsCompiledSQL): def setup_test(self): class UTypeOne(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPEONE" @@ -1532,6 +1670,8 @@ def process(value): return process class UTypeTwo(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETWO" @@ -1542,6 +1682,8 @@ def process(value): return process class UTypeThree(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETHREE" @@ -1693,10 +1835,10 @@ def test_unicode_warnings_typelevel_native_unicode(self): dialect.supports_unicode_binds = True uni = u.dialect_impl(dialect).bind_processor(dialect) if util.py3k: - assert_raises(exc.SAWarning, uni, b"x") + assert_warns(exc.SAWarning, uni, b"x") assert isinstance(uni(unicodedata), str) else: - assert_raises(exc.SAWarning, uni, "x") + assert_warns(exc.SAWarning, uni, "x") assert isinstance(uni(unicodedata), unicode) # noqa def test_unicode_warnings_typelevel_sqla_unicode(self): @@ -1705,7 +1847,7 @@ def test_unicode_warnings_typelevel_sqla_unicode(self): dialect = default.DefaultDialect() dialect.supports_unicode_binds = False uni = u.dialect_impl(dialect).bind_processor(dialect) - assert_raises(exc.SAWarning, uni, util.b("x")) + assert_warns(exc.SAWarning, uni, util.b("x")) assert isinstance(uni(unicodedata), util.binary_type) eq_(uni(unicodedata), unicodedata.encode("utf-8")) @@ -2496,13 +2638,44 @@ def test_repr(self): "inherit_schema=True, native_enum=False)", ) + def test_repr_two(self): + e = Enum("x", "y", name="somename", create_constraint=True) + eq_( + repr(e), + "Enum('x', 'y', name='somename', create_constraint=True)", + ) + + def test_repr_three(self): + e = Enum("x", "y", native_enum=False, length=255) + eq_( + repr(e), + 
"Enum('x', 'y', native_enum=False, length=255)", + ) + + def test_repr_four(self): + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", length=255) + # length is currently ignored if native_enum is not False + eq_( + repr(e), + "Enum('x', 'y')", + ) + def test_length_native(self): - e = Enum("x", "y", "long", length=42) + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", "long", length=42) eq_(e.length, len("long")) # no error is raised - e = Enum("x", "y", "long", length=1) + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", "long", length=1) eq_(e.length, len("long")) def test_length_raises(self): @@ -2961,6 +3134,8 @@ def define_tables(cls, metadata): global MyCustomType, MyTypeDec class MyCustomType(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "INT" @@ -3437,6 +3612,24 @@ def test_detect_coercion_not_fooled_by_mock(self): class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" + def test_compile_err_formatting(self): + with expect_raises_message( + exc.CompileError, + r"No literal value renderer is available for literal " + r"value \"\(1, 2, 3\)\" with datatype NULL", + ): + func.foo((1, 2, 3)).compile(compile_kwargs={"literal_binds": True}) + + def test_strict_bool_err_formatting(self): + typ = Boolean() + + dialect = default.DefaultDialect() + with expect_raises_message( + TypeError, + r"Not a boolean value: \(5,\)", + ): + typ.bind_processor(dialect)((5,)) + @testing.requires.unbounded_varchar def test_string_plain(self): self.assert_compile(String(), "VARCHAR") @@ -4038,8 +4231,8 @@ def test_render_datetime(self, value): lit = literal(value) assert_raises_message( - NotImplementedError, - "Don't know how to literal-quote value.*", + exc.CompileError, + r"No literal value renderer is available for literal value.*", lit.compile, dialect=testing.db.dialect, compile_kwargs={"literal_binds": True}, diff --git a/test/sql/test_update.py b/test/sql/test_update.py index 93deae5565e..214fb913fa6 100644 --- a/test/sql/test_update.py +++ b/test/sql/test_update.py @@ -316,7 +316,11 @@ def test_correlated_update_seven(self): def test_binds_that_match_columns(self): """test bind params named after column names - replace the normal SET/VALUES generation.""" + replace the normal SET/VALUES generation. 
+ + See also test_compiler.py::CrudParamOverlapTest + + """ t = table("foo", column("x"), column("y")) diff --git a/test/sql/test_values.py b/test/sql/test_values.py index dcd32a6791a..1c5e0a1fbb6 100644 --- a/test/sql/test_values.py +++ b/test/sql/test_values.py @@ -277,7 +277,8 @@ def test_use_cols_tricky_not_every_type_given( with expect_raises_message( exc.CompileError, - "Don't know how to render literal SQL value: 'textA'", + r"No literal value renderer is available for literal " + r"value \"'textA'\" with datatype NULL", ): str(stmt) diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py new file mode 100644 index 00000000000..05e5e01f10a --- /dev/null +++ b/tools/format_docs_code.py @@ -0,0 +1,386 @@ +from argparse import ArgumentParser +from argparse import RawDescriptionHelpFormatter +from collections.abc import Iterator +from functools import partial +from pathlib import Path +import re +from typing import NamedTuple + +from black import format_str +from black.const import DEFAULT_LINE_LENGTH +from black.files import parse_pyproject_toml +from black.mode import Mode +from black.mode import TargetVersion + + +home = Path(__file__).parent.parent +ignore_paths = (re.compile(r"changelog/unreleased_\d{2}"),) + + +class BlockLine(NamedTuple): + line: str + line_no: int + code: str + padding: str | None = None # relevant only on first line of block + sql_marker: str | None = None + + +_Block = list[BlockLine] + + +def _format_block( + input_block: _Block, + exit_on_error: bool, + errors: list[tuple[int, str, Exception]], + is_doctest: bool, + file: str, +) -> list[str]: + if not is_doctest: + # The first line may have additional padding. Remove then restore later + add_padding = start_space.match(input_block[0].code).groups()[0] + skip = len(add_padding) + code = "\n".join( + l.code[skip:] if l.code.startswith(add_padding) else l.code + for l in input_block + ) + else: + add_padding = None + code = "\n".join(l.code for l in input_block) + + try: + formatted = format_str(code, mode=BLACK_MODE) + except Exception as e: + start_line = input_block[0].line_no + first_error = not errors + if not REPORT_ONLY_DOCTEST or is_doctest: + type_ = "doctest" if is_doctest else "plain" + errors.append((start_line, code, e)) + if first_error: + print() # add newline + print( + f"--- {file}:{start_line} Could not format {type_} code " + f"block:\n{code}\n---Error: {e}" + ) + if exit_on_error: + print("Exiting since --exit-on-error was passed") + raise + else: + print("Ignoring error") + return [l.line for l in input_block] + else: + formatted_code_lines = formatted.splitlines() + padding = input_block[0].padding + sql_prefix = input_block[0].sql_marker or "" + + if is_doctest: + formatted_lines = [ + f"{padding}{sql_prefix}>>> {formatted_code_lines[0]}", + *( + f"{padding}...{' ' if fcl else ''}{fcl}" + for fcl in formatted_code_lines[1:] + ), + ] + else: + formatted_lines = [ + f"{padding}{add_padding}{sql_prefix}{formatted_code_lines[0]}", + *( + f"{padding}{add_padding}{fcl}" if fcl else fcl + for fcl in formatted_code_lines[1:] + ), + ] + if not input_block[-1].line and formatted_lines[-1]: + # last line was empty and black removed it. 
restore it + formatted_lines.append("") + return formatted_lines + + +format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") + +doctest_code_start = re.compile(r"^(\s+)({(?:opensql|sql|stop)})?>>>\s?(.+)") +doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") + +sql_code_start = re.compile(r"^(\s+)({(?:open)?sql})") +sql_code_stop = re.compile(r"^(\s+){stop}") + +start_code_section = re.compile( + r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$" +) +start_space = re.compile(r"^(\s*)[^ ]?") + + +def format_file( + file: Path, exit_on_error: bool, check: bool +) -> tuple[bool, int]: + buffer = [] + if not check: + print(f"Running file {file} ..", end="") + original = file.read_text("utf-8") + doctest_block: _Block | None = None + plain_block: _Block | None = None + + plain_code_section = False + plain_padding = None + plain_padding_len = None + sql_section = False + + errors = [] + + do_doctest_format = partial( + _format_block, + exit_on_error=exit_on_error, + errors=errors, + is_doctest=True, + file=str(file), + ) + + def doctest_format(): + nonlocal doctest_block + if doctest_block: + buffer.extend(do_doctest_format(doctest_block)) + doctest_block = None + + do_plain_format = partial( + _format_block, + exit_on_error=exit_on_error, + errors=errors, + is_doctest=False, + file=str(file), + ) + + def plain_format(): + nonlocal plain_block + if plain_block: + buffer.extend(do_plain_format(plain_block)) + plain_block = None + + disable_format = False + for line_no, line in enumerate(original.splitlines(), 1): + + if ( + line + and not disable_format + and start_code_section.match(line.strip()) + ): + # start_code_section regexp requires no spaces at the start + plain_format() + plain_code_section = True + assert not sql_section + plain_padding = start_space.match(line).groups()[0] + plain_padding_len = len(plain_padding) + buffer.append(line) + continue + elif ( + plain_code_section + and line.strip() + and not line.startswith(" " * (plain_padding_len + 1)) + ): + plain_code_section = sql_section = False + elif match := format_directive.match(line): + assert not plain_code_section + disable_format = match.groups()[0] == "off" + + if doctest_block: + assert not plain_block + if match := doctest_code_continue.match(line): + doctest_block.append( + BlockLine(line, line_no, match.groups()[0]) + ) + continue + else: + doctest_format() + elif plain_block: + if ( + plain_code_section + and not doctest_code_start.match(line) + and not sql_code_start.match(line) + ): + plain_block.append( + BlockLine(line, line_no, line[plain_padding_len:]) + ) + continue + else: + plain_format() + + if line and (match := doctest_code_start.match(line)): + # the line is in a doctest + plain_code_section = sql_section = False + plain_format() + padding, sql_marker, code = match.groups() + doctest_block = [ + BlockLine(line, line_no, code, padding, sql_marker) + ] + elif line and plain_code_section: + assert not disable_format + assert not doctest_block + if match := sql_code_start.match(line): + plain_format() + sql_section = True + buffer.append(line) + elif sql_section: + if match := sql_code_stop.match(line): + sql_section = False + no_stop_line = line.replace("{stop}", "") + # start of a plain block + if no_stop_line.strip(): + assert not plain_block + plain_block = [ + BlockLine( + line, + line_no, + no_stop_line[plain_padding_len:], + plain_padding, + "{stop}", + ) + ] + continue + buffer.append(line) + else: + # start of a plain block + assert not doctest_block + plain_block = [ + 
BlockLine( + line, + line_no, + line[plain_padding_len:], + plain_padding, + ) + ] + else: + buffer.append(line) + + doctest_format() + plain_format() + if buffer: + buffer.append("") + updated = "\n".join(buffer) + equal = original == updated + if not check: + print( + f"..done. {len(errors)} error(s).", + "No changes" if equal else "Changes detected", + ) + if not equal: + # write only if there are changes to write + file.write_text(updated, "utf-8", newline="\n") + else: + # if there is nothing in the buffer something strange happened so + # don't do anything + if not check: + print(".. Nothing to write") + equal = bool(original) is False + + if check: + if not equal: + print(f"File {file} would be formatted") + return equal, len(errors) + + +def iter_files(directory: str) -> Iterator[Path]: + yield from ( + file + for file in (home / directory).glob("./**/*.rst") + if not any(pattern.search(file.as_posix()) for pattern in ignore_paths) + ) + + +def main( + file: list[str] | None, directory: str, exit_on_error: bool, check: bool +): + if file is not None: + result = [format_file(Path(f), exit_on_error, check) for f in file] + else: + result = [ + format_file(doc, exit_on_error, check) + for doc in iter_files(directory) + ] + + if check: + formatting_error_counts = [e for _, e in result if e] + to_reformat = len([b for b, _ in result if not b]) + + if not to_reformat and not formatting_error_counts: + print("All files are correctly formatted") + exit(0) + else: + print( + f"{to_reformat} file(s) would be reformatted;", + ( + f"{sum(formatting_error_counts)} formatting errors " + f"reported in {len(formatting_error_counts)} files" + ) + if formatting_error_counts + else "no formatting errors reported", + ) + + exit(1) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="""Formats code inside docs using black. Supports \ +doctest code blocks and plain code block identified as indented sections \ +that are preceded by ``::`` or ``.. sourcecode:: py``. + +To disable formatting on a file section the comment ``.. format: off`` \ +disables formatting until ``.. format: on`` is encountered or the file ends. + +Use --report-doctest to ignore errors on plain code blocks. +""", + formatter_class=RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-f", + "--file", + help="Format only this file instead of all docs", + nargs="+", + ) + parser.add_argument( + "-d", + "--directory", + help="Find documents in this directory and its sub dirs", + default="doc/build", + ) + parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. " + "Return code 1 means some files would be reformatted", + action="store_true", + ) + parser.add_argument( + "-e", + "--exit-on-error", + help="Exit in case of black format error instead of ignoring it", + action="store_true", + ) + parser.add_argument( + "-l", + "--project-line-length", + help="Configure the line length to the project value instead " + "of using the black default of 88", + action="store_true", + ) + parser.add_argument( + "-rd", + "--report-doctest", + help="Report errors only when running doctest blocks. 
When active " + "exit-on-error will be valid only on doctest blocks", + action="store_true", + ) + args = parser.parse_args() + + config = parse_pyproject_toml(home / "pyproject.toml") + BLACK_MODE = Mode( + target_versions=set( + TargetVersion[val.upper()] + for val in config.get("target_version", []) + if val != "py27" + ), + line_length=config.get("line_length", DEFAULT_LINE_LENGTH) + if args.project_line_length + else DEFAULT_LINE_LENGTH, + ) + REPORT_ONLY_DOCTEST = args.report_doctest + + main(args.file, args.directory, args.exit_on_error, args.check) diff --git a/tools/normalize_file_headers.py b/tools/normalize_file_headers.py new file mode 100644 index 00000000000..ba4cd5734f8 --- /dev/null +++ b/tools/normalize_file_headers.py @@ -0,0 +1,69 @@ +from datetime import date +from pathlib import Path +import re + +from sqlalchemy.util.tool_support import code_writer_cmd + +sa_path = Path(__file__).parent.parent / "lib/sqlalchemy" + + +file_re = re.compile(r"^# [\w+/]+.(?:pyx?|pxd)$", re.MULTILINE) +license_re = re.compile( + r"Copyright .C. (\d+)-\d+ the SQLAlchemy authors and contributors" +) + +this_year = date.today().year +license_ = f""" +# Copyright (C) 2005-{this_year} the SQLAlchemy authors and \ +contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +""" + + +def run_file(cmd: code_writer_cmd, file: Path, update_year: bool): + content = file.read_text("utf-8") + path = str(file.relative_to(sa_path)).replace("\\", "/") # handle windows + path_comment = f"# {path}" + has_license = bool(license_re.search(content)) + if file_re.match(content.strip()): + if has_license: + to_sub = path_comment + else: + to_sub = path_comment + license_ + content = file_re.sub(to_sub, content, count=1) + else: + content = path_comment + ("\n" if has_license else license_) + content + + if has_license and update_year: + content = license_re.sub( + rf"Copyright (C) \1-{this_year} the SQLAlchemy " + "authors and contributors", + content, + 1, + ) + cmd.write_output_file_from_text(content, file) + + +def run(cmd: code_writer_cmd, update_year: bool): + i = 0 + for ext in ("py", "pyx", "pxd"): + for file in sa_path.glob(f"**/*.{ext}"): + run_file(cmd, file, update_year) + i += 1 + cmd.write_status(f"\nDone. Processed {i} files.") + + +if __name__ == "__main__": + cmd = code_writer_cmd(__file__) + with cmd.add_arguments() as parser: + parser.add_argument( + "--update-year", + action="store_true", + help="Update the year in the license files", + ) + + with cmd.run_program(): + run(cmd, cmd.args.update_year) diff --git a/tox.ini b/tox.ini index d8ba67a440c..071415c4f9a 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,15 @@ [tox] envlist = py +[greenletextras] +extras= + asyncio + sqlite: aiosqlite + sqlite_file: aiosqlite + postgresql: postgresql-asyncpg + mysql: asyncmy + mysql: aiomysql + [testenv] # note that we have a .coveragerc file that points coverage specifically # at ./lib/sqlalchemy, and *not* at the build that tox might create under .tox. @@ -10,35 +19,37 @@ envlist = py # Jenkins etc. need to call "coverage erase" externally. 
cov_args=--cov=sqlalchemy --cov-report term --cov-append --cov-report xml --exclude-tag memory-intensive --exclude-tag timing-intensive -k "not aaa_profiling" -install_command=python -m pip install {env:TOX_PIP_OPTS:} {opts} {packages} +# new opt as of tox 4.4.0 was set to True causing it to dump the +# deps below into a constraints file, while requirements of the +# form ".[aiosqlite]" are not valid constraints, those are requirements +constrain_package_deps=false usedevelop= cov: True +extras= + py{3,38,39,310,311,312,313,314}: {[greenletextras]extras} + + postgresql: postgresql + postgresql: postgresql-pg8000 + + mysql: mysql + mysql: pymysql + mysql: mariadb-connector + + oracle: oracle + mssql: mssql + deps= pytest>=4.6.11,<5.0; python_version < '3' - pytest>=6.2; python_version >= '3' + pytest>=6.2,<8; python_version >= '3' pytest-xdist mock; python_version < '3.3' - sqlite: .[aiosqlite] - sqlite_file: .[aiosqlite] - sqlite_file: .[sqlcipher]; python_version >= '3' and python_version < '3.10' - postgresql: .[postgresql] - postgresql: .[postgresql_asyncpg]; python_version >= '3' - postgresql: .[postgresql_pg8000]; python_version >= '3' - mysql: .[mysql] - mysql: .[pymysql] - mysql: .[asyncmy]; python_version >= '3' - mysql: .[mariadb_connector]; python_version >= '3' + py313: git+https://github.com/python-greenlet/greenlet.git\#egg=greenlet - oracle: .[oracle] - - mssql: .[mssql] - - dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git#egg=aiosqlite - dbapimain-sqlite: git+https://github.com/coleifer/sqlcipher3.git#egg=sqlcipher3 + dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git\#egg=aiosqlite dbapimain-postgresql: git+https://github.com/psycopg/psycopg2.git#egg=psycopg2 dbapimain-postgresql: git+https://github.com/MagicStack/asyncpg.git#egg=asyncpg @@ -46,15 +57,14 @@ deps= dbapimain-mysql: git+https://github.com/PyMySQL/mysqlclient-python.git#egg=mysqlclient dbapimain-mysql: git+https://github.com/PyMySQL/PyMySQL.git#egg=pymysql - dbapimain-mysql: git+https://github.com/mariadb-corporation/mariadb-connector-python#egg=mariadb dbapimain-oracle: git+https://github.com/oracle/python-cx_Oracle.git#egg=cx_Oracle - dbapimain-mssql: git+https://github.com/mkleehammer/pyodbc.git#egg=pyodbc + dbapimain-mssql: git+https://github.com/mkleehammer/pyodbc.git\#egg=pyodbc cov: pytest-cov -allowlist_externals=sh +allowlist_externals=sh, /bin/true # PYTHONPATH - erased so that we use the build that's present # in .tox as the SQLAlchemy library to be imported @@ -73,8 +83,11 @@ allowlist_externals=sh setenv= PYTHONPATH= PYTHONNOUSERSITE=1 + + PYTEST_COLOR={tty:--color=yes} + MEMUSAGE=--nomemory - BASECOMMAND=python -m pytest --rootdir {toxinidir} --log-info=sqlalchemy.testing + BASECOMMAND=python -m pytest {env:PYTEST_COLOR} --rootdir {toxinidir} --log-info=sqlalchemy.testing WORKERS={env:TOX_WORKERS:-n4 --max-worker-restart=5} @@ -82,27 +95,39 @@ setenv= cext: REQUIRE_SQLALCHEMY_CEXT=1 cov: COVERAGE={[testenv]cov_args} backendonly: BACKENDONLY=--backend-only - memusage: MEMUSAGE='-k test_memusage' + memusage: MEMUSAGE=-k test_memusage test/aaa_profiling/ oracle: WORKERS={env:TOX_WORKERS:-n2 --max-worker-restart=5} oracle: ORACLE={env:TOX_ORACLE:--db oracle} sqlite: SQLITE={env:TOX_SQLITE:--db sqlite} sqlite_file: SQLITE={env:TOX_SQLITE_FILE:--db sqlite_file} - py3{,5,6,7,8,9,10,11}-sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite} - py3{,5,6,7,8,9}-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite 
--dbdriver aiosqlite --dbdriver pysqlcipher} - # omit pysqlcipher for Python 3.10 - py3{,10,11}-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite} + + sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric --dbdriver aiosqlite} + py{313,314}-sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric} + + sqlite-nogreenlet: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric} + + py-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite} postgresql: POSTGRESQL={env:TOX_POSTGRESQL:--db postgresql} py2{,7}-postgresql: POSTGRESQL={env:TOX_POSTGRESQL_PY2K:{env:TOX_POSTGRESQL:--db postgresql}} - py3{,5,6,7,8,9,10,11}-postgresql: EXTRA_PG_DRIVERS={env:EXTRA_PG_DRIVERS:--dbdriver psycopg2 --dbdriver asyncpg --dbdriver pg8000} + py3{,5,6,7,8,9,10,11,12,13,14}-postgresql: EXTRA_PG_DRIVERS={env:EXTRA_PG_DRIVERS:--dbdriver psycopg2 --dbdriver asyncpg --dbdriver pg8000} + py312-postgresql: EXTRA_PG_DRIVERS={env:EXTRA_PG_DRIVERS:--dbdriver psycopg2 --dbdriver pg8000} mysql: MYSQL={env:TOX_MYSQL:--db mysql} py2{,7}-mysql: MYSQL={env:TOX_MYSQL_PY2K:{env:TOX_MYSQL:--db mysql}} + + PY_SPECIFIC= + py3{,5,6,7,8,9}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive + py2{,7}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive + + memusage: PY_SPECIFIC= + mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} - py3-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} + # py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} + py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver asyncmy --dbdriver aiomysql} mssql: MSSQL={env:TOX_MSSQL:--db mssql} @@ -110,16 +135,32 @@ setenv= oracle,mssql,sqlite_file: IDENTS=--write-idents db_idents.txt oracle,mssql,sqlite_file: MEMUSAGE=--nomemory + + # tox as of 2.0 blocks all environment variables from the # outside, unless they are here (or in TOX_TESTENV_PASSENV, # wildcards OK). 
Need at least these -passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_POSTGRESQL_PY2K TOX_MYSQL TOX_MYSQL_PY2K TOX_ORACLE TOX_MSSQL TOX_SQLITE TOX_SQLITE_FILE TOX_WORKERS EXTRA_SQLITE_DRIVERS EXTRA_PG_DRIVERS EXTRA_MYSQL_DRIVERS +passenv= + ORACLE_HOME + NLS_LANG + TOX_POSTGRESQL + TOX_POSTGRESQL_PY2K + TOX_MYSQL + TOX_MYSQL_PY2K + TOX_ORACLE + TOX_MSSQL + TOX_SQLITE + TOX_SQLITE_FILE + TOX_WORKERS + EXTRA_SQLITE_DRIVERS + EXTRA_PG_DRIVERS + EXTRA_MYSQL_DRIVERS # for nocext, we rm *.so in lib in case we are doing usedevelop=True commands= cext: /bin/true nocext: sh -c "rm -f lib/sqlalchemy/*.so" - {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + {env:BASECOMMAND} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt @@ -130,18 +171,23 @@ deps= greenlet != 0.4.17 mock; python_version < '3.3' importlib_metadata; python_version < '3.8' - mypy + mypy >= 1.2.0,<1.11 patch==1.* git+https://github.com/sqlalchemy/sqlalchemy2-stubs commands = - pytest test/ext/mypy/test_mypy_plugin_py3k.py {posargs} + pytest {env:PYTEST_COLOR} test/ext/mypy/test_mypy_plugin_py3k.py {posargs} # thanks to https://julien.danjou.info/the-best-flake8-extensions/ [testenv:pep8] basepython = python3 + +extras= + {[greenletextras]extras} + deps= flake8 - flake8-import-order + #flake8-import-order + git+https://github.com/sqlalchemyorg/flake8-import-order@fix_options flake8-builtins flake8-docstrings>=1.3.1 flake8-rst-docstrings @@ -150,22 +196,29 @@ deps= pydocstyle pygments black==21.5b1 + click<8.1 commands = flake8 ./lib/ ./test/ ./examples/ setup.py doc/build/conf.py {posargs} black --check ./lib/ ./test/ ./examples/ setup.py doc/build/conf.py # command run in the github action when cext are active. [testenv:github-cext] +extras= + {[greenletextras]extras} + deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PYTEST_COLOR} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt # command run in the github action when cext are not active. [testenv:github-nocext] +extras= + {[greenletextras]extras} + deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PYTEST_COLOR} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt
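
The streaming-result behavior exercised above (test_stream_options, test_iterator_remains_unbroken and test_buffered_fetch_auto_soft_close in test/sql/test_resultset.py) is driven entirely through public execution options. The sketch below is illustrative only and is not part of the patch; it assumes an existing Engine named `engine` and a Core Table named `some_table`, and uses only calls the tests themselves exercise (Connection.execution_options(), Result.yield_per(), Result.partitions()).

    # minimal usage sketch, assuming `engine` and `some_table` exist;
    # not part of the change set above
    from sqlalchemy import select

    with engine.connect() as connection:
        # buffered / server-side streaming, as driven by
        # optname="stream_results" in test_stream_options
        result = connection.execution_options(
            stream_results=True, max_row_buffer=50
        ).execute(select(some_table))

        # the statement-level, one-step equivalent used by
        # optname="yield_per" would be:
        #   connection.execute(
        #       select(some_table).execution_options(yield_per=50)
        #   )

        # fetch in fixed-size batches; with yield_per(50) each partition
        # holds at most 50 rows, matching the partition-size assertion
        # at the end of test_stream_options
        for partition in result.yield_per(50).partitions():
            for row in partition:
                pass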