diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..28784749c4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/docs" + schedule: + interval: "daily" + allow: + - dependency-name: "sphinx-scylladb-theme" + - dependency-name: "sphinx-multiversion-scylla" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..63a822bb08 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,17 @@ +## Pre-review checklist + + + +- [ ] I have split my patch into logically separate commits. +- [ ] All commit messages clearly explain what they change and why. +- [ ] I added relevant tests for new features and bug fixes. +- [ ] All commits compile, pass static checks and pass test. +- [ ] PR description sums up the changes and reasons why they should be introduced. +- [ ] I have provided docstrings for the public items that I want to introduce. +- [ ] I have adjusted the documentation in `./docs/source/`. +- [ ] I added appropriate `Fixes:` annotations to PR description. \ No newline at end of file diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml deleted file mode 100644 index 63c30c5bf0..0000000000 --- a/.github/workflows/build-experimental.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: experimental -on: [push, pull_request] - -env: - CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libev libev-devel openssl openssl-devel" - CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_BUILD: "cp38* cp39* cp310*" - CIBW_SKIP: "*musllinux*" -jobs: - build_wheels: - if: contains(github.event.pull_request.labels.*.name, 'test-build-experimental') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # The host should always be linux - runs-on: ubuntu-18.04 - name: Build experimental ${{ matrix.archs }} wheels - strategy: - fail-fast: false - matrix: - archs: [ aarch64, ppc64le ] - - steps: - - uses: actions/checkout@v2.1.0 - - - name: Set up QEMU - id: qemu - uses: docker/setup-qemu-action@v1 - with: - platforms: all - if: runner.os == 'Linux' - - - uses: actions/setup-python@v2 - name: Install Python - - - name: Install cibuildwheel - run: | - python -m pip install cibuildwheel==2.3.0 - - - name: Build wheels - run: | - python -m cibuildwheel --archs ${{ matrix.archs }} --output-dir wheelhouse - - - uses: actions/upload-artifact@v2 - with: - path: ./wheelhouse/*.whl - - upload_pypi: - needs: [build_wheels] - runs-on: ubuntu-latest - # upload to PyPI on every tag starting with 'v' - if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # alternatively, to publish when a GitHub Release is created, use the following rule: - # if: github.event_name == 'release' && github.event.action == 'published' - steps: - - uses: actions/download-artifact@v2 - with: - name: artifact - path: dist - - - uses: pypa/gh-action-pypi-publish@master - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} - diff --git a/.github/workflows/build-pre-release.yml b/.github/workflows/build-pre-release.yml new file mode 100644 index 0000000000..e1326b6aa5 --- /dev/null +++ b/.github/workflows/build-pre-release.yml @@ -0,0 +1,21 @@ +name: Test building on pre-release python version + +on: + workflow_dispatch: + inputs: + python-version: + description: 'Python version to run against' + required: true + type: string + + target: + type: 
string + description: "target os to build for: linux,macos-x86,macos-arm,windows,linux-aarch64" + default: "linux,macos-x86,macos-arm,windows,linux-aarch64" + +jobs: + build-and-publish: + uses: ./.github/workflows/lib-build-and-push.yml + with: + python-version: ${{ inputs.python-version }} + target: ${{ inputs.target }} diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 320df2e779..15c77f3861 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -1,155 +1,33 @@ name: Build and upload to PyPi -on: [push, pull_request] - - -env: - CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py && EVENT_LOOP_MANAGER=eventlet pytest --import-mode append {project}/tests/unit/io/test_eventletreactor.py " - CIBW_TEST_COMMAND_MACOS: "pytest --import-mode append {project}/tests/unit -k 'not (test_multi_timer_validation or test_empty_connections or test_connection_initialization or test_timer_cancellation or test_cloud)' " - CIBW_TEST_COMMAND_WINDOWS: "pytest --import-mode append {project}/tests/unit -k \"not (test_deserialize_date_range_year or test_datetype or test_libevreactor or test_connection_initialization or test_cloud)\" " - CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt pytest" - CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" - CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_SKIP: cp35* cp36* *musllinux* +on: + push: + branches: + - master + - 'branch-**' + workflow_dispatch: jobs: - build_wheels: - name: Build wheels ${{ matrix.os }} (${{ matrix.platform }}) - if: contains(github.event.pull_request.labels.*.name, 'test-build') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - os: ubuntu-latest - platform: x86_64 - - - os: ubuntu-latest - platform: i686 - - - os: ubuntu-latest - platform: PyPy - - - os: windows-latest - platform: win32 - - - os: windows-latest - platform: win64 - - - os: windows-latest - platform: PyPy - - - os: macos-latest - platform: all - - - os: macos-latest - platform: PyPy - - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-python@v2 - name: Install Python - - - name: Install cibuildwheel - run: | - python -m pip install cibuildwheel==2.3.0 - - - name: Install OpenSSL for Windows - if: runner.os == 'Windows' - run: | - choco install openssl -f -y - - - name: Install OpenSSL for MacOS - if: runner.os == 'MacOs' - run: | - brew install libev - - - name: Overwrite for Linux 64 - if: runner.os == 'Linux' && matrix.platform == 'x86_64' - run: | - echo "CIBW_BUILD=cp3*_x86_64" >> $GITHUB_ENV - - - name: Overwrite for Linux 32 - if: runner.os == 'Linux' && matrix.platform == 'i686' - run: | - echo "CIBW_BUILD=cp*_i686" >> $GITHUB_ENV - echo "CIBW_TEST_COMMAND_LINUX=" >> $GITHUB_ENV - - - name: Overwrite for Linux PyPy - if: runner.os == 'Linux' && matrix.platform == 'PyPy' - run: | - echo "CIBW_BUILD=pp*" >> $GITHUB_ENV - echo "CIBW_TEST_COMMAND_LINUX=" >> $GITHUB_ENV - - - name: Overwrite for Windows 64 - if: runner.os == 'Windows' && matrix.platform == 'win64' - run: | - echo "CIBW_BUILD=cp*win_amd64" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - - - name: Overwrite for Windows 32 - if: 
runner.os == 'Windows' && matrix.platform == 'win32' - run: | - echo "CIBW_BUILD=cp*win32" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - - - name: Overwrite for Windows PyPY - if: runner.os == 'Windows' && matrix.platform == 'PyPy' - run: | - echo "CIBW_BUILD=pp*" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - echo "CIBW_TEST_COMMAND_WINDOWS=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - - - name: Overwrite for MacOs - if: runner.os == 'MacOs' && matrix.platform == 'all' - run: | - echo "CIBW_BUILD=cp37* cp38*" >> $GITHUB_ENV - echo "CIBW_BEFORE_TEST_MACOS=pip install -r {project}/test-requirements.txt pytest" >> $GITHUB_ENV - - - name: Overwrite for MacOs PyPy - if: runner.os == 'MacOs' && matrix.platform == 'PyPy' - run: | - echo "CIBW_BUILD=pp*" >> $GITHUB_ENV - echo "CIBW_BEFORE_TEST_MACOS=pip install -r {project}/test-requirements.txt pytest" >> $GITHUB_ENV - echo "CIBW_TEST_COMMAND_MACOS=" >> $GITHUB_ENV - - - name: Build wheels - run: | - python -m cibuildwheel --output-dir wheelhouse - - - uses: actions/upload-artifact@v2 - with: - path: ./wheelhouse/*.whl - - build_sdist: - name: Build source distribution - if: contains(github.event.pull_request.labels.*.name, 'test-build') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-python@v2 - name: Install Python - - - name: Build sdist - run: python setup.py sdist - - - uses: actions/upload-artifact@v2 - with: - path: dist/*.tar.gz - - upload_pypi: - needs: [build_wheels, build_sdist] - runs-on: ubuntu-latest - # upload to PyPI on every tag starting with 'v' - if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # alternatively, to publish when a GitHub Release is created, use the following rule: - # if: github.event_name == 'release' && github.event.action == 'published' + build-and-publish: + name: "Build wheels" + uses: ./.github/workflows/lib-build-and-push.yml + with: + upload: false + + # TODO: Remove when https://github.com/pypa/gh-action-pypi-publish/issues/166 is fixed and update build-and-publish.with.upload to ${{ endsWith(github.event.ref, 'scylla') }} + publish: + name: "Publish wheels to PyPi" + if: ${{ endsWith(github.event.ref, 'scylla') }} + needs: build-and-publish + runs-on: ubuntu-24.04 + permissions: + id-token: write steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v7 with: - name: artifact path: dist + merge-multiple: true - - uses: pypa/gh-action-pypi-publish@master + - uses: pypa/gh-action-pypi-publish@release/v1 with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} + skip-existing: true diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml new file mode 100644 index 0000000000..3e1f1067d7 --- /dev/null +++ b/.github/workflows/build-test.yml @@ -0,0 +1,23 @@ +name: Test wheels building + +on: + pull_request: + branches: + - master + paths-ignore: + - docs/* + - examples/* + - .gitignore + - '*.rst' + - '*.ini' + - LICENSE + - .github/dependabot.yml + - .github/pull_request_template.md + +jobs: + test-wheels-build: + name: "Test wheels building" + if: "!contains(github.event.pull_request.labels.*.name, 'disable-test-build')" + uses: ./.github/workflows/lib-build-and-push.yml + with: + upload: false \ No newline at end of file diff --git a/.github/workflows/docs-pages.yml b/.github/workflows/docs-pages.yml new file mode 100644 index 0000000000..0da86fef34 --- 
/dev/null +++ b/.github/workflows/docs-pages.yml @@ -0,0 +1,48 @@ +name: "Docs / Publish" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows + +permissions: + contents: write + +on: + push: + branches: + - master + - 'branch-**' + paths: + - "docs/**" + - ".github/workflows/docs-pages.yml" + - "cassandra/**" + - "pyproject.toml" + - "setup.py" + - "CHANGELOG.rst" + workflow_dispatch: + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + ref: ${{ github.event.repository.default_branch }} + persist-credentials: false + fetch-depth: 0 + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + working-directory: docs + enable-cache: true + + - name: Build docs + run: make -C docs multiversion + + - name: Build redirects + run: make -C docs redirects + + - name: Deploy docs to GitHub Pages + run: ./docs/_utils/deploy.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs-pages@v2.yaml b/.github/workflows/docs-pages@v2.yaml deleted file mode 100644 index a5cd2f2390..0000000000 --- a/.github/workflows/docs-pages@v2.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: "Docs / Publish" - -on: - push: - branches: - - master - paths: - - "docs/**" - workflow_dispatch: - -jobs: - release: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - persist-credentials: false - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev - - name: Build driver - run: python setup.py develop - - name: Build docs - run: make -C docs multiversion - - name: Deploy docs to GitHub Pages - run: ./docs/_utils/deploy.sh - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml new file mode 100644 index 0000000000..b5651c8159 --- /dev/null +++ b/.github/workflows/docs-pr.yml @@ -0,0 +1,43 @@ +name: "Docs / Build PR" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows + +on: + push: + branches: + - master + paths: + - "docs/**" + - ".github/workflows/docs-pr.yml" + - "cassandra/**" + - "pyproject.toml" + - "setup.py" + - "CHANGELOG.rst" + pull_request: + paths: + - "docs/**" + - ".github/workflows/docs-pr.yml" + - "cassandra/**" + - "pyproject.toml" + - "setup.py" + - "CHANGELOG.rst" + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + working-directory: docs + enable-cache: true + + - name: Build docs + run: make -C docs test diff --git a/.github/workflows/docs-pr@v1.yaml b/.github/workflows/docs-pr@v1.yaml deleted file mode 100644 index 2cb972b840..0000000000 --- a/.github/workflows/docs-pr@v1.yaml +++ /dev/null @@ -1,28 +0,0 @@ -name: "Docs / Build PR" - -on: - pull_request: - branches: - - master - paths: - - "docs/**" - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - persist-credentials: false - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev - - name: Build driver - 
run: python setup.py develop - - name: Build docs - run: make -C docs test \ No newline at end of file diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 8e1d292be8..210c2d4e2b 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -1,23 +1,95 @@ name: Integration tests on: - pull_request: + push: branches: - - master - + - master + - 'branch-**' + paths-ignore: + - docs/* + - examples/* + - scripts/* + - .gitignore + - '*.rst' + - '*.ini' + - LICENSE + - .github/dependabot.yml + - .github/pull_request_template.md + - "*.md" + - .github/workflows/docs-* + pull_request: + paths-ignore: + - docs/* + - examples/* + - scripts/* + - .gitignore + - '*.rst' + - '*.ini' + - LICENSE + - .github/dependabot.yml + - .github/pull_request_template.md + - "*.md" + - .github/workflows/docs-* - + workflow_dispatch: jobs: tests: - runs-on: ubuntu-20.04 - if: contains(github.event.pull_request.labels.*.name, 'integration-tests') + name: test ${{ matrix.event_loop_manager }} (${{ matrix.python-version }}) + if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" + runs-on: ubuntu-24.04 + env: + SCYLLA_VERSION: release:2025.2 + strategy: + fail-fast: false + matrix: + java-version: [8] + python-version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] + event_loop_manager: ["libev", "asyncio", "asyncore"] + exclude: + - python-version: "3.12" + event_loop_manager: "asyncore" + - python-version: "3.13" + event_loop_manager: "asyncore" + - python-version: "3.14" + event_loop_manager: "asyncore" + - python-version: "3.14t" + event_loop_manager: "asyncore" + steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - uses: actions/checkout@v6 + + - name: Set up JDK ${{ matrix.java-version }} + uses: actions/setup-java@v5 with: - python-version: 3.8 + java-version: ${{ matrix.java-version }} + distribution: 'adopt' + + - name: Install libev + run: sudo apt-get install libev4 libev-dev + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + python-version: ${{ matrix.python-version }} + + # This is to get honest accounting of test time vs download time vs build time. + # Not strictly necessary for running tests. + - name: Build driver + run: uv sync + + # This is to get honest accounting of test time vs download time vs build time. + # Not strictly necessary for running tests. 
+ - name: Download Scylla + run: | + uv run ccm create scylla-driver-temp -n 1 --scylla --version ${SCYLLA_VERSION} + uv run ccm remove - name: Test with pytest + env: + EVENT_LOOP_MANAGER: ${{ matrix.event_loop_manager }} + PROTOCOL_VERSION: 4 run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py - # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py + if [[ "${{ matrix.python-version }}" =~ t$ ]]; then + export PYTHON_GIL=0 + fi + uv run pytest tests/integration/standard/ tests/integration/cqlengine/ diff --git a/.github/workflows/lib-build-and-push.yml b/.github/workflows/lib-build-and-push.yml new file mode 100644 index 0000000000..735a4638f4 --- /dev/null +++ b/.github/workflows/lib-build-and-push.yml @@ -0,0 +1,195 @@ +name: Build and upload to PyPi + +on: + workflow_call: + inputs: + upload: + description: 'Upload to PyPI' + type: boolean + required: false + default: false + + python-version: + description: 'Python version to run on' + type: string + required: false + default: "3.13" + + target: + description: "Target os to build for: linux,macos,windows" + type: string + required: false + default: "linux,macos-x86,macos-arm,windows,linux-aarch64" + + target_tag: + description: "Publish particular tag" + type: string + required: false + default: "" + + ignore_tests: + description: "Don't run tests" + type: boolean + required: false + default: false + +jobs: + prepare-matrix: + name: "Prepare matrix to run for ${{ inputs.python-version }} on `${{ inputs.target }}`" + runs-on: ubuntu-24.04 + outputs: + matrix: ${{ steps.prepare.outputs.matrix }} + steps: + - name: Prepare matrix json from input matrix list + id: prepare + run: | + echo -n "[" > /tmp/matrix.json + was_added="" + for target in $(echo "${{ inputs.target }}" | tr -d " " | tr "," "\n") + do + if [[ "${target}" == "linux" ]]; then + [ -n "$was_added" ] && echo -n "," >> /tmp/matrix.json + echo -n '{"os":"ubuntu-24.04", "target": "linux"}' >> /tmp/matrix.json + was_added=1 + elif [[ "${target}" == "linux-aarch64" ]]; then + [ -n "$was_added" ] && echo -n "," >> /tmp/matrix.json + echo -n '{"os":"ubuntu-24.04-arm", "target": "linux-aarch64"}' >> /tmp/matrix.json + was_added=1 + elif [[ "${target}" == "windows" ]]; then + [ -n "$was_added" ] && echo -n "," >> /tmp/matrix.json + echo -n '{"os":"windows-2022", "target": "windows"}' >> /tmp/matrix.json + was_added=1 + elif [[ "${target}" == "macos-x86" ]]; then + [ -n "$was_added" ] && echo -n "," >> /tmp/matrix.json + echo -n '{"os":"macos-15-intel", "target": "macos-x86"}' >> /tmp/matrix.json + was_added=1 + elif [[ "${target}" == "macos-arm" ]]; then + [ -n "$was_added" ] && echo -n "," >> /tmp/matrix.json + echo -n '{"os":"macos-14", "target": "macos-arm"}' >> /tmp/matrix.json + was_added=1 + fi + done + echo -n "]" >> /tmp/matrix.json + echo -e "Resulted matrix json:\n$(cat /tmp/matrix.json)" + echo "matrix=$(cat /tmp/matrix.json)" >> $GITHUB_OUTPUT + + build-wheels: + name: Build wheels for ${{ matrix.target }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + needs: prepare-matrix + strategy: + fail-fast: false + matrix: + include: ${{ 
fromJson(needs.prepare-matrix.outputs.matrix) }} + + steps: + - uses: actions/checkout@v6 + + - name: Checkout tag ${{ inputs.target_tag }} + if: inputs.target_tag != '' + uses: actions/checkout@v6 + with: + ref: ${{ inputs.target_tag }} + + - name: Disable tests + if: inputs.ignore_tests + shell: bash + run: | + echo "CIBW_TEST_COMMAND=true" >> $GITHUB_ENV; + echo "CIBW_TEST_COMMAND_WINDOWS=(exit 0)" >> $GITHUB_ENV; + echo "CIBW_TEST_SKIP=*" >> $GITHUB_ENV; + echo "CIBW_BEFORE_TEST=true" >> $GITHUB_ENV; + echo "CIBW_BEFORE_TEST_WINDOWS=(exit 0)" >> $GITHUB_ENV; + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + python-version: ${{ inputs.python-version }} + + - name: Install cibuildwheel + run: | + uv tool install 'cibuildwheel==3.2.1' + + - name: Install OpenSSL for Windows + if: runner.os == 'Windows' + run: | + choco install openssl.light --no-progress -y + + - name: Install Conan + if: runner.os == 'Windows' + uses: turtlebrowser/get-conan@main + + - name: Configure libev for Windows + if: runner.os == 'Windows' + run: | + conan profile detect + conan install conanfile.py + + - name: Install libev for MacOS + if: runner.os == 'MacOs' + run: | + brew install libev + + - name: Overwrite for MacOS + if: runner.os == 'MacOS' + run: | + ##### Set MACOSX_DEPLOYMENT_TARGET + if [ "${{ matrix.os }}" == "macos-15-intel" ]; then + echo "MACOSX_DEPLOYMENT_TARGET=15.0" >> $GITHUB_ENV; + echo "Enforcing target deployment for 15.0" + elif [ "${{ matrix.os }}" == "macos-14" ]; then + echo "MACOSX_DEPLOYMENT_TARGET=14.0" >> $GITHUB_ENV; + echo "Enforcing target deployment for 14.0" + fi + + - name: Build wheels + if: matrix.target != 'linux-aarch64' + shell: bash + run: | + GITHUB_WORKFLOW_REF="scylladb/python-driver/.github/workflows/lib-build-and-push.yml@refs/heads/master" cibuildwheel --output-dir wheelhouse + + - name: Build wheels for linux aarch64 + if: matrix.target == 'linux-aarch64' + run: | + GITHUB_WORKFLOW_REF="scylladb/python-driver/.github/workflows/lib-build-and-push.yml@refs/heads/master" CIBW_BUILD="cp3*" cibuildwheel --archs aarch64 --output-dir wheelhouse + + - uses: actions/upload-artifact@v6 + with: + name: wheels-${{ matrix.target }}-${{ matrix.os }} + path: ./wheelhouse/*.whl + + build-sdist: + name: Build source distribution + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + python-version: ${{ inputs.python-version }} + + - name: Build sdist + run: uv build --sdist + + - uses: actions/upload-artifact@v6 + with: + name: source-dist + path: dist/*.tar.gz + + upload_pypi: + if: inputs.upload + needs: [build-wheels, build-sdist] + runs-on: ubuntu-24.04 + permissions: + id-token: write + + steps: + - uses: actions/download-artifact@v7 + with: + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + skip-existing: true diff --git a/.github/workflows/publish-manually.yml b/.github/workflows/publish-manually.yml new file mode 100644 index 0000000000..09b9779117 --- /dev/null +++ b/.github/workflows/publish-manually.yml @@ -0,0 +1,66 @@ +name: Build and upload to PyPi manually + +permissions: + contents: read + +on: + workflow_dispatch: + inputs: + upload: + description: 'Upload to PyPI' + type: boolean + required: false + default: false + + python-version: + description: 'Python version to run on' + type: string + required: false + default: "3.13" + + target: + description: "Target os to build for: linux,macos,windows" + type: string + required: false + 
default: "linux,macos-x86,macos-arm,windows,linux-aarch64" + + target_tag: + description: "Publish particular tag" + type: string + required: false + default: "" + + ignore_tests: + description: "Don't run tests" + type: boolean + required: false + default: false + +jobs: + build-and-publish: + name: "Build wheels" + uses: ./.github/workflows/lib-build-and-push.yml + with: + upload: false + python-version: ${{ inputs.python-version }} + ignore_tests: ${{ inputs.ignore_tests }} + target_tag: ${{ inputs.target_tag }} + target: ${{ inputs.target }} + + # TODO: Remove when https://github.com/pypa/gh-action-pypi-publish/issues/166 is fixed and update build-and-publish.with.upload to ${{ inputs.upload }} + publish: + name: "Publish wheels to PyPi" + needs: build-and-publish + if: inputs.upload + runs-on: ubuntu-24.04 + permissions: + id-token: write + steps: + - uses: actions/download-artifact@v7 + with: + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + skip-existing: true diff --git a/.gitignore b/.gitignore index d2e5116b32..28cf1ba218 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ *.swo *.so *.egg +*.eggs *.egg-info *.attr .tox @@ -11,13 +12,12 @@ build MANIFEST dist .coverage -nosetests.xml cover/ docs/_build/ -docs/poetry.lock tests/integration/ccm setuptools*.tar.gz setuptools*.egg +venv/ cassandra/*.c !cassandra/cmurmur3.c @@ -43,3 +43,40 @@ tests/unit/cython/bytesio_testhelper.c #iPython *.ipynb +# Files from upstream that we don't need +Jenkinsfile +Jenkinsfile.bak +appveyor.yml +appveyor/appveyor.ps1 +appveyor/run_test.ps1 +build.yaml.bak +docs.yaml +doxyfile +tox.ini +test-datastax-requirements.txt +docs/api/cassandra/datastax/graph/fluent/index.rst +docs/api/cassandra/datastax/graph/fluent/predicates.rst +docs/api/cassandra/datastax/graph/fluent/query.rst +docs/api/cassandra/datastax/graph/index.rst +docs/api/cassandra/graph.rst +docs/classic_graph.rst +docs/core_graph.rst +docs/geo_types.rst +docs/graph.rst +docs/graph_fluent.rst + +# Codex - AI assistant metadata +.codex/ +.codex-cache/ +.codex-config.json +.codex-settings.json +codex.log +AGENTS.md + +# Claude - AI assistant metadata +.anthropic/ +.claude/ +claude.log +claude_history.json +claude_config.json +CLAUDE.md \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cb8dddfc51..82c84ccc51 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,195 @@ +3.29.7 +====== +December 08, 2025 + +Bug Fixes +--------- +* Make compression=None a valid case (#610) + +3.29.6 +====== +November 27, 2025 + +* Rename connection_metadata to client_routes (#608) +* TokenAwarePolicy: enable shuffling by default (#478) +* Add support of LWT flag for BatchStatement (#606) +* Add support of CONNECTION_METADATA_CHANGE event (#601) +* Add LWT support (#584) +* Add support for Python 3.14 (#566) +* Fix dict handling in pool and metrics (#595) +* Remove serverless code (#590) +* tests: drop `sure` package (#592) +* compression: better handle configuration problems (#585) + +3.29.5 +====== +November 5, 2025 + +Bug Fixes +--------- +* Update TokenAwarePolicy.make_query_plan to schedule to replicas first (#548) +* Drop _tablets_routing_v1 flag from token-aware policy (#547) +* Fix dc aware and rack aware policies initialization (#546) +* Fix Cluster.metadata_request_timeout and default it from control_connection_timeout (#539) + +Others +------ +* Drop python 3.9 support (#564) + +3.29.4 +====== +August 16, 2025 + +Features +-------- +* Add Cluster.application_info to 
report application information to server (#486) +* Move to uv package manager (#496) + +Bug Fixes +--------- +* Fix deadlocks on evicting connection in HostConnectionPool and ConnectionPool (#499) +* Fix libevreactor crashing when connection added and closed right away (#508) + +Others +------ +* Remove outdated protocols support (v1 and v2) (#493, #525) +* Remove DSE integration tests (#502) +* Optimise shard port allocator (#506) +* Remove self.assert (#507) +* Minor performance improvement for make_token_replica_map (#513) +* Remove in-memory Scylla tables support (#518) +* Add optional dependencies for SNAPPY and LZ4 compressors (#517) +* Remove support for protocol versions not supported by Scylla (#492) +* Set monitor_reporting_enabled False by default (#523) + +3.29.3 +====== +March 11, 2025 + +Bug Fixes +--------- + +* Fix regression after #443 when _is_gevent_monkey_patched was broken (#452) + +Others +------ +* Fix sync_table to raise error on adding partition key (#433) +* Upgrade CICD Runners to ubuntu-24.04 (#451) +* Make connection tests to fail when failed to import connection class from EVENT_LOOP_MANAGER + +3.29.2 +====== +September 9, 2024 + +Features +-------- +* Convert to pytest for running unit and integration tests (PYTHON-1297) +* Add support for Cassandra 4.1.x and 5.0 releases to CI (PYTHON-1393) +* Extend driver vector support to arbitrary subtypes and fix handling of variable length types (PYTHON-1369) + +Bug Fixes +--------- +* Python NumpyProtocolHandler does not work with NumPy 1.24.0 or greater (PYTHON-1359) +* cibuildwheel appears to not be stripping Cython-generated shared objects (PYTHON-1387) +* Windows build for Python 3.12 compiled without libev support (PYTHON-1386) + +Others +------ +* Update README.rst with badges for version and license (PR 1210) +* Remove dependency on old mock external module (PR 1201) +* Removed future print_function, division, and with and some pre 3.7 handling (PR 1208) +* Update geomet dependency (PR 1207) +* Remove problematic escape sequences in some docstrings to avoid SyntaxWarning in Python 3.12 (PR 1205) +* Use timezone-aware API to avoid deprecated warning (PR 1213) + +3.29.1 +====== +March 19, 2024 + +Bug Fixes +--------- +* cassandra-driver for Python 3.12 Linux is compiled without libev support (PYTHON-1378) +* Consider moving to native wheel builds for OS X and removing universal2 wheels (PYTHON-1379) + +3.29.0 +====== +December 19, 2023 + +Features +-------- +* Add support for Python 3.9 through 3.12, drop support for 3.7 (PYTHON-1283) +* Removal of dependency on six module (PR 1172) +* Raise explicit exception when deserializing a vector with a subtype that isn’t a constant size (PYTHON-1371) + +Others +------ +* Remove outdated Python pre-3.7 references (PR 1186) +* Remove backup(.bak) files (PR 1185) +* Fix doc typo in add_callbacks (PR 1177) + +3.28.0 +====== +June 5, 2023 + +Features +-------- +* Add support for vector type (PYTHON-1352) +* Cryptography module is now an optional dependency (PYTHON-1351) + +Bug Fixes +--------- +* Store IV along with encrypted text when using column-level encryption (PYTHON-1350) +* Create session-specific protocol handlers to contain session-specific CLE policies (PYTHON-1356) + +Others +------ +* Use Cython for smoke builds (PYTHON-1343) +* Don't fail when inserting UDTs with prepared queries with some missing fields (PR 1151) +* Convert print statement to function in docs (PR 1157) +* Update comment for retry policy (DOC-3278) +* Added error handling blog reference 
(DOC-2813) + +3.27.0 +====== +May 1, 2023 + +Features +-------- +* Add support for client-side encryption (PYTHON-1341) + +3.26.0 +====== +March 13, 2023 + +Features +-------- +* Add support for execution profiles in execute_concurrent (PR 1122) + +Bug Fixes +--------- +* Handle empty non-final result pages (PR 1110) +* Do not re-use stream IDs for in-flight requests (PR 1114) +* Asyncore race condition cause logging exception on shutdown (PYTHON-1266) + +Others +------ +* Fix deprecation warning in query tracing (PR 1103) +* Remove mutable default values from some tests (PR 1116) +* Remove dependency on unittest2 (PYTHON-1289) +* Fix deprecation warnings for asyncio.coroutine annotation in asyncioreactor (PYTHON-1290) +* Fix typos in source files (PR 1126) +* HostFilterPolicyInitTest fix for Python 3.11 (PR 1131) +* Fix for DontPrepareOnIgnoredHostsTest (PYTHON-1287) +* tests.integration.simulacron.test_connection failures (PYTHON-1304) +* tests.integration.standard.test_single_interface.py appears to be failing for C* 4.0 (PYTHON-1329) +* Authentication tests appear to be failing fraudulently (PYTHON-1328) +* PreparedStatementTests.test_fail_if_different_query_id_on_reprepare() failing unexpectedly (PTYHON-1327) +* Refactor deprecated unittest aliases for Python 3.11 compatibility (PR 1112) + +Deprecations +------------ +* This release removes support for Python 2.7.x as well as Python 3.5.x and 3.6.x + 3.25.0 ====== March 18, 2021 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cdd742c063..8b8fc0e791 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -5,7 +5,7 @@ Contributions are welcome in the form of bug reports or pull requests. Bug Reports ----------- -Quality bug reports are welcome at the `DataStax Python Driver JIRA `_. +Quality bug reports are welcome at the `Scylla Python Driver Github `_. There are plenty of `good resources `_ describing how to create good bug reports. They will not be repeated in detail here, but in general, the bug report include where appropriate: @@ -18,16 +18,105 @@ good bug reports. They will not be repeated in detail here, but in general, the Pull Requests ------------- If you're able to fix a bug yourself, you can `fork the repository `_ and submit a `Pull Request `_ with the fix. -Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. - -Contribution License Agreement ------------------------------- -To protect the community, all contributors are required to `sign the DataStax Contribution License Agreement `_. The process is completely electronic and should only take a few minutes. +Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the further parts of this document. Design and Implementation Guidelines ------------------------------------ -- We support Python 2.7+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) - We have integrations (notably Cassandra cqlsh) that require pure Python and minimal external dependencies. We try to avoid new external dependencies. Where compiled extensions are concerned, there should always be a pure Python fallback implementation. - This project follows `semantic versioning `_, so breaking API changes will only be introduced in major versions. - Legacy ``cqlengine`` has varying degrees of overreaching client-side validation. 
Going forward, we will avoid client validation where server feedback is adequate and not overly expensive. - When writing tests, try to achieve maximal coverage in unit tests (where it is faster to run across many runtimes). Integration tests are good for things where we need to test server interaction, or where it is important to test across different server versions (emulating in unit tests would not be effective). + +Dev setup +========= + +We recommend using `uv` tool for running tests, linters and basically everything else, +since it makes Python tooling ecosystem mostly usable. +To install it, see instructions at https://docs.astral.sh/uv/getting-started/installation/ +The rest of this document assumes you have `uv` installed. + +It is also strongly recommended to use C/C++-caching tool like ccache or sccache. +When modifying driver files, rebuilding Cython modules is often necessary. +Without caching, each such rebuild may take over a minute. Caching usually brings it +down to about 2-3 seconds. + +Building the Docs +================= + +To build and preview the documentation for the ScyllaDB Python driver locally, you must first manually install `python-driver`. +This is necessary for autogenerating the reference documentation of the driver. +You can find detailed instructions on how to install the driver in the `Installation guide `_. + +After installing the driver, you can build the documentation: +- Make sure you have Python version compatible with docs. You can see supported version in ``docs/pyproject.toml`` - look for ``python`` in ``tool.poetry.dependencies`` section. +- Install poetry: ``pip install poetry`` +- To preview docs in your browser: ``make -C docs preview`` + +Tests +===== + +Running Unit Tests +------------------ +Unit tests can be run like so:: + + uv run pytest tests/unit + EVENT_LOOP_MANAGER=gevent uv run pytest tests/unit/io/test_geventreactor.py + EVENT_LOOP_MANAGER=eventlet uv run pytest tests/unit/io/test_eventletreactor.py + +You can run a specific test method like so:: + + uv run pytest tests/unit/test_connection.py::ConnectionTest::test_bad_protocol_version + +Running Integration Tests +------------------------- +In order to run integration tests, you must specify a version to run using either of: +* ``SCYLLA_VERSION`` e.g. ``release:2025.2`` +* ``CASSANDRA_VERSION`` +environment variable:: + + SCYLLA_VERSION="release:2025.2" uv run pytest tests/integration/standard tests/integration/cqlengine/ + +Or you can specify a scylla/cassandra directory (to test unreleased versions):: + + SCYLLA_VERSION=/path/to/scylla uv run pytest tests/integration/standard/ + +Specifying the usage of an already running Scylla cluster +------------------------------------------------------------ +The test will start the appropriate Scylla clusters when necessary but if you don't want this to happen because a Scylla cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example:: + + USE_CASS_EXTERNAL=1 SCYLLA_VERSION='release:5.1' uv run pytest tests/integration/standard + +Specify a Protocol Version for Tests +------------------------------------ +The protocol version defaults to: +- 4 for Scylla >= 3.0 and Scylla Enterprise > 2019. 
+- 3 for older versions of Scylla +- 5 for Cassandra >= 4.0, 4 for Cassandra >= 2.2, 3 for Cassandra >= 2.1, 2 for Cassandra >= 2.0 +You can overwrite it with the ``PROTOCOL_VERSION`` environment variable:: + + PROTOCOL_VERSION=3 SCYLLA_VERSION="release:5.1" uv run pytest tests/integration/standard tests/integration/cqlengine/ + +Seeing Test Logs in Real Time +----------------------------- +Sometimes it's useful to output logs for the tests as they run:: + + uv run pytest -s tests/unit/ + +Use tee to capture logs and see them on your terminal:: + + uv run pytest -s tests/unit/ 2>&1 | tee test.log + + +Running the Benchmarks +====================== +There needs to be a version of Scyll running locally so before running the benchmarks, if ccm is installed: + + uv run ccm create benchmark_cluster --scylla -v release:2025.2 -n 1 -s + +To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: + + uv run benchmarks/future_batches.py + +There are a few options. Use ``--help`` to see them all:: + + uv run benchmarks/future_batches.py --help diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 9b03b497e3..0000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,675 +0,0 @@ -#!groovy -/* - -There are multiple combinations to test the python driver. - -Test Profiles: - - Full: Execute all unit and integration tests, including long tests. - Standard: Execute unit and integration tests. - Smoke Tests: Execute a small subset of tests. - EVENT_LOOP: Execute a small subset of tests selected to test EVENT_LOOPs. - -Matrix Types: - - Full: All server versions, python runtimes tested with and without Cython. - Develop: Smaller matrix for dev purpose. - Cassandra: All cassandra server versions. - Dse: All dse server versions. - -Parameters: - - EVENT_LOOP: 'LIBEV' (Default), 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED' - CYTHON: Default, 'True', 'False' - -*/ - -@Library('dsdrivers-pipeline-lib@develop') -import com.datastax.jenkins.drivers.python.Slack - -slack = new Slack() - -// Define our predefined matrices -matrices = [ - "FULL": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ], - "DEVELOP": [ - "SERVER": ['2.1', '3.11', 'dse-6.8'], - "RUNTIME": ['2.7.18', '3.6.10'], - "CYTHON": ["True", "False"] - ], - "CASSANDRA": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ], - "DSE": [ - "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ] -] - -def getBuildContext() { - /* - Based on schedule, parameters and branch name, configure the build context and env vars. 
- */ - - def driver_display_name = 'Cassandra Python Driver' - if (env.GIT_URL.contains('riptano/python-driver')) { - driver_display_name = 'private ' + driver_display_name - } else if (env.GIT_URL.contains('python-dse-driver')) { - driver_display_name = 'DSE Python Driver' - } - - def git_sha = "${env.GIT_COMMIT.take(7)}" - def github_project_url = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - def github_branch_url = "${github_project_url}/tree/${env.BRANCH_NAME}" - def github_commit_url = "${github_project_url}/commit/${env.GIT_COMMIT}" - - def profile = "${params.PROFILE}" - def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" - matrixType = "FULL" - developBranchPattern = ~"((dev|long)-)?python-.*" - - if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { - matrixType = "DEVELOP" - if (env.BRANCH_NAME.contains("long")) { - profile = "FULL" - } - } - - // Check if parameters were set explicitly - if (params.MATRIX != "DEFAULT") { - matrixType = params.MATRIX - } - - matrix = matrices[matrixType].clone() - if (params.CYTHON != "DEFAULT") { - matrix["CYTHON"] = [params.CYTHON] - } - - if (params.SERVER_VERSION != "DEFAULT") { - matrix["SERVER"] = [params.SERVER_VERSION] - } - - if (params.PYTHON_VERSION != "DEFAULT") { - matrix["RUNTIME"] = [params.PYTHON_VERSION] - } - - if (params.CI_SCHEDULE == "WEEKNIGHTS") { - matrix["SERVER"] = params.CI_SCHEDULE_SERVER_VERSION.split(' ') - matrix["RUNTIME"] = params.CI_SCHEDULE_PYTHON_VERSION.split(' ') - } - - context = [ - vars: [ - "PROFILE=${profile}", - "EVENT_LOOP=${EVENT_LOOP}", - "DRIVER_DISPLAY_NAME=${driver_display_name}", "GIT_SHA=${git_sha}", "GITHUB_PROJECT_URL=${github_project_url}", - "GITHUB_BRANCH_URL=${github_branch_url}", "GITHUB_COMMIT_URL=${github_commit_url}" - ], - matrix: matrix - ] - - return context -} - -def buildAndTest(context) { - initializeEnvironment() - installDriverAndCompileExtensions() - - try { - executeTests() - } finally { - junit testResults: '*_results.xml' - } -} - -def getMatrixBuilds(buildContext) { - def tasks = [:] - matrix = buildContext.matrix - - matrix["SERVER"].each { serverVersion -> - matrix["RUNTIME"].each { runtimeVersion -> - matrix["CYTHON"].each { cythonFlag -> - def taskVars = [ - "CASSANDRA_VERSION=${serverVersion}", - "PYTHON_VERSION=${runtimeVersion}", - "CYTHON_ENABLED=${cythonFlag}" - ] - def cythonDesc = cythonFlag == "True" ? 
", Cython": "" - tasks["${serverVersion}, py${runtimeVersion}${cythonDesc}"] = { - node("${OS_VERSION}") { - checkout scm - - withEnv(taskVars) { - buildAndTest(context) - } - } - } - } - } - } - return tasks -} - -def initializeEnvironment() { - sh label: 'Initialize the environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - sudo apt-get install socat - pip install --upgrade pip - pip install -U setuptools - pip install ${HOME}/ccm - ''' - - // Determine if server version is Apache CassandraⓇ or DataStax Enterprise - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' - } else { - sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex - pip install -r test-requirements.txt - ''' - - sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex - pip uninstall -y geomet - ''' - } - - sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity - ''' - - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install cython modules', script: '''#!/bin/bash -lex - pip install cython numpy - ''' - } - - sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex - . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} - ''' - - sh label: 'Display Python and environment information', script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - python --version - pip --version - pip freeze - printenv | sort - ''' -} - -def installDriverAndCompileExtensions() { - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace - ''' - } else { - sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace --no-cython - ''' - } -} - -def executeStandardTests() { - - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' - - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - ''' - - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' - } - - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.PROFILE == 'FULL') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - ''' - } -} - -def executeDseSmokeTests() { - sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - ''' -} - -def executeEventLoopTests() { - sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeTests() { - switch(env.PROFILE) { - case 'DSE-SMOKE-TEST': - executeDseSmokeTests() - break - case 'EVENT_LOOP': - executeEventLoopTests() - break - default: - executeStandardTests() - break - } -} - - -// TODO move this in the shared lib -def getDriverMetricType() { - metric_type = 'oss' - if (env.GIT_URL.contains('riptano/python-driver')) { - metric_type = 'oss-private' - } else if (env.GIT_URL.contains('python-dse-driver')) { - metric_type = 'dse' - } - return metric_type -} - -def submitCIMetrics(buildType) { - long durationMs = currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - metric_type = getDriverMetricType() - def durationMetric = "okr.ci.python.${metric_type}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - -def describeBuild(buildContext) { - script { - def runtimes = buildContext.matrix["RUNTIME"] - def serverVersions = buildContext.matrix["SERVER"] - def numBuilds = runtimes.size() * serverVersions.size() * buildContext.matrix["CYTHON"].size() - currentBuild.displayName = "${env.PROFILE} (${env.EVENT_LOOP} | ${numBuilds} builds)" - currentBuild.description = "${env.PROFILE} build testing servers (${serverVersions.join(', ')}) against Python (${runtimes.join(', ')}) using ${env.EVENT_LOOP} event loop manager" - } -} - -def scheduleTriggerJobName = "drivers/python/oss/master/disabled" - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 
'HOURS') // TODO timeout should be per build - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform an adhoc build operation

Choice | Description
BUILD | Performs a Per-Commit build
BUILD-AND-EXECUTE-TESTS | Performs a build and executes the integration and unit tests
''') - choice( - name: 'PROFILE', - choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], - description: '''

Profile to utilize for scheduled or adhoc builds

Choice | Description
STANDARD | Execute the standard tests for the driver
FULL | Execute all tests for the driver, including long tests.
DSE-SMOKE-TEST | Execute only the DataStax Enterprise smoke tests
EVENT_LOOP | Execute only the event loop tests for the specified event loop manager (see: EVENT_LOOP)
''') - choice( - name: 'MATRIX', - choices: ['DEFAULT', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], - description: '''

The matrix for the build.

Choice | Description
DEFAULT | Default to the build context.
FULL | All server versions and Python runtimes, tested with and without Cython.
DEVELOP | Smaller matrix for development purposes.
CASSANDRA | All Apache Cassandra server versions.
DSE | All DataStax Enterprise server versions.
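The size of the resulting matrix is simply the product of the selected axes, which is how describeBuild() above computes numBuilds. A minimal Python sketch of that arithmetic, using illustrative values rather than the pipeline defaults:

```python
# Illustrative only: mirrors the numBuilds calculation in describeBuild().
runtimes = ["3.7.7", "3.8.3"]           # example Python runtimes, not the CI defaults
server_versions = ["3.11", "dse-6.8"]   # example server versions
cython_settings = [True, False]         # with and without Cython

num_builds = len(runtimes) * len(server_versions) * len(cython_settings)
print(f"{num_builds} builds in this matrix")  # -> 8 builds
```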
''') - choice( - name: 'PYTHON_VERSION', - choices: ['DEFAULT', '2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python runtime version. Default to the build context.') - choice( - name: 'SERVER_VERSION', - choices: ['DEFAULT', - '2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise - ], - description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Choice | Description
DEFAULT | Default to the build context.
2.1 | Apache CassandraⓇ v2.1.x
2.2 | Apache CassandraⓇ v2.2.x
3.0 | Apache CassandraⓇ v3.0.x
3.11 | Apache CassandraⓇ v3.11.x
4.0 | Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0 | DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1 | DataStax Enterprise v5.1.x
dse-6.0 | DataStax Enterprise v6.0.x
dse-6.7 | DataStax Enterprise v6.7.x
dse-6.8 | DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') - choice( - name: 'CYTHON', - choices: ['DEFAULT', 'True', 'False'], - description: '''

Flag to determine if Cython should be enabled

Choice | Description
Default | Default to the build context.
True | Enable Cython
False | Disable Cython
''') - choice( - name: 'EVENT_LOOP', - choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], - description: '''

Event loop manager to utilize for scheduled or adhoc builds

Choice | Description
LIBEV | A full-featured and high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs
GEVENT | A coroutine-based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop
EVENTLET | A concurrent networking library for Python that allows you to change how you run your code, not how you write it
ASYNCIO | A library to write concurrent code using the async/await syntax
ASYNCORE | A module that provides the basic infrastructure for writing asynchronous socket service clients and servers
TWISTED | An event-driven networking engine written in Python and licensed under the open source MIT license
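In the test runs this choice is exported as an environment variable (EVENT_LOOP here, EVENT_LOOP_MANAGER in the older Jenkinsfile below); from a driver user's point of view the equivalent is choosing the reactor passed to the Cluster. A minimal sketch, assuming the standard cassandra.cluster API with the libev reactor installed; the contact point is illustrative:

```python
# Minimal sketch: select the reactor explicitly instead of relying on the default.
from cassandra.cluster import Cluster
from cassandra.io.libevreactor import LibevConnection  # requires libev and the C extension

cluster = Cluster(["127.0.0.1"], connection_class=LibevConnection)
session = cluster.connect()
print(session.execute("SELECT release_version FROM system.local").one())
cluster.shutdown()
```

Swapping LibevConnection for the gevent, eventlet, asyncio, asyncore or Twisted reactor follows the same pattern, which is why the CI can drive one test suite across all of the managers listed above.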
''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_PYTHON_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - parameterizedCron((scheduleTriggerJobName == env.JOB_NAME) ? """ - # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' - } - - stages { - stage ('Build and Test') { - agent { - // // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. - // // However, this trigger an additional checkout - label "master" - } - when { - beforeAgent true - allOf { - not { buildingTag() } - } - } - - steps { - script { - context = getBuildContext() - withEnv(context.vars) { - describeBuild(context) - slack.notifyChannel() - - // build and test all builds - parallel getMatrixBuilds(context) - - // send the metrics - submitCIMetrics('commit') - slack.notifyChannel(currentBuild.currentResult) - } - } - } - } - - } -} diff --git a/Jenkinsfile.bak b/Jenkinsfile.bak deleted file mode 100644 index 87b20804ca..0000000000 --- a/Jenkinsfile.bak +++ /dev/null @@ -1,873 +0,0 @@ -#!groovy - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' - env.DRIVER_METRIC_TYPE = 'oss' - if (env.GIT_URL.contains('riptano/python-driver')) { - env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME - env.DRIVER_METRIC_TYPE = 'oss-private' - } else if (env.GIT_URL.contains('python-dse-driver')) { - env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' - env.DRIVER_METRIC_TYPE = 'dse' - } - - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" - - sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - ''' - - sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex - sudo apt-get install socat - ''' - - sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex - pip install --upgrade pip - pip install -U setuptools - ''' - - sh label: 'Install CCM', script: '''#!/bin/bash -lex - pip install ${HOME}/ccm - ''' - - // Determine if server version is Apache Cassandra� or DataStax Enterprise - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' - } else { - sh label: 'Install Apache CassandraⓇ 
requirements', script: '''#!/bin/bash -lex - pip install -r test-requirements.txt - ''' - - sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex - pip uninstall -y geomet - ''' - - } - - sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity - ''' - - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install cython modules', script: '''#!/bin/bash -lex - pip install cython numpy - ''' - } - - sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex - . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} - ''' - - sh label: 'Display Python and environment information', script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - python --version - pip --version - printenv | sort - ''' -} - -def installDriverAndCompileExtensions() { - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace - ''' - } else { - sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace --no-cython - ''' - } -} - -def executeStandardTests() { - - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' - - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - ''' - - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' - } - - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.EXECUTE_LONG_TESTS == 'True') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - ''' - } -} - -def executeDseSmokeTests() { - sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - ''' -} - -def executeEventLoopTests() { - sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeUpgradeTests() { - sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - ''' -} - -def executeTests() { - switch(params.PROFILE) { - case 'DSE-SMOKE-TEST': - executeDseSmokeTests() - break - case 'EVENT-LOOP': - executeEventLoopTests() - break - case 'UPGRADE': - executeUpgradeTests() - break - default: - executeStandardTests() - break - } -} - -def notifySlack(status = 'started') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" - } - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" - } - - slackSend color: "${color}", - channel: "#python-driver-dev-bots", - message: "${message}" -} - -def submitCIMetrics(buildType) { - long durationMs = 
currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - -def describePerCommitStage() { - script { - def type = 'standard' - def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' - if (env.BRANCH_NAME ==~ /long-python.*/) { - type = 'long' - } else if (env.BRANCH_NAME ==~ /dev-python.*/) { - type = 'dev' - } - - currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" - currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" - } - - sh label: 'Describe the python environment', script: '''#!/bin/bash -lex - python -V - pip freeze - ''' -} - -def describeScheduledTestingStage() { - script { - def type = params.CI_SCHEDULE.toLowerCase().capitalize() - def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" - def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" - def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython enabled" - } - if (params.PROFILE != 'NONE') { - description += ", ${params.PROFILE} profile" - } - description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" - currentBuild.description = description - } -} - -def describeAdhocTestingStage() { - script { - def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] - def serverDisplayName = 'Apache CassandaraⓇ' - def serverVersion = " v${serverType}" - if (serverType == 'ALL') { - serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" - serverVersion = '' - } else { - try { - serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" - } catch (e) { - ;; // no-op - } - if (serverType == 'dse') { - serverDisplayName = 'DataStax Enterprise' - } - } - def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} 
against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython" - } - if (params.PROFILE == 'NONE') { - if (params.EXECUTE_LONG_TESTS) { - description += ", with" - } else { - description += ", without" - } - description += " long tests executed" - } else { - description += ", ${params.PROFILE} profile" - } - currentBuild.description = description - } -} - -def branchPatternCron = ~"(master)" -def riptanoPatternCron = ~"(riptano)" - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 'HOURS') - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform an adhoc build operation

Choice | Description
BUILD | Performs a Per-Commit build
BUILD-AND-EXECUTE-TESTS | Performs a build and executes the integration and unit tests
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', - choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise - 'ALL'], - description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Choice | Description
2.1 | Apache CassandraⓇ v2.1.x
2.2 | Apache CassandraⓇ v2.2.x
3.0 | Apache CassandraⓇ v3.0.x
3.11 | Apache CassandraⓇ v3.11.x
4.0 | Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0 | DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1 | DataStax Enterprise v5.1.x
dse-6.0 | DataStax Enterprise v6.0.x
dse-6.7 | DataStax Enterprise v6.7.x
dse-6.8 | DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') - booleanParam( - name: 'CYTHON', - defaultValue: false, - description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') - booleanParam( - name: 'EXECUTE_LONG_TESTS', - defaultValue: false, - description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') - choice( - name: 'EVENT_LOOP_MANAGER', - choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], - description: '''

Event loop manager to utilize for scheduled or adhoc builds

Choice | Description
LIBEV | A full-featured and high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs
GEVENT | A coroutine-based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop
EVENTLET | A concurrent networking library for Python that allows you to change how you run your code, not how you write it
ASYNCIO | A library to write concurrent code using the async/await syntax
ASYNCORE | A module that provides the basic infrastructure for writing asynchronous socket service clients and servers
TWISTED | An event-driven networking engine written in Python and licensed under the open source MIT license
''') - choice( - name: 'PROFILE', - choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], - description: '''

Profile to utilize for scheduled or adhoc builds

Choice | Description
NONE | Execute the standard tests for the driver
DSE-SMOKE-TEST | Execute only the DataStax Enterprise smoke tests
EVENT-LOOP | Execute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)
UPGRADE | Execute only the upgrade tests
''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_PYTHON_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ - # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Saturday around 12:00, 4:00 and 8:00 PM - # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 20 * * 6 
%CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Sunday around 12:00 and 4:00 AM - # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" - EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 
'True' : 'False'}" - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' - } - - stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') - } - when { - beforeAgent true - branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '3.11', // Current Apache Cassandra - 'dse-6.8' // Current DataStax Enterprise - } - axis { - name 'PYTHON_VERSION' - values '2.7.18', '3.5.9' - } - axis { - name 'CYTHON_ENABLED' - values 'False' - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - - script { - if (env.BRANCH_NAME ==~ /long-python.*/) { - withEnv(["EXECUTE_LONG_TESTS=True"]) { - executeTests() - } - } - else { - executeTests() - } - } - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - always { - node('master') { - submitCIMetrics('commit') - } - } - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage ('Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7' // Current DataStax Enterprise - } - axis { - name 'CYTHON_ENABLED' - values 'True', 'False' - } - } - when { - beforeAgent true - allOf { - expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } - } - } - - environment { - PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" - } - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describeScheduledTestingStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - - stage('Adhoc-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - } - } - - environment { - CYTHON_ENABLED = "${params.CYTHON ? 
'True' : 'False'}" - PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - '4.0', // Development Apache Cassandra - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Current DataStax Enterprise - 'dse-6.8' // Development DataStax Enterprise - } - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Describe-Build') { - steps { - describeAdhocTestingStage() - } - } - stage('Initialize-Environment') { - steps { - initializeEnvironment() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - } - } -} diff --git a/MAINTENANCE.md b/MAINTENANCE.md new file mode 100644 index 0000000000..8fc860ac4b --- /dev/null +++ b/MAINTENANCE.md @@ -0,0 +1,13 @@ +Releasing +========= +* Run the tests and ensure they all pass +* Update the version in ``cassandra/__init__.py`` +* Add the new version in ``docs/conf.py`` (variables: ``TAGS``, ``LATEST_VERSION``, ``DEPRECATED_VERSIONS``). + * For patch version releases (like ``3.26.8-scylla -> 3.26.9-scylla``) replace the old version with new one in ``TAGS`` and update ``LATEST_VERSION``. + * For minor version releases (like ``3.26.9-scylla -> 3.27.0-scylla``) add new version to ``TAGS``, update ``LATEST_VERSION`` and add previous minor version to ``DEPRECATED_VERSIONS``. +* Commit the version changes, e.g. ``git commit -m 'Release 3.26.9'`` +* Tag the release. For example: ``git tag -a 3.26.9-scylla -m 'Release 3.26.9'`` +* Push the tag and new ``master`` SIMULTANEOUSLY: ``git push --atomic origin master v6.0.21-scylla`` +* Now new version and its docs should be automatically published. Check `PyPI `_ and `docs `_ to make sure its there. +* If you didn't push branch and tag simultaneously (or doc publishing failed for other reason) then restart the relevant job from GitHub Actions UI. +* Publish a GitHub Release and a post on community forum. diff --git a/MANIFEST.in b/MANIFEST.in index 660db719b0..f67e7fc2fe 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,7 @@ -include setup.py README.rst MANIFEST.in LICENSE ez_setup.py +include setup.py README.rst MANIFEST.in LICENSE include cassandra/cmurmur3.c include cassandra/io/libevwrapper.c include cassandra/*.pyx include cassandra/*.pxd include cassandra/*.h +graft build-release \ No newline at end of file diff --git a/README-dev.rst b/README-dev.rst deleted file mode 100644 index f2d044b103..0000000000 --- a/README-dev.rst +++ /dev/null @@ -1,211 +0,0 @@ -Releasing -========= -* Run the tests and ensure they all pass -* Update CHANGELOG.rst - * Check for any missing entries - * Add today's date to the release section -* Update the version in ``cassandra/__init__.py`` - * For beta releases, use a version like ``(2, 1, '0b1')`` - * For release candidates, use a version like ``(2, 1, '0rc1')`` - * When in doubt, follow PEP 440 versioning -* Add the new version in ``docs.yaml`` -* Commit the changelog and version changes, e.g. 
``git commit -m'version 1.0.0'`` -* Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` -* Push the tag and new ``master``: ``git push origin 1.0.0 ; git push origin master`` -* Update the `python-driver` submodule of `python-driver-wheels`, - commit then push. This will trigger TravisCI and the wheels building. -* For a GA release, upload the package to pypi:: - - # Clean the working directory - python setup.py clean - rm dist/* - - # Build the source distribution - python setup.py sdist - - # Download all wheels from the jfrog repository and copy them in - # the dist/ directory - cp /path/to/wheels/*.whl dist/ - - # Upload all files - twine upload dist/* - -* On pypi, make the latest GA the only visible version -* Update the docs (see below) -* Append a 'postN' string to the version tuple in ``cassandra/__init__.py`` - so that it looks like ``(x, y, z, 'postN')`` - - * After a beta or rc release, this should look like ``(2, 1, '0b1', 'post0')`` - -* After the release has been tagged, add a section to docs.yaml with the new tag ref:: - - versions: - - name: - ref: - -* Commit and push -* Update 'cassandra-test' branch to reflect new release - - * this is typically a matter of merging or rebasing onto master - * test and push updated branch to origin - -* Update the JIRA versions: https://datastax-oss.atlassian.net/plugins/servlet/project-config/PYTHON/versions - - * add release dates and set version as "released" - -* Make an announcement on the mailing list - -Building the Docs -================= - -*Note*: The docs build instructions have been tested with Sphinx 2.4.4 and Fedora 32. - -To build and preview the theme locally, you will need to install the following software: - -- `Git `_ -- `Python 3.7 `_ -- `pip `_ - -Run the following command to build the docs. - -.. code:: console - - cd docs - make preview - -Once the command completes processing, open http://127.0.0.1:5500/ with your preferred browser. - -Building multiple documentation versions -======================================== - -Build docs for all the versions. - -``` -cd docs -make multiversion -``` - Then, open ``docs/_build/dirhtml//index.html`` with your preferred browser. - -**NOTE:** If you only can see docs generated for the master branch, try to run ``git fetch --tags`` to download the latest tags from remote. 
- -Tests -===== - -Running Unit Tests ------------------- -Unit tests can be run like so:: - - nosetests -w tests/unit/ - -You can run a specific test method like so:: - - nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version - -Running Integration Tests -------------------------- -In order to run integration tests, you must specify a version to run using the ``CASSANDRA_VERSION`` or ``DSE_VERSION`` environment variable:: - - CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard - -Or you can specify a cassandra directory (to test unreleased versions):: - - CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard/ - -Specifying the usage of an already running Cassandra cluster ------------------------------------------------------------- -The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example:: - - USE_CASS_EXTERNAL=1 CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard - -Specify a Protocol Version for Tests ------------------------------------- -The protocol version defaults to 1 for cassandra 1.2 and 2 otherwise. You can explicitly set -it with the ``PROTOCOL_VERSION`` environment variable:: - - PROTOCOL_VERSION=3 nosetests -w tests/integration/standard - -Seeing Test Logs in Real Time ------------------------------ -Sometimes it's useful to output logs for the tests as they run:: - - nosetests -w tests/unit/ --nocapture --nologcapture - -Use tee to capture logs and see them on your terminal:: - - nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log - -Testing Multiple Python Versions --------------------------------- -If you want to test all of python 2.7, 3.5, 3.6, 3.7, and pypy, use tox (this is what -TravisCI runs):: - - tox - -By default, tox only runs the unit tests. - -Running the Benchmarks -====================== -There needs to be a version of cassandra running locally so before running the benchmarks, if ccm is installed: - - ccm create benchmark_cluster -v 3.0.1 -n 1 -s - -To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: - - python benchmarks/future_batches.py - -There are a few options. Use ``--help`` to see them all:: - - python benchmarks/future_batches.py --help - -Packaging for Cassandra -======================= -A source distribution is included in Cassandra, which uses the driver internally for ``cqlsh``. -To package a released version, checkout the tag and build a source zip archive:: - - python setup.py sdist --formats=zip - -If packaging a pre-release (untagged) version, it is useful to include a commit hash in the archive -name to specify the built version:: - - python setup.py egg_info -b-`git rev-parse --short HEAD` sdist --formats=zip - -The file (``dist/scylla-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/scylla-driver-internal-only*zip``. - -Releasing an EAP -================ - -An EAP release is only uploaded on a private server and it is not published on pypi. - -* Clean the environment:: - - python setup.py clean - -* Package the source distribution:: - - python setup.py sdist - -* Test the source distribution:: - - pip install dist/scylla-driver-.tar.gz - -* Upload the package on the EAP download server. -* Build the documentation:: - - python setup.py doc - -* Upload the docs on the EAP download server. 
- -Adding a New Python Runtime Support -=================================== - -* Add the new python version to our jenkins image: - https://github.com/riptano/openstack-jenkins-drivers/ - -* Add the new python version in job-creator: - https://github.com/riptano/job-creator/ - -* Run the tests and ensure they all pass - * also test all event loops - -* Update the wheels building repo to support that version: - https://github.com/riptano/python-dse-driver-wheels diff --git a/README.rst b/README.rst index eaf5106c8d..84ceb443a3 100644 --- a/README.rst +++ b/README.rst @@ -1,16 +1,26 @@ +.. |license| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg + :target: https://opensource.org/licenses/Apache-2.0 +.. |version| image:: https://badge.fury.io/py/scylla-driver.svg + :target: https://badge.fury.io/py/scylla-driver + +|license| |version| + Scylla Python Driver ==================== A modern, feature-rich and highly-tunable Python client library for Scylla Open Source (2.1+) and Apache Cassandra (2.1+) and Scylla Enterprise (2018.1.x+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -.. image:: https://github.com/scylladb/python-driver/workflows/Build%20and%20upload%20to%20PyPi/badge.svg?tag=*-scylla - :target: https://github.com/scylladb/python-driver/actions?query=workflow%3A%22Build+and+upload+to+PyPi%22+event%3Apush+branch%3A*-scylla +.. image:: https://github.com/scylladb/python-driver/actions/workflows/build-push.yml/badge.svg?branch=master + :target: https://github.com/scylladb/python-driver/actions/workflows/build-push.yml?query=event%3Apush+branch%3Amaster -.. image:: https://github.com/scylladb/python-driver/workflows/CI%20Docs/badge.svg?tag=*-scylla - :target: https://github.com/scylladb/python-driver/actions?query=workflow%3A%22CI+Docs%22+event%3Apush+branch%3A*-scylla +.. image:: https://github.com/scylladb/python-driver/actions/workflows/docs-pages.yaml/badge.svg?branch=master + :target: https://github.com/scylladb/python-driver/actions/workflows/docs-pages.yaml?query=event%3Apush+branch%3Amaster -The driver supports Python versions 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. +.. image:: https://github.com/scylladb/python-driver/actions/workflows/integration-tests.yml/badge.svg?branch=master + :target: https://github.com/scylladb/python-driver/actions/workflows/integration-tests.yml?query=event%3Apush+branch%3Amaster + +The driver supports Python versions 3.10-3.14. .. **Note:** This driver does not support big-endian systems. @@ -24,8 +34,9 @@ Features * `Automatic reconnection `_ * Configurable `load balancing `_ and `retry policies `_ * `Concurrent execution utilities `_ -* `Object mapper `_ -* `Shard awareness `_ +* `Object mapper `_ +* `Shard awareness `_ +* `Tablet awareness `_ Installation ------------ @@ -43,7 +54,7 @@ The documentation can be found online `here `_ -* `Getting started guide `_ +* `Getting started guide `_ * `API docs `_ * `Performance tips `_ @@ -59,12 +70,16 @@ Object Mapper ------------- cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contributions from the community) is now maintained as an integral part of this package. Refer to -`documentation here `_. +`documentation here `_. Contributing ------------ See `CONTRIBUTING `_. +Error Handling +-------------- +While originally written for the Java driver, users may reference the `Cassandra error handling done right blog `_ for resolving error handling scenarios with Apache Cassandra. 
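A minimal sketch of the request-level errors that guidance deals with, assuming the public cassandra exception types; the contact point, keyspace and table names are illustrative only:

```python
# Minimal sketch of request-level error handling with the driver.
from cassandra import OperationTimedOut, ReadTimeout, Unavailable
from cassandra.cluster import Cluster, NoHostAvailable

cluster = Cluster(["127.0.0.1"])
try:
    session = cluster.connect("my_keyspace")          # illustrative keyspace
    rows = session.execute("SELECT id FROM my_table LIMIT 10")
    for row in rows:
        print(row.id)
except NoHostAvailable:
    print("no node could be contacted; check contact points and connectivity")
except (ReadTimeout, OperationTimedOut):
    print("the request timed out; consider retrying or adjusting timeouts")
except Unavailable:
    print("not enough replicas were alive to satisfy the requested consistency")
finally:
    cluster.shutdown()
```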
+ Reporting Problems ------------------ Please report any bugs and make any feature requests by clicking the New Issue button in @@ -74,8 +89,7 @@ If you would like to contribute, please feel free to send a pull request. Getting Help ------------ -Your best options for getting help with the driver are the -`mailing list `_ +You can ask questions on `ScyllaDB Community Forum `_ and the Scylla Users `Slack channel `_. License diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index d1daaa6ec6..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ -environment: - matrix: - - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.11.2 - ci_type: standard - - PYTHON: "C:\\Python35-x64" - cassandra_version: 3.11.2 - ci_type: standard -os: Visual Studio 2015 -platform: - - x64 -install: - - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - - ps: .\appveyor\appveyor.ps1 -build_script: - - cmd: | - "%VS140COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64 - python setup.py install --no-cython -test_script: - - ps: .\appveyor\run_test.ps1 -cache: - - C:\Users\appveyor\.m2 - - C:\ProgramData\chocolatey\bin - - C:\ProgramData\chocolatey\lib - - C:\Users\appveyor\jce_policy-1.7.0.zip - - C:\Users\appveyor\jce_policy-1.8.0.zip \ No newline at end of file diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 deleted file mode 100644 index 5f6840e4e1..0000000000 --- a/appveyor/appveyor.ps1 +++ /dev/null @@ -1,80 +0,0 @@ -$env:JAVA_HOME="C:\Program Files\Java\jdk1.8.0" -$env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" -$env:CCM_PATH="C:\Users\appveyor\ccm" -$env:CASSANDRA_VERSION=$env:cassandra_version -$env:EVENT_LOOP_MANAGER="asyncore" -$env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.7.0.jar" - -python --version -python -c "import platform; print(platform.architecture())" -# Install Ant -Start-Process cinst -ArgumentList @("-y","ant") -Wait -NoNewWindow -# Workaround for ccm, link ant.exe -> ant.bat -If (!(Test-Path C:\ProgramData\chocolatey\bin\ant.bat)) { - cmd /c mklink C:\ProgramData\chocolatey\bin\ant.bat C:\ProgramData\chocolatey\bin\ant.exe -} - - -$jce_indicator = "$target\README.txt" -# Install Java Cryptographic Extensions, needed for SSL. -If (!(Test-Path $jce_indicator)) { - $zip = "C:\Users\appveyor\jce_policy-$($env:java_version).zip" - $target = "$($env:JAVA_HOME)\jre\lib\security" - # If this file doesn't exist we know JCE hasn't been installed. - $url = "https://www.dropbox.com/s/po4308hlwulpvep/UnlimitedJCEPolicyJDK7.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicy" - If ($env:java_version -eq "1.8.0") { - $url = "https://www.dropbox.com/s/al1e6e92cjdv7m7/jce_policy-8.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicyJDK8" - } - # Download zip to staging area if it doesn't exist, we do this because - # we extract it to the directory based on the platform and we want to cache - # this file so it can apply to all platforms. 
- if(!(Test-Path $zip)) { - (new-object System.Net.WebClient).DownloadFile($url, $zip) - } - - Add-Type -AssemblyName System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::ExtractToDirectory($zip, $target) - - $jcePolicyDir = "$target\$extract_folder" - Move-Item $jcePolicyDir\* $target\ -force - Remove-Item $jcePolicyDir -} - -# Download simulacron -$simulacron_url = "https://github.com/datastax/simulacron/releases/download/0.7.0/simulacron-standalone-0.7.0.jar" -$simulacron_jar = $env:SIMULACRON_JAR -if(!(Test-Path $simulacron_jar)) { - (new-object System.Net.WebClient).DownloadFile($simulacron_url, $simulacron_jar) -} - -# Install Python Dependencies for CCM. -Start-Process python -ArgumentList "-m pip install psutil pyYaml six numpy" -Wait -NoNewWindow - -# Clone ccm from git and use master. -If (!(Test-Path $env:CCM_PATH)) { - Start-Process git -ArgumentList "clone -b cassandra-test https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -NoNewWindow -} - - -# Copy ccm -> ccm.py so windows knows to run it. -If (!(Test-Path $env:CCM_PATH\ccm.py)) { - Copy-Item "$env:CCM_PATH\ccm" "$env:CCM_PATH\ccm.py" -} - -$env:PYTHONPATH="$($env:CCM_PATH);$($env:PYTHONPATH)" -$env:PATH="$($env:CCM_PATH);$($env:PATH)" - -# Predownload cassandra version for CCM if it isn't already downloaded. -# This is necessary because otherwise ccm fails -If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py create -v $($env:cassandra_version) -n 1 predownload" -Wait -NoNewWindow - echo "Checking status of download" - python $env:CCM_PATH\ccm.py status - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py remove predownload" -Wait -NoNewWindow - echo "Downloaded version $env:cassandra_version" -} - -Start-Process python -ArgumentList "-m pip install -r test-requirements.txt" -Wait -NoNewWindow -Start-Process python -ArgumentList "-m pip install nose-ignore-docstring" -Wait -NoNewWindow diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 deleted file mode 100644 index fc95ec7e52..0000000000 --- a/appveyor/run_test.ps1 +++ /dev/null @@ -1,49 +0,0 @@ -Set-ExecutionPolicy Unrestricted -Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope Process -force -Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser -force -Get-ExecutionPolicy -List -echo $env:Path -echo "JAVA_HOME: $env:JAVA_HOME" -echo "PYTHONPATH: $env:PYTHONPATH" -echo "Cassandra version: $env:CASSANDRA_VERSION" -echo "Simulacron jar: $env:SIMULACRON_JAR" -echo $env:ci_type -python --version -python -c "import platform; print(platform.architecture())" - -$wc = New-Object 'System.Net.WebClient' - -if($env:ci_type -eq 'unit'){ - echo "Running Unit tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit - - $env:EVENT_LOOP_MANAGER="gevent" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py - $env:EVENT_LOOP_MANAGER="eventlet" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py - $env:EVENT_LOOP_MANAGER="asyncore" - - echo "uploading unit results" - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) - -} - -if($env:ci_type -eq 'standard'){ - - echo "Running CQLEngine integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit 
--xunit-file=cqlengine_results.xml .\tests\integration\cqlengine - $cqlengine_tests_result = $lastexitcode - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) - echo "uploading CQLEngine test results" - - echo "Running standard integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard - $integration_tests_result = $lastexitcode - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) - echo "uploading standard integration test results" -} - - -$exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result + $simulacron_tests_result -echo "Exit result: $exit_result" -exit $exit_result diff --git a/benchmarks/base.py b/benchmarks/base.py index 47a03bbd68..2000b4069f 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -54,7 +54,7 @@ from cassandra.io.libevreactor import LibevConnection have_libev = True supported_reactors.append(LibevConnection) -except ImportError as exc: +except cassandra.DependencyException as exc: pass have_asyncio = False diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index e3ecfe3be5..87eb999cfe 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -18,7 +18,6 @@ from threading import Event from base import benchmark, BenchmarkThread -from six.moves import range log = logging.getLogger(__name__) @@ -50,10 +49,7 @@ def insert_next(self, previous_result=sentinel): def run(self): self.start_profile() - if self.protocol_version >= 3: - concurrency = 1000 - else: - concurrency = 100 + concurrency = 1000 for _ in range(min(concurrency, self.num_queries)): self.insert_next() diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index 8cd915ebab..de4484e617 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 9a9fcfcd50..901573c18e 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/sync.py b/benchmarks/sync.py index f2a45fcd7d..96e744f700 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -13,7 +13,6 @@ # limitations under the License. 
from base import benchmark, BenchmarkThread -from six.moves import range class Runner(BenchmarkThread): diff --git a/build.yaml.bak b/build.yaml.bak deleted file mode 100644 index 100c86558a..0000000000 --- a/build.yaml.bak +++ /dev/null @@ -1,264 +0,0 @@ -schedules: - nightly_master: - schedule: nightly - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] - - commit_long_test: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/long-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches_dev: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/dev-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [2.7, 3.7, 3.6, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] - - release_test: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/release-.+/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - - weekly_master: - schedule: 0 10 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.5] - - cassandra: ['2.2', '3.1'] - - weekly_gevent: - schedule: 0 14 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='gevent' - JUST_EVENT_LOOP=1 - - weekly_eventlet: - schedule: 0 18 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='eventlet' - JUST_EVENT_LOOP=1 - - weekly_asyncio: - schedule: 0 22 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncio' - JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [2.7] - - weekly_async: - schedule: 0 10 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncore' - JUST_EVENT_LOOP=1 - - weekly_twister: - schedule: 0 14 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='twisted' - JUST_EVENT_LOOP=1 - - upgrade_tests: - schedule: adhoc - branches: - include: [master, python-546] - env_vars: | - EVENT_LOOP_MANAGER='libev' - JUST_UPGRADE=True - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] - -python: - - 2.7 - - 3.5 - - 3.6 - - 3.7 - - 3.8 - -os: - - ubuntu/bionic64/python-driver - -cassandra: - - '2.1' - - '2.2' - - '3.0' - - '3.11' - - '4.0' - - 'dse-4.8' - - 'dse-5.0' - - 'dse-5.1' - - 'dse-6.0' - - 'dse-6.7' - - 'dse-6.8.0' - -env: - CYTHON: - - CYTHON - - NO_CYTHON - -build: - - script: | - export JAVA_HOME=$CCM_JAVA_HOME - export PATH=$JAVA_HOME/bin:$PATH - export PYTHONPATH="" - export CCM_MAX_HEAP_SIZE=1024M - - # Required for unix socket tests - sudo apt-get install socat - - # Install latest setuptools - pip install --upgrade pip - pip install -U setuptools - - pip install 
git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix - - #pip install $HOME/ccm - - if [ -n "$CCM_IS_DSE" ]; then - pip install -r test-datastax-requirements.txt - else - pip install -r test-requirements.txt - fi - - pip install nose-ignore-docstring - pip install nose-exclude - pip install service_identity - - FORCE_CYTHON=False - if [[ $CYTHON == 'CYTHON' ]]; then - FORCE_CYTHON=True - pip install cython - pip install numpy - # Install the driver & compile C extensions - python setup.py build_ext --inplace - else - # Install the driver & compile C extensions with no cython - python setup.py build_ext --inplace --no-cython - fi - - echo "JUST_UPGRADE: $JUST_UPGRADE" - if [[ $JUST_UPGRADE == 'True' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - exit 0 - fi - - if [[ $JUST_SMOKE == 'true' ]]; then - # When we ONLY want to run the smoke tests - echo "JUST_SMOKE: $JUST_SMOKE" - echo "==========RUNNING SMOKE TESTS===========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - exit 0 - fi - - # Run the unit tests, this is not done in travis because - # it takes too much time for the whole matrix to build with cython - if [[ $CYTHON == 'CYTHON' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - fi - - if [ -n "$JUST_EVENT_LOOP" ]; then - echo "Running integration event loop subset with $EVENT_LOOP_MANAGER" - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - exit 0 - fi - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING 
SIMULACRON TESTS==========" - SIMULACRON_JAR="$HOME/simulacron.jar" - SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING CQLENGINE TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - - echo "==========RUNNING INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - - if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then - echo "==========RUNNING DSE INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - fi - - echo "==========RUNNING CLOUD TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - - if [ -z "$EXCLUDE_LONG" ]; then - echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - fi - - - xunit: - - "*_results.xml" diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 84a7de11a5..88fbb11f88 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from enum import Enum import logging @@ -22,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 1) +__version_info__ = (3, 29, 7) __version__ = '.'.join(map(str, __version_info__)) @@ -55,7 +56,7 @@ class ConsistencyLevel(object): QUORUM = 4 """ - ``ceil(RF/2)`` replicas must respond to consider the operation a success + ``ceil(RF/2) + 1`` replicas must respond to consider the operation a success """ ALL = 5 @@ -134,16 +135,6 @@ class ProtocolVersion(object): """ Defines native protocol versions supported by this driver. """ - V1 = 1 - """ - v1, supported in Cassandra 1.2-->2.2 - """ - - V2 = 2 - """ - v2, supported in Cassandra 2.0-->2.2; - added support for lightweight transactions, batch operations, and automatic query paging. - """ V3 = 3 """ @@ -179,9 +170,9 @@ class ProtocolVersion(object): DSE private protocol v2, supported in DSE 6.0+ """ - SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V6, V5, V4, V3, V2, V1) + SUPPORTED_VERSIONS = (V5, V4, V3) """ - A tuple of all supported protocol versions + A tuple of all supported protocol versions for ScyllaDB, including future v5 version. """ BETA_VERSIONS = (V6,) @@ -232,14 +223,6 @@ def uses_error_code_map(cls, version): def uses_keyspace_flag(cls, version): return version >= cls.V5 and version != cls.DSE_V1 - @classmethod - def has_continuous_paging_support(cls, version): - return version >= cls.DSE_V1 - - @classmethod - def has_continuous_paging_next_pages(cls, version): - return version >= cls.DSE_V2 - @classmethod def has_checksumming_support(cls, version): return cls.V5 <= version < cls.DSE_V1 @@ -728,3 +711,39 @@ class UnresolvableContactPoints(DriverException): contact points, only when lookup fails for all hosts """ pass + + +class OperationType(Enum): + Read = 0 + Write = 1 + + +class RateLimitReached(ConfigurationException): + ''' + Rate limit was exceeded for a partition affected by the request. + ''' + op_type = None + rejected_by_coordinator = False + + def __init__(self, op_type=None, rejected_by_coordinator=False): + self.op_type = op_type + self.rejected_by_coordinator = rejected_by_coordinator + message = f"[request_error_rate_limit_reached OpType={op_type.name} RejectedByCoordinator={rejected_by_coordinator}]" + Exception.__init__(self, message) + + +class DependencyException(Exception): + """ + Specific exception class for handling issues with driver dependencies + """ + + excs = [] + """ + A sequence of child exceptions + """ + + def __init__(self, msg, excs=[]): + complete_msg = msg + if excs: + complete_msg += ("\nThe following exceptions were observed: \n - " + '\n - '.join(str(e) for e in excs)) + Exception.__init__(self, complete_msg) diff --git a/cassandra/application_info.py b/cassandra/application_info.py new file mode 100644 index 0000000000..bdb084201a --- /dev/null +++ b/cassandra/application_info.py @@ -0,0 +1,53 @@ +# Copyright 2025 ScyllaDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
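The `cassandra/__init__.py` hunk above adds `OperationType`, `RateLimitReached` (surfaced when a per-partition rate limit rejects a request) and `DependencyException` (raised when optional event-loop dependencies cannot be loaded). A minimal sketch of catching the rate-limit error; the contact point, keyspace and table are placeholders, not part of the patch:

```python
from cassandra import RateLimitReached
from cassandra.cluster import Cluster

cluster = Cluster(["127.0.0.1"])              # placeholder contact point
session = cluster.connect("my_keyspace")      # placeholder keyspace

try:
    # Placeholder table, assumed to have a per-partition rate limit configured.
    session.execute("INSERT INTO events (id, payload) VALUES (1, 'x')")
except RateLimitReached as exc:
    # op_type is an OperationType member (Read/Write); rejected_by_coordinator
    # says whether the coordinator or a replica rejected the request.
    print(exc.op_type, exc.rejected_by_coordinator)
```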
+from typing import Optional + + +class ApplicationInfoBase: + """ + A class that holds application information and adds it to startup message options + """ + def add_startup_options(self, options: dict[str, str]): + raise NotImplementedError() + + +class ApplicationInfo(ApplicationInfoBase): + application_name: Optional[str] + application_version: Optional[str] + client_id: Optional[str] + + def __init__( + self, + application_name: Optional[str] = None, + application_version: Optional[str] = None, + client_id: Optional[str] = None + ): + if application_name and not isinstance(application_name, str): + raise TypeError('application_name must be a string') + if application_version and not isinstance(application_version, str): + raise TypeError('application_version must be a string') + if client_id and not isinstance(client_id, str): + raise TypeError('client_id must be a string') + + self.application_name = application_name + self.application_version = application_version + self.client_id = client_id + + def add_startup_options(self, options: dict[str, str]): + if self.application_name: + options['APPLICATION_NAME'] = self.application_name + if self.application_version: + options['APPLICATION_VERSION'] = self.application_version + if self.client_id: + options['CLIENT_ID'] = self.client_id diff --git a/cassandra/auth.py b/cassandra/auth.py index dcee131f4d..f41ba9f73d 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -32,8 +32,6 @@ except ImportError: SASLClient = None -import six - log = logging.getLogger(__name__) # Custom payload keys related to DSE Unified Auth @@ -270,15 +268,15 @@ def __init__(self, username, password): self.password = password def get_mechanism(self): - return six.b("PLAIN") + return b"PLAIN" def get_initial_challenge(self): - return six.b("PLAIN-START") + return b"PLAIN-START" def evaluate_challenge(self, challenge): - if challenge == six.b('PLAIN-START'): + if challenge == b'PLAIN-START': data = "\x00%s\x00%s" % (self.username, self.password) - return data if six.PY2 else data.encode() + return data.encode() raise Exception('Did not receive a valid challenge response from server') @@ -297,13 +295,13 @@ def __init__(self, host, service, qops, properties): self.sasl = SASLClient(host, service, 'GSSAPI', qops=qops, **properties) def get_mechanism(self): - return six.b("GSSAPI") + return b"GSSAPI" def get_initial_challenge(self): - return six.b("GSSAPI-START") + return b"GSSAPI-START" def evaluate_challenge(self, challenge): - if challenge == six.b('GSSAPI-START'): + if challenge == b'GSSAPI-START': return self.sasl.process() else: return self.sasl.process(challenge) diff --git a/cassandra/c_shard_info.pyx b/cassandra/c_shard_info.pyx index 012bfe172b..a8affd9bba 100644 --- a/cassandra/c_shard_info.pyx +++ b/cassandra/c_shard_info.pyx @@ -19,34 +19,23 @@ cdef extern from *: cdef class ShardingInfo(): cdef readonly int shards_count - cdef readonly str partitioner - cdef readonly str sharding_algorithm + cdef readonly unicode partitioner + cdef readonly unicode sharding_algorithm cdef readonly int sharding_ignore_msb + cdef readonly int shard_aware_port + cdef readonly int shard_aware_port_ssl cdef object __weakref__ - def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb): + def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, shard_aware_port, + shard_aware_port_ssl): self.shards_count = int(shards_count) self.partitioner = partitioner self.sharding_algorithm = 
sharding_algorithm self.sharding_ignore_msb = int(sharding_ignore_msb) + self.shard_aware_port = int(shard_aware_port) if shard_aware_port else 0 + self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else 0 - - @staticmethod - def parse_sharding_info(message): - shard_id = message.options.get('SCYLLA_SHARD', [''])[0] or None - shards_count = message.options.get('SCYLLA_NR_SHARDS', [''])[0] or None - partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None - sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None - sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None - - if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or - sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): - return 0, None - - return int(shard_id), ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb) - - def shard_id_from_token(self, int64_t token_input): cdef uint64_t biased_token = token_input + (1 << 63); biased_token <<= self.sharding_ignore_msb; diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ca5e2c9ed6..66bf7c7049 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,18 +19,21 @@ from __future__ import absolute_import import atexit +import datetime from binascii import hexlify from collections import defaultdict +from collections.abc import Mapping from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy -from functools import partial, wraps +from functools import partial, reduce, wraps from itertools import groupby, count, chain import json import logging +from typing import Optional, Union from warnings import warn from random import random -import six -from six.moves import filter, range, queue as Queue +import re +import queue import socket import sys import time @@ -40,16 +43,17 @@ import weakref from weakref import WeakValueDictionary -from cassandra import (ConsistencyLevel, AuthenticationFailed, +from cassandra import (ConsistencyLevel, AuthenticationFailed, InvalidRequest, OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, - UnresolvableContactPoints) + UnresolvableContactPoints, DependencyException) from cassandra.auth import _proxy_execute_key, PlainTextAuthProvider from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, EndPoint, DefaultEndPoint, DefaultEndPointFactory, - ContinuousPagingState, SniEndPointFactory, ConnectionBusy) + SniEndPointFactory, ConnectionBusy, locally_supported_compressions) from cassandra.cqltypes import UserType +import cassandra.cqltypes as types from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, ErrorMessage, ReadTimeoutErrorMessage, @@ -71,16 +75,16 @@ NoSpeculativeExecutionPolicy, DefaultLoadBalancingPolicy, NeverRetryPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, - HostConnectionPool, HostConnection, + HostConnection, NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, TraceUnavailable, named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET, HostTargetingStatement) from cassandra.marshal import int64_pack +from cassandra.tablets import Tablet, Tablets from cassandra.timestamps 
import MonotonicTimestampGenerator -from cassandra.compat import Mapping -from cassandra.util import _resolve_contact_points_to_string_map, Version +from cassandra.util import _resolve_contact_points_to_string_map, Version, maybe_add_timeout_to_query from cassandra.datastax.insights.reporter import MonitorReporter from cassandra.datastax.insights.util import version_supports_insights @@ -91,6 +95,7 @@ GraphSON3Serializer) from cassandra.datastax.graph.query import _request_timeout_key, _GraphSONContextRowFactory from cassandra.datastax import cloud as dscloud +from cassandra.application_info import ApplicationInfoBase try: from cassandra.io.twistedreactor import TwistedConnection @@ -99,7 +104,9 @@ try: from cassandra.io.eventletreactor import EventletConnection -except ImportError: +except (ImportError, AttributeError): + # AttributeError was add for handling python 3.12 https://github.com/eventlet/eventlet/issues/812 + # TODO: remove it when eventlet issue would be fixed EventletConnection = None try: @@ -107,35 +114,73 @@ except ImportError: from cassandra.util import WeakSet # NOQA -if six.PY3: - long = int +def _is_gevent_monkey_patched(): + if 'gevent.monkey' not in sys.modules: + return False + try: + import gevent.socket + return socket.socket is gevent.socket.socket # Another case related to PYTHON-1364 + except (AttributeError, ImportError): + return False + +def _try_gevent_import(): + if _is_gevent_monkey_patched(): + from cassandra.io.geventreactor import GeventConnection + return (GeventConnection,None) + else: + return (None,None) def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False - import eventlet.patcher - return eventlet.patcher.is_monkey_patched('socket') + try: + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') + except (ImportError, AttributeError): + # AttributeError was add for handling python 3.12 https://github.com/eventlet/eventlet/issues/812 + # TODO: remove it when eventlet issue would be fixed + return False +def _try_eventlet_import(): + if _is_eventlet_monkey_patched(): + from cassandra.io.eventletreactor import EventletConnection + return (EventletConnection,None) + else: + return (None,None) -def _is_gevent_monkey_patched(): - if 'gevent.monkey' not in sys.modules: - return False - import gevent.socket - return socket.socket is gevent.socket.socket - - -# default to gevent when we are monkey patched with gevent, eventlet when -# monkey patched with eventlet, otherwise if libev is available, use that as -# the default because it's fastest. Otherwise, use asyncore. 
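Around this hunk the hard-coded default-connection if/elif chain is replaced by a set of `_try_*_import` helpers folded with `functools.reduce`; when none of them yields a usable reactor, a `DependencyException` carrying the collected errors is raised. A sketch of pinning a reactor explicitly and falling back to the driver's own selection; the fallback behaviour shown is the caller's choice, not something the patch mandates:

```python
from cassandra import DependencyException
from cassandra.cluster import Cluster

try:
    # With this patch, importing the libev reactor raises DependencyException
    # (instead of ImportError) when libev or its C extension is unavailable.
    from cassandra.io.libevreactor import LibevConnection
    connection_class = LibevConnection
except DependencyException as exc:
    connection_class = None   # let Cluster fall back to its own import chain
    print("libev reactor unavailable:", exc)

cluster = Cluster(["127.0.0.1"], connection_class=connection_class)  # placeholder address
```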
-if _is_gevent_monkey_patched(): - from cassandra.io.geventreactor import GeventConnection as DefaultConnection -elif _is_eventlet_monkey_patched(): - from cassandra.io.eventletreactor import EventletConnection as DefaultConnection -else: +def _try_libev_import(): try: - from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA - except ImportError: - from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA + from cassandra.io.libevreactor import LibevConnection + return (LibevConnection,None) + except DependencyException as e: + return (None, e) + +def _try_asyncore_import(): + try: + from cassandra.io.asyncorereactor import AsyncoreConnection + return (AsyncoreConnection,None) + except DependencyException as e: + return (None, e) + +def _try_asyncio_import(): + from cassandra.io.asyncioreactor import AsyncioConnection + return (AsyncioConnection, None) + +def _connection_reduce_fn(val,import_fn): + (rv, excs) = val + # If we've already found a workable Connection class return immediately + if rv: + return val + (import_result, exc) = import_fn() + if exc: + excs.append(exc) + return (rv or import_result, excs) + +conn_fns = (_try_gevent_import, _try_eventlet_import, _try_libev_import, _try_asyncore_import, _try_asyncio_import) +(conn_class, excs) = reduce(_connection_reduce_fn, conn_fns, (None,[])) +if not conn_class: + raise DependencyException("Exception loading connection class dependencies", excs) +DefaultConnection = conn_class # Forces load of utf8 encoding module to avoid deadlock that occurs # if code that is being imported tries to import the module in a seperate @@ -146,15 +191,6 @@ def _is_gevent_monkey_patched(): log = logging.getLogger(__name__) -DEFAULT_MIN_REQUESTS = 5 -DEFAULT_MAX_REQUESTS = 100 - -DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST = 2 -DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST = 8 - -DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST = 1 -DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST = 2 - _GRAPH_PAGING_MIN_DSE_VERSION = Version('6.8.0') _NOT_SET = object() @@ -483,7 +519,8 @@ def _profiles_without_explicit_lbps(self): def distance(self, host): distances = set(p.load_balancing_policy.distance(host) for p in self.profiles.values()) - return HostDistance.LOCAL if HostDistance.LOCAL in distances else \ + return HostDistance.LOCAL_RACK if HostDistance.LOCAL_RACK in distances else \ + HostDistance.LOCAL if HostDistance.LOCAL in distances else \ HostDistance.REMOTE if HostDistance.REMOTE in distances else \ HostDistance.IGNORED @@ -532,7 +569,7 @@ def default(self): Key for the default graph execution profile, used when no other profile is selected in ``Session.execute_graph(execution_profile)``. -Use this as the key in :doc:`Cluster(execution_profiles) ` +Use this as the key in :doc:`Cluster(execution_profiles) ` to override the default graph profile. """ @@ -553,6 +590,20 @@ def default(self): """ +class ShardAwareOptions: + disable = None + disable_shardaware_port = False + + def __init__(self, opts=None, disable=None, disable_shardaware_port=None): + self.disable = disable + self.disable_shardaware_port = disable_shardaware_port + if opts: + if isinstance(opts, ShardAwareOptions): + self.__dict__.update(opts.__dict__) + elif isinstance(opts, dict): + self.__dict__.update(opts) + + class _ConfigMode(object): UNCOMMITTED = 0 LEGACY = 1 @@ -586,7 +637,7 @@ class Cluster(object): Defaults to loopback interface. 
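The new `ShardAwareOptions` holder introduced above lets callers disable shard-aware routing altogether, or only the dedicated shard-aware port. A minimal sketch of the two spellings `Cluster(shard_aware_options=...)` accepts, an options instance or a plain dict; the addresses are placeholders:

```python
from cassandra.cluster import Cluster, ShardAwareOptions

# Keep shard-aware routing but skip the dedicated shard-aware port.
cluster = Cluster(
    ["127.0.0.1"],                    # placeholder contact point
    shard_aware_options=ShardAwareOptions(disable_shardaware_port=True),
)

# Equivalent dict form; this one disables shard awareness completely.
cluster2 = Cluster(["127.0.0.1"], shard_aware_options={"disable": True})
```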
- Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit + Note: When using :class:`.DCAwareRoundRobinPolicy` with no explicit local_dc set (as is the default), the DC is chosen from an arbitrary host in contact_points. In this case, contact_points should contain only nodes from a single, local DC. @@ -611,7 +662,7 @@ class Cluster(object): server will be automatically used. """ - protocol_version = ProtocolVersion.DSE_V2 + protocol_version = ProtocolVersion.V5 """ The maximum version of the native protocol to use. @@ -619,7 +670,7 @@ class Cluster(object): If not set in the constructor, the driver will automatically downgrade version based on a negotiation with the server, but it is most efficient - to set this to the maximum supported by your version of Cassandra. + to set this to the maximum supported by your version of ScyllaDB. Setting this will also prevent conflicting versions negotiated if your cluster is upgraded. @@ -634,7 +685,7 @@ class Cluster(object): Used for testing new protocol features incrementally before the new version is complete. """ - compression = True + compression: Union[bool, str, None] = True """ Controls compression for communications between the driver and Cassandra. If left as the default of :const:`True`, either lz4 or snappy compression @@ -644,9 +695,22 @@ class Cluster(object): You may also set this to 'snappy' or 'lz4' to request that specific compression type. - Setting this to :const:`False` disables compression. + Setting this to :const:`False` or :const:`None` disables compression. """ + _application_info: Optional[ApplicationInfoBase] = None + + @property + def application_info(self) -> Optional[ApplicationInfoBase]: + """ + An instance of any subclass of :class:`.application_info.ApplicationInfoBase`. + + Defaults to None + + When set makes driver sends information about application that uses driver in startup frame + """ + return self._application_info + _auth_provider = None _auth_provider_callable = None @@ -657,9 +721,6 @@ def auth_provider(self): be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`, such as :class:`~.PlainTextAuthProvider`. - When :attr:`~.Cluster.protocol_version` is 1, this should be - a function that accepts one argument, the IP address of a node, - and returns a dict of credentials for that node. When not using authentication, this should be left as :const:`None`. """ @@ -777,9 +838,9 @@ def default_retry_policy(self, policy): Using ssl_options without ssl_context is deprecated and will be removed in the next major release. - An optional dict which will be used as kwargs for ``ssl.SSLContext.wrap_socket`` (or - ``ssl.wrap_socket()`` if used without ssl_context) when new sockets are created. - This should be used when client encryption is enabled in Cassandra. + An optional dict which will be used as kwargs for ``ssl.SSLContext.wrap_socket`` + when new sockets are created. This should be used when client encryption is enabled + in Cassandra. The following documentation only applies when ssl_options is used without ssl_context. @@ -795,6 +856,12 @@ def default_retry_policy(self, policy): should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname`` with a custom or `back-ported function `_. + + .. 
versionchanged:: 3.29.0 + + ``ssl.match_hostname`` has been deprecated since Python 3.7 (and removed in Python 3.12). This functionality is now implemented + via ``ssl.SSLContext.check_hostname``. All options specified above (including ``check_hostname``) should continue to behave in a + way that is consistent with prior implementations. """ ssl_context = None @@ -959,7 +1026,7 @@ def default_retry_policy(self, policy): documentation for :meth:`Session.timestamp_generator`. """ - monitor_reporting_enabled = True + monitor_reporting_enabled = False """ A boolean indicating if monitor reporting, which sends gathered data to Insights when running against DSE 6.8 and higher. @@ -990,7 +1057,7 @@ def default_retry_policy(self, policy): cloud = None """ A dict of the cloud configuration. Example:: - + { # path to the secure connect bundle 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', @@ -1003,6 +1070,33 @@ def default_retry_policy(self, policy): load the configuration and certificates. """ + shard_aware_options = None + """ + Can be set with :class:`ShardAwareOptions` or with a dict, to disable the automatic shardaware, + or to disable the shardaware port (advanced shardaware) + """ + + column_encryption_policy = None + """ + An instance of :class:`cassandra.policies.ColumnEncryptionPolicy` specifying encryption materials to be + used for columns in this cluster. + """ + + metadata_request_timeout: Optional[float] = None + """ + Specifies a server-side timeout (in seconds) for all internal driver queries, + such as schema metadata lookups and cluster topology requests. + + The timeout is enforced by appending `USING TIMEOUT ` to queries + executed by the driver. + + - A value of `0` disables explicit timeout enforcement. In this case, + the driver does not add `USING TIMEOUT`, and the timeout is determined + by the server's defaults. + - Only supported when connected to Scylla clusters. + - If not explicitly set, defaults to the value of `control_connection_timeout`. + """ + @property def schema_metadata_enabled(self): """ @@ -1018,6 +1112,17 @@ def schema_metadata_enabled(self): def schema_metadata_enabled(self, enabled): self.control_connection._schema_meta_enabled = bool(enabled) + @property + def schema_metadata_page_size(self): + """ + Number controling page size when schema metadata is fetched. 
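Several of the options documented above are new Scylla-oriented `Cluster` settings: `application_info` (sent with the STARTUP message), `metadata_request_timeout` (appended as `USING TIMEOUT` to the driver's internal metadata queries), `schema_metadata_page_size`, and the stricter `compression` validation. A sketch combining them, assuming the lz4 package is installed; all literal values are placeholders:

```python
from cassandra.application_info import ApplicationInfo
from cassandra.cluster import Cluster

cluster = Cluster(
    ["127.0.0.1"],                               # placeholder contact point
    application_info=ApplicationInfo(
        application_name="billing-service",      # becomes APPLICATION_NAME in STARTUP
        application_version="1.4.2",
        client_id="billing-1",
    ),
    metadata_request_timeout=2,                  # seconds; 0 leaves the server default
    schema_metadata_page_size=500,               # page size for schema metadata queries
    compression="lz4",                           # ValueError if lz4 is not importable
)
session = cluster.connect()
```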
+ """ + return self.control_connection._schema_meta_page_size + + @schema_metadata_page_size.setter + def schema_metadata_page_size(self, size): + self.control_connection._schema_meta_page_size = size + @property def token_metadata_enabled(self): """ @@ -1067,7 +1172,7 @@ def token_metadata_enabled(self, enabled): def __init__(self, contact_points=_NOT_SET, port=9042, - compression=True, + compression: Union[bool, str, None] = True, auth_provider=None, load_balancing_policy=None, reconnection_policy=None, @@ -1088,6 +1193,7 @@ def __init__(self, connect_timeout=5, schema_metadata_enabled=True, token_metadata_enabled=True, + schema_metadata_page_size=1000, address_translator=None, status_event_refresh_window=2, prepare_on_all_hosts=True, @@ -1104,16 +1210,35 @@ def __init__(self, monitor_reporting_enabled=True, monitor_reporting_interval=30, client_id=None, - cloud=None): + cloud=None, + scylla_cloud=None, + shard_aware_options=None, + metadata_request_timeout: Optional[float] = None, + column_encryption_policy=None, + application_info:Optional[ApplicationInfoBase]=None + ): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ + + # Handle port passed as string + if isinstance(port, str): + if not port.isdigit(): + raise ValueError("Only numeric values are supported for port (%s)" % port) + port = int(port) + + if port < 1 or port > 65535: + raise ValueError("Invalid port number (%s) (1-65535)" % port) + if connection_class is not None: self.connection_class = connection_class + if scylla_cloud is not None: + raise NotImplementedError("scylla_cloud was removed and not supported anymore") + if cloud is not None: self.cloud = cloud if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: @@ -1143,7 +1268,7 @@ def __init__(self, else: self._contact_points_explicit = True - if isinstance(contact_points, six.string_types): + if isinstance(contact_points, str): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. 
list) of strings") if None in contact_points: @@ -1152,34 +1277,33 @@ def __init__(self, self.port = port + if column_encryption_policy is not None: + self.column_encryption_policy = column_encryption_policy + self.endpoint_factory = endpoint_factory or DefaultEndPointFactory(port=self.port) self.endpoint_factory.configure(self) - raw_contact_points = [] - for cp in [cp for cp in self.contact_points if not isinstance(cp, EndPoint)]: - raw_contact_points.append(cp if isinstance(cp, tuple) else (cp, port)) - - self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] - self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) - for ep in self.endpoints_resolved} - - strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points) - self.endpoints_resolved.extend(list(chain( - *[ - [DefaultEndPoint(ip, port) for ip, port in xs if ip is not None] - for xs in strs_resolved_map.values() if xs is not None - ] - ))) - - self._endpoint_map_for_insights.update( - {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip, port in value] - for key, value in strs_resolved_map.items() if value is not None} - ) - - if contact_points and (not self.endpoints_resolved): - # only want to raise here if the user specified CPs but resolution failed - raise UnresolvableContactPoints(self._endpoint_map_for_insights) + self._resolve_hostnames() + if isinstance(compression, bool) or compression is None: + compression = bool(compression) + if compression and not locally_supported_compressions: + log.error( + "Compression is enabled, but no compression libraries are available. " + "Disabling compression, consider installing one of the Python packages: lz4 and/or python-snappy." + ) + compression = False + elif isinstance(compression, str): + if not locally_supported_compressions.get(compression): + raise ValueError( + "Compression '%s' was requested, but it is not available. " + "Consider installing the corresponding Python package." % compression + ) + else: + raise TypeError( + "The 'compression' option must be either a string (e.g., 'lz4' or 'snappy') " + "or a boolean (True to enable any available compression, False to disable it)." 
+ ) self.compression = compression if protocol_version is not _NOT_SET: @@ -1218,6 +1342,12 @@ def __init__(self, raise TypeError("address_translator should not be a class, it should be an instance of that class") self.address_translator = address_translator + if application_info is not None: + if not isinstance(application_info, ApplicationInfoBase): + raise TypeError( + "application_info should be an instance of any ApplicationInfoBase class") + self._application_info = application_info + if timestamp_generator is not None: if not callable(timestamp_generator): raise ValueError("timestamp_generator must be callable") @@ -1294,6 +1424,7 @@ def __init__(self, self.cql_version = cql_version self.max_schema_agreement_wait = max_schema_agreement_wait self.control_connection_timeout = control_connection_timeout + self.metadata_request_timeout = self.control_connection_timeout if metadata_request_timeout is None else metadata_request_timeout self.idle_heartbeat_interval = idle_heartbeat_interval self.idle_heartbeat_timeout = idle_heartbeat_timeout self.schema_event_refresh_window = schema_event_refresh_window @@ -1304,6 +1435,7 @@ def __init__(self, self.reprepare_on_up = reprepare_on_up self.monitor_reporting_enabled = monitor_reporting_enabled self.monitor_reporting_interval = monitor_reporting_interval + self.shard_aware_options = ShardAwareOptions(opts=shard_aware_options) self._listeners = set() self._listener_lock = Lock() @@ -1318,26 +1450,6 @@ def __init__(self, self._user_types = defaultdict(dict) - self._min_requests_per_connection = { - HostDistance.LOCAL: DEFAULT_MIN_REQUESTS, - HostDistance.REMOTE: DEFAULT_MIN_REQUESTS - } - - self._max_requests_per_connection = { - HostDistance.LOCAL: DEFAULT_MAX_REQUESTS, - HostDistance.REMOTE: DEFAULT_MAX_REQUESTS - } - - self._core_connections_per_host = { - HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, - HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST - } - - self._max_connections_per_host = { - HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, - HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST - } - self.executor = self._create_thread_pool_executor(max_workers=executor_threads) self.scheduler = _Scheduler(self.executor) @@ -1351,7 +1463,8 @@ def __init__(self, self, self.control_connection_timeout, self.schema_event_refresh_window, self.topology_event_refresh_window, self.status_event_refresh_window, - schema_metadata_enabled, token_metadata_enabled) + schema_metadata_enabled, token_metadata_enabled, + schema_meta_page_size=schema_metadata_page_size) if client_id is None: self.client_id = uuid.uuid4() @@ -1360,6 +1473,31 @@ def __init__(self, if application_version is not None: self.application_version = application_version + def _resolve_hostnames(self): + raw_contact_points = [] + for cp in [cp for cp in self.contact_points if not isinstance(cp, EndPoint)]: + raw_contact_points.append(cp if isinstance(cp, tuple) else (cp, self.port)) + + self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] + self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) + for ep in self.endpoints_resolved} + strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points) + self.endpoints_resolved.extend(list(chain( + *[ + [DefaultEndPoint(ip, port) for ip, port in xs if ip is not None] + for xs in strs_resolved_map.values() if xs is not None + ] + ))) + + self._endpoint_map_for_insights.update( + {key: 
['{ip}:{port}'.format(ip=ip, port=port) for ip, port in value] + for key, value in strs_resolved_map.items() if value is not None} + ) + + if self.contact_points and (not self.endpoints_resolved): + # only want to raise here if the user specified CPs but resolution failed + raise UnresolvableContactPoints(self._endpoint_map_for_insights) + def _create_thread_pool_executor(self, **kwargs): """ Create a ThreadPoolExecutor for the cluster. In most cases, the built-in @@ -1441,7 +1579,7 @@ def __init__(self, street, zipcode): # results will include Address instances results = session.execute("SELECT * FROM users") row = results[0] - print row.id, row.location.street, row.location.zipcode + print(row.id, row.location.street, row.location.zipcode) """ if self.protocol_version < 3: @@ -1502,122 +1640,13 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): if not_done: raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout.") - def get_min_requests_per_connection(self, host_distance): - return self._min_requests_per_connection[host_distance] - - def set_min_requests_per_connection(self, host_distance, min_requests): - """ - Sets a threshold for concurrent requests per connection, below which - connections will be considered for disposal (down to core connections; - see :meth:`~Cluster.set_core_connections_per_host`). - - Pertains to connection pool management in protocol versions {1,2}. - """ - if self.protocol_version >= 3: - raise UnsupportedOperation( - "Cluster.set_min_requests_per_connection() only has an effect " - "when using protocol_version 1 or 2.") - if min_requests < 0 or min_requests > 126 or \ - min_requests >= self._max_requests_per_connection[host_distance]: - raise ValueError("min_requests must be 0-126 and less than the max_requests for this host_distance (%d)" % - (self._min_requests_per_connection[host_distance],)) - self._min_requests_per_connection[host_distance] = min_requests - - def get_max_requests_per_connection(self, host_distance): - return self._max_requests_per_connection[host_distance] - - def set_max_requests_per_connection(self, host_distance, max_requests): - """ - Sets a threshold for concurrent requests per connection, above which new - connections will be created to a host (up to max connections; - see :meth:`~Cluster.set_max_connections_per_host`). - - Pertains to connection pool management in protocol versions {1,2}. - """ - if self.protocol_version >= 3: - raise UnsupportedOperation( - "Cluster.set_max_requests_per_connection() only has an effect " - "when using protocol_version 1 or 2.") - if max_requests < 1 or max_requests > 127 or \ - max_requests <= self._min_requests_per_connection[host_distance]: - raise ValueError("max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)" % - (self._min_requests_per_connection[host_distance],)) - self._max_requests_per_connection[host_distance] = max_requests - - def get_core_connections_per_host(self, host_distance): - """ - Gets the minimum number of connections per Session that will be opened - for each host with :class:`~.HostDistance` equal to `host_distance`. - The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for - :attr:`~HostDistance.REMOTE`. - - This property is ignored if :attr:`~.Cluster.protocol_version` is - 3 or higher. 
- """ - return self._core_connections_per_host[host_distance] - - def set_core_connections_per_host(self, host_distance, core_connections): - """ - Sets the minimum number of connections per Session that will be opened - for each host with :class:`~.HostDistance` equal to `host_distance`. - The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for - :attr:`~HostDistance.REMOTE`. - - Protocol version 1 and 2 are limited in the number of concurrent - requests they can send per connection. The driver implements connection - pooling to support higher levels of concurrency. - - If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this - is not supported (there is always one connection per host, unless - the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) - and using this will result in an :exc:`~.UnsupportedOperation`. - """ - if self.protocol_version >= 3: - raise UnsupportedOperation( - "Cluster.set_core_connections_per_host() only has an effect " - "when using protocol_version 1 or 2.") - old = self._core_connections_per_host[host_distance] - self._core_connections_per_host[host_distance] = core_connections - if old < core_connections: - self._ensure_core_connections() - - def get_max_connections_per_host(self, host_distance): - """ - Gets the maximum number of connections per Session that will be opened - for each host with :class:`~.HostDistance` equal to `host_distance`. - The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for - :attr:`~HostDistance.REMOTE`. - - This property is ignored if :attr:`~.Cluster.protocol_version` is - 3 or higher. - """ - return self._max_connections_per_host[host_distance] - - def set_max_connections_per_host(self, host_distance, max_connections): - """ - Sets the maximum number of connections per Session that will be opened - for each host with :class:`~.HostDistance` equal to `host_distance`. - The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for - :attr:`~HostDistance.REMOTE`. - - If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this - is not supported (there is always one connection per host, unless - the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) - and using this will result in an :exc:`~.UnsupportedOperation`. - """ - if self.protocol_version >= 3: - raise UnsupportedOperation( - "Cluster.set_max_connections_per_host() only has an effect " - "when using protocol_version 1 or 2.") - self._max_connections_per_host[host_distance] = max_connections - - def connection_factory(self, endpoint, *args, **kwargs): + def connection_factory(self, endpoint, host_conn = None, *args, **kwargs): """ Called to create a new connection with proper configuration. Intended for internal use only. 
""" kwargs = self._make_connection_kwargs(endpoint, kwargs) - return self.connection_class.factory(endpoint, self.connect_timeout, *args, **kwargs) + return self.connection_class.factory(endpoint, self.connect_timeout, host_conn, *args, **kwargs) def _make_connection_factory(self, host, *args, **kwargs): kwargs = self._make_connection_kwargs(host.endpoint, kwargs) @@ -1637,6 +1666,7 @@ def _make_connection_kwargs(self, endpoint, kwargs_dict): kwargs_dict.setdefault('user_type_map', self._user_types) kwargs_dict.setdefault('allow_beta_protocol_version', self.allow_beta_protocol_version) kwargs_dict.setdefault('no_compact', self.no_compact) + kwargs_dict.setdefault('application_info', self.application_info) return kwargs_dict @@ -1653,6 +1683,20 @@ def protocol_downgrade(self, host_endpoint, previous_version): "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_endpoint) self.protocol_version = new_version + def _add_resolved_hosts(self): + for endpoint in self.endpoints_resolved: + host, new = self.add_host(endpoint, signal=False) + if new: + host.set_up() + for listener in self.listeners: + listener.on_add(host) + + self.profile_manager.populate( + weakref.proxy(self), self.metadata.all_hosts()) + self.load_balancing_policy.populate( + weakref.proxy(self), self.metadata.all_hosts() + ) + def connect(self, keyspace=None, wait_for_all_pools=False): """ Creates and returns a new :class:`~.Session` object. @@ -1673,18 +1717,8 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() _register_cluster_shutdown(self) - for endpoint in self.endpoints_resolved: - host, new = self.add_host(endpoint, signal=False) - if new: - host.set_up() - for listener in self.listeners: - listener.on_add(host) - - self.profile_manager.populate( - weakref.proxy(self), self.metadata.all_hosts()) - self.load_balancing_policy.populate( - weakref.proxy(self), self.metadata.all_hosts() - ) + + self._add_resolved_hosts() try: self.control_connection.connect() @@ -1734,14 +1768,20 @@ def get_connection_holders(self): holders.append(self.control_connection) return holders + def get_all_pools(self): + pools = [] + for s in tuple(self.sessions): + pools.extend(s.get_pools()) + return pools + def is_shard_aware(self): - return bool(self.get_connection_holders()[:-1][0].host.sharding_info) + return bool(self.get_all_pools()[0].host.sharding_info) def shard_aware_stats(self): if self.is_shard_aware(): return {str(pool.host.endpoint): {'shards_count': pool.host.sharding_info.shards_count, 'connected': len(pool._connections.keys())} - for pool in self.get_connection_holders()[:-1]} + for pool in self.get_all_pools()} def shutdown(self): """ @@ -1784,8 +1824,8 @@ def _new_session(self, keyspace): return session def _session_register_user_types(self, session): - for keyspace, type_map in six.iteritems(self._user_types): - for udt_name, klass in six.iteritems(type_map): + for keyspace, type_map in self._user_types.items(): + for udt_name, klass in type_map.items(): session.user_type_registered(keyspace, udt_name, klass) def _cleanup_failed_on_up_handling(self, host): @@ -1930,6 +1970,17 @@ def _start_reconnector(self, host, is_host_addition): reconnector.start() @run_in_executor + def on_down_potentially_blocking(self, host, is_host_addition): + self.profile_manager.on_down(host) + self.control_connection.on_down(host) + for session in 
tuple(self.sessions): + session.on_down(host) + + for listener in self.listeners: + listener.on_down(host) + + self._start_reconnector(host, is_host_addition) + def on_down(self, host, is_host_addition, expect_host_to_be_down=False): """ Intended for internal use only. @@ -1955,18 +2006,9 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): host.set_down() if (not was_up and not expect_host_to_be_down) or host.is_currently_reconnecting(): return - log.warning("Host %s has been marked down", host) - self.profile_manager.on_down(host) - self.control_connection.on_down(host) - for session in tuple(self.sessions): - session.on_down(host) - - for listener in self.listeners: - listener.on_down(host) - - self._start_reconnector(host, is_host_addition) + self.on_down_potentially_blocking(host, is_host_addition) def on_add(self, host, refresh_nodes=True): if self.is_shutdown: @@ -2042,7 +2084,7 @@ def on_remove(self, host): if self.is_shutdown: return - log.debug("Removing host %s", host) + log.debug("[cluster] Removing host %s", host) host.set_down() self.profile_manager.on_remove(host) for session in tuple(self.sessions): @@ -2061,7 +2103,7 @@ def signal_connection_failure(self, host, connection_exc, is_host_addition, expe self.on_down(host, is_host_addition, expect_host_to_be_down) return is_down - def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True): + def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True, host_id=None): """ Called when adding initial contact points and when the control connection subsequently discovers a new node. @@ -2069,7 +2111,10 @@ def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_no the metadata. Intended for internal use only. """ - host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack)) + with self.metadata._hosts_lock: + if endpoint in self.metadata._host_id_by_endpoint: + return self.metadata._hosts[self.metadata._host_id_by_endpoint[endpoint]], False + host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack, host_id=host_id)) if new and signal: log.info("New Cassandra host %r discovered", host) self.on_add(host, refresh_nodes) @@ -2308,7 +2353,6 @@ def add_prepared(self, query_id, prepared_statement): with self._prepared_statement_lock: self._prepared_statements[query_id] = prepared_statement - class Session(object): """ A collection of connection pools for each host in the cluster. @@ -2400,7 +2444,7 @@ def default_consistency_level(self, cl): *Deprecated:* use execution profiles instead """ warn("Setting the consistency level at the session level will be removed in 4.0. Consider using " - "execution profiles and setting the desired consitency level to the EXEC_PROFILE_DEFAULT profile." + "execution profiles and setting the desired consistency level to the EXEC_PROFILE_DEFAULT profile." 
, DeprecationWarning) self._validate_set_legacy_config('default_consistency_level', cl) @@ -2562,7 +2606,17 @@ def __init__(self, cluster, hosts, keyspace=None): raise NoHostAvailable(msg, [h.address for h in hosts]) self.session_id = uuid.uuid4() - self._graph_paging_available = self._check_graph_paging_available() + + if self.cluster.column_encryption_policy is not None: + try: + self.client_protocol_handler = type( + str(self.session_id) + "-ProtocolHandler", + (ProtocolHandler,), + {"column_encryption_policy": self.cluster.column_encryption_policy}) + except AttributeError: + log.info("Unable to set column encryption policy for session") + raise Exception( + "column_encryption_policy is temporary disabled, until https://github.com/scylladb/python-driver/issues/365 is sorted out") if self.cluster.monitor_reporting_enabled: cc_host = self.cluster.get_control_connection_host() @@ -2665,7 +2719,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None """ custom_payload = custom_payload if custom_payload else {} if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) + custom_payload[_proxy_execute_key] = execute_as.encode() future = self._create_response_future( query, parameters, trace, custom_payload, timeout, @@ -2729,8 +2783,8 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro custom_payload = execution_profile.graph_options.get_options_map() if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) - custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000)) + custom_payload[_proxy_execute_key] = execute_as.encode() + custom_payload[_request_timeout_key] = int64_pack(int(execution_profile.request_timeout * 1000)) future = self._create_response_future(query, parameters=None, trace=trace, custom_payload=custom_payload, timeout=_NOT_SET, execution_profile=execution_profile) @@ -2748,26 +2802,10 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro def _maybe_set_graph_paging(self, execution_profile): graph_paging = execution_profile.continuous_paging_options if execution_profile.continuous_paging_options is _NOT_SET: - graph_paging = ContinuousPagingOptions() if self._graph_paging_available else None + graph_paging = None execution_profile.continuous_paging_options = graph_paging - def _check_graph_paging_available(self): - """Verify if we can enable graph paging. This executed only once when the session is created.""" - - if not ProtocolVersion.has_continuous_paging_next_pages(self._protocol_version): - return False - - for host in self.cluster.metadata.all_hosts(): - if host.dse_version is None: - return False - - version = Version(host.dse_version) - if version < _GRAPH_PAGING_MIN_DSE_VERSION: - return False - - return True - def _resolve_execution_profile_options(self, execution_profile): """ Determine the GraphSON protocol and row factory for a graph query. 
This is useful @@ -2867,7 +2905,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, prepared_statement = None - if isinstance(query, six.string_types): + if isinstance(query, str): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) @@ -2903,25 +2941,15 @@ def _create_response_future(self, query, parameters, trace, custom_payload, spec_exec_policy = execution_profile.speculative_execution_policy fetch_size = query.fetch_size - if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: + if fetch_size is FETCH_SIZE_UNSET: fetch_size = self.default_fetch_size - elif self._protocol_version == 1: - fetch_size = None start_time = time.time() - if self._protocol_version >= 3 and self.use_client_timestamp: + if self.use_client_timestamp: timestamp = self.cluster.timestamp_generator() else: timestamp = None - supports_continuous_paging_state = ( - ProtocolVersion.has_continuous_paging_next_pages(self._protocol_version) - ) - if continuous_paging_options and supports_continuous_paging_state: - continuous_paging_state = ContinuousPagingState(continuous_paging_options.max_queue_size) - else: - continuous_paging_state = None - if isinstance(query, SimpleStatement): query_string = query.query_string statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None @@ -2965,7 +2993,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, - continuous_paging_state=continuous_paging_state, host=host) + continuous_paging_state=None, host=host) def get_execution_profile(self, name): """ @@ -3083,7 +3111,7 @@ def prepare(self, query, custom_payload=None, keyspace=None): prepared_keyspace = keyspace if keyspace else None prepared_statement = PreparedStatement.from_message( response.query_id, response.bind_metadata, response.pk_indexes, self.cluster.metadata, query, prepared_keyspace, - self._protocol_version, response.column_metadata, response.result_metadata_id) + self._protocol_version, response.column_metadata, response.result_metadata_id, response.is_lwt, self.cluster.column_encryption_policy) prepared_statement.custom_payload = future.custom_payload self.cluster.add_prepared(response.query_id, prepared_statement) @@ -3180,11 +3208,7 @@ def add_or_renew_pool(self, host, is_host_addition): def run_add_or_renew_pool(): try: - if self._protocol_version >= 3: - new_pool = HostConnection(host, distance, self) - else: - # TODO remove host pool again ??? 
- new_pool = HostConnectionPool(host, distance, self) + new_pool = HostConnection(host, distance, self) except AuthenticationFailed as auth_exc: conn_exc = ConnectionException(str(auth_exc), endpoint=host) self.cluster.signal_connection_failure(host, conn_exc, is_host_addition) @@ -3335,10 +3359,6 @@ def user_type_registered(self, keyspace, user_type, klass): 'User type %s does not exist in keyspace %s' % (user_type, keyspace)) field_names = type_meta.field_names - if six.PY2: - # go from unicode to string to avoid decode errors from implicit - # decode when formatting non-ascii values - field_names = [fn.encode('utf-8') for fn in field_names] def encode(val): return '{ %s }' % ' , '.join('%s : %s' % ( @@ -3426,10 +3446,10 @@ class ControlConnection(object): Internal """ - _SELECT_PEERS = "SELECT * FROM system.peers" + _SELECT_PEERS = "SELECT peer, data_center, host_id, rack, release_version, rpc_address, schema_version, tokens FROM system.peers" _SELECT_PEERS_NO_TOKENS_TEMPLATE = "SELECT host_id, peer, data_center, rack, rpc_address, {nt_col_name}, release_version, schema_version FROM system.peers" - _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" - _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL = "SELECT broadcast_address, cluster_name, data_center, host_id, listen_address, partitioner, rack, release_version, rpc_address, schema_version, tokens FROM system.local WHERE key='local'" + _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version, rpc_address FROM system.local WHERE key='local'" # Used only when token_metadata_enabled is set to False _SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS = "SELECT rpc_address FROM system.local WHERE key='local'" @@ -3450,6 +3470,7 @@ class PeersQueryType(object): _is_shutdown = False _timeout = None _protocol_version = None + _metadata_request_timeout = None _schema_event_refresh_window = None _topology_event_refresh_window = None @@ -3457,8 +3478,10 @@ class PeersQueryType(object): _schema_meta_enabled = True _token_meta_enabled = True + _schema_meta_page_size = 1000 _uses_peers_v2 = True + _tablets_routing_v1 = False # for testing purposes _time = time @@ -3468,7 +3491,8 @@ def __init__(self, cluster, timeout, topology_event_refresh_window, status_event_refresh_window, schema_meta_enabled=True, - token_meta_enabled=True): + token_meta_enabled=True, + schema_meta_page_size=1000): # use a weak reference to allow the Cluster instance to be GC'ed (and # shutdown) since implementing __del__ disables the cycle detector self._cluster = weakref.proxy(cluster) @@ -3480,6 +3504,7 @@ def __init__(self, cluster, timeout, self._status_event_refresh_window = status_event_refresh_window self._schema_meta_enabled = schema_meta_enabled self._token_meta_enabled = token_meta_enabled + self._schema_meta_page_size = schema_meta_page_size self._lock = RLock() self._schema_agreement_lock = Lock() @@ -3509,16 +3534,8 @@ def _set_new_connection(self, conn): if old: log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn) old.close() - - def _reconnect_internal(self): - """ - Tries to connect to each host in the query plan until one succeeds - or every attempt fails. If successful, a new Connection will be - returned. 
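The _SELECT_LOCAL / _SELECT_PEERS statements above now list the needed columns explicitly instead of using SELECT *. For debugging, the same discovery data can be pulled with an ordinary session (assuming a node reachable at 127.0.0.1):

    from cassandra.cluster import Cluster

    cluster = Cluster(["127.0.0.1"])
    session = cluster.connect()

    local = session.execute(
        "SELECT host_id, cluster_name, data_center, rack, rpc_address, schema_version "
        "FROM system.local WHERE key='local'").one()
    print("local node:", local.host_id, local.data_center, local.rack)

    for peer in session.execute(
            "SELECT peer, host_id, data_center, rack, rpc_address FROM system.peers"):
        print("peer:", peer.peer, peer.host_id, peer.data_center, peer.rack)

    cluster.shutdown()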
Otherwise, :exc:`NoHostAvailable` will be raised - with an "errors" arg that is a dict mapping host addresses - to the exception that was raised when an attempt was made to open - a connection to that host. - """ + + def _connect_host_in_lbp(self): errors = {} lbp = ( self._cluster.load_balancing_policy @@ -3528,7 +3545,7 @@ def _reconnect_internal(self): for host in lbp.make_query_plan(): try: - return self._try_connect(host) + return (self._try_connect(host), None) except ConnectionException as exc: errors[str(host.endpoint)] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) @@ -3538,7 +3555,31 @@ def _reconnect_internal(self): log.warning("[control connection] Error connecting to %s:", host, exc_info=True) if self._is_shutdown: raise DriverException("[control connection] Reconnection in progress during shutdown") + + return (None, errors) + + def _reconnect_internal(self): + """ + Tries to connect to each host in the query plan until one succeeds + or every attempt fails. If successful, a new Connection will be + returned. Otherwise, :exc:`NoHostAvailable` will be raised + with an "errors" arg that is a dict mapping host addresses + to the exception that was raised when an attempt was made to open + a connection to that host. + """ + (conn, _) = self._connect_host_in_lbp() + if conn is not None: + return conn + + # Try to re-resolve hostnames as a fallback when all hosts are unreachable + self._cluster._resolve_hostnames() + self._cluster._add_resolved_hosts() + + (conn, errors) = self._connect_host_in_lbp() + if conn is not None: + return conn + raise NoHostAvailable("Unable to connect to any servers", errors) def _try_connect(self, host): @@ -3570,6 +3611,18 @@ def _try_connect(self, host): "registering watchers and refreshing schema and topology", connection) + # Indirect way to determine if conencted to a ScyllaDB cluster, which does not support peers_v2 + # If sharding information is available, it's a ScyllaDB cluster, so do not use peers_v2 table. 
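The reconnect logic split out above first walks the load-balancing policy's query plan, and only when every host fails does it re-resolve the contact-point hostnames and try the plan once more. Schematically (a control-flow sketch, not the driver's actual code):

    def reconnect(make_query_plan, try_connect, re_resolve_contact_points):
        errors = {}
        for is_retry in (False, True):
            for host in make_query_plan():
                try:
                    return try_connect(host)
                except Exception as exc:            # the driver catches more specific types
                    errors[str(host)] = exc
            if not is_retry:
                re_resolve_contact_points()         # DNS may now point at new addresses
        raise RuntimeError("Unable to connect to any servers: %r" % (errors,))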
+ if connection.features.sharding_info is not None: + self._uses_peers_v2 = False + + # Only ScyllaDB supports "USING TIMEOUT" + # Sharding information signals it is ScyllaDB + self._metadata_request_timeout = None if connection.features.sharding_info is None or not self._cluster.metadata_request_timeout \ + else datetime.timedelta(seconds=self._cluster.metadata_request_timeout) + + self._tablets_routing_v1 = connection.features.tablets_routing_v1 + # use weak references in both directions # _clear_watcher will be called when this ControlConnection is about to be finalized # _watch_callback will get the actual callback from the Connection and relay it to @@ -3584,8 +3637,10 @@ def _try_connect(self, host): sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection) sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS - peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) - local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE) + peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout), + consistency_level=ConsistencyLevel.ONE) + local_query = QueryMessage(query=maybe_add_timeout_to_query(sel_local, self._metadata_request_timeout), + consistency_level=ConsistencyLevel.ONE) (peers_success, peers_result), (local_success, local_result) = connection.wait_for_responses( peers_query, local_query, timeout=self._timeout, fail_on_error=False) @@ -3596,7 +3651,8 @@ def _try_connect(self, host): # error with the peers v2 query, fallback to peers v1 self._uses_peers_v2 = False sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection) - peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) + peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout), + consistency_level=ConsistencyLevel.ONE) peers_result = connection.wait_for_response( peers_query, timeout=self._timeout) @@ -3704,7 +3760,12 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w log.debug("Skipping schema refresh due to lack of schema agreement") return False - self._cluster.metadata.refresh(connection, self._timeout, **kwargs) + self._cluster.metadata.refresh( + connection, + self._timeout, + fetch_size=self._schema_meta_page_size, + metadata_request_timeout=self._metadata_request_timeout, + **kwargs) return True @@ -3735,8 +3796,10 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, else: log.debug("[control connection] Refreshing node list and token map") sel_local = self._SELECT_LOCAL - peers_query = QueryMessage(query=sel_peers, consistency_level=cl) - local_query = QueryMessage(query=sel_local, consistency_level=cl) + peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout), + consistency_level=cl) + local_query = QueryMessage(query=maybe_add_timeout_to_query(sel_local, self._metadata_request_timeout), + consistency_level=cl) peers_result, local_result = connection.wait_for_responses( peers_query, local_query, timeout=self._timeout) @@ -3745,9 +3808,10 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = None token_map = {} - found_hosts = set() + found_host_ids = set() + found_endpoints = set() + if local_result.parsed_rows: - found_hosts.add(connection.endpoint) local_rows = dict_factory(local_result.column_names, local_result.parsed_rows) 
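maybe_add_timeout_to_query() is called throughout these hunks but is not defined in this diff; a plausible sketch, assuming it appends ScyllaDB's USING TIMEOUT clause when a datetime.timedelta is configured and otherwise returns the statement unchanged:

    import datetime

    def maybe_add_timeout_to_query(statement, metadata_request_timeout):
        if metadata_request_timeout is None:
            return statement
        millis = int(metadata_request_timeout.total_seconds() * 1000)
        return "%s USING TIMEOUT %dms" % (statement, millis)

    assert maybe_add_timeout_to_query("SELECT * FROM system.peers", None) == \
        "SELECT * FROM system.peers"
    assert maybe_add_timeout_to_query(
        "SELECT * FROM system.peers", datetime.timedelta(seconds=2)) == \
        "SELECT * FROM system.peers USING TIMEOUT 2000ms"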
local_row = local_rows[0] cluster_name = local_row["cluster_name"] @@ -3756,12 +3820,24 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = local_row.get("partitioner") tokens = local_row.get("tokens") - host = self._cluster.metadata.get_host(connection.endpoint) + host = self._cluster.metadata.get_host(connection.original_endpoint) if host: datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) + + # support the use case of connecting only with public address + if isinstance(self._cluster.endpoint_factory, SniEndPointFactory): + new_endpoint = self._cluster.endpoint_factory.create(local_row) + + if new_endpoint.address: + host.endpoint = new_endpoint + host.host_id = local_row.get("host_id") + + found_host_ids.add(host.host_id) + found_endpoints.add(host.endpoint) + host.listen_address = local_row.get("listen_address") host.listen_port = local_row.get("listen_port") host.broadcast_address = _NodeInfo.get_broadcast_address(local_row) @@ -3778,8 +3854,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, # local rpc_address has not been queried yet, try to fetch it # separately, which might fail because C* < 2.1.6 doesn't have rpc_address # in system.local. See CASSANDRA-9436. - local_rpc_address_query = QueryMessage(query=self._SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS, - consistency_level=ConsistencyLevel.ONE) + local_rpc_address_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS, self._metadata_request_timeout), + consistency_level=ConsistencyLevel.ONE) success, local_rpc_address_result = connection.wait_for_response( local_rpc_address_query, timeout=self._timeout, fail_on_error=False) if success: @@ -3800,36 +3877,53 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens: token_map[host] = tokens + self._cluster.metadata.update_host(host, old_endpoint=connection.endpoint) + connection.original_endpoint = connection.endpoint = host.endpoint # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. (See PYTHON-90) should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None for row in peers_result: if not self._is_valid_peer(row): - log.warning( - "Found an invalid row for peer (%s). Ignoring host." % - _NodeInfo.get_broadcast_rpc_address(row)) continue endpoint = self._cluster.endpoint_factory.create(row) + host_id = row.get("host_id") - if endpoint in found_hosts: - log.warning("Found multiple hosts with the same endpoint (%s). Excluding peer %s", endpoint, row.get("peer")) + if endpoint in found_endpoints: + log.warning("Found multiple hosts with the same endpoint(%s). Excluding peer %s - %s", endpoint, row.get("peer"), host_id) continue - found_hosts.add(endpoint) + if host_id in found_host_ids: + log.warning("Found multiple hosts with the same host_id (%s). 
Excluding peer %s", host_id, row.get("peer")) + continue + found_host_ids.add(host_id) + found_endpoints.add(endpoint) host = self._cluster.metadata.get_host(endpoint) datacenter = row.get("data_center") rack = row.get("rack") + + if host is None: + host = self._cluster.metadata.get_host_by_host_id(host_id) + if host and host.endpoint != endpoint: + log.debug("[control connection] Updating host ip from %s to %s for (%s)", host.endpoint, endpoint, host_id) + old_endpoint = host.endpoint + host.endpoint = endpoint + self._cluster.metadata.update_host(host, old_endpoint) + reconnector = host.get_and_set_reconnection_handler(None) + if reconnector: + reconnector.cancel() + self._cluster.on_down(host, is_host_addition=False, expect_host_to_be_down=True) + if host is None: log.debug("[control connection] Found new host to connect to: %s", endpoint) - host, _ = self._cluster.add_host(endpoint, datacenter, rack, signal=True, refresh_nodes=False) + host, _ = self._cluster.add_host(endpoint, datacenter=datacenter, rack=rack, signal=True, refresh_nodes=False, host_id=host_id) should_rebuild_token_map = True else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) - host.host_id = row.get("host_id") + host.host_id = host_id host.broadcast_address = _NodeInfo.get_broadcast_address(row) host.broadcast_port = _NodeInfo.get_broadcast_port(row) host.broadcast_rpc_address = _NodeInfo.get_broadcast_rpc_address(row) @@ -3842,12 +3936,13 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, tokens = row.get("tokens", None) if partitioner and tokens and self._token_meta_enabled: token_map[host] = tokens + self._cluster.metadata.update_host(host, old_endpoint=endpoint) - for old_host in self._cluster.metadata.all_hosts(): - if old_host.endpoint.address != connection.endpoint and old_host.endpoint not in found_hosts: + for old_host_id, old_host in self._cluster.metadata.all_hosts_items(): + if old_host_id not in found_host_ids: should_rebuild_token_map = True log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) - self._cluster.remove_host(old_host) + self._cluster.metadata.remove_host_by_host_id(old_host_id, old_host.endpoint) log.debug("[control connection] Finished fetching ring info") if partitioner and should_rebuild_token_map: @@ -3856,9 +3951,40 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, @staticmethod def _is_valid_peer(row): - return bool(_NodeInfo.get_broadcast_rpc_address(row) and row.get("host_id") and - row.get("data_center") and row.get("rack") and - ('tokens' not in row or row.get('tokens'))) + broadcast_rpc = _NodeInfo.get_broadcast_rpc_address(row) + host_id = row.get("host_id") + + if not broadcast_rpc: + log.warning( + "Found an invalid row for peer - missing broadcast_rpc (full row: %s). Ignoring host." % + row) + return False + + if not host_id: + log.warning( + "Found an invalid row for peer - missing host_id (broadcast_rpc: %s). Ignoring host." % + broadcast_rpc) + return False + + if not row.get("data_center"): + log.warning( + "Found an invalid row for peer - missing data_center (broadcast_rpc: %s, host_id: %s). Ignoring host." % + (broadcast_rpc, host_id)) + return False + + if not row.get("rack"): + log.warning( + "Found an invalid row for peer - missing rack (broadcast_rpc: %s, host_id: %s). Ignoring host." 
% + (broadcast_rpc, host_id)) + return False + + if "tokens" in row and not row.get("tokens"): + log.debug( + "Found a zero-token node - tokens is None (broadcast_rpc: %s, host_id: %s). Ignoring host." % + (broadcast_rpc, host_id)) + return False + + return True def _update_location_info(self, host, datacenter, rack): if host.datacenter == datacenter and host.rack == rack: @@ -3965,8 +4091,10 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai select_peers_query = self._get_peers_query(self.PeersQueryType.PEERS_SCHEMA, connection) while elapsed < total_timeout: - peers_query = QueryMessage(query=select_peers_query, consistency_level=cl) - local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl) + peers_query = QueryMessage(query=maybe_add_timeout_to_query(select_peers_query, self._metadata_request_timeout), + consistency_level=cl) + local_query = QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_SCHEMA_LOCAL, self._metadata_request_timeout), + consistency_level=cl) try: timeout = min(self._timeout, total_timeout - elapsed) peers_result, local_result = connection.wait_for_responses( @@ -4017,7 +4145,7 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): log.debug("[control connection] Schemas match") return None - return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) + return dict((version, list(nodes)) for version, nodes in versions.items()) def _get_peers_query(self, peers_query_type, connection=None): """ @@ -4049,9 +4177,8 @@ def _get_peers_query(self, peers_query_type, connection=None): query_template = (self._SELECT_SCHEMA_PEERS_TEMPLATE if peers_query_type == self.PeersQueryType.PEERS_SCHEMA else self._SELECT_PEERS_NO_TOKENS_TEMPLATE) - - host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version - host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version + host_release_version = self._cluster.metadata.get_host(connection.original_endpoint).release_version + host_dse_version = self._cluster.metadata.get_host(connection.original_endpoint).dse_version uses_native_address_query = ( host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) @@ -4137,7 +4264,7 @@ class _Scheduler(Thread): is_shutdown = False def __init__(self, executor): - self._queue = Queue.PriorityQueue() + self._queue = queue.PriorityQueue() self._scheduled_tasks = set() self._count = count() self._executor = executor @@ -4195,7 +4322,7 @@ def run(self): else: self._queue.put_nowait((run_at, i, task)) break - except Queue.Empty: + except queue.Empty: pass time.sleep(0.1) @@ -4287,7 +4414,6 @@ class ResponseFuture(object): _timer = None _protocol_handler = ProtocolHandler _spec_execution_plan = NoSpeculativeExecutionPlan() - _continuous_paging_options = None _continuous_paging_session = None _host = None @@ -4462,7 +4588,10 @@ def _query(self, host, message=None, cb=None): connection = None try: # TODO get connectTimeout from cluster settings - connection, request_id = pool.borrow_connection(timeout=2.0, routing_key=self.query.routing_key if self.query else None) + if self.query: + connection, request_id = pool.borrow_connection(timeout=2.0, routing_key=self.query.routing_key, keyspace=self.query.keyspace, table=self.query.table) + else: + connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection result_meta = self.prepared_statement.result_metadata if self.prepared_statement 
else [] @@ -4581,6 +4710,19 @@ def _set_result(self, host, connection, pool, response): self._warnings = getattr(response, 'warnings', None) self._custom_payload = getattr(response, 'custom_payload', None) + if self._custom_payload and self.session.cluster.control_connection._tablets_routing_v1 and 'tablets-routing-v1' in self._custom_payload: + protocol = self.session.cluster.protocol_version + info = self._custom_payload.get('tablets-routing-v1') + ctype = types.lookup_casstype('TupleType(LongType, LongType, ListType(TupleType(UUIDType, Int32Type)))') + tablet_routing_info = ctype.from_binary(info, protocol) + first_token = tablet_routing_info[0] + last_token = tablet_routing_info[1] + tablet_replicas = tablet_routing_info[2] + tablet = Tablet.from_row(first_token, last_token, tablet_replicas) + keyspace = self.query.keyspace + table = self.query.table + self.session.cluster.metadata._tablets.add_tablet(keyspace, table, tablet) + if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_SET_KEYSPACE: session = getattr(self, 'session', None) @@ -4837,12 +4979,16 @@ def exception_from_response(response): return response.to_exception() else: return response + if len(retry_decision) == 2: + retry_type, consistency = retry_decision + delay = 0 + elif len(retry_decision) == 3: + retry_type, consistency, delay = retry_decision - retry_type, consistency = retry_decision if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): self._query_retries += 1 reuse = retry_type == RetryPolicy.RETRY - self._retry(reuse, consistency, host) + self._retry(reuse, consistency, host, delay) elif retry_type is RetryPolicy.RETHROW: self._set_final_exception(exception_from_response(response)) else: # IGNORE @@ -4852,7 +4998,7 @@ def exception_from_response(response): self._errors[host] = exception_from_response(response) - def _retry(self, reuse_connection, consistency_level, host): + def _retry(self, reuse_connection, consistency_level, host, delay): if self._final_exception: # the connection probably broke while we were waiting # to retry the operation @@ -4864,7 +5010,7 @@ def _retry(self, reuse_connection, consistency_level, host): self.message.consistency_level = consistency_level # don't retry on the event loop thread - self.session.submit(self._retry_task, reuse_connection, host) + self.session.cluster.scheduler.schedule(delay, self._retry_task, reuse_connection, host) def _retry_task(self, reuse_connection, host): if self._final_exception: @@ -5109,8 +5255,8 @@ def has_more_pages(self): @property def current_rows(self): """ - The list of current page rows. May be empty if the result was empty, - or this is the last page. + The list of current page rows. May be empty; this does not mean + there is no more data. Use `has_more_pages()` for that. """ return self._current_rows or [] @@ -5157,6 +5303,11 @@ def next(self): if not self.response_future._continuous_paging_session: self.fetch_next_page() self._page_iter = iter(self._current_rows) + + # Some servers can return empty pages in this case; Scylla is known to do + # so in some circumstances. Guard against this by recursing to handle + # the next(iter) call. If we have an empty page in that case it will + # get handled by the StopIteration handler when we recurse. return self.next() return next(self._page_iter) @@ -5236,6 +5387,8 @@ def cancel_continuous_paging(self): except AttributeError: raise DriverException("Attempted to cancel paging with no active session. 
This is only for requests with ContinuousdPagingOptions.") + batch_regex = re.compile(r'^\s*BEGIN\s+[a-zA-Z]*\s*BATCH') + @property def was_applied(self): """ @@ -5250,7 +5403,8 @@ def was_applied(self): if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) - is_batch_statement = isinstance(self.response_future.query, BatchStatement) + is_batch_statement = isinstance(self.response_future.query, BatchStatement) \ + or (isinstance(self.response_future.query, SimpleStatement) and self.batch_regex.match(self.response_future.query.query_string)) if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"): raise RuntimeError("No LWT were present in the BatchStatement") diff --git a/cassandra/column_encryption/_policies.py b/cassandra/column_encryption/_policies.py new file mode 100644 index 0000000000..ef8097bfbd --- /dev/null +++ b/cassandra/column_encryption/_policies.py @@ -0,0 +1,139 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from functools import lru_cache + +import logging +import os + +log = logging.getLogger(__name__) + +from cassandra.cqltypes import _cqltypes +from cassandra.policies import ColumnEncryptionPolicy + +from cryptography.hazmat.primitives import padding +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + +AES256_BLOCK_SIZE = 128 +AES256_BLOCK_SIZE_BYTES = int(AES256_BLOCK_SIZE / 8) +AES256_KEY_SIZE = 256 +AES256_KEY_SIZE_BYTES = int(AES256_KEY_SIZE / 8) + +ColData = namedtuple('ColData', ['key','type']) + +class AES256ColumnEncryptionPolicy(ColumnEncryptionPolicy): + + # Fix block cipher mode for now. IV size is a function of block cipher used + # so fixing this avoids (possibly unnecessary) validation logic here. + mode = modes.CBC + + # "iv" param here expects a bytearray that's the same size as the block + # size for AES-256 (128 bits or 16 bytes). If none is provided a new one + # will be randomly generated, but in this case the IV should be recorded and + # preserved or else you will not be able to decrypt any data encrypted by this + # policy. + def __init__(self, iv=None): + + # CBC uses an IV that's the same size as the block size + # + # Avoid defining IV with a default arg in order to stay away from + # any issues around the caching of default args + self.iv = iv + if self.iv: + if not len(self.iv) == AES256_BLOCK_SIZE_BYTES: + raise ValueError("This policy uses AES-256 with CBC mode and therefore expects a 128-bit initialization vector") + else: + self.iv = os.urandom(AES256_BLOCK_SIZE_BYTES) + + # ColData for a given ColDesc is always preserved. 
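The policy introduced here is AES-256 in CBC mode with PKCS7 padding, with the IV prepended to the ciphertext. The same round trip written directly against the cryptography primitives it builds on (illustrative values only):

    import os
    from cryptography.hazmat.primitives import padding
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    key = os.urandom(32)              # 256-bit key
    iv = os.urandom(16)               # one 128-bit CBC block

    padder = padding.PKCS7(128).padder()
    padded = padder.update(b"secret value") + padder.finalize()

    encryptor = Cipher(algorithms.AES256(key), modes.CBC(iv)).encryptor()
    blob = iv + encryptor.update(padded) + encryptor.finalize()      # IV travels with the data

    decryptor = Cipher(algorithms.AES256(key), modes.CBC(blob[:16])).decryptor()
    unpadder = padding.PKCS7(128).unpadder()
    plaintext = unpadder.update(decryptor.update(blob[16:]) + decryptor.finalize()) + unpadder.finalize()
    assert plaintext == b"secret value"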
We only create a Cipher + # when there's an actual need to for a given ColDesc + self.coldata = {} + self.ciphers = {} + + def encrypt(self, coldesc, obj_bytes): + + # AES256 has a 128-bit block size so if the input bytes don't align perfectly on + # those blocks we have to pad them. There's plenty of room for optimization here: + # + # * Instances of the PKCS7 padder should be managed in a bounded pool + # * It would be nice if we could get a flag from encrypted data to indicate + # whether it was padded or not + # * Might be able to make this happen with a leading block of flags in encrypted data + padder = padding.PKCS7(AES256_BLOCK_SIZE).padder() + padded_bytes = padder.update(obj_bytes) + padder.finalize() + + cipher = self._get_cipher(coldesc) + encryptor = cipher.encryptor() + return self.iv + encryptor.update(padded_bytes) + encryptor.finalize() + + def decrypt(self, coldesc, bytes): + + iv = bytes[:AES256_BLOCK_SIZE_BYTES] + encrypted_bytes = bytes[AES256_BLOCK_SIZE_BYTES:] + cipher = self._get_cipher(coldesc, iv=iv) + decryptor = cipher.decryptor() + padded_bytes = decryptor.update(encrypted_bytes) + decryptor.finalize() + + unpadder = padding.PKCS7(AES256_BLOCK_SIZE).unpadder() + return unpadder.update(padded_bytes) + unpadder.finalize() + + def add_column(self, coldesc, key, type): + + if not coldesc: + raise ValueError("ColDesc supplied to add_column cannot be None") + if not key: + raise ValueError("Key supplied to add_column cannot be None") + if not type: + raise ValueError("Type supplied to add_column cannot be None") + if type not in _cqltypes.keys(): + raise ValueError("Type %s is not a supported type".format(type)) + if not len(key) == AES256_KEY_SIZE_BYTES: + raise ValueError("AES256 column encryption policy expects a 256-bit encryption key") + self.coldata[coldesc] = ColData(key, _cqltypes[type]) + + def contains_column(self, coldesc): + return coldesc in self.coldata + + def encode_and_encrypt(self, coldesc, obj): + if not coldesc: + raise ValueError("ColDesc supplied to encode_and_encrypt cannot be None") + if not obj: + raise ValueError("Object supplied to encode_and_encrypt cannot be None") + coldata = self.coldata.get(coldesc) + if not coldata: + raise ValueError("Could not find ColData for ColDesc %s".format(coldesc)) + return self.encrypt(coldesc, coldata.type.serialize(obj, None)) + + def cache_info(self): + return AES256ColumnEncryptionPolicy._build_cipher.cache_info() + + def column_type(self, coldesc): + return self.coldata[coldesc].type + + def _get_cipher(self, coldesc, iv=None): + """ + Access relevant state from this instance necessary to create a Cipher and then get one, + hopefully returning a cached instance if we've already done so (and it hasn't been evicted) + """ + try: + coldata = self.coldata[coldesc] + return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, iv or self.iv) + except KeyError: + raise ValueError("Could not find column {}".format(coldesc)) + + # Explicitly use a class method here to avoid caching self + @lru_cache(maxsize=128) + def _build_cipher(key, iv): + return Cipher(algorithms.AES256(key), AES256ColumnEncryptionPolicy.mode(iv)) diff --git a/cassandra/compat.py b/cassandra/column_encryption/policies.py similarity index 79% rename from cassandra/compat.py rename to cassandra/column_encryption/policies.py index 83c1b104e5..770084bd48 100644 --- a/cassandra/compat.py +++ b/cassandra/column_encryption/policies.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the 
License. -import six - -if six.PY2: - from collections import Mapping -elif six.PY3: - from collections.abc import Mapping +try: + import cryptography + from cassandra.column_encryption._policies import * +except ImportError: + # Cryptography is not installed + pass diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index a8bddcbdab..d6345ca452 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -16,12 +16,10 @@ from collections import namedtuple from heapq import heappush, heappop from itertools import cycle -import six -from six.moves import xrange, zip from threading import Condition import sys -from cassandra.cluster import ResultSet +from cassandra.cluster import ResultSet, EXEC_PROFILE_DEFAULT import logging log = logging.getLogger(__name__) @@ -29,19 +27,13 @@ ExecutionResult = namedtuple('ExecutionResult', ['success', 'result_or_exc']) -def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False): +def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False, execution_profile=EXEC_PROFILE_DEFAULT): """ Executes a sequence of (statement, parameters) tuples concurrently. Each ``parameters`` item must be a sequence or :const:`None`. The `concurrency` parameter controls how many statements will be executed - concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2, - it is recommended that this be kept below 100 times the number of - core connections per host times the number of connected hosts (see - :meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded, - the event loop thread may attempt to block on new connection creation, - substantially impacting throughput. If :attr:`~.Cluster.protocol_version` - is 3 or higher, you can safely experiment with higher levels of concurrency. + concurrently. If `raise_on_first_error` is left as :const:`True`, execution will stop after the first failed statement and the corresponding exception will be @@ -56,6 +48,9 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results on-the-fly). + `execution_profile` argument is the execution profile to use for this + request, it is passed directly to :meth:`Session.execute_async`. + A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned in the same order that the statements were passed in. 
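A short usage sketch for the new execution_profile argument of execute_concurrent (the profile name, keyspace and table below are made up):

    from cassandra.cluster import Cluster, ExecutionProfile
    from cassandra.concurrent import execute_concurrent

    cluster = Cluster(execution_profiles={"bulk": ExecutionProfile(request_timeout=60)})
    session = cluster.connect("ks")

    statements = [("INSERT INTO ks.t (k, v) VALUES (%s, %s)", (i, str(i)))
                  for i in range(100)]
    results = execute_concurrent(session, statements, concurrency=50,
                                 execution_profile="bulk")

    for success, result_or_exc in results:
        if not success:
            print("statement failed:", result_or_exc)

    cluster.shutdown()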
If ``success`` is :const:`False`, there was an error executing the statement, and ``result_or_exc`` will be @@ -90,7 +85,8 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais if not statements_and_parameters: return [] - executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters) + executor = ConcurrentExecutorGenResults(session, statements_and_parameters, execution_profile) \ + if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters, execution_profile) return executor.execute(concurrency, raise_on_first_error) @@ -98,9 +94,10 @@ class _ConcurrentExecutor(object): max_error_recursion = 100 - def __init__(self, session, statements_and_params): + def __init__(self, session, statements_and_params, execution_profile): self.session = session self._enum_statements = enumerate(iter(statements_and_params)) + self._execution_profile = execution_profile self._condition = Condition() self._fail_fast = False self._results_queue = [] @@ -114,7 +111,7 @@ def execute(self, concurrency, fail_fast): self._current = 0 self._exec_count = 0 with self._condition: - for n in xrange(concurrency): + for n in range(concurrency): if not self._execute_next(): break return self._results() @@ -132,23 +129,19 @@ def _execute_next(self): def _execute(self, idx, statement, params): self._exec_depth += 1 try: - future = self.session.execute_async(statement, params, timeout=None) + future = self.session.execute_async(statement, params, timeout=None, execution_profile=self._execution_profile) args = (future, idx) future.add_callbacks( callback=self._on_success, callback_args=args, errback=self._on_error, errback_args=args) except Exception as exc: - # exc_info with fail_fast to preserve stack trace info when raising on the client thread - # (matches previous behavior -- not sure why we wouldn't want stack trace in the other case) - e = sys.exc_info() if self._fail_fast and six.PY2 else exc - # If we're not failing fast and all executions are raising, there is a chance of recursing # here as subsequent requests are attempted. If we hit this threshold, schedule this result/retry # and let the event loop thread return. 
if self._exec_depth < self.max_error_recursion: - self._put_result(e, idx, False) + self._put_result(exc, idx, False) else: - self.session.submit(self._put_result, e, idx, False) + self.session.submit(self._put_result, exc, idx, False) self._exec_depth -= 1 def _on_success(self, result, future, idx): @@ -158,14 +151,6 @@ def _on_success(self, result, future, idx): def _on_error(self, result, future, idx): self._put_result(result, idx, False) - @staticmethod - def _raise(exc): - if six.PY2 and isinstance(exc, tuple): - (exc_type, value, traceback) = exc - six.reraise(exc_type, value, traceback) - else: - raise exc - class ConcurrentExecutorGenResults(_ConcurrentExecutor): @@ -185,7 +170,7 @@ def _results(self): try: self._condition.release() if self._fail_fast and not res[0]: - self._raise(res[1]) + raise res[1] yield res finally: self._condition.acquire() @@ -216,9 +201,9 @@ def _results(self): while self._current < self._exec_count: self._condition.wait() if self._exception and self._fail_fast: - self._raise(self._exception) + raise self._exception if self._exception and self._fail_fast: # raise the exception even if there was no wait - self._raise(self._exception) + raise self._exception return [r[1] for r in sorted(self._results_queue)] diff --git a/cassandra/connection.py b/cassandra/connection.py index 8218b00117..9ac02c9776 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -19,8 +19,6 @@ from heapq import heappush, heappop import io import logging -import six -from six.moves import range import socket import struct import sys @@ -28,12 +26,17 @@ import time import ssl import weakref +import random +import itertools +from typing import Optional, Union +from cassandra.application_info import ApplicationInfoBase +from cassandra.protocol_features import ProtocolFeatures if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: - from six.moves.queue import Queue, Empty # noqa + from queue import Queue, Empty # noqa from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion from cassandra.marshal import int32_pack @@ -61,6 +64,7 @@ try: import lz4 except ImportError: + log.debug("lz4 package could not be imported. LZ4 Compression will not be available") pass else: # The compress and decompress functions we need were moved from the lz4 to @@ -99,6 +103,7 @@ def lz4_decompress(byts): try: import snappy except ImportError: + log.debug("snappy package could not be imported. 
Snappy Compression will not be available") pass else: # work around apparently buggy snappy decompress @@ -108,7 +113,7 @@ def decompress(byts): return snappy.decompress(byts) locally_supported_compressions['snappy'] = (snappy.compress, decompress) -DRIVER_NAME, DRIVER_VERSION = 'Scylla Python Driver', sys.modules['cassandra'].__version__ +DRIVER_NAME, DRIVER_VERSION = 'ScyllaDB Python Driver', sys.modules['cassandra'].__version__ PROTOCOL_VERSION_MASK = 0x7f @@ -116,7 +121,10 @@ def decompress(byts): HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 -frame_header_v1_v2 = struct.Struct('>BbBi') +# shard aware default for opening per shard connection +DEFAULT_LOCAL_PORT_LOW = 49152 +DEFAULT_LOCAL_PORT_HIGH = 65535 + frame_header_v3 = struct.Struct('>BhBi') @@ -304,16 +312,17 @@ def __repr__(self): class SniEndPointFactory(EndPointFactory): - def __init__(self, proxy_address, port): + def __init__(self, proxy_address, port, node_domain=None): self._proxy_address = proxy_address self._port = port + self._node_domain = node_domain def create(self, row): host_id = row.get("host_id") if host_id is None: raise ValueError("No host_id to create the SniEndPoint") - - return SniEndPoint(self._proxy_address, str(host_id), self._port) + address = "{}.{}".format(host_id, self._node_domain) if self._node_domain else str(host_id) + return SniEndPoint(self._proxy_address, str(address), self._port) def create_from_sni(self, sni): return SniEndPoint(self._proxy_address, sni, self._port) @@ -436,33 +445,6 @@ class ProtocolError(Exception): class CrcMismatchException(ConnectionException): pass - -class ContinuousPagingState(object): - """ - A class for specifying continuous paging state, only supported starting with DSE_V2. - """ - - num_pages_requested = None - """ - How many pages we have already requested - """ - - num_pages_received = None - """ - How many pages we have already received - """ - - max_queue_size = None - """ - The max queue size chosen by the user via the options - """ - - def __init__(self, max_queue_size): - self.num_pages_requested = max_queue_size # the initial query requests max_queue_size - self.num_pages_received = 0 - self.max_queue_size = max_queue_size - - class ContinuousPagingSession(object): def __init__(self, stream_id, decoder, row_factory, connection, state): self.stream_id = stream_id @@ -605,12 +587,6 @@ def wrapper(self, *args, **kwargs): DEFAULT_CQL_VERSION = '3.0.0' -if six.PY3: - def int_from_buf_item(i): - return i -else: - int_from_buf_item = ord - class _ConnectionIOBuffer(object): """ @@ -666,6 +642,31 @@ def reset_cql_frame_buffer(self): self.reset_io_buffer() +class ShardAwarePortGenerator: + def __init__(self, start_port: int, end_port: int): + self.start_port = start_port + self.end_port = end_port + + @staticmethod + def _align(value: int, total_shards: int): + shift = value % total_shards + if shift == 0: + return value + return value + total_shards - shift + + def generate(self, shard_id: int, total_shards: int): + start = self._align(random.randrange(self.start_port, self.end_port), total_shards) + shard_id + beginning = self._align(self.start_port, total_shards) + shard_id + available_ports = itertools.chain(range(start, self.end_port, total_shards), + range(beginning, start, total_shards)) + + for port in available_ports: + yield port + + +DefaultShardAwarePortGenerator = ShardAwarePortGenerator(DEFAULT_LOCAL_PORT_LOW, DEFAULT_LOCAL_PORT_HIGH) + + class Connection(object): CALLBACK_ERR_THREAD_THRESHOLD = 100 @@ -678,7 +679,7 @@ class 
Connection(object): protocol_version = ProtocolVersion.MAX_SUPPORTED keyspace = None - compression = True + compression: Union[bool, str] = True _compression_type = None compressor = None decompressor = None @@ -741,34 +742,34 @@ class Connection(object): _socket = None _socket_impl = socket - _ssl_impl = ssl _check_hostname = False _product_type = None _owning_pool = None - shard_id = 0 - sharding_info = None - _is_checksumming_enabled = False + _on_orphaned_stream_released = None + features = None + _application_info: Optional[ApplicationInfoBase] = None + @property def _iobuf(self): # backward compatibility, to avoid any change in the reactors return self._io_buffer.io_buffer def __init__(self, host='127.0.0.1', port=9042, authenticator=None, - ssl_options=None, sockopts=None, compression=True, + ssl_options=None, sockopts=None, compression: Union[bool, str] = True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False, - ssl_context=None, owning_pool=None): - + ssl_context=None, owning_pool=None, shard_id=None, total_shards=None, + on_orphaned_stream_released=None, application_info: Optional[ApplicationInfoBase] = None): # TODO next major rename host to endpoint and remove port kwarg. self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) self.authenticator = authenticator - self.ssl_options = ssl_options.copy() if ssl_options else None + self.ssl_options = ssl_options.copy() if ssl_options else {} self.ssl_context = ssl_context self.sockopts = sockopts self.compression = compression @@ -785,33 +786,37 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._continuous_paging_sessions = {} self._socket_writable = True self.orphaned_request_ids = set() - self._owning_pool = owning_pool + self._on_orphaned_stream_released = on_orphaned_stream_released + self._application_info = application_info if ssl_options: - self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) - if self._check_hostname: - if not getattr(ssl, 'match_hostname', None): - raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " - "Patch or upgrade Python to use this option.") self.ssl_options.update(self.endpoint.ssl_options or {}) elif self.endpoint.ssl_options: self.ssl_options = self.endpoint.ssl_options - - if protocol_version >= 3: - self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) - # Don't fill the deque with 2**15 items right away. Start with some and add - # more if needed. - initial_size = min(300, self.max_in_flight) - self.request_ids = deque(range(initial_size)) - self.highest_request_id = initial_size - 1 - else: - self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1) - self.request_ids = deque(range(self.max_request_id + 1)) - self.highest_request_id = self.max_request_id + # PYTHON-1331 + # + # We always use SSLContext.wrap_socket() now but legacy configs may have other params that were passed to ssl.wrap_socket()... + # and either could have 'check_hostname'. Remove these params into a separate map and use them to build an SSLContext if + # we need to do so. + # + # Note the use of pop() here; we are very deliberately removing these params from ssl_options if they're present. After this + # operation ssl_options should contain only args needed for the ssl_context.wrap_socket() call. 
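With the ShardAwarePortGenerator added earlier in this file, every candidate local port encodes the target shard, i.e. port % total_shards == shard_id, so the server can route a new connection to the right shard from its source port alone. A quick check of that invariant (runnable once this patch is applied):

    from cassandra.connection import ShardAwarePortGenerator

    gen = ShardAwarePortGenerator(49152, 65535)
    ports = list(gen.generate(shard_id=3, total_shards=8))[:5]
    assert all(port % 8 == 3 for port in ports)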
+ if not self.ssl_context and self.ssl_options: + self.ssl_context = self._build_ssl_context_from_options() + + self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) + # Don't fill the deque with 2**15 items right away. Start with some and add + # more if needed. + initial_size = min(300, self.max_in_flight) + self.request_ids = deque(range(initial_size)) + self.highest_request_id = initial_size - 1 self.lock = RLock() self.connected_event = Event() + self.features = ProtocolFeatures(shard_id=shard_id) + self.total_shards = total_shards + self.original_endpoint = self.endpoint @property def host(self): @@ -842,7 +847,7 @@ def create_timer(cls, timeout, callback): raise NotImplementedError() @classmethod - def factory(cls, endpoint, timeout, *args, **kwargs): + def factory(cls, endpoint, timeout, host_conn = None, *args, **kwargs): """ A factory function which returns connections which have succeeded in connecting and are ready for service (or @@ -851,6 +856,10 @@ def factory(cls, endpoint, timeout, *args, **kwargs): start = time.time() kwargs['connect_timeout'] = timeout conn = cls(endpoint, *args, **kwargs) + if host_conn is not None: + host_conn._pending_connections.append(conn) + if host_conn.is_shutdown: + conn.close() elapsed = time.time() - start conn.connected_event.wait(timeout - elapsed) if conn.last_error: @@ -863,21 +872,66 @@ def factory(cls, endpoint, timeout, *args, **kwargs): else: return conn + def _build_ssl_context_from_options(self): + + # Extract a subset of names from self.ssl_options which apply to SSLContext creation + ssl_context_opt_names = ['ssl_version', 'cert_reqs', 'check_hostname', 'keyfile', 'certfile', 'ca_certs', 'ciphers'] + opts = {k:self.ssl_options.get(k, None) for k in ssl_context_opt_names if k in self.ssl_options} + + # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER so we'll get ahead of things by always + # being explicit + ssl_version = opts.get('ssl_version', None) or ssl.PROTOCOL_TLS_CLIENT + cert_reqs = opts.get('cert_reqs', None) or ssl.CERT_REQUIRED + rv = ssl.SSLContext(protocol=int(ssl_version)) + rv.check_hostname = bool(opts.get('check_hostname', False)) + rv.options = int(cert_reqs) + + certfile = opts.get('certfile', None) + keyfile = opts.get('keyfile', None) + if certfile: + rv.load_cert_chain(certfile, keyfile) + ca_certs = opts.get('ca_certs', None) + if ca_certs: + rv.load_verify_locations(ca_certs) + ciphers = opts.get('ciphers', None) + if ciphers: + rv.set_ciphers(ciphers) + + return rv + def _wrap_socket_from_context(self): - ssl_options = self.ssl_options or {} + + # Extract a subset of names from self.ssl_options which apply to SSLContext.wrap_socket (or at least the parts + # of it that don't involve building an SSLContext under the covers) + wrap_socket_opt_names = ['server_side', 'do_handshake_on_connect', 'suppress_ragged_eofs', 'server_hostname'] + opts = {k:self.ssl_options.get(k, None) for k in wrap_socket_opt_names if k in self.ssl_options} + # PYTHON-1186: set the server_hostname only if the SSLContext has # check_hostname enabled and it is not already provided by the EndPoint ssl options - if (self.ssl_context.check_hostname and - 'server_hostname' not in ssl_options): - ssl_options = ssl_options.copy() - ssl_options['server_hostname'] = self.endpoint.address - self._socket = self.ssl_context.wrap_socket(self._socket, **ssl_options) + #opts['server_hostname'] = self.endpoint.address + if (self.ssl_context.check_hostname and 'server_hostname' not in opts): + server_hostname = 
self.endpoint.address + opts['server_hostname'] = server_hostname + + return self.ssl_context.wrap_socket(self._socket, **opts) def _initiate_connection(self, sockaddr): + if self.features.shard_id is not None: + for port in DefaultShardAwarePortGenerator.generate(self.features.shard_id, self.total_shards): + try: + self._socket.bind(('', port)) + break + except Exception as ex: + log.debug("port=%d couldn't bind cause: %s", port, str(ex)) + log.debug('connection (%r) port=%d should be shard_id=%d', id(self), port, port % self.total_shards) + self._socket.connect(sockaddr) - def _match_hostname(self): - ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address) + # PYTHON-1331 + # + # Allow implementations specific to an event loop to add additional behaviours + def _validate_hostname(self): + pass def _get_socket_addresses(self): address, port = self.endpoint.resolve() @@ -894,22 +948,26 @@ def _get_socket_addresses(self): def _connect_socket(self): sockerr = None addresses = self._get_socket_addresses() + port = None for (af, socktype, proto, _, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_context: - self._wrap_socket_from_context() - elif self.ssl_options: - if not self._ssl_impl: - raise RuntimeError("This version of Python was not compiled with SSL support") - self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) + self._socket = self._wrap_socket_from_context() self._socket.settimeout(self.connect_timeout) self._initiate_connection(sockaddr) self._socket.settimeout(None) + local_addr = self._socket.getsockname() - log.debug('Connection %s %s:%s -> %s:%s', id(self), local_addr[0], local_addr[1], sockaddr[0], sockaddr[1]) + log.debug("Connection %s: '%s' -> '%s'", id(self), local_addr, sockaddr) + + # PYTHON-1331 + # + # Most checking is done via the check_hostname param on the SSLContext. + # Subclasses can add additional behaviours via _validate_hostname() so + # run that here. 
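The PYTHON-1331 changes above mean the driver now always goes through SSLContext.wrap_socket(); legacy ssl_options are translated into a context only when none is supplied. The equivalent explicit setup looks roughly like this (paths and hostnames are placeholders):

    import ssl
    from cassandra.cluster import Cluster

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = True
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations("/path/to/ca.pem")              # placeholder CA bundle

    cluster = Cluster(["10.0.0.1"], ssl_context=ctx,
                      ssl_options={"server_hostname": "node1.example.com"})   # placeholder name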
if self._check_hostname: - self._match_hostname() + self._validate_hostname() sockerr = None break except socket.error as err: @@ -1127,14 +1185,13 @@ def _read_frame_header(self): buf = self._io_buffer.cql_frame_buffer.getvalue() pos = len(buf) if pos: - version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK + version = buf[0] & PROTOCOL_VERSION_MASK if version not in ProtocolVersion.SUPPORTED_VERSIONS: raise ProtocolError("This version of the driver does not support protocol version %d" % version) - frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 # this frame header struct is everything after the version byte - header_size = frame_header.size + 1 + header_size = frame_header_v3.size + 1 if pos >= header_size: - flags, stream, op, body_len = frame_header.unpack_from(buf, 1) + flags, stream, op, body_len = frame_header_v3.unpack_from(buf, 1) if body_len < 0: raise ProtocolError("Received negative body length: %r" % body_len) self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size) @@ -1218,8 +1275,8 @@ def process_msg(self, header, body): self.in_flight -= 1 self.orphaned_request_ids.remove(stream_id) need_notify_of_release = True - if need_notify_of_release and self._owning_pool: - self._owning_pool.on_orphaned_stream_released() + if need_notify_of_release and self._on_orphaned_stream_released: + self._on_orphaned_stream_released() try: callback, decoder, result_metadata = self._requests.pop(stream_id) @@ -1231,7 +1288,7 @@ def process_msg(self, header, body): return try: - response = decoder(header.version, self.user_type_map, stream_id, + response = decoder(header.version, self.features, self.user_type_map, stream_id, header.flags, header.opcode, body, self.decompressor, result_metadata) except Exception as exc: log.exception("Error decoding response from Cassandra. " @@ -1286,7 +1343,7 @@ def _send_options_message(self): @defunct_on_error def _handle_options_response(self, options_response): - self.shard_id, self.sharding_info = ShardingInfo.parse_sharding_info(options_response) + self.features = ProtocolFeatures.parse_from_supported(options_response.options) if self.is_defunct: return @@ -1306,6 +1363,11 @@ def _handle_options_response(self, options_response): remote_supported_compressions = options_response.options['COMPRESSION'] self._product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] + options = {} + if self._application_info: + self._application_info.add_startup_options(options) + self.features.add_startup_options(options) + if self.cql_version: if self.cql_version not in supported_cql_versions: raise ProtocolError( @@ -1321,13 +1383,14 @@ def _handle_options_response(self, options_response): overlap = (set(locally_supported_compressions.keys()) & set(remote_supported_compressions)) if len(overlap) == 0: - log.debug("No available compression types supported on both ends." - " locally supported: %r. remotely supported: %r", - locally_supported_compressions.keys(), - remote_supported_compressions) + if locally_supported_compressions: + log.error("No available compression types supported on both ends." + " locally supported: %r. 
remotely supported: %r", + locally_supported_compressions.keys(), + remote_supported_compressions) else: compression_type = None - if isinstance(self.compression, six.string_types): + if isinstance(self.compression, str): # the user picked a specific compression type ('snappy' or 'lz4') if self.compression not in remote_supported_compressions: raise ProtocolError( @@ -1356,13 +1419,14 @@ def _handle_options_response(self, options_response): self._compressor, self.decompressor = \ locally_supported_compressions[compression_type] - self._send_startup_message(compression_type, no_compact=self.no_compact) + self._send_startup_message(compression_type, no_compact=self.no_compact, extra_options=options) @defunct_on_error - def _send_startup_message(self, compression=None, no_compact=False): + def _send_startup_message(self, compression=None, no_compact=False, extra_options=None): log.debug("Sending StartupMessage on %s", self) opts = {'DRIVER_NAME': DRIVER_NAME, - 'DRIVER_VERSION': DRIVER_VERSION} + 'DRIVER_VERSION': DRIVER_VERSION, + **extra_options} if compression: opts['COMPRESSION'] = compression if no_compact: diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index e2a952d682..b9466e961b 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - - # Caching constants. CACHING_ALL = "ALL" CACHING_KEYS_ONLY = "KEYS_ONLY" @@ -31,7 +28,4 @@ class ValidationError(CQLEngineException): class UnicodeMixin(object): - if six.PY3: - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') + __str__ = lambda x: x.__unicode__() diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 49116129fc..3d85587524 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -13,9 +13,8 @@ # limitations under the License. 
from copy import deepcopy, copy -from datetime import date, datetime, timedelta +from datetime import date, datetime, timedelta, timezone import logging -import six from uuid import UUID as _UUID from cassandra import util @@ -327,7 +326,7 @@ class Blob(Column): def to_database(self, value): - if not isinstance(value, (six.binary_type, bytearray)): + if not isinstance(value, (bytes, bytearray)): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) @@ -381,7 +380,7 @@ def __init__(self, min_length=None, max_length=None, **kwargs): def validate(self, value): value = super(Text, self).validate(value) - if not isinstance(value, (six.string_types, bytearray)) and value is not None: + if not isinstance(value, (str, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length is not None: if value and len(value) > self.max_length: @@ -552,7 +551,7 @@ def to_python(self, value): elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) - return datetime.utcfromtimestamp(value) + return datetime.fromtimestamp(value, tz=timezone.utc).replace(tzinfo=None) def to_database(self, value): value = super(DateTime, self).to_database(value) @@ -655,7 +654,7 @@ def validate(self, value): return if isinstance(val, _UUID): return val - if isinstance(val, six.string_types): + if isinstance(val, str): try: return _UUID(val) except ValueError: @@ -1038,12 +1037,11 @@ def to_python(self, value): if value is None: return - copied_value = deepcopy(value) for name, field in self.user_type._fields.items(): - if copied_value[name] is not None or isinstance(field, BaseContainerColumn): - copied_value[name] = field.to_python(copied_value[name]) + if value[name] is not None or isinstance(field, BaseContainerColumn): + value[name] = field.to_python(value[name]) - return copied_value + return value def to_database(self, value): if value is None: diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index d98020b8a8..bf3e55a2e8 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -14,7 +14,6 @@ from collections import defaultdict import logging -import six import threading from cassandra.cluster import Cluster, _ConfigMode, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel @@ -317,14 +316,14 @@ def setup( retry_connect=False, **kwargs): """ - Setup a the driver connection used by the mapper + Setup the driver connection used by the mapper :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level` :param bool lazy_connect: True if should not connect until first use :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially - :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` + :param kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` """ from cassandra.cqlengine import models @@ -346,7 +345,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect elif isinstance(query, BaseCQLStatement): params = query.get_context() query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) - elif 
isinstance(query, six.string_types): + elif isinstance(query, str): query = SimpleStatement(query, consistency_level=consistency_level) log.debug(format_log_context('Query: {}, Params: {}'.format(query.query_string, params), connection=connection)) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 5cb0f673d1..606f5bc330 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -12,21 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division from datetime import datetime from cassandra.cqlengine import UnicodeMixin, ValidationError -import sys - -if sys.version_info >= (2, 7): - def get_total_seconds(td): - return td.total_seconds() -else: - def get_total_seconds(td): - # integer division used here to emulate built-in total_seconds - return ((86400 * td.days + td.seconds) * 10 ** 6 + td.microseconds) / 10 ** 6 - +def get_total_seconds(td): + return td.total_seconds() class QueryValue(UnicodeMixin): """ diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 536bde6349..4ac4192a80 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -16,7 +16,6 @@ import json import logging import os -import six import warnings from itertools import product @@ -232,7 +231,7 @@ def _sync_table(model, connection=None): except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists - if "Cannot add already existing column family" not in six.text_type(ex): + if "Cannot add already existing column family" not in str(ex): raise else: log.debug(format_log_context("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) @@ -257,7 +256,7 @@ def _sync_table(model, connection=None): continue - if col.primary_key or col.primary_key: + if col.primary_key or col.partition_key: msg = format_log_context("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}", keyspace=ks_name, connection=connection) raise CQLEngineException(msg.format(model_name, db_name, cf_name)) @@ -477,15 +476,23 @@ def _update_options(model, connection=None): except KeyError: msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) raise KeyError(msg % (name, existing_options.keys())) - if isinstance(existing_value, six.string_types): + if isinstance(existing_value, str): if value != existing_value: update_options[name] = value else: try: for k, v in value.items(): - if existing_value[k] != v: - update_options[name] = value - break + # When creating table with compaction 'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' in Scylla, + # it will be silently changed to 'class': 'LeveledCompactionStrategy' - same for at least SizeTieredCompactionStrategy, + # probably others too. We need to handle this case here. 
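+ # For example, a requested 'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' comes back from the server + # as just 'class': 'LeveledCompactionStrategy', so the comparison below also accepts the trailing class name as equal; + # otherwise sync_table() would keep flagging the compaction options as changed and issue unnecessary updates.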
+ if k == 'class' and name == 'compaction': + if existing_value[k] != v and existing_value[k] != v.split('.')[-1]: + update_options[name] = value + break + else: + if existing_value[k] != v: + update_options[name] = value + break except KeyError: update_options[name] = value diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index b3c7c9e37f..bc00001666 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -14,7 +14,6 @@ import logging import re -import six from warnings import warn from cassandra.cqlengine import CQLEngineException, ValidationError @@ -614,7 +613,7 @@ def __iter__(self): def __getitem__(self, key): """ Returns column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -622,7 +621,7 @@ def __getitem__(self, key): def __setitem__(self, key, val): """ Sets a column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -1042,8 +1041,7 @@ def _transform_column(col_name, col_obj): return klass -@six.add_metaclass(ModelMetaClass) -class Model(BaseModel): +class Model(BaseModel, metaclass=ModelMetaClass): __abstract__ = True """ *Optional.* Indicates that this model is only intended to be used as a base class for other models. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index bba505583c..2adf51758d 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import six from cassandra.cqlengine import UnicodeMixin @@ -44,8 +43,7 @@ def __init__(cls, name, bases, dct): super(OpMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(OpMapMeta) -class BaseWhereOperator(BaseQueryOperator): +class BaseWhereOperator(BaseQueryOperator, metaclass=OpMapMeta): """ base operator used for where clauses """ @classmethod def get_operator(cls, symbol): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 11f664ec02..afc7ceeef6 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -16,7 +16,6 @@ from datetime import datetime, timedelta from functools import partial import time -import six from warnings import warn from cassandra.query import SimpleStatement, BatchType as CBatchType, BatchStatement @@ -103,29 +102,29 @@ def in_(self, item): used where you'd typically want to use python's `in` operator """ - return WhereClause(six.text_type(self), InOperator(), item) + return WhereClause(str(self), InOperator(), item) def contains_(self, item): """ Returns a CONTAINS operator """ - return WhereClause(six.text_type(self), ContainsOperator(), item) + return WhereClause(str(self), ContainsOperator(), item) def __eq__(self, other): - return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) + return WhereClause(str(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): - return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): - return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOrEqualOperator(), self._to_database(other)) def __lt__(self, other): - return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): - return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): @@ -206,8 +205,8 @@ def add_callback(self, fn, *args, **kwargs): :param fn: Callable object :type fn: callable - :param \*args: Positional arguments to be passed to the callback at the time of execution - :param \*\*kwargs: Named arguments to be passed to the callback at the time of execution + :param args: Positional arguments to be passed to the callback at the time of execution + :param kwargs: Named arguments to be passed to the callback at the time of execution """ if not callable(fn): raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn))) @@ -231,7 +230,7 @@ def execute(self): opener = 'BEGIN ' + (str(batch_type) + ' ' if batch_type else '') + ' BATCH' if self.timestamp: - if isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): ts = self.timestamp elif isinstance(self.timestamp, (datetime, timedelta)): ts = self.timestamp @@ -277,8 +276,8 @@ class ContextQuery(object): A Context manager to allow a Model to switch context easily. Presently, the context only specifies a keyspace for model IO. - :param \*args: One or more models. A model should be a class type, not an instance. - :param \*\*kwargs: (optional) Context parameters: can be *keyspace* or *connection* + :param args: One or more models. 
A model should be a class type, not an instance. + :param kwargs: (optional) Context parameters: can be *keyspace* or *connection* For example: @@ -286,15 +285,15 @@ class ContextQuery(object): with ContextQuery(Automobile, keyspace='test2') as A: A.objects.create(manufacturer='honda', year=2008, model='civic') - print len(A.objects.all()) # 1 result + print(len(A.objects.all())) # 1 result with ContextQuery(Automobile, keyspace='test4') as A: - print len(A.objects.all()) # 0 result + print(len(A.objects.all())) # 0 result # Multiple models with ContextQuery(Automobile, Automobile2, connection='cluster2') as (A, A2): - print len(A.objects.all()) - print len(A2.objects.all()) + print(len(A.objects.all())) + print(len(A2.objects.all())) """ @@ -407,7 +406,7 @@ def _execute(self, statement): return result def __unicode__(self): - return six.text_type(self._select_query()) + return str(self._select_query()) def __str__(self): return str(self.__unicode__()) @@ -604,7 +603,7 @@ def batch(self, batch_obj): def first(self): try: - return six.next(iter(self)) + return next(iter(self)) except StopIteration: return None @@ -809,11 +808,11 @@ class Comment(Model): print("Normal") for comment in Comment.objects(photo_id=u): - print comment.comment_id + print(comment.comment_id) print("Reversed") for comment in Comment.objects(photo_id=u).order_by("-comment_id"): - print comment.comment_id + print(comment.comment_id) """ if len(colnames) == 0: clone = copy.deepcopy(self) @@ -901,7 +900,7 @@ def limit(self, v): if v is None: v = 0 - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._limit: return self @@ -925,7 +924,7 @@ def fetch_size(self, v): print(user) """ - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._fetch_size: return self diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index c6ceb16607..4782fdccd8 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -14,8 +14,6 @@ from datetime import datetime, timedelta import time -import six -from six.moves import filter from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine import columns @@ -114,7 +112,7 @@ def __init__(self, field, operator, value, quote_field=True): def __unicode__(self): field = ('"{0}"' if self.quote_field else '{0}').format(self.field) - return u'{0} {1} {2}'.format(field, self.operator, six.text_type(self.query_value)) + return u'{0} {1} {2}'.format(field, self.operator, str(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -186,8 +184,7 @@ def __init__(cls, name, bases, dct): super(ContainerUpdateTypeMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(ContainerUpdateTypeMapMeta) -class ContainerUpdateClause(AssignmentClause): +class ContainerUpdateClause(AssignmentClause, metaclass=ContainerUpdateTypeMapMeta): def __init__(self, field, value, operation=None, previous=None): super(ContainerUpdateClause, self).__init__(field, value) @@ -563,7 +560,7 @@ def add_conditional_clause(self, clause): self.conditionals.append(clause) def _get_conditionals(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.conditionals])) + return 'IF {0}'.format(' AND '.join([str(c) for c in self.conditionals])) def get_context_size(self): return len(self.get_context()) @@ -584,7 +581,7 @@ def timestamp_normalized(self): if not self.timestamp: return None - if 
isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): return self.timestamp if isinstance(self.timestamp, timedelta): @@ -602,7 +599,7 @@ def __repr__(self): @property def _where(self): - return 'WHERE {0}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + return 'WHERE {0}'.format(' AND '.join([str(c) for c in self.where_clauses])) class SelectStatement(BaseCQLStatement): @@ -629,10 +626,10 @@ def __init__(self, fetch_size=fetch_size ) - self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) + self.fields = [fields] if isinstance(fields, str) else (fields or []) self.distinct_fields = distinct_fields self.count = count - self.order_by = [order_by] if isinstance(order_by, six.string_types) else order_by + self.order_by = [order_by] if isinstance(order_by, str) else order_by self.limit = limit self.allow_filtering = allow_filtering @@ -653,7 +650,7 @@ def __unicode__(self): qs += [self._where] if self.order_by and not self.count: - qs += ['ORDER BY {0}'.format(', '.join(six.text_type(o) for o in self.order_by))] + qs += ['ORDER BY {0}'.format(', '.join(str(o) for o in self.order_by))] if self.limit: qs += ['LIMIT {0}'.format(self.limit)] @@ -798,7 +795,7 @@ def __unicode__(self): qs += ["USING {0}".format(" AND ".join(using_options))] qs += ['SET'] - qs += [', '.join([six.text_type(c) for c in self.assignments])] + qs += [', '.join([str(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] @@ -824,7 +821,9 @@ def update_context_id(self, i): self.context_counter += conditional.get_context_size() def add_update(self, column, value, operation=None, previous=None): - value = column.to_database(value) + # For remove all values are None, no need to convert them + if operation != 'remove': + value = column.to_database(value) col_type = type(column) container_update_type = ContainerUpdateClause.type_map.get(col_type) if container_update_type: @@ -849,7 +848,7 @@ def __init__(self, table, fields=None, where=None, timestamp=None, conditionals= conditionals=conditionals ) self.fields = [] - if isinstance(fields, six.string_types): + if isinstance(fields, str): fields = [fields] for field in fields or []: self.add_field(field) @@ -874,7 +873,7 @@ def get_context(self): return ctx def add_field(self, field): - if isinstance(field, six.string_types): + if isinstance(field, str): field = FieldDeleteClause(field) if not isinstance(field, BaseClause): raise StatementException("only instances of AssignmentClause can be added to statements") diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 155068d99e..7fa85f1919 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -13,7 +13,6 @@ # limitations under the License. 
import re -import six from cassandra.util import OrderedDict from cassandra.cqlengine import CQLEngineException @@ -72,7 +71,7 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in self._values.items())) def has_changed_fields(self): return any(v.changed for v in self._values.values()) @@ -93,14 +92,14 @@ def __getattr__(self, attr): raise AttributeError(attr) def __getitem__(self, key): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError return getattr(self, key) def __setitem__(self, key, val): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError @@ -198,8 +197,7 @@ def _transform_column(field_name, field_obj): return klass -@six.add_metaclass(UserTypeMetaClass) -class UserType(BaseUserType): +class UserType(BaseUserType, metaclass=UserTypeMetaClass): """ This class is used to model User Defined Types. To define a type, declare a class inheriting from this, and assign field types as class attributes: diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7946a63af8..e36c48563c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -39,8 +39,6 @@ import re import socket import time -import six -from six.moves import range import struct import sys from uuid import UUID @@ -50,14 +48,11 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack, point_be, point_le, - vints_pack, vints_unpack) + vints_pack, vints_unpack, uvint_unpack, uvint_pack) from cassandra import util _little_endian_flag = 1 # we always serialize LE -if six.PY3: - import ipaddress - -_ord = ord if six.PY2 else lambda x: x +import ipaddress apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' 
@@ -66,16 +61,12 @@ log = logging.getLogger(__name__) -if six.PY3: - _number_types = frozenset((int, float)) - long = int +_number_types = frozenset((int, float)) + - def _name_from_hex_string(encoded_name): - bin_str = unhexlify(encoded_name) - return bin_str.decode('ascii') -else: - _number_types = frozenset((int, long, float)) - _name_from_hex_string = unhexlify +def _name_from_hex_string(encoded_name): + bin_str = unhexlify(encoded_name) + return bin_str.decode('ascii') def trim_if_startswith(s, prefix): @@ -235,13 +226,15 @@ def parse_casstype_args(typestring): else: names.append(None) - ctype = lookup_casstype_simple(tok) + try: + ctype = int(tok) + except ValueError: + ctype = lookup_casstype_simple(tok) types.append(ctype) # return the first (outer) type, which will have all parameters applied return args[0][0][0] - def lookup_casstype(casstype): """ Given a Cassandra type as a string (possibly including parameters), hand @@ -276,8 +269,7 @@ def __str__(self): EMPTY = EmptyValue() -@six.add_metaclass(CassandraTypeType) -class _CassandraType(object): +class _CassandraType(object, metaclass=CassandraTypeType): subtypes = () num_subtypes = 0 empty_binary_ok = False @@ -296,7 +288,7 @@ class _CassandraType(object): """ def __repr__(self): - return '<%s( %r )>' % (self.cql_parameterized_type(), self.val) + return '<%s>' % (self.cql_parameterized_type()) @classmethod def from_binary(cls, byts, protocol_version): @@ -380,8 +372,6 @@ def apply_parameters(cls, subtypes, names=None): raise ValueError("%s types require %d subtypes (%d given)" % (cls.typename, cls.num_subtypes, len(subtypes))) newname = cls.cass_parameterized_type_with(subtypes) - if six.PY2 and isinstance(newname, unicode): - newname = newname.encode('utf-8') return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names}) @classmethod @@ -402,6 +392,9 @@ def cass_parameterized_type(cls, full=False): """ return cls.cass_parameterized_type_with(cls.subtypes, full=full) + @classmethod + def serial_size(cls): + return None # it's initially named with a _ to avoid registering it as a real type, but # client programs may want to use the name still for isinstance(), etc @@ -412,16 +405,10 @@ class _UnrecognizedType(_CassandraType): num_subtypes = 'UNKNOWN' -if six.PY3: - def mkUnrecognizedType(casstypename): - return CassandraTypeType(casstypename, - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) -else: - def mkUnrecognizedType(casstypename): # noqa - return CassandraTypeType(casstypename.encode('utf8'), - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) +def mkUnrecognizedType(casstypename): + return CassandraTypeType(casstypename, + (_UnrecognizedType,), + {'typename': "'%s'" % casstypename}) class BytesType(_CassandraType): @@ -430,7 +417,7 @@ class BytesType(_CassandraType): @staticmethod def serialize(val, protocol_version): - return six.binary_type(val) + return bytes(val) class DecimalType(_CassandraType): @@ -473,6 +460,9 @@ def serialize(uuid, protocol_version): except AttributeError: raise TypeError("Got a non-UUID object for a UUID value") + @classmethod + def serial_size(cls): + return 16 class BooleanType(_CassandraType): typename = 'boolean' @@ -485,6 +475,10 @@ def deserialize(byts, protocol_version): def serialize(truth, protocol_version): return int8_pack(truth) + @classmethod + def serial_size(cls): + return 1 + class ByteType(_CassandraType): typename = 'tinyint' @@ -497,25 +491,20 @@ def serialize(byts, protocol_version): return int8_pack(byts) -if 
six.PY2: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True -else: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True +class AsciiType(_CassandraType): + typename = 'ascii' + empty_binary_ok = True - @staticmethod - def deserialize(byts, protocol_version): - return byts.decode('ascii') + @staticmethod + def deserialize(byts, protocol_version): + return byts.decode('ascii') - @staticmethod - def serialize(var, protocol_version): - try: - return var.encode('ascii') - except UnicodeDecodeError: - return var + @staticmethod + def serialize(var, protocol_version): + try: + return var.encode('ascii') + except UnicodeDecodeError: + return var class FloatType(_CassandraType): @@ -529,6 +518,9 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return float_pack(byts) + @classmethod + def serial_size(cls): + return 4 class DoubleType(_CassandraType): typename = 'double' @@ -541,6 +533,9 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return double_pack(byts) + @classmethod + def serial_size(cls): + return 8 class LongType(_CassandraType): typename = 'bigint' @@ -553,6 +548,9 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int64_pack(byts) + @classmethod + def serial_size(cls): + return 8 class Int32Type(_CassandraType): typename = 'int' @@ -565,6 +563,9 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int32_pack(byts) + @classmethod + def serial_size(cls): + return 4 class IntegerType(_CassandraType): typename = 'varint' @@ -600,7 +601,7 @@ def serialize(addr, protocol_version): # since we've already determined the AF return socket.inet_aton(addr) except: - if six.PY3 and isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): + if isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): return addr.packed raise ValueError("can't interpret %r as an inet address" % (addr,)) @@ -659,8 +660,11 @@ def serialize(v, protocol_version): raise TypeError('DateType arguments must be a datetime, date, or timestamp') timestamp = v - return int64_pack(long(timestamp)) + return int64_pack(int(timestamp)) + @classmethod + def serial_size(cls): + return 8 class TimestampType(DateType): pass @@ -683,6 +687,9 @@ def serialize(timeuuid, protocol_version): except AttributeError: raise TypeError("Got a non-UUID object for a UUID value") + @classmethod + def serial_size(cls): + return 16 class SimpleDateType(_CassandraType): typename = 'date' @@ -703,7 +710,7 @@ def serialize(val, protocol_version): try: days = val.days_from_epoch except AttributeError: - if isinstance(val, six.integer_types): + if isinstance(val, int): # the DB wants offset int values, but util.Date init takes days from epoch # here we assume int values are offset, as they would appear in CQL # short circuit to avoid subtracting just to add offset @@ -723,9 +730,14 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int16_pack(byts) - class TimeType(_CassandraType): typename = 'time' + # Time should be a fixed size 8 byte type but Cassandra 5.0 code marks it as + # variable size... and we have to match what the server expects since the server + # uses that specification to encode data of that type. 
+ #@classmethod + #def serial_size(cls): + # return 8 @staticmethod def deserialize(byts, protocol_version): @@ -800,18 +812,13 @@ class _SimpleParameterizedType(_ParameterizedType): @classmethod def deserialize_safe(cls, byts, protocol_version): subtype, = cls.subtypes - if protocol_version >= 3: - unpack = int32_unpack - length = 4 - else: - unpack = uint16_unpack - length = 2 - numelements = unpack(byts[:length]) + length = 4 + numelements = int32_unpack(byts[:length]) p = length result = [] inner_proto = max(3, protocol_version) for _ in range(numelements): - itemlen = unpack(byts[p:p + length]) + itemlen = int32_unpack(byts[p:p + length]) p += length if itemlen < 0: result.append(None) @@ -823,18 +830,20 @@ def deserialize_safe(cls, byts, protocol_version): @classmethod def serialize_safe(cls, items, protocol_version): - if isinstance(items, six.string_types): + if isinstance(items, str): raise TypeError("Received a string for a type that expects a sequence") subtype, = cls.subtypes - pack = int32_pack if protocol_version >= 3 else uint16_pack buf = io.BytesIO() - buf.write(pack(len(items))) + buf.write(int32_pack(len(items))) inner_proto = max(3, protocol_version) for item in items: - itembytes = subtype.to_binary(item, inner_proto) - buf.write(pack(len(itembytes))) - buf.write(itembytes) + if item is None: + buf.write(int32_pack(-1)) + else: + itembytes = subtype.to_binary(item, inner_proto) + buf.write(int32_pack(len(itembytes))) + buf.write(itembytes) return buf.getvalue() @@ -857,18 +866,13 @@ class MapType(_ParameterizedType): @classmethod def deserialize_safe(cls, byts, protocol_version): key_type, value_type = cls.subtypes - if protocol_version >= 3: - unpack = int32_unpack - length = 4 - else: - unpack = uint16_unpack - length = 2 - numelements = unpack(byts[:length]) + length = 4 + numelements = int32_unpack(byts[:length]) p = length themap = util.OrderedMapSerializedKey(key_type, protocol_version) inner_proto = max(3, protocol_version) for _ in range(numelements): - key_len = unpack(byts[p:p + length]) + key_len = int32_unpack(byts[p:p + length]) p += length if key_len < 0: keybytes = None @@ -878,7 +882,7 @@ def deserialize_safe(cls, byts, protocol_version): p += key_len key = key_type.from_binary(keybytes, inner_proto) - val_len = unpack(byts[p:p + length]) + val_len = int32_unpack(byts[p:p + length]) p += length if val_len < 0: val = None @@ -893,21 +897,26 @@ def deserialize_safe(cls, byts, protocol_version): @classmethod def serialize_safe(cls, themap, protocol_version): key_type, value_type = cls.subtypes - pack = int32_pack if protocol_version >= 3 else uint16_pack buf = io.BytesIO() - buf.write(pack(len(themap))) + buf.write(int32_pack(len(themap))) try: - items = six.iteritems(themap) + items = themap.items() except AttributeError: raise TypeError("Got a non-map object for a map value") inner_proto = max(3, protocol_version) for key, val in items: - keybytes = key_type.to_binary(key, inner_proto) - valbytes = value_type.to_binary(val, inner_proto) - buf.write(pack(len(keybytes))) - buf.write(keybytes) - buf.write(pack(len(valbytes))) - buf.write(valbytes) + if key is not None: + keybytes = key_type.to_binary(key, inner_proto) + buf.write(int32_pack(len(keybytes))) + buf.write(keybytes) + else: + buf.write(int32_pack(-1)) + if val is not None: + valbytes = value_type.to_binary(val, inner_proto) + buf.write(int32_pack(len(valbytes))) + buf.write(valbytes) + else: + buf.write(int32_pack(-1)) return buf.getvalue() @@ -972,9 +981,6 @@ class UserType(TupleType): def 
make_udt_class(cls, keyspace, udt_name, field_names, field_types): assert len(field_names) == len(field_types) - if six.PY2 and isinstance(udt_name, unicode): - udt_name = udt_name.encode('utf-8') - instance = cls._cache.get((keyspace, udt_name)) if not instance or instance.fieldnames != field_names or instance.subtypes != field_types: instance = type(udt_name, (cls,), {'subtypes': field_types, @@ -989,8 +995,6 @@ def make_udt_class(cls, keyspace, udt_name, field_names, field_types): @classmethod def evict_udt_class(cls, keyspace, udt_name): - if six.PY2 and isinstance(udt_name, unicode): - udt_name = udt_name.encode('utf-8') try: del cls._cache[(keyspace, udt_name)] except KeyError: @@ -1026,7 +1030,9 @@ def serialize_safe(cls, val, protocol_version): try: item = val[i] except TypeError: - item = getattr(val, fieldname) + item = getattr(val, fieldname, None) + if item is None and not hasattr(val, fieldname): + log.warning(f"field {fieldname} is part of the UDT {cls.typename} but is not present in the value {val}") if item is not None: packed_item = subtype.to_binary(item, proto_version) @@ -1145,7 +1151,7 @@ def serialize_safe(cls, val, protocol_version): def is_counter_type(t): - if isinstance(t, six.string_types): + if isinstance(t, str): t = lookup_casstype(t) return issubclass(t, CounterColumnType) @@ -1181,7 +1187,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) point = point_le if is_little_endian else point_be return util.Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type @@ -1198,7 +1204,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) point = point_le if is_little_endian else point_be coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count return util.LineString(coords) @@ -1227,7 +1233,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) if is_little_endian: int_fmt = '" % (cls.typename, cls.subtype.cql_parameterized_type(), cls.vector_size) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index e4f30e6a85..0a926b6eef 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six - from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t) from libc.string cimport memcpy @@ -24,8 +22,6 @@ from cassandra.buffer cimport Buffer, buf_read, to_bytes cdef bint is_little_endian from cassandra.util import is_little_endian -cdef bint PY3 = six.PY3 - ctypedef fused num_t: int64_t int32_t @@ -57,10 +53,7 @@ cdef inline num_t unpack_num(Buffer *buf, num_t *dummy=NULL): # dummy pointer be cdef varint_unpack(Buffer *term): """Unpack a variable-sized integer""" - if PY3: - return varint_unpack_py3(to_bytes(term)) - else: - return varint_unpack_py2(to_bytes(term)) + return varint_unpack_py3(to_bytes(term)) # TODO: Optimize these two functions cdef varint_unpack_py3(bytes term): @@ -70,13 +63,6 @@ cdef varint_unpack_py3(bytes term): val -= 1 << shift return val -cdef varint_unpack_py2(bytes term): # noqa - val = int(term.encode('hex'), 16) - if (ord(term[0]) & 128) != 0: - shift = len(term) * 8 # * Note below - val = val - (1 << shift) - return val - # * Note * # '1 << (len(term) * 8)' Cython tries to do native # integer shifts, which overflows. We need this to diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index ecb4a73fd4..0f042ff1c8 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -18,8 +18,7 @@ import sys import tempfile import shutil -import six -from six.moves.urllib.request import urlopen +from urllib.request import urlopen _HAS_SSL = True try: @@ -182,11 +181,9 @@ def _pyopenssl_context_from_cert(ca_cert_location, cert_location, key_location): try: from OpenSSL import SSL except ImportError as e: - six.reraise( - ImportError, - ImportError("PyOpenSSL must be installed to connect to Astra with the Eventlet or Twisted event loops"), - sys.exc_info()[2] - ) + raise ImportError( + "PyOpenSSL must be installed to connect to Astra with the Eventlet or Twisted event loops")\ + .with_traceback(e.__traceback__) ssl_context = SSL.Context(SSL.TLSv1_METHOD) ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: ok) ssl_context.use_certificate_file(cert_location) diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 44a0d136e0..92f148721e 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -257,7 +257,7 @@ def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFI session = c.connect() g = DseGraph.traversal_source(session, 'my_graph') - print g.V().valueMap().toList() + print(g.V().valueMap().toList()) """ diff --git a/cassandra/datastax/graph/fluent/_query.py b/cassandra/datastax/graph/fluent/_query.py index bd89046852..d5eb7f6373 100644 --- a/cassandra/datastax/graph/fluent/_query.py +++ b/cassandra/datastax/graph/fluent/_query.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six import logging from cassandra.graph import SimpleGraphStatement, GraphProtocol @@ -55,7 +54,7 @@ def get_serializer(self, value): if self.user_types is None: try: user_types = self.context['cluster']._user_types[self.context['graph_name']] - self.user_types = dict(map(reversed, six.iteritems(user_types))) + self.user_types = dict(map(reversed, user_types.items())) except KeyError: self.user_types = {} diff --git a/cassandra/datastax/graph/fluent/_serializers.py b/cassandra/datastax/graph/fluent/_serializers.py index db8e715ef8..83b3afb22d 100644 --- a/cassandra/datastax/graph/fluent/_serializers.py +++ b/cassandra/datastax/graph/fluent/_serializers.py @@ -14,8 +14,6 @@ from collections import OrderedDict -import six - from gremlin_python.structure.io.graphsonV2d0 import ( GraphSONReader as GraphSONReaderV2, GraphSONUtil as GraphSONUtil, # no difference between v2 and v3 @@ -175,7 +173,7 @@ def dictify(cls, p, writer): class DistanceIO(object): @classmethod def dictify(cls, v, _): - return GraphSONUtil.typedValue('Distance', six.text_type(v), prefix='dse') + return GraphSONUtil.typedValue('Distance', str(v), prefix='dse') GremlinUserTypeIO = _GremlinGraphSONTypeSerializer(UserTypeIO) @@ -183,7 +181,7 @@ def dictify(cls, v, _): # GraphSON2 dse_graphson2_serializers = OrderedDict([ (t, _GremlinGraphSONTypeSerializer(s)) - for t, s in six.iteritems(GraphSON2Serializer.get_type_definitions()) + for t, s in GraphSON2Serializer.get_type_definitions().items() ]) dse_graphson2_serializers.update(OrderedDict([ @@ -197,7 +195,7 @@ def dictify(cls, v, _): dse_graphson2_deserializers = { k: _make_gremlin_graphson2_deserializer(v) - for k, v in six.iteritems(GraphSON2Deserializer.get_type_definitions()) + for k, v in GraphSON2Deserializer.get_type_definitions().items() } dse_graphson2_deserializers.update({ @@ -228,7 +226,7 @@ def dictify(cls, v, _): # GraphSON3 dse_graphson3_serializers = OrderedDict([ (t, _GremlinGraphSONTypeSerializer(s)) - for t, s in six.iteritems(GraphSON3Serializer.get_type_definitions()) + for t, s in GraphSON3Serializer.get_type_definitions().items() ]) dse_graphson3_serializers.update(OrderedDict([ @@ -239,7 +237,7 @@ def dictify(cls, v, _): dse_graphson3_deserializers = { k: _make_gremlin_graphson3_deserializer(v) - for k, v in six.iteritems(GraphSON3Deserializer.get_type_definitions()) + for k, v in GraphSON3Deserializer.get_type_definitions().items() } dse_graphson3_deserializers.update({ diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 4b333eb1bf..335c7f7825 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -23,12 +23,7 @@ import itertools from functools import partial -import six - -try: - import ipaddress -except: - ipaddress = None +import ipaddress from cassandra.cqltypes import cql_types_from_string @@ -95,8 +90,7 @@ def graphson_type(cls): return "{0}:{1}".format(cls.prefix, cls.graphson_base_type) -@six.add_metaclass(_GraphSONTypeType) -class GraphSONTypeIO(object): +class GraphSONTypeIO(object, metaclass=_GraphSONTypeType): """Represent a serializable GraphSON type""" prefix = 'g' @@ -109,7 +103,7 @@ def definition(cls, value, writer=None): @classmethod def serialize(cls, value, writer=None): - return six.text_type(value) + return str(value) @classmethod def deserialize(cls, value, reader=None): @@ -141,7 +135,7 @@ def serialize(cls, value, writer=None): @classmethod def get_specialized_serializer(cls, value): - if type(value) in six.integer_types and (value > 
MAX_INT32 or value < MIN_INT32): + if type(value) is int and (value > MAX_INT32 or value < MIN_INT32): return Int64TypeIO return Int32TypeIO @@ -164,9 +158,7 @@ class Int64TypeIO(IntegerTypeIO): @classmethod def deserialize(cls, value, reader=None): - if six.PY3: - return value - return long(value) + return value class FloatTypeIO(GraphSONTypeIO): @@ -274,8 +266,7 @@ class BlobTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): value = base64.b64encode(value) - if six.PY3: - value = value.decode('utf-8') + value = value.decode('utf-8') return value @classmethod @@ -343,7 +334,7 @@ def deserialize(cls, value, reader=None): raise ValueError('Invalid duration: {0}'.format(value)) duration = {k: float(v) if v is not None else 0 - for k, v in six.iteritems(duration.groupdict())} + for k, v in duration.groupdict().items()} return datetime.timedelta(days=duration['days'], hours=duration['hours'], minutes=duration['minutes'], seconds=duration['seconds']) @@ -512,7 +503,7 @@ class JsonMapTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): out = {} - for k, v in six.iteritems(value): + for k, v in value.items(): out[k] = writer.serialize(v, writer) return out @@ -528,7 +519,7 @@ class MapTypeIO(GraphSONTypeIO): def definition(cls, value, writer=None): out = OrderedDict([('cqlType', cls.cql_type)]) out['definition'] = [] - for k, v in six.iteritems(value): + for k, v in value.items(): # we just need the first pair to write the def out['definition'].append(writer.definition(k)) out['definition'].append(writer.definition(v)) @@ -538,7 +529,7 @@ def definition(cls, value, writer=None): @classmethod def serialize(cls, value, writer=None): out = [] - for k, v in six.iteritems(value): + for k, v in value.items(): out.append(writer.serialize(k, writer)) out.append(writer.serialize(v, writer)) @@ -841,16 +832,10 @@ class GraphSON1Serializer(_BaseGraphSONSerializer): ]) -if ipaddress: - GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) - GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) - -if six.PY2: - GraphSON1Serializer.register(buffer, ByteBufferTypeIO) - GraphSON1Serializer.register(unicode, TextTypeIO) -else: - GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) - GraphSON1Serializer.register(bytes, ByteBufferTypeIO) +GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) +GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) +GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) +GraphSON1Serializer.register(bytes, ByteBufferTypeIO) class _BaseGraphSONDeserializer(object): @@ -922,9 +907,7 @@ def deserialize_int(cls, value): @classmethod def deserialize_bigint(cls, value): - if six.PY3: - return cls.deserialize_int(value) - return long(value) + return cls.deserialize_int(value) @classmethod def deserialize_double(cls, value): @@ -1007,8 +990,6 @@ def serialize(self, value, writer=None): GraphSON2Serializer.register(int, IntegerTypeIO) -if six.PY2: - GraphSON2Serializer.register(long, IntegerTypeIO) class GraphSON2Deserializer(_BaseGraphSONDeserializer): @@ -1055,7 +1036,7 @@ def deserialize(self, obj): except KeyError: pass # list and map are treated as normal json objs (could be isolated deserializers) - return {self.deserialize(k): self.deserialize(v) for k, v in six.iteritems(obj)} + return {self.deserialize(k): self.deserialize(v) for k, v in obj.items()} elif isinstance(obj, list): return [self.deserialize(o) for o in obj] else: @@ -1109,7 +1090,7 @@ def get_serializer(self, value): if 
self.user_types is None: try: user_types = self.context['cluster']._user_types[self.context['graph_name']] - self.user_types = dict(map(reversed, six.iteritems(user_types))) + self.user_types = dict(map(reversed, user_types.items())) except KeyError: self.user_types = {} diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 7c0e265dbf..866df7a94c 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -15,8 +15,6 @@ import json from warnings import warn -import six - from cassandra import ConsistencyLevel from cassandra.query import Statement, SimpleStatement from cassandra.datastax.graph.types import Vertex, Edge, Path, VertexProperty @@ -77,7 +75,7 @@ def __init__(self, **kwargs): self._graph_options = {} kwargs.setdefault('graph_source', 'g') kwargs.setdefault('graph_language', GraphOptions.DEFAULT_GRAPH_LANGUAGE) - for attr, value in six.iteritems(kwargs): + for attr, value in kwargs.items(): if attr not in _graph_option_names: warn("Unknown keyword argument received for GraphOptions: {0}".format(attr)) setattr(self, attr, value) @@ -103,7 +101,7 @@ def get_options_map(self, other_options=None): for cl in ('graph-write-consistency', 'graph-read-consistency'): cl_enum = options.get(cl) if cl_enum is not None: - options[cl] = six.b(ConsistencyLevel.value_to_name[cl_enum]) + options[cl] = ConsistencyLevel.value_to_name[cl_enum].encode() return options def set_source_default(self): @@ -157,8 +155,8 @@ def get(self, key=opt[2]): def set(self, value, key=opt[2]): if value is not None: # normalize text here so it doesn't have to be done every time we get options map - if isinstance(value, six.text_type) and not isinstance(value, six.binary_type): - value = six.b(value) + if isinstance(value, str): + value = value.encode() self._graph_options[key] = value else: self._graph_options.pop(key, None) @@ -278,7 +276,7 @@ def __getattr__(self, attr): raise AttributeError("Result has no top-level attribute %r" % (attr,)) def __getitem__(self, item): - if isinstance(self.value, dict) and isinstance(item, six.string_types): + if isinstance(self.value, dict) and isinstance(item, str): return self.value[item] elif isinstance(self.value, list) and isinstance(item, int): return self.value[item] diff --git a/cassandra/datastax/insights/registry.py b/cassandra/datastax/insights/registry.py index 3dd1d255ae..03daebd86e 100644 --- a/cassandra/datastax/insights/registry.py +++ b/cassandra/datastax/insights/registry.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six from collections import OrderedDict from warnings import warn @@ -59,7 +58,7 @@ def _get_serializer(self, cls): try: return self._mapping_dict[cls] except KeyError: - for registered_cls, serializer in six.iteritems(self._mapping_dict): + for registered_cls, serializer in self._mapping_dict.items(): if issubclass(cls, registered_cls): return self._mapping_dict[registered_cls] raise ValueError diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index b05a88deb0..83205fc458 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -24,7 +24,6 @@ import sys from threading import Event, Thread import time -import six from cassandra.policies import HostDistance from cassandra.util import ms_timestamp_from_datetime @@ -199,9 +198,9 @@ def _get_startup_data(self): }, 'platformInfo': { 'os': { - 'name': uname_info.system if six.PY3 else uname_info[0], - 'version': uname_info.release if six.PY3 else uname_info[2], - 'arch': uname_info.machine if six.PY3 else uname_info[4] + 'name': uname_info.system, + 'version': uname_info.release, + 'arch': uname_info.machine }, 'cpus': { 'length': multiprocessing.cpu_count(), diff --git a/cassandra/datastax/insights/serializers.py b/cassandra/datastax/insights/serializers.py index aec4467a6a..289c165e8a 100644 --- a/cassandra/datastax/insights/serializers.py +++ b/cassandra/datastax/insights/serializers.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - def initialize_registry(insights_registry): # This will be called from the cluster module, so we put all this behavior @@ -203,8 +201,8 @@ def graph_options_insights_serializer(options): 'language': options.graph_language, 'graphProtocol': options.graph_protocol } - updates = {k: v.decode('utf-8') for k, v in six.iteritems(rv) - if isinstance(v, six.binary_type)} + updates = {k: v.decode('utf-8') for k, v in rv.items() + if isinstance(v, bytes)} rv.update(updates) return rv diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 7de6949099..7c256674b0 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -29,8 +29,6 @@ from uuid import UUID from cassandra import cqltypes from cassandra import util -cdef bint PY2 = six.PY2 - cdef class Deserializer: """Cython-based deserializer class for a cqltype""" @@ -90,8 +88,6 @@ cdef class DesAsciiType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): if buf.size == 0: return "" - if PY2: - return to_bytes(buf) return to_bytes(buf).decode('ascii') diff --git a/cassandra/encoder.py b/cassandra/encoder.py index f2c3f8dfed..e834550fd3 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -21,34 +21,22 @@ log = logging.getLogger(__name__) from binascii import hexlify +from decimal import Decimal import calendar import datetime import math import sys import types from uuid import UUID -import six +import ipaddress from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date, Point, LineString, Polygon) -if six.PY3: - import ipaddress - -if six.PY3: - long = int - def cql_quote(term): - # The ordering of this method is important for the result of this method to - # be a native str type (for both Python 2 and 3) - if isinstance(term, str): return "'%s'" % str(term).replace("'", "''") - # This branch of the if statement will only be used by Python 2 to catch - # unicode strings, 
text_type is used to prevent type errors with Python 3. - elif isinstance(term, six.text_type): - return "'%s'" % term.encode('utf8').replace("'", "''") else: return str(term) @@ -72,6 +60,7 @@ class Encoder(object): def __init__(self): self.mapping = { float: self.cql_encode_float, + Decimal: self.cql_encode_decimal, bytearray: self.cql_encode_bytes, str: self.cql_encode_str, int: self.cql_encode_object, @@ -97,21 +86,13 @@ def __init__(self): Polygon: self.cql_encode_str_quoted } - if six.PY2: - self.mapping.update({ - unicode: self.cql_encode_unicode, - buffer: self.cql_encode_bytes, - long: self.cql_encode_object, - types.NoneType: self.cql_encode_none, - }) - else: - self.mapping.update({ - memoryview: self.cql_encode_bytes, - bytes: self.cql_encode_bytes, - type(None): self.cql_encode_none, - ipaddress.IPv4Address: self.cql_encode_ipaddress, - ipaddress.IPv6Address: self.cql_encode_ipaddress - }) + self.mapping.update({ + memoryview: self.cql_encode_bytes, + bytes: self.cql_encode_bytes, + type(None): self.cql_encode_none, + ipaddress.IPv4Address: self.cql_encode_ipaddress, + ipaddress.IPv6Address: self.cql_encode_ipaddress + }) def cql_encode_none(self, val): """ @@ -134,16 +115,8 @@ def cql_encode_str(self, val): def cql_encode_str_quoted(self, val): return "'%s'" % val - if six.PY3: - def cql_encode_bytes(self, val): - return (b'0x' + hexlify(val)).decode('utf-8') - elif sys.version_info >= (2, 7): - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(val) - else: - # python 2.6 requires string or read-only buffer for hexlify - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(buffer(val)) + def cql_encode_bytes(self, val): + return (b'0x' + hexlify(val)).decode('utf-8') def cql_encode_object(self, val): """ @@ -169,7 +142,7 @@ def cql_encode_datetime(self, val): with millisecond precision. """ timestamp = calendar.timegm(val.utctimetuple()) - return str(long(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) + return str(int(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) def cql_encode_date(self, val): """ @@ -214,7 +187,7 @@ def cql_encode_map_collection(self, val): return '{%s}' % ', '.join('%s: %s' % ( self.mapping.get(type(k), self.cql_encode_object)(k), self.mapping.get(type(v), self.cql_encode_object)(v) - ) for k, v in six.iteritems(val)) + ) for k, v in val.items()) def cql_encode_list_collection(self, val): """ @@ -236,14 +209,16 @@ def cql_encode_all_types(self, val, as_text_type=False): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ encoded = self.mapping.get(type(val), self.cql_encode_object)(val) - if as_text_type and not isinstance(encoded, six.text_type): + if as_text_type and not isinstance(encoded, str): return encoded.decode('utf-8') return encoded - if six.PY3: - def cql_encode_ipaddress(self, val): - """ - Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This - is suitable for ``inet`` type columns. - """ - return "'%s'" % val.compressed + def cql_encode_ipaddress(self, val): + """ + Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This + is suitable for ``inet`` type columns. 
+ """ + return "'%s'" % val.compressed + + def cql_encode_decimal(self, val): + return self.cql_encode_float(float(val)) \ No newline at end of file diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 7cb0444a32..41b744602d 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -1,5 +1,7 @@ -from cassandra.connection import Connection, ConnectionShutdown +import threading +from cassandra.connection import Connection, ConnectionShutdown +import sys import asyncio import logging import os @@ -41,14 +43,12 @@ def end(self): def __init__(self, timeout, callback, loop): delayed = self._call_delayed_coro(timeout=timeout, - callback=callback, - loop=loop) + callback=callback) self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) @staticmethod - @asyncio.coroutine - def _call_delayed_coro(timeout, callback, loop): - yield from asyncio.sleep(timeout, loop=loop) + async def _call_delayed_coro(timeout, callback): + await asyncio.sleep(timeout) return callback() def __lt__(self, other): @@ -87,12 +87,15 @@ class AsyncioConnection(Connection): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self._background_tasks = set() self._connect_socket() self._socket.setblocking(0) - - self._write_queue = asyncio.Queue(loop=self._loop) - self._write_queue_lock = asyncio.Lock(loop=self._loop) + loop_args = dict() + if sys.version_info[0] == 3 and sys.version_info[1] < 10: + loop_args['loop'] = self._loop + self._write_queue = asyncio.Queue(**loop_args) + self._write_queue_lock = asyncio.Lock(**loop_args) # see initialize_reactor -- loop is running in a separate thread, so we # have to use a threadsafe call @@ -104,16 +107,23 @@ def __init__(self, *args, **kwargs): ) self._send_options_message() + + @classmethod def initialize_reactor(cls): with cls._lock: if cls._pid != os.getpid(): + # This means that class was passed to another process, + # e.g. using multiprocessing. + # In such case the class instance will be different and passing + # tasks to loop thread won't work. 
+ # To fix we need to re-initialize the class cls._loop = None + cls._loop_thread = None + cls._pid = os.getpid() if cls._loop is None: + assert cls._loop_thread is None cls._loop = asyncio.new_event_loop() - asyncio.set_event_loop(cls._loop) - - if not cls._loop_thread: # daemonize so the loop will be shut down on interpreter # shutdown cls._loop_thread = Thread(target=cls._loop.run_forever, @@ -136,8 +146,7 @@ def close(self): self._close(), loop=self._loop ) - @asyncio.coroutine - def _close(self): + async def _close(self): log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) if self._write_watcher: self._write_watcher.cancel() @@ -165,30 +174,31 @@ def push(self, data): else: chunks = [data] - if self._loop_thread.ident != get_ident(): + if self._loop_thread != threading.current_thread(): asyncio.run_coroutine_threadsafe( self._push_msg(chunks), loop=self._loop ) else: # avoid races/hangs by just scheduling this, not using threadsafe - self._loop.create_task(self._push_msg(chunks)) + task = self._loop.create_task(self._push_msg(chunks)) + + self._background_tasks.add(task) + task.add_done_callback(self._background_tasks.discard) - @asyncio.coroutine - def _push_msg(self, chunks): + async def _push_msg(self, chunks): # This lock ensures all chunks of a message are sequential in the Queue - with (yield from self._write_queue_lock): + async with self._write_queue_lock: for chunk in chunks: self._write_queue.put_nowait(chunk) - @asyncio.coroutine - def handle_write(self): + async def handle_write(self): while True: try: - next_msg = yield from self._write_queue.get() + next_msg = await self._write_queue.get() if next_msg: - yield from self._loop.sock_sendall(self._socket, next_msg) + await self._loop.sock_sendall(self._socket, next_msg) except socket.error as err: log.debug("Exception in send for %s: %s", self, err) self.defunct(err) @@ -196,18 +206,19 @@ def handle_write(self): except asyncio.CancelledError: return - @asyncio.coroutine - def handle_read(self): + async def handle_read(self): while True: try: - buf = yield from self._loop.sock_recv(self._socket, self.in_buffer_size) + buf = await self._loop.sock_recv(self._socket, self.in_buffer_size) self._iobuf.write(buf) # sock_recv expects EWOULDBLOCK if socket provides no data, but # nonblocking ssl sockets raise these instead, so we handle them # ourselves by yielding to the event loop, where the socket will # get the reading/writing it "wants" before retrying except (ssl.SSLWantWriteError, ssl.SSLWantReadError): - yield + # Apparently the preferred way to yield to the event loop from within + # a native coroutine based on https://github.com/python/asyncio/issues/284 + await asyncio.sleep(0) continue except socket.error as err: log.debug("Exception during socket recv for %s: %s", diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index a8cf380a7a..2c75e7139d 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -24,14 +24,20 @@ import sys import ssl -from six.moves import range - try: from weakref import WeakSet except ImportError: from cassandra.util import WeakSet # noqa -import asyncore +from cassandra import DependencyException +try: + import asyncore +except ModuleNotFoundError: + raise DependencyException( + "Unable to import asyncore module. Note that this module has been removed in Python 3.12 " + "so when using the driver with this version (or anything newer) you will need to use one of the " + "other event loop implementations." 
+ ) from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager @@ -249,18 +255,21 @@ def _run_loop(self): try: self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() - except Exception: - try: - log.debug("Asyncore event loop stopped unexpectedly", exc_info=True) - except Exception: - # TODO: Remove when Python 2 support is removed - # PYTHON-1266. If our logger has disappeared, there's nothing we - # can do, so just log nothing. - pass + except Exception as exc: + self._maybe_log_debug("Asyncore event loop stopped unexpectedly", exc_info=exc) break self._started = False - log.debug("Asyncore event loop ended") + self._maybe_log_debug("Asyncore event loop ended") + + def _maybe_log_debug(self, *args, **kwargs): + try: + log.debug(*args, **kwargs) + except Exception: + # TODO: Remove when Python 2 support is removed + # PYTHON-1266. If our logger has disappeared, there's nothing we + # can do, so just log nothing. + pass def add_timer(self, timer): self._timers.add_timer(timer) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 162661f468..42874036d5 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -23,8 +23,6 @@ from threading import Event import time -from six.moves import xrange - from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager try: from eventlet.green.OpenSSL import SSL @@ -190,5 +188,5 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in xrange(0, len(data), chunk_size): + for i in range(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index ebc664d485..4f1f158aa7 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -20,7 +20,6 @@ import logging import time -from six.moves import range from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 54e2d0de03..58c876fdcc 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -21,14 +21,11 @@ from threading import Lock, Thread import time -from six.moves import range - -from cassandra.connection import (Connection, ConnectionShutdown, - NONBLOCKING, Timer, TimerManager) +from cassandra import DependencyException try: import cassandra.io.libevwrapper as libev except ImportError: - raise ImportError( + raise DependencyException( "The C extension needed to use libev was not found. This " "probably means that you didn't have the required build dependencies " "when installing the driver. 
See " @@ -36,6 +33,9 @@ "for instructions on installing build dependencies and building " "the C extension.") +from cassandra.connection import (Connection, ConnectionShutdown, + NONBLOCKING, Timer, TimerManager) + log = logging.getLogger(__name__) @@ -165,6 +165,10 @@ def connection_created(self, conn): def connection_destroyed(self, conn): with self._conn_set_lock: + new_conns = self._new_conns.copy() + new_conns.discard(conn) + self._new_conns = new_conns + new_live_conns = self._live_conns.copy() new_live_conns.discard(conn) self._live_conns = new_live_conns @@ -194,7 +198,8 @@ def _loop_will_run(self, prepare): self._new_conns = set() for conn in to_start: - conn._read_watcher.start() + if conn._read_watcher: + conn._read_watcher.start() changed = True @@ -294,6 +299,7 @@ def close(self): if not self.is_defunct: self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.endpoint)) + self.connected_event.set() def handle_write(self, watcher, revents, errno=None): if revents & libev.EV_ERROR: diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 99e1df30f7..f32504fa34 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -1,3 +1,5 @@ +#pragma comment(lib, "Ws2_32.Lib") + #include #include @@ -665,9 +667,6 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) INITERROR; - if (!PyEval_ThreadsInitialized()) { - PyEval_InitThreads(); - } #if PY_MAJOR_VERSION >= 3 return module; diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 9b3ff09398..e4605a7446 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -102,6 +102,9 @@ def maybe_start(self): self._thread.start() atexit.register(partial(_cleanup, weakref.ref(self))) + def _reactor_stopped(self): + return reactor._stopped + def _cleanup(self): if self._thread: reactor.callFromThread(reactor.stop) diff --git a/tests/integration/cqlengine/advanced/__init__.py b/cassandra/lwt_info.py similarity index 60% rename from tests/integration/cqlengine/advanced/__init__.py rename to cassandra/lwt_info.py index 386372eb4a..d64c08bbcf 100644 --- a/tests/integration/cqlengine/advanced/__init__.py +++ b/cassandra/lwt_info.py @@ -1,4 +1,4 @@ -# Copyright DataStax, Inc. +# Copyright 2020 ScyllaDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,3 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +class _LwtInfo: + """ + Holds LWT-related information parsed from the server's supported features. + """ + + def __init__(self, lwt_meta_bit_mask): + self.lwt_meta_bit_mask = lwt_meta_bit_mask + + def get_lwt_flag(self, flags): + return (flags & self.lwt_meta_bit_mask) == self.lwt_meta_bit_mask diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 43cb627b08..413e1831d4 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six import struct @@ -34,46 +33,22 @@ def _make_packer(format_string): float_pack, float_unpack = _make_packer('>f') double_pack, double_unpack = _make_packer('>d') -# Special case for cassandra header -header_struct = struct.Struct('>BBbB') -header_pack = header_struct.pack -header_unpack = header_struct.unpack - # in protocol version 3 and higher, the stream ID is two bytes v3_header_struct = struct.Struct('>BBhB') v3_header_pack = v3_header_struct.pack v3_header_unpack = v3_header_struct.unpack -if six.PY3: - def byte2int(b): - return b - - - def varint_unpack(term): - val = int(''.join("%02x" % i for i in term), 16) - if (term[0] & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val -= 1 << (len_term * 8) - return val -else: - def byte2int(b): - return ord(b) - - - def varint_unpack(term): # noqa - val = int(term.encode('hex'), 16) - if (ord(term[0]) & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val = val - (1 << (len_term * 8)) - return val +def varint_unpack(term): + val = int(''.join("%02x" % i for i in term), 16) + if (term[0] & 128) != 0: + len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code + val -= 1 << (len_term * 8) + return val def bit_length(n): - if six.PY3 or isinstance(n, int): - return int.bit_length(n) - else: - return long.bit_length(n) + return int.bit_length(n) def varint_pack(big): @@ -91,7 +66,7 @@ def varint_pack(big): if pos and revbytes[-1] & 0x80: revbytes.append(0) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) point_be = struct.Struct('>dd') @@ -113,7 +88,7 @@ def vints_unpack(term): # noqa values = [] n = 0 while n < len(term): - first_byte = byte2int(term[n]) + first_byte = term[n] if (first_byte & 128) == 0: val = first_byte @@ -124,14 +99,13 @@ def vints_unpack(term): # noqa while n < end: n += 1 val <<= 8 - val |= byte2int(term[n]) & 0xff + val |= term[n] & 0xff n += 1 values.append(decode_zig_zag(val)) return tuple(values) - def vints_pack(values): revbytes = bytearray() values = [int(v) for v in values[::-1]] @@ -162,4 +136,49 @@ def vints_pack(values): revbytes.append(abs(v)) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) + +def uvint_unpack(bytes): + first_byte = bytes[0] + + if (first_byte & 128) == 0: + return (first_byte,1) + + num_extra_bytes = 8 - (~first_byte & 0xff).bit_length() + rv = first_byte & (0xff >> num_extra_bytes) + for idx in range(1,num_extra_bytes + 1): + new_byte = bytes[idx] + rv <<= 8 + rv |= new_byte & 0xff + + return (rv, num_extra_bytes + 1) + +def uvint_pack(val): + rv = bytearray() + if val < 128: + rv.append(val) + else: + v = val + num_extra_bytes = 0 + num_bits = v.bit_length() + # We need to reserve (num_extra_bytes+1) bits in the first byte + # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # ie. 
with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + reserved_bits = num_extra_bytes + 1 + while num_bits > (8-(reserved_bits)): + num_extra_bytes += 1 + num_bits -= 8 + reserved_bits = min(num_extra_bytes + 1, 8) + rv.append(v & 0xff) + v >>= 8 + + if num_extra_bytes > 8: + raise ValueError('Value %d is too big and cannot be encoded as vint' % val) + + # We can now store the last bits in the first byte + n = 8 - num_extra_bytes + v |= (0xff >> n << n) + rv.append(abs(v)) + + rv.reverse() + return bytes(rv) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 83beb6190c..bbfaf2605b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -15,17 +15,18 @@ from binascii import unhexlify from bisect import bisect_left from collections import defaultdict +from collections.abc import Mapping from functools import total_ordering from hashlib import md5 import json import logging import re -import six -from six.moves import zip import sys from threading import RLock import struct import random +import itertools +from typing import Optional murmur3 = None try: @@ -42,22 +43,23 @@ from cassandra.util import OrderedDict, Version from cassandra.pool import HostDistance from cassandra.connection import EndPoint -from cassandra.compat import Mapping +from cassandra.tablets import Tablets +from cassandra.util import maybe_add_timeout_to_query log = logging.getLogger(__name__) cql_keywords = set(( 'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin', - 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', + 'bigint', 'blob', 'boolean', 'by', 'cast', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', 'counter', 'create', 'custom', 'date', 'decimal', 'default', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'mbean', 'mbeans', 'modify', 'monotonic', 'nan', 'nologin', 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', - 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', - 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', - 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'unset', 'update', 'use', 'user', - 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', + 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'scylla_clustering_bound', + 'scylla_counter_shard_list', 'scylla_timeuuid_list_index', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', + 'table', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', + 'unset', 'update', 'use', 'user', 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', # DSE specifics "node", "nodes", "plan", "active", "application", "applications", "java", "executor", "executors", "std_out", "std_err", @@ 
-123,7 +125,9 @@ def __init__(self): self.keyspaces = {} self.dbaas = False self._hosts = {} + self._host_id_by_endpoint = {} self._hosts_lock = RLock() + self._tablets = Tablets({}) def export_schema_as_string(self): """ @@ -132,11 +136,12 @@ def export_schema_as_string(self): """ return "\n\n".join(ks.export_as_string() for ks in self.keyspaces.values()) - def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): + def refresh(self, connection, timeout, target_type=None, change_type=None, fetch_size=None, + metadata_request_timeout=None, **kwargs): - server_version = self.get_host(connection.endpoint).release_version - dse_version = self.get_host(connection.endpoint).dse_version - parser = get_schema_parser(connection, server_version, dse_version, timeout) + server_version = self.get_host(connection.original_endpoint).release_version + dse_version = self.get_host(connection.original_endpoint).dse_version + parser = get_schema_parser(connection, server_version, dse_version, timeout, metadata_request_timeout, fetch_size) if not target_type: self._rebuild_all(parser) @@ -148,12 +153,7 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa meta = parse_method(self.keyspaces, **kwargs) if meta: update_method = getattr(self, '_update_' + tt_lower) - if tt_lower == 'keyspace' and connection.protocol_version < 3: - # we didn't have 'type' target in legacy protocol versions, so we need to query those too - user_types = parser.get_types_map(self.keyspaces, **kwargs) - self._update_keyspace(meta, user_types) - else: - update_method(meta) + update_method(meta) else: drop_method = getattr(self, '_drop_' + tt_lower) drop_method(**kwargs) @@ -164,10 +164,13 @@ def _rebuild_all(self, parser): current_keyspaces = set() for keyspace_meta in parser.get_all_keyspaces(): current_keyspaces.add(keyspace_meta.name) - old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) + old_keyspace_meta: Optional[KeyspaceMetadata] = self.keyspaces.get(keyspace_meta.name, None) self.keyspaces[keyspace_meta.name] = keyspace_meta if old_keyspace_meta: self._keyspace_updated(keyspace_meta.name) + for table_name in old_keyspace_meta.tables.keys(): + if table_name not in keyspace_meta.tables: + self._table_removed(keyspace_meta.name, table_name) else: self._keyspace_added(keyspace_meta.name) @@ -261,6 +264,9 @@ def _drop_aggregate(self, keyspace, aggregate): except KeyError: pass + def _table_removed(self, keyspace, table): + self._tablets.drop_tablets(keyspace, table) + def _keyspace_added(self, ksname): if self.token_map: self.token_map.rebuild_keyspace(ksname, build_if_absent=False) @@ -268,10 +274,12 @@ def _keyspace_added(self, ksname): def _keyspace_updated(self, ksname): if self.token_map: self.token_map.rebuild_keyspace(ksname, build_if_absent=False) + self._tablets.drop_tablets(ksname) def _keyspace_removed(self, ksname): if self.token_map: self.token_map.remove_keyspace(ksname) + self._tablets.drop_tablets(ksname) def rebuild_token_map(self, partitioner, token_map): """ @@ -292,7 +300,7 @@ def rebuild_token_map(self, partitioner, token_map): token_to_host_owner = {} ring = [] - for host, token_strings in six.iteritems(token_map): + for host, token_strings in token_map.items(): for token_string in token_strings: token = token_class.from_string(token_string) ring.append(token) @@ -329,14 +337,30 @@ def add_or_return_host(self, host): """ with self._hosts_lock: try: - return self._hosts[host.endpoint], False + return self._hosts[host.host_id], False except 
KeyError: - self._hosts[host.endpoint] = host + self._host_id_by_endpoint[host.endpoint] = host.host_id + self._hosts[host.host_id] = host return host, True def remove_host(self, host): + self._tablets.drop_tablets_by_host_id(host.host_id) + with self._hosts_lock: + self._host_id_by_endpoint.pop(host.endpoint, False) + return bool(self._hosts.pop(host.host_id, False)) + + def remove_host_by_host_id(self, host_id, endpoint=None): + self._tablets.drop_tablets_by_host_id(host_id) with self._hosts_lock: - return bool(self._hosts.pop(host.endpoint, False)) + if endpoint and self._host_id_by_endpoint[endpoint] == host_id: + self._host_id_by_endpoint.pop(endpoint, False) + return bool(self._hosts.pop(host_id, False)) + + def update_host(self, host, old_endpoint): + host, created = self.add_or_return_host(host) + with self._hosts_lock: + self._host_id_by_endpoint.pop(old_endpoint, False) + self._host_id_by_endpoint[host.endpoint] = host.host_id def get_host(self, endpoint_or_address, port=None): """ @@ -344,13 +368,22 @@ def get_host(self, endpoint_or_address, port=None): iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` and :attr:`~.pool.Host.broadcast_rpc_port` attributes. """ - if not isinstance(endpoint_or_address, EndPoint): - return self._get_host_by_address(endpoint_or_address, port) + with self._hosts_lock: + if not isinstance(endpoint_or_address, EndPoint): + return self._get_host_by_address(endpoint_or_address, port) - return self._hosts.get(endpoint_or_address) + host_id = self._host_id_by_endpoint.get(endpoint_or_address) + return self._hosts.get(host_id) + + def get_host_by_host_id(self, host_id): + """ + Same as get_host() but use host_id for lookup. + """ + with self._hosts_lock: + return self._hosts.get(host_id) def _get_host_by_address(self, address, port=None): - for host in six.itervalues(self._hosts): + for host in self._hosts.values(): if (host.broadcast_rpc_address == address and (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host @@ -364,6 +397,10 @@ def all_hosts(self): with self._hosts_lock: return list(self._hosts.values()) + def all_hosts_items(self): + with self._hosts_lock: + return list(self._hosts.items()) + REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." 
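Regarding the Metadata changes above: hosts are now stored keyed by host_id, with a secondary endpoint-to-host_id index so address-based lookups (get_host) and address changes (update_host) keep working. A minimal sketch of the same two-map pattern (a standalone illustration, not driver API):

    from threading import RLock

    class HostRegistry:
        # Toy version of the two-map bookkeeping Metadata now uses.
        def __init__(self):
            self._lock = RLock()
            self._hosts = {}                # host_id -> host
            self._host_id_by_endpoint = {}  # endpoint -> host_id

        def add_or_return(self, host):
            with self._lock:
                if host.host_id in self._hosts:
                    return self._hosts[host.host_id], False
                self._host_id_by_endpoint[host.endpoint] = host.host_id
                self._hosts[host.host_id] = host
                return host, True

        def update_endpoint(self, host, old_endpoint):
            # A host keeps its host_id when its address changes; only the index moves.
            with self._lock:
                self._host_id_by_endpoint.pop(old_endpoint, None)
                self._host_id_by_endpoint[host.endpoint] = host.host_id

        def get_by_endpoint(self, endpoint):
            with self._lock:
                return self._hosts.get(self._host_id_by_endpoint.get(endpoint))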
@@ -387,8 +424,7 @@ def __new__(metacls, name, bases, dct): -@six.add_metaclass(ReplicationStrategyTypeType) -class _ReplicationStrategy(object): +class _ReplicationStrategy(object, metaclass=ReplicationStrategyTypeType): options_map = None @classmethod @@ -533,10 +569,11 @@ def __init__(self, options_map): def make_token_replica_map(self, token_to_host_owner, ring): replica_map = {} - for i in range(len(ring)): + ring_len = len(ring) + for i in range(ring_len): j, hosts = 0, list() - while len(hosts) < self.replication_factor and j < len(ring): - token = ring[(i + j) % len(ring)] + while len(hosts) < self.replication_factor and j < ring_len: + token = ring[(i + j) % ring_len] host = token_to_host_owner[token] if host not in hosts: hosts.append(host) @@ -593,10 +630,14 @@ def make_token_replica_map(self, token_to_host_owner, ring): hosts_per_dc = defaultdict(set) for i, token in enumerate(ring): host = token_to_host_owner[token] - dc_to_token_offset[host.datacenter].append(i) - if host.datacenter and host.rack: - dc_racks[host.datacenter].add(host.rack) - hosts_per_dc[host.datacenter].add(host) + host_dc = host.datacenter + if host_dc in dc_rf_map: + # if the host is in a DC that has a replication factor, add it + # to the list of token offsets for that DC + dc_to_token_offset[host_dc].append(i) + if host.rack: + dc_racks[host_dc].add(host.rack) + hosts_per_dc[host_dc].add(host) # A map of DCs to an index into the dc_to_token_offset value for that dc. # This is how we keep track of advancing around the ring for each DC. @@ -608,8 +649,6 @@ def make_token_replica_map(self, token_to_host_owner, ring): # go through each DC and find the replicas in that DC for dc in dc_to_token_offset.keys(): - if dc not in dc_rf_map: - continue # advance our per-DC index until we're up to at least the # current token in the ring @@ -621,34 +660,34 @@ def make_token_replica_map(self, token_to_host_owner, ring): dc_to_current_index[dc] = index replicas_remaining = dc_rf_map[dc] - replicas_this_dc = 0 + num_replicas_this_dc = 0 skipped_hosts = [] racks_placed = set() - racks_this_dc = dc_racks[dc] - hosts_this_dc = len(hosts_per_dc[dc]) + num_racks_this_dc = len(dc_racks[dc]) + num_hosts_this_dc = len(hosts_per_dc[dc]) - for token_offset_index in six.moves.range(index, index+num_tokens): - if token_offset_index >= len(token_offsets): - token_offset_index = token_offset_index - len(token_offsets) + for token_offset_index in range(index, index+num_tokens): + if replicas_remaining == 0 or num_replicas_this_dc == num_hosts_this_dc: + break + + if token_offset_index >= num_tokens: + token_offset_index = token_offset_index - num_tokens token_offset = token_offsets[token_offset_index] host = token_to_host_owner[ring[token_offset]] - if replicas_remaining == 0 or replicas_this_dc == hosts_this_dc: - break - if host in replicas: continue - if host.rack in racks_placed and len(racks_placed) < len(racks_this_dc): + if host.rack in racks_placed and len(racks_placed) < num_racks_this_dc: skipped_hosts.append(host) continue replicas.append(host) - replicas_this_dc += 1 + num_replicas_this_dc += 1 replicas_remaining -= 1 racks_placed.add(host.rack) - if len(racks_placed) == len(racks_this_dc): + if len(racks_placed) == num_racks_this_dc: for host in skipped_hosts: if replicas_remaining == 0: break @@ -854,7 +893,7 @@ def _add_table_metadata(self, table_metadata): # note the intentional order of add before remove # this makes sure the maps are never absent something that existed before this update - for index_name, 
index_metadata in six.iteritems(table_metadata.indexes): + for index_name, index_metadata in table_metadata.indexes.items(): self.indexes[index_name] = index_metadata for index_name in (n for n in old_indexes if n not in table_metadata.indexes): @@ -1341,7 +1380,7 @@ def _all_as_cql(self): if self.extensions: registry = _RegisteredExtensionType._extension_registry - for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: @@ -1557,8 +1596,7 @@ def __new__(mcs, name, bases, dct): return cls -@six.add_metaclass(_RegisteredExtensionType) -class RegisteredTableExtension(TableExtensionInterface): +class RegisteredTableExtension(TableExtensionInterface, metaclass=_RegisteredExtensionType): """ Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map). """ @@ -1854,7 +1892,7 @@ def hash_fn(cls, key): def __init__(self, token): """ `token` is an int or string representing the token. """ - self.value = int(token) + super().__init__(int(token)) class MD5Token(HashToken): @@ -1864,7 +1902,7 @@ class MD5Token(HashToken): @classmethod def hash_fn(cls, key): - if isinstance(key, six.text_type): + if isinstance(key, str): key = key.encode('UTF-8') return abs(varint_unpack(md5(key).digest())) @@ -1878,7 +1916,7 @@ class BytesToken(Token): def from_string(cls, token_string): """ `token_string` should be the string representation from the server. """ # unhexlify works fine with unicode input in everythin but pypy3, where it Raises "TypeError: 'str' does not support the buffer interface" - if isinstance(token_string, six.text_type): + if isinstance(token_string, str): token_string = token_string.encode('ascii') # The BOP stores a hex string return cls(unhexlify(token_string)) @@ -1919,12 +1957,13 @@ def export_as_string(self): class _SchemaParser(object): - - def __init__(self, connection, timeout): + def __init__(self, connection, timeout, fetch_size, metadata_request_timeout): self.connection = connection self.timeout = timeout + self.fetch_size = fetch_size + self.metadata_request_timeout = metadata_request_timeout - def _handle_results(self, success, result, expected_failures=tuple()): + def _handle_results(self, success, result, expected_failures=tuple(), query_msg=None, timeout=None): """ Given a bool and a ResultSet (the form returned per result from Connection.wait_for_responses), return a dictionary containing the @@ -1945,9 +1984,28 @@ def _handle_results(self, success, result, expected_failures=tuple()): query failed, but raised an instance of an expected failure class, this will ignore the failure and return an empty list. 
""" + timeout = timeout or self.timeout if not success and isinstance(result, expected_failures): return [] elif success: + if result.paging_state and query_msg: + def get_next_pages(): + next_result = None + while True: + query_msg.paging_state = next_result.paging_state if next_result else result.paging_state + next_success, next_result = self.connection.wait_for_response(query_msg, timeout=timeout, + fail_on_error=False) + if not next_success and isinstance(next_result, expected_failures): + continue + elif not next_success: + raise next_result + if not next_result.paging_state: + if next_result.parsed_rows: + yield next_result.parsed_rows + break + yield next_result.parsed_rows + + result.parsed_rows += itertools.chain(*get_next_pages()) return dict_factory(result.column_names, result.parsed_rows) if result else [] else: raise result @@ -1957,17 +2015,14 @@ def _query_build_row(self, query_string, build_func): return result[0] if result else None def _query_build_rows(self, query_string, build_func): - query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) + query = QueryMessage(query=maybe_add_timeout_to_query(query_string, self.metadata_request_timeout), + consistency_level=ConsistencyLevel.ONE, fetch_size=self.fetch_size) responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False) (success, response) = responses[0] - if success: - result = dict_factory(response.column_names, response.parsed_rows) - return [build_func(row) for row in result] - elif isinstance(response, InvalidRequest): + results = self._handle_results(success, response, expected_failures=(InvalidRequest), query_msg=query) + if not results: log.debug("user types table not found") - return [] - else: - raise response + return [build_func(row) for row in results] class SchemaParserV22(_SchemaParser): @@ -2011,8 +2066,8 @@ class SchemaParserV22(_SchemaParser): "compression", "default_time_to_live") - def __init__(self, connection, timeout): - super(SchemaParserV22, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size, metadata_request_timeout): + super(SchemaParserV22, self).__init__(connection, timeout, fetch_size, metadata_request_timeout) self.keyspaces_result = [] self.tables_result = [] self.columns_result = [] @@ -2020,7 +2075,6 @@ def __init__(self, connection, timeout): self.types_result = [] self.functions_result = [] self.aggregates_result = [] - self.scylla_result = [] self.keyspace_table_rows = defaultdict(list) self.keyspace_table_col_rows = defaultdict(lambda: defaultdict(list)) @@ -2028,7 +2082,6 @@ def __init__(self, connection, timeout): self.keyspace_func_rows = defaultdict(list) self.keyspace_agg_rows = defaultdict(list) self.keyspace_table_trigger_rows = defaultdict(lambda: defaultdict(list)) - self.keyspace_scylla_rows = defaultdict(lambda: defaultdict(list)) def get_all_keyspaces(self): self._query_all() @@ -2061,9 +2114,18 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), _encoder) - cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) - col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) - triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) + cf_query = QueryMessage( + 
query=maybe_add_timeout_to_query(self._SELECT_COLUMN_FAMILIES + where_clause, self.metadata_request_timeout), + consistency_level=cl, + ) + col_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_COLUMNS + where_clause, self.metadata_request_timeout), + consistency_level=cl, + ) + triggers_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS + where_clause, self.metadata_request_timeout), + consistency_level=cl, + ) (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ = self.connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self.timeout, fail_on_error=False) table_result = self._handle_results(cf_success, cf_result) @@ -2377,13 +2439,34 @@ def _build_trigger_metadata(table_metadata, row): def _query_all(self): cl = ConsistencyLevel.ONE queries = [ - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_COLUMN_FAMILIES, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout), + consistency_level=cl, + ), + QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout), + consistency_level=cl, + ) ] ((ks_success, ks_result), @@ -2444,23 +2527,9 @@ def _query_all(self): self._aggregate_results() def _aggregate_results(self): - m = self.keyspace_scylla_rows - for row in self.scylla_result: - ksname = row["keyspace_name"] - cfname = row[self._table_name_col] - m[ksname][cfname].append(row) - m = self.keyspace_table_rows for row in self.tables_result: ksname = row["keyspace_name"] - cfname = row[self._table_name_col] - # in_memory property is stored in scylla private table - # add it to table properties if enabled - try: - if self.keyspace_scylla_rows[ksname][cfname][0]["in_memory"] == True: - row["in_memory"] = True - except (IndexError, KeyError): - pass m[ksname].append(row) m = self.keyspace_table_col_rows @@ -2506,7 +2575,6 @@ class SchemaParserV3(SchemaParserV22): _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" _SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates" _SELECT_VIEWS = "SELECT * FROM system_schema.views" - _SELECT_SCYLLA = "SELECT * FROM system_schema.scylla_tables" _table_name_col = 'table_name' @@ -2532,8 +2600,8 @@ class SchemaParserV3(SchemaParserV22): 'read_repair_chance', 'speculative_retry') - def __init__(self, connection, timeout): - super(SchemaParserV3, self).__init__(connection, timeout) + def 
__init__(self, connection, timeout, fetch_size, metadata_request_timeout): + super(SchemaParserV3, self).__init__(connection, timeout, fetch_size, metadata_request_timeout) self.indexes_result = [] self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) self.keyspace_view_rows = defaultdict(list) @@ -2547,41 +2615,41 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE + fetch_size = self.fetch_size where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder) - cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) - col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) - indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) - triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - scylla_query = QueryMessage(query=self._SELECT_SCYLLA + where_clause, consistency_level=cl) + cf_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_TABLES + where_clause, self.metadata_request_timeout), + consistency_level=cl, fetch_size=fetch_size) + col_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_COLUMNS + where_clause, self.metadata_request_timeout), + consistency_level=cl, fetch_size=fetch_size) + indexes_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_INDEXES + where_clause, self.metadata_request_timeout), + consistency_level=cl, fetch_size=fetch_size) + triggers_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS + where_clause, self.metadata_request_timeout), + consistency_level=cl, fetch_size=fetch_size) # in protocol v4 we don't know if this event is a view or a table, so we look for both where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder) - view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, - consistency_level=cl) + view_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_VIEWS + where_clause, self.metadata_request_timeout), + consistency_level=cl, fetch_size=fetch_size) ((cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), (triggers_success, triggers_result), - (view_success, view_result), - (scylla_success, scylla_result)) = ( + (view_success, view_result)) = ( self.connection.wait_for_responses( cf_query, col_query, indexes_query, triggers_query, - view_query, scylla_query, timeout=self.timeout, fail_on_error=False) + view_query, timeout=self.timeout, fail_on_error=False) ) - table_result = self._handle_results(cf_success, cf_result) - col_result = self._handle_results(col_success, col_result) + table_result = self._handle_results(cf_success, cf_result, query_msg=cf_query) + col_result = self._handle_results(col_success, col_result, query_msg=col_query) if table_result: - indexes_result = self._handle_results(indexes_sucess, indexes_result) - triggers_result = self._handle_results(triggers_success, triggers_result) - # in_memory property is stored in scylla private table - # add it to table properties if enabled - scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,)) - try: - if scylla_result[0]["in_memory"] == True: - table_result[0]["in_memory"] = True - except (IndexError, KeyError): - pass + indexes_result = self._handle_results(indexes_sucess, 
indexes_result, query_msg=indexes_query) + triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=triggers_query) return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) - view_result = self._handle_results(view_success, view_result) + view_result = self._handle_results(view_success, view_result, query_msg=view_query) if view_result: return self._build_view_metadata(view_result[0], col_result) @@ -2726,17 +2794,26 @@ def _build_trigger_metadata(table_metadata, row): def _query_all(self): cl = ConsistencyLevel.ONE + fetch_size = self.fetch_size queries = [ - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), - QueryMessage(query=self._SELECT_SCYLLA, consistency_level=cl) + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), ] ((ks_success, ks_result), @@ -2747,21 +2824,19 @@ def _query_all(self): (aggregates_success, aggregates_result), (triggers_success, triggers_result), (indexes_success, indexes_result), - (views_success, views_result), - (scylla_success, scylla_result)) = self.connection.wait_for_responses( + (views_success, views_result)) = self.connection.wait_for_responses( *queries, timeout=self.timeout, fail_on_error=False ) - self.keyspaces_result = self._handle_results(ks_success, ks_result) - self.tables_result = self._handle_results(table_success, table_result) - self.columns_result = self._handle_results(col_success, col_result) - self.triggers_result = self._handle_results(triggers_success, triggers_result) - self.types_result = self._handle_results(types_success, types_result) - self.functions_result = self._handle_results(functions_success, functions_result) - self.aggregates_result = 
self._handle_results(aggregates_success, aggregates_result) - self.indexes_result = self._handle_results(indexes_success, indexes_result) - self.views_result = self._handle_results(views_success, views_result) - self.scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,)) + self.keyspaces_result = self._handle_results(ks_success, ks_result, query_msg=queries[0]) + self.tables_result = self._handle_results(table_success, table_result, query_msg=queries[1]) + self.columns_result = self._handle_results(col_success, col_result, query_msg=queries[2]) + self.triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=queries[6]) + self.types_result = self._handle_results(types_success, types_result, query_msg=queries[3]) + self.functions_result = self._handle_results(functions_success, functions_result, query_msg=queries[4]) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result, query_msg=queries[5]) + self.indexes_result = self._handle_results(indexes_success, indexes_result, query_msg=queries[7]) + self.views_result = self._handle_results(views_success, views_result, query_msg=queries[8]) self._aggregate_results() @@ -2814,8 +2889,8 @@ class SchemaParserV4(SchemaParserV3): _SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables' _SELECT_VIRTUAL_COLUMNS = 'SELECT * from system_virtual_schema.columns' - def __init__(self, connection, timeout): - super(SchemaParserV4, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size, metadata_request_timeout): + super(SchemaParserV4, self).__init__(connection, timeout, fetch_size, metadata_request_timeout) self.virtual_keyspaces_rows = defaultdict(list) self.virtual_tables_rows = defaultdict(list) self.virtual_columns_rows = defaultdict(lambda: defaultdict(list)) @@ -2824,21 +2899,34 @@ def _query_all(self): cl = ConsistencyLevel.ONE # todo: this duplicates V3; we should find a way for _query_all methods # to extend each other. 
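For context on the schema queries above: each one now carries a configurable fetch_size and is wrapped by maybe_add_timeout_to_query(), and _handle_results() drains any remaining pages by re-sending the same QueryMessage with the last paging_state until the server stops returning one. A simplified sketch of that drain loop (tolerance for expected failures omitted), assuming a connection exposing the wait_for_response() call used in the diff:

    def drain_pages(connection, query_msg, first_result, timeout):
        # Collect every page of a schema query, mirroring get_next_pages() above.
        rows = list(first_result.parsed_rows)
        paging_state = first_result.paging_state
        while paging_state:
            query_msg.paging_state = paging_state
            success, result = connection.wait_for_response(
                query_msg, timeout=timeout, fail_on_error=False)
            if not success:
                raise result
            rows.extend(result.parsed_rows)
            paging_state = result.paging_state
        return rows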
+ fetch_size = self.fetch_size queries = [ # copied from V3 - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), # V4-only queries - QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl) + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_KEYSPACES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_TABLES, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_COLUMNS, self.metadata_request_timeout), + fetch_size=fetch_size, consistency_level=cl), ] responses = self.connection.wait_for_responses( @@ -2861,29 +2949,29 @@ def _query_all(self): ) = responses # copied from V3 - self.keyspaces_result = self._handle_results(ks_success, ks_result) - self.tables_result = self._handle_results(table_success, table_result) - self.columns_result = self._handle_results(col_success, col_result) - self.triggers_result = self._handle_results(triggers_success, triggers_result) - self.types_result = self._handle_results(types_success, types_result) - self.functions_result = self._handle_results(functions_success, functions_result) - self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) - self.indexes_result = self._handle_results(indexes_success, indexes_result) - self.views_result = self._handle_results(views_success, views_result) + 
self.keyspaces_result = self._handle_results(ks_success, ks_result, query_msg=queries[0]) + self.tables_result = self._handle_results(table_success, table_result, query_msg=queries[1]) + self.columns_result = self._handle_results(col_success, col_result, query_msg=queries[2]) + self.triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=queries[6]) + self.types_result = self._handle_results(types_success, types_result, query_msg=queries[3]) + self.functions_result = self._handle_results(functions_success, functions_result, query_msg=queries[4]) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result, query_msg=queries[5]) + self.indexes_result = self._handle_results(indexes_success, indexes_result, query_msg=queries[7]) + self.views_result = self._handle_results(views_success, views_result, query_msg=queries[8]) # V4-only results # These tables don't exist in some DSE versions reporting 4.X so we can # ignore them if we got an error self.virtual_keyspaces_result = self._handle_results( virtual_ks_success, virtual_ks_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[9] ) self.virtual_tables_result = self._handle_results( virtual_table_success, virtual_table_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[10] ) self.virtual_columns_result = self._handle_results( virtual_column_success, virtual_column_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[11] ) self._aggregate_results() @@ -2948,8 +3036,8 @@ class SchemaParserDSE68(SchemaParserDSE67): _table_metadata_class = TableMetadataDSE68 - def __init__(self, connection, timeout): - super(SchemaParserDSE68, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size, metadata_request_timeout): + super(SchemaParserDSE68, self).__init__(connection, timeout, fetch_size, metadata_request_timeout) self.keyspace_table_vertex_rows = defaultdict(lambda: defaultdict(list)) self.keyspace_table_edge_rows = defaultdict(lambda: defaultdict(list)) @@ -2962,8 +3050,14 @@ def get_table(self, keyspaces, keyspace, table): table_meta = super(SchemaParserDSE68, self).get_table(keyspaces, keyspace, table) cl = ConsistencyLevel.ONE where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder) - vertices_query = QueryMessage(query=self._SELECT_VERTICES + where_clause, consistency_level=cl) - edges_query = QueryMessage(query=self._SELECT_EDGES + where_clause, consistency_level=cl) + vertices_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_VERTICES + where_clause, self.metadata_request_timeout), + consistency_level=cl, + ) + edges_query = QueryMessage( + query=maybe_add_timeout_to_query(self._SELECT_EDGES + where_clause, self.metadata_request_timeout), + consistency_level=cl, + ) (vertices_success, vertices_result), (edges_success, edges_result) \ = self.connection.wait_for_responses(vertices_query, edges_query, timeout=self.timeout, fail_on_error=False) @@ -3003,17 +3097,17 @@ def _build_table_graph_metadata(table_meta): try: # Make sure we process vertices before edges - for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + for table_meta in [t for t in keyspace_meta.tables.values() if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]: _build_table_graph_metadata(table_meta) # all other tables... 
- for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + for table_meta in [t for t in keyspace_meta.tables.values() if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: _build_table_graph_metadata(table_meta) except Exception: # schema error, remove all graph metadata for this keyspace - for t in six.itervalues(keyspace_meta.tables): + for t in keyspace_meta.tables.values(): t.edge = t.vertex = None keyspace_meta._exc_info = sys.exc_info() log.exception("Error while parsing graph metadata for keyspace %s", keyspace_meta.name) @@ -3044,21 +3138,22 @@ def _query_all(self): cl = ConsistencyLevel.ONE queries = [ # copied from v4 - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout), + consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_KEYSPACES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_TABLES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_COLUMNS, self.metadata_request_timeout), consistency_level=cl), # dse6.8 only - QueryMessage(query=self._SELECT_VERTICES, consistency_level=cl), - QueryMessage(query=self._SELECT_EDGES, consistency_level=cl) + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VERTICES, self.metadata_request_timeout), consistency_level=cl), + QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_EDGES, self.metadata_request_timeout), consistency_level=cl) ] responses = self.connection.wait_for_responses( @@ -3227,7 +3322,7 @@ def as_cql_query(self, formatted=False): if self.extensions: registry = 
_RegisteredExtensionType._extension_registry - for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: @@ -3314,25 +3409,25 @@ def __init__( self.to_clustering_columns = to_clustering_columns -def get_schema_parser(connection, server_version, dse_version, timeout): +def get_schema_parser(connection, server_version, dse_version, timeout, metadata_request_timeout, fetch_size=None): version = Version(server_version) if dse_version: v = Version(dse_version) if v >= Version('6.8.0'): - return SchemaParserDSE68(connection, timeout) + return SchemaParserDSE68(connection, timeout, fetch_size, metadata_request_timeout) elif v >= Version('6.7.0'): - return SchemaParserDSE67(connection, timeout) + return SchemaParserDSE67(connection, timeout, fetch_size, metadata_request_timeout) elif v >= Version('6.0.0'): - return SchemaParserDSE60(connection, timeout) + return SchemaParserDSE60(connection, timeout, fetch_size, metadata_request_timeout) if version >= Version('4-a'): - return SchemaParserV4(connection, timeout) + return SchemaParserV4(connection, timeout, fetch_size, metadata_request_timeout) elif version >= Version('3.0.0'): - return SchemaParserV3(connection, timeout) + return SchemaParserV3(connection, timeout, fetch_size, metadata_request_timeout) else: # we could further specialize by version. Right now just refactoring the # multi-version parser we have as of C* 2.2.0rc1. - return SchemaParserV22(connection, timeout) + return SchemaParserV22(connection, timeout, fetch_size, metadata_request_timeout) def _cql_from_cass_type(cass_type): @@ -3389,7 +3484,7 @@ def group_keys_by_replica(session, keyspace, table, keys): all_replicas = cluster.metadata.get_replicas(keyspace, routing_key) # First check if there are local replicas valid_replicas = [host for host in all_replicas if - host.is_up and distance(host) == HostDistance.LOCAL] + host.is_up and distance(host) in [HostDistance.LOCAL, HostDistance.LOCAL_RACK]] if not valid_replicas: valid_replicas = [host for host in all_replicas if host.is_up] diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 223b0c7c6e..abfc863b55 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -134,9 +134,9 @@ def __init__(self, cluster_proxy): scales.Stat('known_hosts', lambda: len(cluster_proxy.metadata.all_hosts())), scales.Stat('connected_to', - lambda: len(set(chain.from_iterable(s._pools.keys() for s in cluster_proxy.sessions)))), + lambda: len(set(chain.from_iterable(list(s._pools.keys()) for s in cluster_proxy.sessions)))), scales.Stat('open_connections', - lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions))) + lambda: sum(sum(p.open_count for p in list(s._pools.values())) for s in cluster_proxy.sessions))) # TODO, to be removed in 4.0 # /cassandra contains the metrics of the first cluster registered diff --git a/cassandra/murmur3.py b/cassandra/murmur3.py index 7c8d641b32..282c43578d 100644 --- a/cassandra/murmur3.py +++ b/cassandra/murmur3.py @@ -1,4 +1,3 @@ -from six.moves import range import struct diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index bb5b9a1c8c..030c2c65c7 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -134,7 +134,7 @@ def make_array(coltype, array_size): """ try: a = np.ma.empty((array_size,), dtype=_cqltype_to_numpy[coltype]) - 
a.mask = np.zeros((array_size,), dtype=np.bool) + a.mask = np.zeros((array_size,), dtype=bool) except KeyError: a = np.empty((array_size,), dtype=obj_dtype) return a diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index a0b5316a33..cf43771dd7 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -17,9 +17,12 @@ include "ioutils.pyx" from cassandra import DriverException from cassandra.bytesio cimport BytesIOReader from cassandra.deserializers cimport Deserializer, from_binary +from cassandra.deserializers import find_deserializer from cassandra.parsing cimport ParseDesc, ColumnParser, RowParser from cassandra.tuple cimport tuple_new, tuple_set +from cpython.bytes cimport PyBytes_AsStringAndSize + cdef class ListParser(ColumnParser): """Decode a ResultMessage into a list of tuples (or other objects)""" @@ -58,18 +61,29 @@ cdef class TupleRowParser(RowParser): assert desc.rowsize >= 0 cdef Buffer buf + cdef Buffer newbuf cdef Py_ssize_t i, rowsize = desc.rowsize cdef Deserializer deserializer cdef tuple res = tuple_new(desc.rowsize) + ce_policy = desc.column_encryption_policy for i in range(rowsize): # Read the next few bytes get_buf(reader, &buf) # Deserialize bytes to python object deserializer = desc.deserializers[i] + coldesc = desc.coldescs[i] + uses_ce = ce_policy and ce_policy.contains_column(coldesc) try: - val = from_binary(deserializer, &buf, desc.protocol_version) + if uses_ce: + col_type = ce_policy.column_type(coldesc) + decrypted_bytes = ce_policy.decrypt(coldesc, to_bytes(&buf)) + PyBytes_AsStringAndSize(decrypted_bytes, &newbuf.ptr, &newbuf.size) + deserializer = find_deserializer(ce_policy.column_type(coldesc)) + val = from_binary(deserializer, &newbuf, desc.protocol_version) + else: + val = from_binary(deserializer, &buf, desc.protocol_version) except Exception as e: raise DriverException('Failed decoding result column "%s" of type %s: %s' % (desc.colnames[i], desc.coltypes[i].cql_parameterized_type(), diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index aa9478cd14..27dc368b07 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -18,6 +18,8 @@ from cassandra.deserializers cimport Deserializer cdef class ParseDesc: cdef public object colnames cdef public object coltypes + cdef public object column_encryption_policy + cdef public list coldescs cdef Deserializer[::1] deserializers cdef public int protocol_version cdef Py_ssize_t rowsize diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index d2bc0a3abe..954767d227 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -19,9 +19,11 @@ Module containing the definitions and declarations (parsing.pxd) for parsers. cdef class ParseDesc: """Description of what structure to parse""" - def __init__(self, colnames, coltypes, deserializers, protocol_version): + def __init__(self, colnames, coltypes, column_encryption_policy, coldescs, deserializers, protocol_version): self.colnames = colnames self.coltypes = coltypes + self.column_encryption_policy = column_encryption_policy + self.coldescs = coldescs self.deserializers = deserializers self.protocol_version = protocol_version self.rowsize = len(colnames) diff --git a/cassandra/policies.py b/cassandra/policies.py index fa1e8cf385..bcfd797706 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -11,13 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
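On the obj_parser.pyx change above: when a column-encryption policy covers a column, the raw cell bytes are decrypted first and only then deserialized, using the type declared by the policy rather than the wire type. Roughly, in plain Python (the deserialize callable stands in for the Cython find_deserializer()/from_binary() pair):

    def parse_cell(raw_bytes, coldesc, wire_type, deserialize, ce_policy):
        # Sketch of the branch added to TupleRowParser.unpack_row.
        if ce_policy is not None and ce_policy.contains_column(coldesc):
            decrypted = ce_policy.decrypt(coldesc, raw_bytes)
            return deserialize(ce_policy.column_type(coldesc), decrypted)
        return deserialize(wire_type, raw_bytes)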
+import random +from collections import namedtuple from itertools import islice, cycle, groupby, repeat import logging from random import randint, shuffle from threading import Lock import socket import warnings + +log = logging.getLogger(__name__) + from cassandra import WriteType as WT @@ -26,12 +31,8 @@ # It may removed in the next mayor. WriteType = WT - from cassandra import ConsistencyLevel, OperationTimedOut -log = logging.getLogger(__name__) - - class HostDistance(object): """ A measure of how "distant" a node is from the client, which @@ -45,7 +46,18 @@ class HostDistance(object): connections opened to it. """ - LOCAL = 0 + LOCAL_RACK = 0 + """ + Nodes with ``LOCAL_RACK`` distance will be preferred for operations + under some load balancing policies (such as :class:`.RackAwareRoundRobinPolicy`) + and will have a greater number of connections opened against + them by default. + + This distance is typically used for nodes within the same + datacenter and the same rack as the client. + """ + + LOCAL = 1 """ Nodes with ``LOCAL`` distance will be preferred for operations under some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`) @@ -56,12 +68,12 @@ class HostDistance(object): datacenter as the client. """ - REMOTE = 1 + REMOTE = 2 """ Nodes with ``REMOTE`` distance will be treated as a last resort - by some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`) - and will have a smaller number of connections opened against - them by default. + by some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy` + and :class:`.RackAwareRoundRobinPolicy`) and will have a smaller number of + connections opened against them by default. This distance is typically used for nodes outside of the datacenter that the client is running in. @@ -101,6 +113,11 @@ class LoadBalancingPolicy(HostStateListener): You may also use subclasses of :class:`.LoadBalancingPolicy` for custom behavior. + + You should always use immutable collections (e.g., tuples or + frozensets) to store information about hosts to prevent accidental + modification. When there are changes to the hosts (e.g., a host is + down or up), the old collection should be replaced with a new one. """ _hosts_lock = None @@ -236,7 +253,7 @@ def _dc(self, host): def populate(self, cluster, hosts): for dc, dc_hosts in groupby(hosts, lambda h: self._dc(h)): - self._dc_live_hosts[dc] = tuple(set(dc_hosts)) + self._dc_live_hosts[dc] = tuple({*dc_hosts, *self._dc_live_hosts.get(dc, [])}) if not self.local_dc: self._endpoints = [ @@ -315,6 +332,130 @@ def on_add(self, host): def on_remove(self, host): self.on_down(host) +class RackAwareRoundRobinPolicy(LoadBalancingPolicy): + """ + Similar to :class:`.DCAwareRoundRobinPolicy`, but prefers hosts + in the local rack, before hosts in the local datacenter but a + different rack, before hosts in all other datacenters + """ + + local_dc = None + local_rack = None + used_hosts_per_remote_dc = 0 + + def __init__(self, local_dc, local_rack, used_hosts_per_remote_dc=0): + """ + The `local_dc` and `local_rack` parameters should be the name of the + datacenter and rack (such as is reported by ``nodetool ring``) that + should be considered local. + + `used_hosts_per_remote_dc` controls how many nodes in + each remote datacenter will have connections opened + against them. In other words, `used_hosts_per_remote_dc` hosts + will be considered :attr:`~.HostDistance.REMOTE` and the + rest will be considered :attr:`~.HostDistance.IGNORED`. 
+ By default, all remote hosts are ignored. + """ + self.local_rack = local_rack + self.local_dc = local_dc + self.used_hosts_per_remote_dc = used_hosts_per_remote_dc + self._live_hosts = {} + self._dc_live_hosts = {} + self._endpoints = [] + self._position = 0 + LoadBalancingPolicy.__init__(self) + + def _rack(self, host): + return host.rack or self.local_rack + + def _dc(self, host): + return host.datacenter or self.local_dc + + def populate(self, cluster, hosts): + for (dc, rack), rack_hosts in groupby(hosts, lambda host: (self._dc(host), self._rack(host))): + self._live_hosts[(dc, rack)] = tuple({*rack_hosts, *self._live_hosts.get((dc, rack), [])}) + for dc, dc_hosts in groupby(hosts, lambda host: self._dc(host)): + self._dc_live_hosts[dc] = tuple({*dc_hosts, *self._dc_live_hosts.get(dc, [])}) + + self._position = randint(0, len(hosts) - 1) if hosts else 0 + + def distance(self, host): + rack = self._rack(host) + dc = self._dc(host) + if rack == self.local_rack and dc == self.local_dc: + return HostDistance.LOCAL_RACK + + if dc == self.local_dc: + return HostDistance.LOCAL + + if not self.used_hosts_per_remote_dc: + return HostDistance.IGNORED + + dc_hosts = self._dc_live_hosts.get(dc, ()) + if not dc_hosts: + return HostDistance.IGNORED + if host in dc_hosts and dc_hosts.index(host) < self.used_hosts_per_remote_dc: + return HostDistance.REMOTE + else: + return HostDistance.IGNORED + + def make_query_plan(self, working_keyspace=None, query=None): + pos = self._position + self._position += 1 + + local_rack_live = self._live_hosts.get((self.local_dc, self.local_rack), ()) + pos = (pos % len(local_rack_live)) if local_rack_live else 0 + # Slice the cyclic iterator to start from pos and include the next len(local_live) elements + # This ensures we get exactly one full cycle starting from pos + for host in islice(cycle(local_rack_live), pos, pos + len(local_rack_live)): + yield host + + local_live = [host for host in self._dc_live_hosts.get(self.local_dc, ()) if host.rack != self.local_rack] + pos = (pos % len(local_live)) if local_live else 0 + for host in islice(cycle(local_live), pos, pos + len(local_live)): + yield host + + # the dict can change, so get candidate DCs iterating over keys of a copy + for dc, remote_live in self._dc_live_hosts.copy().items(): + if dc != self.local_dc: + for host in remote_live[:self.used_hosts_per_remote_dc]: + yield host + + def on_up(self, host): + dc = self._dc(host) + rack = self._rack(host) + with self._hosts_lock: + current_rack_hosts = self._live_hosts.get((dc, rack), ()) + if host not in current_rack_hosts: + self._live_hosts[(dc, rack)] = current_rack_hosts + (host, ) + current_dc_hosts = self._dc_live_hosts.get(dc, ()) + if host not in current_dc_hosts: + self._dc_live_hosts[dc] = current_dc_hosts + (host, ) + + def on_down(self, host): + dc = self._dc(host) + rack = self._rack(host) + with self._hosts_lock: + current_rack_hosts = self._live_hosts.get((dc, rack), ()) + if host in current_rack_hosts: + hosts = tuple(h for h in current_rack_hosts if h != host) + if hosts: + self._live_hosts[(dc, rack)] = hosts + else: + del self._live_hosts[(dc, rack)] + current_dc_hosts = self._dc_live_hosts.get(dc, ()) + if host in current_dc_hosts: + hosts = tuple(h for h in current_dc_hosts if h != host) + if hosts: + self._dc_live_hosts[dc] = hosts + else: + del self._dc_live_hosts[dc] + + def on_add(self, host): + self.on_up(host) + + def on_remove(self, host): + self.on_down(host) class TokenAwarePolicy(LoadBalancingPolicy): """ @@ -335,12 +476,12 @@ class 
TokenAwarePolicy(LoadBalancingPolicy): _child_policy = None _cluster_metadata = None - shuffle_replicas = False + shuffle_replicas = True """ Yield local replicas in a random order. """ - def __init__(self, child_policy, shuffle_replicas=False): + def __init__(self, child_policy, shuffle_replicas=True): self._child_policy = child_policy self.shuffle_replicas = shuffle_replicas @@ -361,34 +502,40 @@ def distance(self, *args, **kwargs): return self._child_policy.distance(*args, **kwargs) def make_query_plan(self, working_keyspace=None, query=None): - if query and query.keyspace: - keyspace = query.keyspace - else: - keyspace = working_keyspace + keyspace = query.keyspace if query and query.keyspace else working_keyspace child = self._child_policy - if query is None: + if query is None or query.routing_key is None or keyspace is None: for host in child.make_query_plan(keyspace, query): yield host + return + + replicas = [] + if self._cluster_metadata._tablets.table_has_tablets(keyspace, query.table): + tablet = self._cluster_metadata._tablets.get_tablet_for_key( + keyspace, query.table, self._cluster_metadata.token_map.token_class.from_key(query.routing_key)) + + if tablet is not None: + replicas_mapped = set(map(lambda r: r[0], tablet.replicas)) + child_plan = child.make_query_plan(keyspace, query) + + replicas = [host for host in child_plan if host.host_id in replicas_mapped] else: - routing_key = query.routing_key - if routing_key is None or keyspace is None: - for host in child.make_query_plan(keyspace, query): - yield host - else: - replicas = self._cluster_metadata.get_replicas(keyspace, routing_key) - if self.shuffle_replicas: - shuffle(replicas) - for replica in replicas: - if replica.is_up and \ - child.distance(replica) == HostDistance.LOCAL: + replicas = self._cluster_metadata.get_replicas(keyspace, query.routing_key) + + if self.shuffle_replicas and not query.is_lwt(): + shuffle(replicas) + + def yield_in_order(hosts): + for distance in [HostDistance.LOCAL_RACK, HostDistance.LOCAL, HostDistance.REMOTE]: + for replica in hosts: + if replica.is_up and child.distance(replica) == distance: yield replica - for host in child.make_query_plan(keyspace, query): - # skip if we've already listed this host - if host not in replicas or \ - child.distance(host) == HostDistance.REMOTE: - yield host + # yield replicas: local_rack, local, remote + yield from yield_in_order(replicas) + # yield rest of the cluster: local_rack, local, remote + yield from yield_in_order([host for host in child.make_query_plan(keyspace, query) if host not in replicas]) def on_up(self, *args, **kwargs): return self._child_policy.on_up(*args, **kwargs) @@ -422,8 +569,14 @@ def __init__(self, hosts): connections to. """ self._allowed_hosts = tuple(hosts) - self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts - for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)] + self._allowed_hosts_resolved = [] + for h in self._allowed_hosts: + unix_socket_path = getattr(h, "_unix_socket_path", None) + if unix_socket_path: + self._allowed_hosts_resolved.append(unix_socket_path) + else: + self._allowed_hosts_resolved.extend([endpoint[4][0] + for endpoint in socket.getaddrinfo(h, None, socket.AF_UNSPEC, socket.SOCK_STREAM)]) RoundRobinPolicy.__init__(self) @@ -455,7 +608,7 @@ class HostFilterPolicy(LoadBalancingPolicy): A :class:`.LoadBalancingPolicy` subclass configured with a child policy, and a single-argument predicate. 
This policy defers to the child policy for hosts where ``predicate(host)`` is truthy. Hosts for which - ``predicate(host)`` is falsey will be considered :attr:`.IGNORED`, and will + ``predicate(host)`` is falsy will be considered :attr:`.IGNORED`, and will not be used in a query plan. This can be used in the cases where you need a whitelist or blacklist @@ -491,7 +644,7 @@ def __init__(self, child_policy, predicate): :param child_policy: an instantiated :class:`.LoadBalancingPolicy` that this one will defer to. :param predicate: a one-parameter function that takes a :class:`.Host`. - If it returns a falsey value, the :class:`.Host` will + If it returns a falsy value, the :class:`.Host` will be :attr:`.IGNORED` and not returned in query plans. """ super(HostFilterPolicy, self).__init__() @@ -527,7 +680,7 @@ def predicate(self): def distance(self, host): """ Checks if ``predicate(host)``, then returns - :attr:`~HostDistance.IGNORED` if falsey, and defers to the child policy + :attr:`~HostDistance.IGNORED` if falsy, and defers to the child policy otherwise. """ if self.predicate(host): @@ -616,7 +769,7 @@ class ReconnectionPolicy(object): def new_schedule(self): """ This should return a finite or infinite iterable of delays (each as a - floating point number of seconds) inbetween each failed reconnection + floating point number of seconds) in-between each failed reconnection attempt. Note that if the iterable is finite, reconnection attempts will cease once the iterable is exhausted. """ @@ -626,12 +779,12 @@ def new_schedule(self): class ConstantReconnectionPolicy(ReconnectionPolicy): """ A :class:`.ReconnectionPolicy` subclass which sleeps for a fixed delay - inbetween each reconnection attempt. + in-between each reconnection attempt. """ def __init__(self, delay, max_attempts=64): """ - `delay` should be a floating point number of seconds to wait inbetween + `delay` should be a floating point number of seconds to wait in-between each attempt. `max_attempts` should be a total number of attempts to be made before @@ -655,7 +808,7 @@ def new_schedule(self): class ExponentialReconnectionPolicy(ReconnectionPolicy): """ A :class:`.ReconnectionPolicy` subclass which exponentially increases - the length of the delay inbetween each reconnection attempt up to + the length of the delay in-between each reconnection attempt up to a set maximum delay. A random amount of jitter (+/- 15%) will be added to the pure exponential @@ -715,7 +868,7 @@ class RetryPolicy(object): timeout and unavailable failures. These are failures reported from the server side. Timeouts are configured by `settings in cassandra.yaml `_. - Unavailable failures occur when the coordinator cannot acheive the consistency + Unavailable failures occur when the coordinator cannot achieve the consistency level for a request. For further information see the method descriptions below. @@ -865,7 +1018,7 @@ def on_request_error(self, query, consistency, error, retry_num): `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. - The default, it triggers a retry on the next host in the query plan + By default, it triggers a retry on the next host in the query plan with the same consistency level. 
""" # TODO revisit this for the next major @@ -999,6 +1152,57 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, return self._pick_consistency(alive_replicas) +class ExponentialBackoffRetryPolicy(RetryPolicy): + """ + A policy that do retries with exponential backoff + """ + + def __init__(self, max_num_retries: float, min_interval: float = 0.1, max_interval: float = 10.0, + *args, **kwargs): + """ + `max_num_retries` counts how many times the operation would be retried, + `min_interval` is the initial time in seconds to wait before first retry + `max_interval` is the maximum time to wait between retries + """ + self.min_interval = min_interval + self.max_num_retries = max_num_retries + self.max_interval = max_interval + super(ExponentialBackoffRetryPolicy).__init__(*args, **kwargs) + + def _calculate_backoff(self, attempt: int): + delay = min(self.max_interval, self.min_interval * 2 ** attempt) + # add some jitter + delay += random.random() * self.min_interval - (self.min_interval / 2) + return delay + + def on_read_timeout(self, query, consistency, required_responses, + received_responses, data_retrieved, retry_num): + if retry_num < self.max_num_retries and received_responses >= required_responses and not data_retrieved: + return self.RETRY, consistency, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_write_timeout(self, query, consistency, write_type, + required_responses, received_responses, retry_num): + if retry_num < self.max_num_retries and write_type == WriteType.BATCH_LOG: + return self.RETRY, consistency, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_unavailable(self, query, consistency, required_replicas, + alive_replicas, retry_num): + if retry_num < self.max_num_retries: + return self.RETRY_NEXT_HOST, None, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_request_error(self, query, consistency, error, retry_num): + if retry_num < self.max_num_retries: + return self.RETRY_NEXT_HOST, None, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + class AddressTranslator(object): """ Interface for translating cluster-defined endpoints. @@ -1181,3 +1385,62 @@ def _rethrow(self, *args, **kwargs): on_read_timeout = _rethrow on_write_timeout = _rethrow on_unavailable = _rethrow + + +ColDesc = namedtuple('ColDesc', ['ks', 'table', 'col']) + +class ColumnEncryptionPolicy(object): + """ + A policy enabling (mostly) transparent encryption and decryption of data before it is + sent to the cluster. + + Key materials and other configurations are specified on a per-column basis. This policy can + then be used by driver structures which are aware of the underlying columns involved in their + work. In practice this includes the following cases: + + * Prepared statements - data for columns specified by the cluster's policy will be transparently + encrypted before they are sent + * Rows returned from any query - data for columns specified by the cluster's policy will be + transparently decrypted before they are returned to the user + + To enable this functionality, create an instance of this class (or more likely a subclass) + before creating a cluster. This policy should then be configured and supplied to the Cluster + at creation time via the :attr:`.Cluster.column_encryption_policy` attribute. 
+ """ + + def encrypt(self, coldesc, obj_bytes): + """ + Encrypt the specified bytes using the cryptography materials for the specified column. + Largely used internally, although this could also be used to encrypt values supplied + to non-prepared statements in a way that is consistent with this policy. + """ + raise NotImplementedError() + + def decrypt(self, coldesc, encrypted_bytes): + """ + Decrypt the specified (encrypted) bytes using the cryptography materials for the + specified column. Used internally; could be used externally as well but there's + not currently an obvious use case. + """ + raise NotImplementedError() + + def add_column(self, coldesc, key): + """ + Provide cryptography materials to be used when encrypted and/or decrypting data + for the specified column. + """ + raise NotImplementedError() + + def contains_column(self, coldesc): + """ + Predicate to determine if a specific column is supported by this policy. + Currently only used internally. + """ + raise NotImplementedError() + + def encode_and_encrypt(self, coldesc, obj): + """ + Helper function to enable use of this policy on simple (i.e. non-prepared) + statements. + """ + raise NotImplementedError() diff --git a/cassandra/pool.py b/cassandra/pool.py index 87b66dd85b..b8a8ef7493 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -21,6 +21,8 @@ import socket import time import random +import copy +import uuid from threading import Lock, RLock, Condition import weakref try: @@ -173,6 +175,8 @@ def __init__(self, endpoint, conviction_policy_factory, datacenter=None, rack=No self.endpoint = endpoint if isinstance(endpoint, EndPoint) else DefaultEndPoint(endpoint) self.conviction_policy = conviction_policy_factory(self) + if not host_id: + host_id = uuid.uuid4() self.host_id = host_id self.set_location_info(datacenter, rack) self.lock = RLock() @@ -370,8 +374,7 @@ def on_exception(self, exc, next_delay): class HostConnection(object): """ - When using v3 of the native protocol, this is used instead of a connection - pool per host (HostConnectionPool) due to the increased in-flight capacity + When using v3 of the native protocol, this is useddue to the increased in-flight capacity of individual connections. """ @@ -388,6 +391,8 @@ class HostConnection(object): # the number below, all excess connections will be closed. max_excess_connections_per_shard_multiplier = 3 + tablets_routing_v1 = False + def __init__(self, host, host_distance, session): self.host = host self.host_distance = host_distance @@ -398,6 +403,7 @@ def __init__(self, host, host_distance, session): self._is_replacing = False self._connecting = set() self._connections = {} + self._pending_connections = [] # A pool of additional connections which are not used but affect how Scylla # assigns shards to them. Scylla tends to assign the shard which has # the lowest number of connections. If connections are not distributed @@ -412,6 +418,7 @@ def __init__(self, host, host_distance, session): # so that we can dispose of them. 
self._trash = set() self._shard_connections_futures = [] + self.advanced_shardaware_block_until = 0 if host_distance == HostDistance.IGNORED: log.debug("Not opening connection to ignored host %s", self.host) @@ -421,21 +428,21 @@ def __init__(self, host, host_distance, session): return log.debug("Initializing connection for host %s", self.host) - first_connection = session.cluster.connection_factory(self.host.endpoint, owning_pool=self) - log.debug("First connection created to %s for shard_id=%i", self.host, first_connection.shard_id) - self._connections[first_connection.shard_id] = first_connection + first_connection = session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) + log.debug("First connection created to %s for shard_id=%i", self.host, first_connection.features.shard_id) + self._connections[first_connection.features.shard_id] = first_connection self._keyspace = session.keyspace if self._keyspace: first_connection.set_keyspace_blocking(self._keyspace) - - if first_connection.sharding_info: - self.host.sharding_info = first_connection.sharding_info - self._open_connections_for_all_shards() + if first_connection.features.sharding_info and not self._session.cluster.shard_aware_options.disable: + self.host.sharding_info = first_connection.features.sharding_info + self._open_connections_for_all_shards(first_connection.features.shard_id) + self.tablets_routing_v1 = first_connection.features.tablets_routing_v1 log.debug("Finished initializing connection for host %s", self.host) - def _get_connection_for_routing_key(self, routing_key=None): + def _get_connection_for_routing_key(self, routing_key=None, keyspace=None, table=None): if self.is_shutdown: raise ConnectionException( "Pool for %s is shutdown" % (self.host,), self.host) @@ -444,9 +451,24 @@ def _get_connection_for_routing_key(self, routing_key=None): raise NoConnectionsAvailable() shard_id = None - if self.host.sharding_info and routing_key: + if not self._session.cluster.shard_aware_options.disable and self.host.sharding_info and routing_key: t = self._session.cluster.metadata.token_map.token_class.from_key(routing_key) - shard_id = self.host.sharding_info.shard_id_from_token(t.value) + + shard_id = None + if self.tablets_routing_v1 and table is not None: + if keyspace is None: + keyspace = self._keyspace + + tablet = self._session.cluster.metadata._tablets.get_tablet_for_key(keyspace, table, t) + + if tablet is not None: + for replica in tablet.replicas: + if replica[0] == self.host.host_id: + shard_id = replica[1] + break + + if shard_id is None: + shard_id = self.host.sharding_info.shard_id_from_token(t.value) conn = self._connections.get(shard_id) @@ -470,7 +492,7 @@ def _get_connection_for_routing_key(self, routing_key=None): "Connection to shard_id=%i reached orphaned stream limit, replacing on host %s (%s/%i)", shard_id, self.host, - len(self._connections.keys()), + len(self._connections), self.host.sharding_info.shards_count ) elif shard_id not in self._connecting: @@ -481,7 +503,7 @@ def _get_connection_for_routing_key(self, routing_key=None): "Trying to connect to missing shard_id=%i on host %s (%s/%i)", shard_id, self.host, - len(self._connections.keys()), + len(self._connections), self.host.sharding_info.shards_count ) @@ -492,15 +514,15 @@ def _get_connection_for_routing_key(self, routing_key=None): return random.choice(active_connections) return random.choice(list(self._connections.values())) - def borrow_connection(self, timeout, routing_key=None): - 
conn = self._get_connection_for_routing_key(routing_key) + def borrow_connection(self, timeout, routing_key=None, keyspace=None, table=None): + conn = self._get_connection_for_routing_key(routing_key, keyspace, table) start = time.time() remaining = timeout last_retry = False while True: if conn.is_closed: # The connection might have been closed in the meantime - if so, try again - conn = self._get_connection_for_routing_key(routing_key) + conn = self._get_connection_for_routing_key(routing_key, keyspace, table) with conn.lock: if (not conn.is_closed or last_retry) and conn.in_flight < conn.max_request_id: # On last retry we ignore connection status, since it is better to return closed connection than @@ -516,7 +538,10 @@ def borrow_connection(self, timeout, routing_key=None): last_retry = True continue with self._stream_available_condition: - self._stream_available_condition.wait(remaining) + if conn.orphaned_threshold_reached and conn.is_closed: + conn = self._get_connection() + else: + self._stream_available_condition.wait(remaining) raise NoConnectionsAvailable("All request IDs are currently in use") @@ -535,36 +560,37 @@ def return_connection(self, connection, stream_was_orphaned=False): if not connection.signaled_error: log.debug("Defunct or closed connection (%s) returned to pool, potentially " "marking host %s as down", id(connection), self.host) - is_down = self._session.cluster.signal_connection_failure( - self.host, connection.last_error, is_host_addition=False) + is_down = self.host.signal_connection_failure(connection.last_error) connection.signaled_error = True if self.shutdown_on_error and not is_down: is_down = True - self._session.cluster.on_down(self.host, is_host_addition=False) if is_down: self.shutdown() + self._session.cluster.on_down(self.host, is_host_addition=False) else: connection.close() with self._lock: if self.is_shutdown: return - self._connections.pop(connection.shard_id, None) + self._connections.pop(connection.features.shard_id, None) if self._is_replacing: return self._is_replacing = True self._session.submit(self._replace, connection) - else: - if connection in self._trash: - with connection.lock: - if connection.in_flight == len(connection.orphaned_request_ids): - with self._lock: - if connection in self._trash: - self._trash.remove(connection) - log.debug("Closing trashed connection (%s) to %s", id(connection), self.host) - connection.close() - return + elif connection in self._trash: + with connection.lock: + no_pending_requests = connection.in_flight <= len(connection.orphaned_request_ids) + if no_pending_requests: + with self._lock: + close_connection = False + if connection in self._trash: + self._trash.remove(connection) + close_connection = True + if close_connection: + log.debug("Closing trashed connection (%s) to %s", id(connection), self.host) + connection.close() def on_orphaned_stream_released(self): """ @@ -581,16 +607,17 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - if connection.shard_id in self._connections.keys(): - del self._connections[connection.shard_id] - if self.host.sharding_info: - self._connecting.add(connection.shard_id) - self._session.submit(self._open_connection_to_missing_shard, connection.shard_id) + if connection.features.shard_id in self._connections: + del self._connections[connection.features.shard_id] + if self.host.sharding_info and not self._session.cluster.shard_aware_options.disable: + self._connecting.add(connection.features.shard_id) + 
self._session.submit(self._open_connection_to_missing_shard, connection.features.shard_id) else: - connection = self._session.cluster.connection_factory(self.host.endpoint, owning_pool=self) + connection = self._session.cluster.connection_factory(self.host.endpoint, + on_orphaned_stream_released=self.on_orphaned_stream_released) if self._keyspace: connection.set_keyspace_blocking(self._keyspace) - self._connections[connection.shard_id] = connection + self._connections[connection.features.shard_id] = connection except Exception: log.warning("Failed reconnecting %s. Retrying." % (self.host.endpoint,)) self._session.submit(self._replace, connection) @@ -613,7 +640,9 @@ def shutdown(self): future.cancel() connections_to_close = self._connections.copy() + pending_connections_to_close = self._pending_connections.copy() self._connections.clear() + self._pending_connections.clear() # connection.close can call pool.return_connection, which will # obtain self._lock via self._stream_available_condition. @@ -622,6 +651,10 @@ def shutdown(self): log.debug("Closing connection (%s) to %s", id(connection), self.host) connection.close() + for connection in pending_connections_to_close: + log.debug("Closing pending connection (%s) to %s", id(connection), self.host) + connection.close() + self._close_excess_connections() trash_conns = None @@ -645,6 +678,25 @@ def _close_excess_connections(self): log.debug("Closing excess connection (%s) to %s", id(c), self.host) c.close() + def disable_advanced_shard_aware(self, secs): + log.warning("disabling advanced_shard_aware for %i seconds, could be that this client is behind NAT?", secs) + self.advanced_shardaware_block_until = max(time.time() + secs, self.advanced_shardaware_block_until) + + def _get_shard_aware_endpoint(self): + if (self.advanced_shardaware_block_until and self.advanced_shardaware_block_until < time.time()) or \ + self._session.cluster.shard_aware_options.disable_shardaware_port: + return None + + endpoint = None + if self._session.cluster.ssl_options and self.host.sharding_info.shard_aware_port_ssl: + endpoint = copy.copy(self.host.endpoint) + endpoint._port = self.host.sharding_info.shard_aware_port_ssl + elif self.host.sharding_info.shard_aware_port: + endpoint = copy.copy(self.host.endpoint) + endpoint._port = self.host.sharding_info.shard_aware_port + + return endpoint + def _open_connection_to_missing_shard(self, shard_id): """ Creates a new connection, checks its shard_id and populates our shard @@ -666,44 +718,72 @@ def _open_connection_to_missing_shard(self, shard_id): with self._lock: if self.is_shutdown: return + shard_aware_endpoint = self._get_shard_aware_endpoint() + log.debug("shard_aware_endpoint=%r", shard_aware_endpoint) + if shard_aware_endpoint: + try: + conn = self._session.cluster.connection_factory(shard_aware_endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released, + shard_id=shard_id, + total_shards=self.host.sharding_info.shards_count) + conn.original_endpoint = self.host.endpoint + except Exception as exc: + log.error("Failed to open connection to %s, on shard_id=%i: %s", self.host, shard_id, exc) + raise + else: + conn = self._session.cluster.connection_factory(self.host.endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released) - conn = self._session.cluster.connection_factory(self.host.endpoint) - log.debug("Received a connection %s for shard_id=%i on host %s", id(conn), conn.shard_id, self.host) + log.debug( + "Received a connection %s for shard_id=%i on host 
%s", + id(conn), + conn.features.shard_id if conn.features.shard_id is not None else -1, + self.host) if self.is_shutdown: log.debug("Pool for host %s is in shutdown, closing the new connection (%s)", self.host, id(conn)) conn.close() return - old_conn = self._connections.get(conn.shard_id) + + if shard_aware_endpoint and shard_id != conn.features.shard_id: + # connection didn't land on expected shared + # assuming behind a NAT, disabling advanced shard aware for a while + self.disable_advanced_shard_aware(10 * 60) + + old_conn = self._connections.get(conn.features.shard_id) if old_conn is None or old_conn.orphaned_threshold_reached: log.debug( "New connection (%s) created to shard_id=%i on host %s", id(conn), - conn.shard_id, + conn.features.shard_id, self.host ) old_conn = None with self._lock: - if self.is_shutdown: - conn.close() - return - if conn.shard_id in self._connections.keys(): - # Move the current connection to the trash and use the new one from now on - old_conn = self._connections[conn.shard_id] - log.debug( - "Replacing overloaded connection (%s) with (%s) for shard %i for host %s", - id(old_conn), - id(conn), - conn.shard_id, - self.host - ) - self._connections[conn.shard_id] = conn + is_shutdown = self.is_shutdown + if not is_shutdown: + if conn.features.shard_id in self._connections: + # Move the current connection to the trash and use the new one from now on + old_conn = self._connections[conn.features.shard_id] + log.debug( + "Replacing overloaded connection (%s) with (%s) for shard %i for host %s", + id(old_conn), + id(conn), + conn.features.shard_id, + self.host + ) + if self._keyspace: + conn.set_keyspace_blocking(self._keyspace) + self._connections[conn.features.shard_id] = conn + + if is_shutdown: + conn.close() + return + if old_conn is not None: remaining = old_conn.in_flight - len(old_conn.orphaned_request_ids) if remaining == 0: log.debug( "Immediately closing the old connection (%s) for shard %i on host %s", id(old_conn), - old_conn.shard_id, + old_conn.features.shard_id, self.host ) old_conn.close() @@ -711,26 +791,20 @@ def _open_connection_to_missing_shard(self, shard_id): log.debug( "Moving the connection (%s) for shard %i to trash on host %s, %i requests remaining", id(old_conn), - old_conn.shard_id, + old_conn.features.shard_id, self.host, remaining, ) with self._lock: - if self.is_shutdown: - old_conn.close() - else: + is_shutdown = self.is_shutdown + if not is_shutdown: self._trash.add(old_conn) - if self._keyspace: - with self._lock: - if self.is_shutdown: + if is_shutdown: conn.close() - old_conn = self._connections.get(conn.shard_id) - if old_conn: - old_conn.set_keyspace_blocking(self._keyspace) num_missing_or_needing_replacement = self.num_missing_or_needing_replacement log.debug( "Connected to %s/%i shards on host %s (%i missing or needs replacement)", - len(self._connections.keys()), + len(self._connections), self.host.sharding_info.shards_count, self.host, num_missing_or_needing_replacement @@ -742,7 +816,7 @@ def _open_connection_to_missing_shard(self, shard_id): len(self._excess_connections) ) self._close_excess_connections() - elif self.host.sharding_info.shards_count == len(self._connections.keys()) and self.num_missing_or_needing_replacement == 0: + elif self.host.sharding_info.shards_count == len(self._connections) and self.num_missing_or_needing_replacement == 0: log.debug( "All shards are already covered, closing newly opened excess connection %s for host %s", id(self), @@ -763,7 +837,7 @@ def _open_connection_to_missing_shard(self, 
shard_id): log.debug( "Putting a connection %s to shard %i to the excess pool of host %s", id(conn), - conn.shard_id, + conn.features.shard_id, self.host ) close_connection = False @@ -776,7 +850,7 @@ def _open_connection_to_missing_shard(self, shard_id): conn.close() self._connecting.discard(shard_id) - def _open_connections_for_all_shards(self): + def _open_connections_for_all_shards(self, skip_shard_id=None): """ Loop over all the shards and try to open a connection to each one. """ @@ -785,11 +859,23 @@ def _open_connections_for_all_shards(self): return for shard_id in range(self.host.sharding_info.shards_count): + if skip_shard_id is not None and skip_shard_id == shard_id: + continue future = self._session.submit(self._open_connection_to_missing_shard, shard_id) if isinstance(future, Future): self._connecting.add(shard_id) self._shard_connections_futures.append(future) + trash_conns = None + with self._lock: + if self._trash: + trash_conns = self._trash + self._trash = set() + + if trash_conns is not None: + for conn in self._trash: + conn.close() + def _set_keyspace_for_all_conns(self, keyspace, callback): """ Asynchronously sets the keyspace for all connections. When all @@ -824,12 +910,14 @@ def get_connections(self): def get_state(self): in_flights = [c.in_flight for c in list(self._connections.values())] - return {'shutdown': self.is_shutdown, 'open_count': self.open_count, 'in_flights': in_flights} + orphan_requests = [c.orphaned_request_ids for c in list(self._connections.values())] + return {'shutdown': self.is_shutdown, 'open_count': self.open_count, \ + 'in_flights': in_flights, 'orphan_requests': orphan_requests} @property def num_missing_or_needing_replacement(self): return self.host.sharding_info.shards_count \ - - sum(1 for c in self._connections.values() if not c.orphaned_threshold_reached) + - sum(1 for c in list(self._connections.values()) if not c.orphaned_threshold_reached) @property def open_count(self): @@ -840,360 +928,3 @@ def _excess_connection_limit(self): return self.host.sharding_info.shards_count * self.max_excess_connections_per_shard_multiplier -_MAX_SIMULTANEOUS_CREATION = 1 -_MIN_TRASH_INTERVAL = 10 - - -class HostConnectionPool(object): - """ - Used to pool connections to a host for v1 and v2 native protocol. 
- """ - - host = None - host_distance = None - - is_shutdown = False - open_count = 0 - _scheduled_for_creation = 0 - _next_trash_allowed_at = 0 - _keyspace = None - - def __init__(self, host, host_distance, session): - self.host = host - self.host_distance = host_distance - - self._session = weakref.proxy(session) - self._lock = RLock() - self._conn_available_condition = Condition() - - log.debug("Initializing new connection pool for host %s", self.host) - core_conns = session.cluster.get_core_connections_per_host(host_distance) - self._connections = [session.cluster.connection_factory(host.endpoint, owning_pool=self) - for i in range(core_conns)] - - self._keyspace = session.keyspace - if self._keyspace: - for conn in self._connections: - conn.set_keyspace_blocking(self._keyspace) - - self._trash = set() - self._next_trash_allowed_at = time.time() - self.open_count = core_conns - log.debug("Finished initializing new connection pool for host %s", self.host) - - def borrow_connection(self, timeout, routing_key=None): - if self.is_shutdown: - raise ConnectionException( - "Pool for %s is shutdown" % (self.host,), self.host) - - conns = self._connections - if not conns: - # handled specially just for simpler code - log.debug("Detected empty pool, opening core conns to %s", self.host) - core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) - with self._lock: - # we check the length of self._connections again - # along with self._scheduled_for_creation while holding the lock - # in case multiple threads hit this condition at the same time - to_create = core_conns - (len(self._connections) + self._scheduled_for_creation) - for i in range(to_create): - self._scheduled_for_creation += 1 - self._session.submit(self._create_new_connection) - - # in_flight is incremented by wait_for_conn - conn = self._wait_for_conn(timeout) - return conn - else: - # note: it would be nice to push changes to these config settings - # to pools instead of doing a new lookup on every - # borrow_connection() call - max_reqs = self._session.cluster.get_max_requests_per_connection(self.host_distance) - max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance) - - least_busy = min(conns, key=lambda c: c.in_flight) - request_id = None - # to avoid another thread closing this connection while - # trashing it (through the return_connection process), hold - # the connection lock from this point until we've incremented - # its in_flight count - need_to_wait = False - with least_busy.lock: - if least_busy.in_flight < least_busy.max_request_id: - least_busy.in_flight += 1 - request_id = least_busy.get_request_id() - else: - # once we release the lock, wait for another connection - need_to_wait = True - - if need_to_wait: - # wait_for_conn will increment in_flight on the conn - least_busy, request_id = self._wait_for_conn(timeout) - - # if we have too many requests on this connection but we still - # have space to open a new connection against this host, go ahead - # and schedule the creation of a new connection - if least_busy.in_flight >= max_reqs and len(self._connections) < max_conns: - self._maybe_spawn_new_connection() - - return least_busy, request_id - - def _maybe_spawn_new_connection(self): - with self._lock: - if self._scheduled_for_creation >= _MAX_SIMULTANEOUS_CREATION: - return - if self.open_count >= self._session.cluster.get_max_connections_per_host(self.host_distance): - return - self._scheduled_for_creation += 1 - - log.debug("Submitting task for creation of 
new Connection to %s", self.host) - self._session.submit(self._create_new_connection) - - def _create_new_connection(self): - try: - self._add_conn_if_under_max() - except (ConnectionException, socket.error) as exc: - log.warning("Failed to create new connection to %s: %s", self.host, exc) - except Exception: - log.exception("Unexpectedly failed to create new connection") - finally: - with self._lock: - self._scheduled_for_creation -= 1 - - def _add_conn_if_under_max(self): - max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance) - with self._lock: - if self.is_shutdown: - return True - - if self.open_count >= max_conns: - return True - - self.open_count += 1 - - log.debug("Going to open new connection to host %s", self.host) - try: - conn = self._session.cluster.connection_factory(self.host.endpoint, owning_pool=self) - if self._keyspace: - conn.set_keyspace_blocking(self._session.keyspace) - self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL - with self._lock: - new_connections = self._connections[:] + [conn] - self._connections = new_connections - log.debug("Added new connection (%s) to pool for host %s, signaling availablility", - id(conn), self.host) - self._signal_available_conn() - return True - except (ConnectionException, socket.error) as exc: - log.warning("Failed to add new connection to pool for host %s: %s", self.host, exc) - with self._lock: - self.open_count -= 1 - if self._session.cluster.signal_connection_failure(self.host, exc, is_host_addition=False): - self.shutdown() - return False - except AuthenticationFailed: - with self._lock: - self.open_count -= 1 - return False - - def _await_available_conn(self, timeout): - with self._conn_available_condition: - self._conn_available_condition.wait(timeout) - - def _signal_available_conn(self): - with self._conn_available_condition: - self._conn_available_condition.notify() - - def _signal_all_available_conn(self): - with self._conn_available_condition: - self._conn_available_condition.notify_all() - - def _wait_for_conn(self, timeout): - start = time.time() - remaining = timeout - - while remaining > 0: - # wait on our condition for the possibility that a connection - # is useable - self._await_available_conn(remaining) - - # self.shutdown() may trigger the above Condition - if self.is_shutdown: - raise ConnectionException("Pool is shutdown") - - conns = self._connections - if conns: - least_busy = min(conns, key=lambda c: c.in_flight) - with least_busy.lock: - if least_busy.in_flight < least_busy.max_request_id: - least_busy.in_flight += 1 - return least_busy, least_busy.get_request_id() - - remaining = timeout - (time.time() - start) - - raise NoConnectionsAvailable() - - def return_connection(self, connection, stream_was_orphaned=False): - with connection.lock: - if not stream_was_orphaned: - connection.in_flight -= 1 - in_flight = connection.in_flight - - if connection.is_defunct or connection.is_closed: - if not connection.signaled_error: - log.debug("Defunct or closed connection (%s) returned to pool, potentially " - "marking host %s as down", id(connection), self.host) - is_down = self._session.cluster.signal_connection_failure( - self.host, connection.last_error, is_host_addition=False) - connection.signaled_error = True - if is_down: - self.shutdown() - else: - self._replace(connection) - else: - if connection in self._trash: - with connection.lock: - if connection.in_flight == 0: - with self._lock: - if connection in self._trash: - self._trash.remove(connection) - 
log.debug("Closing trashed connection (%s) to %s", id(connection), self.host) - connection.close() - return - - core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) - min_reqs = self._session.cluster.get_min_requests_per_connection(self.host_distance) - # we can use in_flight here without holding the connection lock - # because the fact that in_flight dipped below the min at some - # point is enough to start the trashing procedure - if len(self._connections) > core_conns and in_flight <= min_reqs and \ - time.time() >= self._next_trash_allowed_at: - self._maybe_trash_connection(connection) - else: - self._signal_available_conn() - - def on_orphaned_stream_released(self): - """ - Called when a response for an orphaned stream (timed out on the client - side) was received. - """ - self._signal_available_conn() - - def _maybe_trash_connection(self, connection): - core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) - did_trash = False - with self._lock: - if connection not in self._connections: - return - - if self.open_count > core_conns: - did_trash = True - self.open_count -= 1 - new_connections = self._connections[:] - new_connections.remove(connection) - self._connections = new_connections - - with connection.lock: - if connection.in_flight == 0: - log.debug("Skipping trash and closing unused connection (%s) to %s", id(connection), self.host) - connection.close() - - # skip adding it to the trash if we're already closing it - return - - self._trash.add(connection) - - if did_trash: - self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL - log.debug("Trashed connection (%s) to %s", id(connection), self.host) - - def _replace(self, connection): - should_replace = False - with self._lock: - if connection in self._connections: - new_connections = self._connections[:] - new_connections.remove(connection) - self._connections = new_connections - self.open_count -= 1 - should_replace = True - - if should_replace: - log.debug("Replacing connection (%s) to %s", id(connection), self.host) - connection.close() - self._session.submit(self._retrying_replace) - else: - log.debug("Closing connection (%s) to %s", id(connection), self.host) - connection.close() - - def _retrying_replace(self): - replaced = False - try: - replaced = self._add_conn_if_under_max() - except Exception: - log.exception("Failed replacing connection to %s", self.host) - if not replaced: - log.debug("Failed replacing connection to %s. Retrying.", self.host) - self._session.submit(self._retrying_replace) - - def shutdown(self): - with self._lock: - if self.is_shutdown: - return - else: - self.is_shutdown = True - - self._signal_all_available_conn() - - connections_to_close = [] - with self._lock: - connections_to_close.extend(self._connections) - self.open_count -= len(self._connections) - self._connections.clear() - connections_to_close.extend(self._trash) - self._trash.clear() - - for conn in connections_to_close: - conn.close() - - def ensure_core_connections(self): - if self.is_shutdown: - return - - core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) - with self._lock: - to_create = core_conns - (len(self._connections) + self._scheduled_for_creation) - for i in range(to_create): - self._scheduled_for_creation += 1 - self._session.submit(self._create_new_connection) - - def _set_keyspace_for_all_conns(self, keyspace, callback): - """ - Asynchronously sets the keyspace for all connections. 
When all - connections have been set, `callback` will be called with two - arguments: this pool, and a list of any errors that occurred. - """ - remaining_callbacks = set(self._connections) - errors = [] - - if not remaining_callbacks: - callback(self, errors) - return - - def connection_finished_setting_keyspace(conn, error): - self.return_connection(conn) - remaining_callbacks.remove(conn) - if error: - errors.append(error) - - if not remaining_callbacks: - callback(self, errors) - - self._keyspace = keyspace - for conn in self._connections: - conn.set_keyspace_async(keyspace, connection_finished_setting_keyspace) - - def get_connections(self): - return self._connections - - def get_state(self): - in_flights = [c.in_flight for c in self._connections] - return {'shutdown': self.is_shutdown, 'open_count': self.open_count, 'in_flights': in_flights} diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ed92a76679..e574965de8 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -18,20 +18,15 @@ import socket from uuid import UUID -import six -from six.moves import range import io -from cassandra import ProtocolVersion +from cassandra import OperationType, ProtocolVersion from cassandra import type_codes, DriverException -from cassandra import (Unavailable, WriteTimeout, ReadTimeout, +from cassandra import (Unavailable, WriteTimeout, RateLimitReached, ReadTimeout, WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation, UserFunctionDescriptor, UserAggregateDescriptor, SchemaTargetType) -from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, - uint8_pack, int8_unpack, uint64_pack, header_pack, - v3_header_pack, uint32_pack, uint32_le_unpack, uint32_le_pack) from cassandra.cqltypes import (AsciiType, BytesType, BooleanType, CounterColumnType, DateType, DecimalType, DoubleType, FloatType, Int32Type, @@ -40,6 +35,10 @@ UTF8Type, VarcharType, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, TimeType, ByteType, ShortType, DurationType) +from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, + uint8_pack, int8_unpack, uint64_pack, + v3_header_pack, uint32_pack, uint32_le_unpack, uint32_le_pack) +from cassandra.policies import ColDesc from cassandra import WriteType from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from cassandra import util @@ -85,8 +84,7 @@ def __init__(cls, name, bases, dct): register_class(cls) -@six.add_metaclass(_RegisterMessageType) -class _MessageType(object): +class _MessageType(object, metaclass=_RegisterMessageType): tracing = False custom_payload = None @@ -126,18 +124,19 @@ def __init__(self, code, message, info): self.info = info @classmethod - def recv_body(cls, f, protocol_version, *args): + def recv_body(cls, f, protocol_version, protocol_features, *args): code = read_int(f) msg = read_string(f) - subcls = error_classes.get(code, cls) + if code == protocol_features.rate_limit_error: + subcls = RateLimitReachedException + else: + subcls = error_classes.get(code, cls) extra_info = subcls.recv_error_info(f, protocol_version) return subcls(code=code, message=msg, info=extra_info) def summary_msg(self): msg = 'Error from server: code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) - if six.PY2 and isinstance(msg, six.text_type): - msg = msg.encode('utf-8') return msg def __str__(self): @@ -158,8 +157,7 @@ def __init__(cls, name, bases, dct): error_classes[cls.error_code] = cls 
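For orientation (not part of the patch): once the RateLimitReachedException defined a few lines below is mapped in recv_body above, application code observes it as the cassandra.RateLimitReached exception imported at the top of this file. A hedged sketch of handling it, where session, stmt, params and log are assumed to exist:

    from cassandra import RateLimitReached

    try:
        session.execute(stmt, params)
    except RateLimitReached as exc:
        # the decoded info (op_type, rejected_by_coordinator) is passed to the
        # exception by RateLimitReachedException.to_exception() below
        log.warning("request rejected by the per-partition rate limit: %s", exc)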
-@six.add_metaclass(ErrorMessageSubclass) -class ErrorMessageSub(ErrorMessage): +class ErrorMessageSub(ErrorMessage, metaclass=ErrorMessageSubclass): error_code = None @@ -390,6 +388,19 @@ def recv_error_info(f, protocol_version): def to_exception(self): return AlreadyExists(**self.info) +class RateLimitReachedException(ConfigurationException): + summary= 'Rate limit was exceeded for a partition affected by the request' + error_code = 0x4321 + + @staticmethod + def recv_error_info(f, protocol_version): + return { + 'op_type': OperationType(read_byte(f)), + 'rejected_by_coordinator': read_byte(f) != 0 + } + + def to_exception(self): + return RateLimitReached(**self.info) class ClientWriteError(RequestExecutionException): summary = 'Client write failure.' @@ -542,7 +553,6 @@ def __init__(self, query_params, consistency_level, self.paging_state = paging_state self.timestamp = timestamp self.skip_meta = skip_meta - self.continuous_paging_options = continuous_paging_options self.keyspace = keyspace def _write_query_params(self, f, protocol_version): @@ -552,41 +562,17 @@ def _write_query_params(self, f, protocol_version): flags |= _VALUES_FLAG # also v2+, but we're only setting params internally right now if self.serial_consistency_level: - if protocol_version >= 2: - flags |= _WITH_SERIAL_CONSISTENCY_FLAG - else: - raise UnsupportedOperation( - "Serial consistency levels require the use of protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2 " - "to support serial consistency levels.") + flags |= _WITH_SERIAL_CONSISTENCY_FLAG if self.fetch_size: - if protocol_version >= 2: - flags |= _PAGE_SIZE_FLAG - else: - raise UnsupportedOperation( - "Automatic query paging may only be used with protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2.") + flags |= _PAGE_SIZE_FLAG if self.paging_state: - if protocol_version >= 2: - flags |= _WITH_PAGING_STATE_FLAG - else: - raise UnsupportedOperation( - "Automatic query paging may only be used with protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2.") + flags |= _WITH_PAGING_STATE_FLAG if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP_FLAG - if self.continuous_paging_options: - if ProtocolVersion.has_continuous_paging_support(protocol_version): - flags |= _PAGING_OPTIONS_FLAG - else: - raise UnsupportedOperation( - "Continuous paging may only be used with protocol version " - "ProtocolVersion.DSE_V1 or higher. 
Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V1.") - if self.keyspace is not None: if ProtocolVersion.uses_keyspace_flag(protocol_version): flags |= _WITH_KEYSPACE_FLAG @@ -614,14 +600,10 @@ def _write_query_params(self, f, protocol_version): write_long(f, self.timestamp) if self.keyspace is not None: write_string(f, self.keyspace) - if self.continuous_paging_options: - self._write_paging_options(f, self.continuous_paging_options, protocol_version) def _write_paging_options(self, f, paging_options, protocol_version): write_int(f, paging_options.max_pages) write_int(f, paging_options.max_pages_per_second) - if ProtocolVersion.has_continuous_paging_next_pages(protocol_version): - write_int(f, paging_options.max_queue_size) class QueryMessage(_QueryMessage): @@ -653,22 +635,7 @@ def __init__(self, query_id, query_params, consistency_level, paging_state, timestamp, skip_meta, continuous_paging_options) def _write_query_params(self, f, protocol_version): - if protocol_version == 1: - if self.serial_consistency_level: - raise UnsupportedOperation( - "Serial consistency levels require the use of protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2 " - "to support serial consistency levels.") - if self.fetch_size or self.paging_state: - raise UnsupportedOperation( - "Automatic query paging may only be used with protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2.") - write_short(f, len(self.query_params)) - for param in self.query_params: - write_value(f, param) - write_consistency_level(f, self.consistency_level) - else: - super(ExecuteMessage, self)._write_query_params(f, protocol_version) + super(ExecuteMessage, self)._write_query_params(f, protocol_version) def send_body(self, f, protocol_version): write_string(f, self.query_id) @@ -719,60 +686,69 @@ class ResultMessage(_MessageType): bind_metadata = None pk_indexes = None schema_change_event = None + is_lwt = False def __init__(self, kind): self.kind = kind - def recv(self, f, protocol_version, user_type_map, result_metadata): + def recv(self, f, protocol_version, protocol_features, user_type_map, result_metadata, column_encryption_policy): if self.kind == RESULT_KIND_VOID: return elif self.kind == RESULT_KIND_ROWS: - self.recv_results_rows(f, protocol_version, user_type_map, result_metadata) + self.recv_results_rows(f, protocol_version, user_type_map, result_metadata, column_encryption_policy) elif self.kind == RESULT_KIND_SET_KEYSPACE: self.new_keyspace = read_string(f) elif self.kind == RESULT_KIND_PREPARED: - self.recv_results_prepared(f, protocol_version, user_type_map) + self.recv_results_prepared(f, protocol_version, protocol_features, user_type_map) elif self.kind == RESULT_KIND_SCHEMA_CHANGE: self.recv_results_schema_change(f, protocol_version) else: raise DriverException("Unknown RESULT kind: %d" % self.kind) @classmethod - def recv_body(cls, f, protocol_version, user_type_map, result_metadata): + def recv_body(cls, f, protocol_version, protocol_features, user_type_map, result_metadata, column_encryption_policy): kind = read_int(f) msg = cls(kind) - msg.recv(f, protocol_version, user_type_map, result_metadata) + msg.recv(f, protocol_version, protocol_features, user_type_map, result_metadata, column_encryption_policy) return msg - def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): self.recv_results_metadata(f, 
user_type_map) column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) rows = [self.recv_row(f, len(column_metadata)) for _ in range(rowcount)] self.column_names = [c[2] for c in column_metadata] self.column_types = [c[3] for c in column_metadata] + col_descs = [ColDesc(md[0], md[1], md[2]) for md in column_metadata] + + def decode_val(val, col_md, col_desc): + uses_ce = column_encryption_policy and column_encryption_policy.contains_column(col_desc) + col_type = column_encryption_policy.column_type(col_desc) if uses_ce else col_md[3] + raw_bytes = column_encryption_policy.decrypt(col_desc, val) if uses_ce else val + return col_type.from_binary(raw_bytes, protocol_version) + + def decode_row(row): + return tuple(decode_val(val, col_md, col_desc) for val, col_md, col_desc in zip(row, column_metadata, col_descs)) + try: - self.parsed_rows = [ - tuple(ctype.from_binary(val, protocol_version) - for ctype, val in zip(self.column_types, row)) - for row in rows] + self.parsed_rows = [decode_row(row) for row in rows] except Exception: for row in rows: - for i in range(len(row)): + for val, col_md, col_desc in zip(row, column_metadata, col_descs): try: - self.column_types[i].from_binary(row[i], protocol_version) + decode_val(val, col_md, col_desc) except Exception as e: - raise DriverException('Failed decoding result column "%s" of type %s: %s' % (self.column_names[i], - self.column_types[i].cql_parameterized_type(), + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (col_md[2], + col_md[3].cql_parameterized_type(), str(e))) - def recv_results_prepared(self, f, protocol_version, user_type_map): + def recv_results_prepared(self, f, protocol_version, protocol_features, user_type_map): self.query_id = read_binary_string(f) if ProtocolVersion.uses_prepared_metadata(protocol_version): self.result_metadata_id = read_binary_string(f) else: self.result_metadata_id = None - self.recv_prepared_metadata(f, protocol_version, user_type_map) + self.recv_prepared_metadata(f, protocol_version, protocol_features, user_type_map) def recv_results_metadata(self, f, user_type_map): flags = read_int(f) @@ -810,8 +786,9 @@ def recv_results_metadata(self, f, user_type_map): self.column_metadata = column_metadata - def recv_prepared_metadata(self, f, protocol_version, user_type_map): + def recv_prepared_metadata(self, f, protocol_version, protocol_features, user_type_map): flags = read_int(f) + self.is_lwt = protocol_features.lwt_info.get_lwt_flag(flags) if protocol_features.lwt_info is not None else False colcount = read_int(f) pk_indexes = None if protocol_version >= 4: @@ -834,8 +811,7 @@ def recv_prepared_metadata(self, f, protocol_version, user_type_map): coltype = self.read_type(f, user_type_map) bind_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) - if protocol_version >= 2: - self.recv_results_metadata(f, user_type_map) + self.recv_results_metadata(f, user_type_map) self.bind_metadata = bind_metadata self.pk_indexes = pk_indexes @@ -950,39 +926,38 @@ def send_body(self, f, protocol_version): write_value(f, param) write_consistency_level(f, self.consistency_level) - if protocol_version >= 3: - flags = 0 - if self.serial_consistency_level: - flags |= _WITH_SERIAL_CONSISTENCY_FLAG - if self.timestamp is not None: - flags |= _PROTOCOL_TIMESTAMP_FLAG - if self.keyspace: - if ProtocolVersion.uses_keyspace_flag(protocol_version): - flags |= _WITH_KEYSPACE_FLAG - else: - raise UnsupportedOperation( - "Keyspaces may only be set on queries with 
protocol version " - "5 or higher. Consider setting Cluster.protocol_version to 5.") - - if ProtocolVersion.uses_int_query_flags(protocol_version): - write_int(f, flags) + flags = 0 + if self.serial_consistency_level: + flags |= _WITH_SERIAL_CONSISTENCY_FLAG + if self.timestamp is not None: + flags |= _PROTOCOL_TIMESTAMP_FLAG + if self.keyspace: + if ProtocolVersion.uses_keyspace_flag(protocol_version): + flags |= _WITH_KEYSPACE_FLAG else: - write_byte(f, flags) + raise UnsupportedOperation( + "Keyspaces may only be set on queries with protocol version " + "5 or higher. Consider setting Cluster.protocol_version to 5.") + if ProtocolVersion.uses_int_query_flags(protocol_version): + write_int(f, flags) + else: + write_byte(f, flags) - if self.serial_consistency_level: - write_consistency_level(f, self.serial_consistency_level) - if self.timestamp is not None: - write_long(f, self.timestamp) + if self.serial_consistency_level: + write_consistency_level(f, self.serial_consistency_level) + if self.timestamp is not None: + write_long(f, self.timestamp) - if ProtocolVersion.uses_keyspace_flag(protocol_version): - if self.keyspace is not None: - write_string(f, self.keyspace) + if ProtocolVersion.uses_keyspace_flag(protocol_version): + if self.keyspace is not None: + write_string(f, self.keyspace) known_event_types = frozenset(( 'TOPOLOGY_CHANGE', 'STATUS_CHANGE', - 'SCHEMA_CHANGE' + 'SCHEMA_CHANGE', + 'CLIENT_ROUTES_CHANGE' )) @@ -1013,6 +988,14 @@ def recv_body(cls, f, protocol_version, *args): return cls(event_type=event_type, event_args=read_method(f, protocol_version)) raise NotSupportedError('Unknown event type %r' % event_type) + @classmethod + def recv_client_routes_change(cls, f, protocol_version): + # "UPDATE_NODES" + change_type = read_string(f) + connection_ids = read_stringlist(f) + host_ids = read_stringlist(f) + return dict(change_type=change_type, connection_ids=connection_ids, host_ids=host_ids) + @classmethod def recv_topology_change(cls, f, protocol_version): # "NEW_NODE" or "REMOVED_NODE" @@ -1031,25 +1014,17 @@ def recv_status_change(cls, f, protocol_version): def recv_schema_change(cls, f, protocol_version): # "CREATED", "DROPPED", or "UPDATED" change_type = read_string(f) - if protocol_version >= 3: - target = read_string(f) - keyspace = read_string(f) - event = {'target_type': target, 'change_type': change_type, 'keyspace': keyspace} - if target != SchemaTargetType.KEYSPACE: - target_name = read_string(f) - if target == SchemaTargetType.FUNCTION: - event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) - elif target == SchemaTargetType.AGGREGATE: - event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) - else: - event[target.lower()] = target_name - else: - keyspace = read_string(f) - table = read_string(f) - if table: - event = {'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': keyspace, 'table': table} + target = read_string(f) + keyspace = read_string(f) + event = {'target_type': target, 'change_type': change_type, 'keyspace': keyspace} + if target != SchemaTargetType.KEYSPACE: + target_name = read_string(f) + if target == SchemaTargetType.FUNCTION: + event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) + elif target == SchemaTargetType.AGGREGATE: + event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) else: - event = {'target_type': 
SchemaTargetType.KEYSPACE, 'change_type': change_type, 'keyspace': keyspace} + event[target.lower()] = target_name return event @@ -1073,12 +1048,9 @@ def send_body(self, f, protocol_version): if self.op_type == ReviseRequestMessage.RevisionType.PAGING_BACKPRESSURE: if self.next_pages <= 0: raise UnsupportedOperation("Continuous paging backpressure requires next_pages > 0") - elif not ProtocolVersion.has_continuous_paging_next_pages(protocol_version): - raise UnsupportedOperation( - "Continuous paging backpressure may only be used with protocol version " - "ProtocolVersion.DSE_V2 or higher. Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V2.") else: - write_int(f, self.next_pages) + raise UnsupportedOperation( + "Continuous paging backpressure is not supported.") class _ProtocolHandler(object): @@ -1099,6 +1071,9 @@ class _ProtocolHandler(object): result decoding implementations. """ + column_encryption_policy = None + """Instance of :class:`cassandra.policies.ColumnEncryptionPolicy` in use by this handler""" + @classmethod def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version): """ @@ -1142,12 +1117,11 @@ def _write_header(f, version, flags, stream_id, opcode, length): """ Write a CQL protocol frame header. """ - pack = v3_header_pack if version >= 3 else header_pack - f.write(pack(version, flags, stream_id, opcode)) + f.write(v3_header_pack(version, flags, stream_id, opcode)) write_int(f, length) @classmethod - def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body, + def decode_message(cls, protocol_version, protocol_features, user_type_map, stream_id, flags, opcode, body, decompressor, result_metadata): """ Decodes a native protocol message body @@ -1193,7 +1167,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod log.warning("Unknown protocol flags set: %02x. 
May cause problems.", flags) msg_class = cls.message_types_by_opcode[opcode] - msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata) + msg = msg_class.recv_body(body, protocol_version, protocol_features, user_type_map, result_metadata, cls.column_encryption_policy) msg.stream_id = stream_id msg.trace_id = trace_id msg.custom_payload = custom_payload @@ -1346,7 +1320,7 @@ def read_binary_string(f): def write_string(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_short(f, len(s)) f.write(s) @@ -1363,7 +1337,7 @@ def read_longstring(f): def write_longstring(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_int(f, len(s)) f.write(s) diff --git a/cassandra/protocol_features.py b/cassandra/protocol_features.py new file mode 100644 index 0000000000..877998be7d --- /dev/null +++ b/cassandra/protocol_features.py @@ -0,0 +1,97 @@ +import logging + +from cassandra.shard_info import _ShardingInfo +from cassandra.lwt_info import _LwtInfo + +log = logging.getLogger(__name__) + + +LWT_ADD_METADATA_MARK = "SCYLLA_LWT_ADD_METADATA_MARK" +LWT_OPTIMIZATION_META_BIT_MASK = "LWT_OPTIMIZATION_META_BIT_MASK" +RATE_LIMIT_ERROR_EXTENSION = "SCYLLA_RATE_LIMIT_ERROR" +TABLETS_ROUTING_V1 = "TABLETS_ROUTING_V1" + +class ProtocolFeatures(object): + rate_limit_error = None + shard_id = 0 + sharding_info = None + tablets_routing_v1 = False + lwt_info = None + + def __init__(self, rate_limit_error=None, shard_id=0, sharding_info=None, tablets_routing_v1=False, lwt_info=None): + self.rate_limit_error = rate_limit_error + self.shard_id = shard_id + self.sharding_info = sharding_info + self.tablets_routing_v1 = tablets_routing_v1 + self.lwt_info = lwt_info + + @staticmethod + def parse_from_supported(supported): + rate_limit_error = ProtocolFeatures.maybe_parse_rate_limit_error(supported) + shard_id, sharding_info = ProtocolFeatures.parse_sharding_info(supported) + tablets_routing_v1 = ProtocolFeatures.parse_tablets_info(supported) + lwt_info = ProtocolFeatures.parse_lwt_info(supported) + return ProtocolFeatures(rate_limit_error, shard_id, sharding_info, tablets_routing_v1, lwt_info) + + @staticmethod + def maybe_parse_rate_limit_error(supported): + vals = supported.get(RATE_LIMIT_ERROR_EXTENSION) + if vals is not None: + code_str = ProtocolFeatures.get_cql_extension_field(vals, "ERROR_CODE") + return int(code_str) + + # Looks up a field which starts with `key=` and returns the rest + @staticmethod + def get_cql_extension_field(vals, key): + for v in vals: + stripped_v = v.strip() + if stripped_v.startswith(key) and stripped_v[len(key)] == '=': + result = stripped_v[len(key) + 1:] + return result + return None + + def add_startup_options(self, options): + if self.rate_limit_error is not None: + options[RATE_LIMIT_ERROR_EXTENSION] = "" + if self.tablets_routing_v1: + options[TABLETS_ROUTING_V1] = "" + if self.lwt_info is not None: + options[LWT_ADD_METADATA_MARK] = str(self.lwt_info.lwt_meta_bit_mask) + + @staticmethod + def parse_sharding_info(options): + shard_id = options.get('SCYLLA_SHARD', [''])[0] or None + shards_count = options.get('SCYLLA_NR_SHARDS', [''])[0] or None + partitioner = options.get('SCYLLA_PARTITIONER', [''])[0] or None + sharding_algorithm = options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None + sharding_ignore_msb = options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None + shard_aware_port = options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None + shard_aware_port_ssl = 
options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None + log.debug("Parsing sharding info from message options %s", options) + + if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or + sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): + return 0, None + + return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, + shard_aware_port, shard_aware_port_ssl) + + + @staticmethod + def parse_tablets_info(options): + return TABLETS_ROUTING_V1 in options + + @staticmethod + def parse_lwt_info(options): + value_list = options.get(LWT_ADD_METADATA_MARK, [None]) + for value in value_list: + if value is None or not value.startswith(LWT_OPTIMIZATION_META_BIT_MASK + "="): + continue + try: + lwt_meta_bit_mask = int(value[len(LWT_OPTIMIZATION_META_BIT_MASK + "="):]) + return _LwtInfo(lwt_meta_bit_mask) + except Exception as e: + log.exception(f"Error while parsing {LWT_ADD_METADATA_MARK}: {e}") + return None + + return None diff --git a/cassandra/query.py b/cassandra/query.py index 0e7a41dc2d..6c6878fdb4 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -19,18 +19,17 @@ """ from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import re import struct import time -import six -from six.moves import range, zip import warnings from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.util import unix_time_from_uuid1 +from cassandra.util import unix_time_from_uuid1, maybe_add_timeout_to_query from cassandra.encoder import Encoder import cassandra.encoder +from cassandra.policies import ColDesc from cassandra.protocol import _UNSET_VALUE from cassandra.util import OrderedDict, _sanitize_identifiers @@ -76,7 +75,7 @@ def tuple_factory(colnames, rows): >>> session = cluster.connect('mykeyspace') >>> session.row_factory = tuple_factory >>> rows = session.execute("SELECT name, age FROM users LIMIT 1") - >>> print rows[0] + >>> print(rows[0]) ('Bob', 42) .. versionchanged:: 2.0.0 @@ -132,16 +131,16 @@ def named_tuple_factory(colnames, rows): >>> user = rows[0] >>> # you can access field by their name: - >>> print "name: %s, age: %d" % (user.name, user.age) + >>> print("name: %s, age: %d" % (user.name, user.age)) name: Bob, age: 42 >>> # or you can access fields by their position (like a tuple) >>> name, age = user - >>> print "name: %s, age: %d" % (name, age) + >>> print("name: %s, age: %d" % (name, age)) name: Bob, age: 42 >>> name = user[0] >>> age = user[1] - >>> print "name: %s, age: %d" % (name, age) + >>> print("name: %s, age: %d" % (name, age)) name: Bob, age: 42 .. versionchanged:: 2.0.0 @@ -187,7 +186,7 @@ def dict_factory(colnames, rows): >>> session = cluster.connect('mykeyspace') >>> session.row_factory = dict_factory >>> rows = session.execute("SELECT name, age FROM users LIMIT 1") - >>> print rows[0] + >>> print(rows[0]) {u'age': 42, u'name': u'Bob'} .. versionchanged:: 2.0.0 @@ -253,6 +252,13 @@ class Statement(object): .. versionadded:: 2.1.3 """ + table = None + """ + The string name of the table this query acts on. This is used when the tablet + feature is enabled and in the same time :class`~.TokenAwarePolicy` is configured + in the profile load balancing policy. + """ + custom_payload = None """ :ref:`custom_payload` to be passed to the server. 
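The new `table` attribute added to `Statement` above mirrors `keyspace`: when the cluster reports tablet support and the execution profile uses `TokenAwarePolicy`, the driver needs the (keyspace, table) pair to look up tablet replicas. Bound statements pick the table up from their prepared column metadata automatically; for a hand-built statement it can be set on the instance. A minimal illustrative sketch, not taken from this patch, with placeholder keyspace/table names:

    from cassandra.query import SimpleStatement

    # keyspace is passed as before; `table` is the attribute introduced by this
    # patch and is only consulted when tablet-aware routing is in effect.
    stmt = SimpleStatement("SELECT * FROM users WHERE id = %s", keyspace="app")
    stmt.table = "users"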
@@ -272,7 +278,7 @@ class Statement(object): def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None, - is_idempotent=False): + is_idempotent=False, table=None): if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') if retry_policy is not None: @@ -286,6 +292,8 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, self.fetch_size = fetch_size if keyspace is not None: self.keyspace = keyspace + if table is not None: + self.table = table if custom_payload is not None: self.custom_payload = custom_payload self.is_idempotent = is_idempotent @@ -337,6 +345,9 @@ def _set_serial_consistency_level(self, serial_consistency_level): def _del_serial_consistency_level(self): self._serial_consistency_level = None + def is_lwt(self): + return False + serial_consistency_level = property( _get_serial_consistency_level, _set_serial_consistency_level, @@ -442,12 +453,15 @@ class PreparedStatement(object): query_string = None result_metadata = None result_metadata_id = None + column_encryption_policy = None routing_key_indexes = None _routing_key_index_set = None serial_consistency_level = None # TODO never used? + _is_lwt = False def __init__(self, column_metadata, query_id, routing_key_indexes, query, - keyspace, protocol_version, result_metadata, result_metadata_id): + keyspace, protocol_version, result_metadata, result_metadata_id, + is_lwt=False, column_encryption_policy=None): self.column_metadata = column_metadata self.query_id = query_id self.routing_key_indexes = routing_key_indexes @@ -456,14 +470,18 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, self.protocol_version = protocol_version self.result_metadata = result_metadata self.result_metadata_id = result_metadata_id + self.column_encryption_policy = column_encryption_policy self.is_idempotent = False + self._is_lwt = is_lwt @classmethod def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version, result_metadata, - result_metadata_id): + result_metadata_id, is_lwt, column_encryption_policy=None): if not column_metadata: - return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version, result_metadata, result_metadata_id) + return PreparedStatement(column_metadata, query_id, None, + query, prepared_keyspace, protocol_version, result_metadata, + result_metadata_id, is_lwt, column_encryption_policy) if pk_indexes: routing_key_indexes = pk_indexes @@ -489,7 +507,7 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, return PreparedStatement(column_metadata, query_id, routing_key_indexes, query, prepared_keyspace, protocol_version, result_metadata, - result_metadata_id) + result_metadata_id, is_lwt, column_encryption_policy) def bind(self, values): """ @@ -504,6 +522,9 @@ def is_routing_key_index(self, i): self._routing_key_index_set = set(self.routing_key_indexes) if self.routing_key_indexes else set() return i in self._routing_key_index_set + def is_lwt(self): + return self._is_lwt + def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % @@ -548,6 +569,7 @@ def __init__(self, prepared_statement, retry_policy=None, 
consistency_level=None meta = prepared_statement.column_metadata if meta: self.keyspace = meta[0].keyspace_name + self.table = meta[0].table_name Statement.__init__(self, retry_policy, consistency_level, routing_key, serial_consistency_level, fetch_size, keyspace, custom_payload, @@ -577,6 +599,7 @@ def bind(self, values): values = () proto_version = self.prepared_statement.protocol_version col_meta = self.prepared_statement.column_metadata + ce_policy = self.prepared_statement.column_encryption_policy # special case for binding dicts if isinstance(values, dict): @@ -623,7 +646,13 @@ def bind(self, values): raise ValueError("Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)" % proto_version) else: try: - self.values.append(col_spec.type.serialize(value, proto_version)) + col_desc = ColDesc(col_spec.keyspace_name, col_spec.table_name, col_spec.name) + uses_ce = ce_policy and ce_policy.contains_column(col_desc) + col_type = ce_policy.column_type(col_desc) if uses_ce else col_spec.type + col_bytes = col_type.serialize(value, proto_version) + if uses_ce: + col_bytes = ce_policy.encrypt(col_desc, col_bytes) + self.values.append(col_bytes) except (TypeError, struct.error) as exc: actual_type = type(value) message = ('Received an argument of invalid type for column "%s". ' @@ -661,6 +690,9 @@ def routing_key(self): return self._routing_key + def is_lwt(self): + return self.prepared_statement.is_lwt() + def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % @@ -729,6 +761,7 @@ class BatchStatement(Statement): _statements_and_parameters = None _session = None + _is_lwt = False def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, consistency_level=None, serial_consistency_level=None, @@ -804,7 +837,7 @@ def add(self, statement, parameters=None): Like with other statements, parameters must be a sequence, even if there is only one item. 
""" - if isinstance(statement, six.string_types): + if isinstance(statement, str): if parameters: encoder = Encoder() if self._session is None else self._session.encoder statement = bind_params(statement, parameters, encoder) @@ -813,6 +846,8 @@ def add(self, statement, parameters=None): query_id = statement.query_id bound_statement = statement.bind(() if parameters is None else parameters) self._update_state(bound_statement) + if statement.is_lwt(): + self._is_lwt = True self._add_statement_and_params(True, query_id, bound_statement.values) elif isinstance(statement, BoundStatement): if parameters: @@ -820,6 +855,8 @@ def add(self, statement, parameters=None): "Parameters cannot be passed with a BoundStatement " "to BatchStatement.add()") self._update_state(statement) + if statement.is_lwt(): + self._is_lwt = True self._add_statement_and_params(True, statement.prepared_statement.query_id, statement.values) else: # it must be a SimpleStatement @@ -828,6 +865,8 @@ def add(self, statement, parameters=None): encoder = Encoder() if self._session is None else self._session.encoder query_string = bind_params(query_string, parameters, encoder) self._update_state(statement) + if statement.is_lwt(): + self._is_lwt = True self._add_statement_and_params(False, query_string, ()) return self @@ -861,6 +900,9 @@ def _update_state(self, statement): self._maybe_set_routing_attributes(statement) self._update_custom_payload(statement) + def is_lwt(self): + return self._is_lwt + def __len__(self): return len(self._statements_and_parameters) @@ -888,10 +930,8 @@ def __str__(self): def bind_params(query, params, encoder): - if six.PY2 and isinstance(query, six.text_type): - query = query.encode('utf-8') if isinstance(params, dict): - return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in six.iteritems(params)) + return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in params.items()) else: return query % tuple(encoder.cql_encode_all_types(v) for v in params) @@ -992,11 +1032,13 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): "Trace information was not available within %f seconds. Consider raising Session.max_trace_wait." 
% (max_wait,)) log.debug("Attempting to fetch trace info for trace ID: %s", self.trace_id) + metadata_request_timeout = self._session.cluster.control_connection and self._session.cluster.control_connection._metadata_request_timeout session_results = self._execute( - SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) + SimpleStatement(maybe_add_timeout_to_query(self._SELECT_SESSIONS_FORMAT, metadata_request_timeout), consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) # PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries - is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None + session_row = session_results.one() if session_results else None + is_complete = session_row is not None and session_row.duration is not None and session_row.started_at is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 @@ -1006,7 +1048,6 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): else: log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) - session_row = session_results[0] self.request_type = session_row.request self.duration = timedelta(microseconds=session_row.duration) if is_complete else None self.started_at = session_row.started_at @@ -1018,7 +1059,11 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id) time_spent = time.time() - start event_results = self._execute( - SimpleStatement(self._SELECT_EVENTS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) + SimpleStatement(maybe_add_timeout_to_query(self._SELECT_EVENTS_FORMAT, metadata_request_timeout), + consistency_level=query_cl), + (self.trace_id,), + time_spent, + max_wait) log.debug("Fetched trace events for trace ID: %s", self.trace_id) self.events = tuple(TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread) for r in event_results) @@ -1076,7 +1121,7 @@ class TraceEvent(object): def __init__(self, description, timeuuid, source, source_elapsed, thread_name): self.description = description - self.datetime = datetime.utcfromtimestamp(unix_time_from_uuid1(timeuuid)) + self.datetime = datetime.fromtimestamp(unix_time_from_uuid1(timeuuid), tz=timezone.utc) self.source = source if source_elapsed is not None: self.source_elapsed = timedelta(microseconds=source_elapsed) diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 3a4b2f4604..88277a4593 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -13,13 +13,14 @@ # limitations under the License. from cassandra.parsing cimport ParseDesc, ColumnParser +from cassandra.policies import ColDesc from cassandra.obj_parser import TupleRowParser from cassandra.deserializers import make_deserializers include "ioutils.pyx" def make_recv_results_rows(ColumnParser colparser): - def recv_results_rows(self, f, int protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, int protocol_version, user_type_map, result_metadata, column_encryption_policy): """ Parse protocol data given as a BytesIO f into a set of columns (e.g. 
list of tuples) This is used as the recv_results_rows method of (Fast)ResultMessage @@ -28,11 +29,12 @@ def make_recv_results_rows(ColumnParser colparser): column_metadata = self.column_metadata or result_metadata - self.column_names = [c[2] for c in column_metadata] - self.column_types = [c[3] for c in column_metadata] + self.column_names = [md[2] for md in column_metadata] + self.column_types = [md[3] for md in column_metadata] - desc = ParseDesc(self.column_names, self.column_types, make_deserializers(self.column_types), - protocol_version) + desc = ParseDesc(self.column_names, self.column_types, column_encryption_policy, + [ColDesc(md[0], md[1], md[2]) for md in column_metadata], + make_deserializers(self.column_types), protocol_version) reader = BytesIOReader(f.read()) try: self.parsed_rows = colparser.parse_rows(reader, desc) diff --git a/tests/unit/advanced/cloud/__init__.py b/cassandra/scylla/__init__.py similarity index 100% rename from tests/unit/advanced/cloud/__init__.py rename to cassandra/scylla/__init__.py diff --git a/cassandra/segment.py b/cassandra/segment.py index e3881c4402..78161fe520 100644 --- a/cassandra/segment.py +++ b/cassandra/segment.py @@ -13,7 +13,6 @@ # limitations under the License. import zlib -import six from cassandra import DriverException from cassandra.marshal import int32_pack @@ -54,9 +53,6 @@ def compute_crc24(data, length): def compute_crc32(data, value): crc32 = zlib.crc32(data, value) - if six.PY2: - crc32 &= 0xffffffff - return crc32 diff --git a/cassandra/shard_info.py b/cassandra/shard_info.py index 6bd56fa796..8f62252193 100644 --- a/cassandra/shard_info.py +++ b/cassandra/shard_info.py @@ -20,26 +20,13 @@ class _ShardingInfo(object): - def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb): + def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, shard_aware_port, shard_aware_port_ssl): self.shards_count = int(shards_count) self.partitioner = partitioner self.sharding_algorithm = sharding_algorithm self.sharding_ignore_msb = int(sharding_ignore_msb) - - @staticmethod - def parse_sharding_info(message): - shard_id = message.options.get('SCYLLA_SHARD', [''])[0] or None - shards_count = message.options.get('SCYLLA_NR_SHARDS', [''])[0] or None - partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None - sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None - sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None - log.debug("Parsing sharding info from message options %s", message.options) - - if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or - sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): - return 0, None - - return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb) + self.shard_aware_port = int(shard_aware_port) if shard_aware_port else None + self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else None def shard_id_from_token(self, token): """ diff --git a/cassandra/tablets.py b/cassandra/tablets.py new file mode 100644 index 0000000000..dca26ab0df --- /dev/null +++ b/cassandra/tablets.py @@ -0,0 +1,146 @@ +from threading import Lock +from typing import Optional +from uuid import UUID + + +class Tablet(object): + """ + Represents a single ScyllaDB tablet. 
+ It stores information about each replica, its host and shard, + and the token interval in the format (first_token, last_token]. + """ + first_token = 0 + last_token = 0 + replicas = None + + def __init__(self, first_token=0, last_token=0, replicas=None): + self.first_token = first_token + self.last_token = last_token + self.replicas = replicas + + def __str__(self): + return "" \ + % (self.first_token, self.last_token, self.replicas) + __repr__ = __str__ + + @staticmethod + def _is_valid_tablet(replicas): + return replicas is not None and len(replicas) != 0 + + @staticmethod + def from_row(first_token, last_token, replicas): + if Tablet._is_valid_tablet(replicas): + tablet = Tablet(first_token, last_token, replicas) + return tablet + return None + + def replica_contains_host_id(self, uuid: UUID) -> bool: + for replica in self.replicas: + if replica[0] == uuid: + return True + return False + + +class Tablets(object): + _lock = None + _tablets = {} + + def __init__(self, tablets): + self._tablets = tablets + self._lock = Lock() + + def table_has_tablets(self, keyspace, table) -> bool: + return bool(self._tablets.get((keyspace, table), [])) + + def get_tablet_for_key(self, keyspace, table, t): + tablet = self._tablets.get((keyspace, table), []) + if not tablet: + return None + + id = bisect_left(tablet, t.value, key=lambda tablet: tablet.last_token) + if id < len(tablet) and t.value > tablet[id].first_token: + return tablet[id] + return None + + def drop_tablets(self, keyspace: str, table: Optional[str] = None): + with self._lock: + if table is not None: + self._tablets.pop((keyspace, table), None) + return + + to_be_deleted = [] + for key in self._tablets.keys(): + if key[0] == keyspace: + to_be_deleted.append(key) + + for key in to_be_deleted: + del self._tablets[key] + + def drop_tablets_by_host_id(self, host_id: Optional[UUID]): + if host_id is None: + return + with self._lock: + for key, tablets in self._tablets.items(): + to_be_deleted = [] + for tablet_id, tablet in enumerate(tablets): + if tablet.replica_contains_host_id(host_id): + to_be_deleted.append(tablet_id) + + for tablet_id in reversed(to_be_deleted): + tablets.pop(tablet_id) + + def add_tablet(self, keyspace, table, tablet): + with self._lock: + tablets_for_table = self._tablets.setdefault((keyspace, table), []) + + # find first overlapping range + start = bisect_left(tablets_for_table, tablet.first_token, key=lambda t: t.first_token) + if start > 0 and tablets_for_table[start - 1].last_token > tablet.first_token: + start = start - 1 + + # find last overlapping range + end = bisect_left(tablets_for_table, tablet.last_token, key=lambda t: t.last_token) + if end < len(tablets_for_table) and tablets_for_table[end].first_token >= tablet.last_token: + end = end - 1 + + if start <= end: + del tablets_for_table[start:end + 1] + + tablets_for_table.insert(start, tablet) + + +# bisect.bisect_left implementation from Python 3.11, needed untill support for +# Python < 3.10 is dropped, it is needed to use `key` to extract last_token from +# Tablet list - better solution performance-wise than materialize list of last_tokens +def bisect_left(a, x, lo=0, hi=None, *, key=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will + insert just before the leftmost x already there. 
+ + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + # Note, the comparison uses "<" to match the + # __lt__() logic in list.sort() and in heapq. + if key is None: + while lo < hi: + mid = (lo + hi) // 2 + if a[mid] < x: + lo = mid + 1 + else: + hi = mid + return + while lo < hi: + mid = (lo + hi) // 2 + if key(a[mid]) < x: + lo = mid + 1 + else: + hi = mid + return lo diff --git a/cassandra/util.py b/cassandra/util.py index f896ff4f86..12886d05ab 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -12,17 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import with_statement +from _weakref import ref import calendar +from collections import OrderedDict +from collections.abc import Mapping import datetime from functools import total_ordering -import logging from itertools import chain +import keyword +import logging +import pickle import random import re -import six -import uuid +import socket import sys +import time +import uuid +from typing import Optional _HAS_GEOMET = True try: @@ -33,8 +39,8 @@ from cassandra import DriverException -DATETIME_EPOC = datetime.datetime(1970, 1, 1) -UTC_DATETIME_EPOC = datetime.datetime.utcfromtimestamp(0) +DATETIME_EPOC = datetime.datetime(1970, 1, 1).replace(tzinfo=None) +UTC_DATETIME_EPOC = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) _nan = float('nan') @@ -212,147 +218,6 @@ def _resolve_contact_points_to_string_map(contact_points): ) -try: - from collections import OrderedDict -except ImportError: - # OrderedDict from Python 2.7+ - - # Copyright (c) 2009 Raymond Hettinger - # - # Permission is hereby granted, free of charge, to any person - # obtaining a copy of this software and associated documentation files - # (the "Software"), to deal in the Software without restriction, - # including without limitation the rights to use, copy, modify, merge, - # publish, distribute, sublicense, and/or sell copies of the Software, - # and to permit persons to whom the Software is furnished to do so, - # subject to the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - # OTHER DEALINGS IN THE SOFTWARE. - from UserDict import DictMixin - - class OrderedDict(dict, DictMixin): # noqa - """ A dictionary which maintains the insertion order of keys. """ - - def __init__(self, *args, **kwds): - """ A dictionary which maintains the insertion order of keys. 
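The `bisect_left` backport above (copied from Python 3.11) exists so the tablet map can binary-search a list of `Tablet` objects by `last_token` via the `key=` argument instead of materializing a separate token list; on Python 3.10+ the stdlib function behaves the same way. A standalone sketch of the lookup shape used by `Tablets.get_tablet_for_key`, with made-up token values (not part of the patch):

    from bisect import bisect_left  # stdlib accepts key= from Python 3.10

    # Tablets own the interval (first_token, last_token] and are kept sorted by
    # last_token, so the candidate owner is the first tablet whose
    # last_token >= token; it matches only if the token is past its first_token.
    tablets = [(-100, -1), (-1, 50), (50, 200)]           # (first_token, last_token]
    token = 42
    i = bisect_left(tablets, token, key=lambda t: t[1])   # search by last_token
    owner = tablets[i] if i < len(tablets) and token > tablets[i][0] else None
    print(owner)                                           # (-1, 50)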
""" - - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = next(reversed(self)) - else: - key = next(iter(self)) - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - -# WeakSet from Python 2.7+ (https://code.google.com/p/weakrefset) - -from _weakref import ref - - class _IterationGuard(object): # This context manager registers itself in the current iterators of the # weak container, such as to delay all removals until the context manager @@ -789,15 +654,11 @@ def _find_insertion(self, x): sortedset = SortedSet # backwards-compatibility -from cassandra.compat import Mapping -from six.moves import cPickle - - class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. It also maintains the insertion order of items, behaving as OrderedDict in that regard. 
These maps - are constructed and read just as normal mapping types, exept that they may + are constructed and read just as normal mapping types, except that they may contain arbitrary collections and other non-hashable items as keys:: >>> od = OrderedMap([({'one': 1, 'two': 2}, 'value'), @@ -835,7 +696,7 @@ def __init__(self, *args, **kwargs): for k, v in e: self._insert(k, v) - for k, v in six.iteritems(kwargs): + for k, v in kwargs.items(): self._insert(k, v) def _insert(self, key, value): @@ -901,7 +762,7 @@ def popitem(self): raise KeyError() def _serialize_key(self, key): - return cPickle.dumps(key) + return pickle.dumps(key) class OrderedMapSerializedKey(OrderedMap): @@ -919,13 +780,6 @@ def _serialize_key(self, key): return self.cass_key_type.serialize(key, self.protocol_version) -import datetime -import time - -if six.PY3: - long = int - - @total_ordering class Time(object): ''' @@ -951,11 +805,11 @@ def __init__(self, value): - datetime.time: built-in time - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self._from_timestamp(value) elif isinstance(value, datetime.time): self._from_time(value) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_timestring(value) else: raise TypeError('Time arguments must be a whole number, datetime.time, or string') @@ -1031,7 +885,7 @@ def __eq__(self, other): if isinstance(other, Time): return self.nanosecond_time == other.nanosecond_time - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.nanosecond_time == other return self.nanosecond_time % Time.MICRO == 0 and \ @@ -1080,11 +934,11 @@ def __init__(self, value): - datetime.date: built-in date - string_type: a string time of the form "yyyy-mm-dd" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self.days_from_epoch = value elif isinstance(value, (datetime.date, datetime.datetime)): self._from_timetuple(value.timetuple()) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_datestring(value) else: raise TypeError('Date arguments must be a whole number, datetime.date, or string') @@ -1124,7 +978,7 @@ def __eq__(self, other): if isinstance(other, Date): return self.days_from_epoch == other.days_from_epoch - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.days_from_epoch == other try: @@ -1151,97 +1005,9 @@ def __str__(self): # If we overflow datetime.[MIN|MAX] return str(self.days_from_epoch) -import socket -if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton - inet_ntop = socket.inet_ntop -else: - """ - Windows doesn't have socket.inet_pton and socket.inet_ntop until Python 3.4 - This is an alternative impl using ctypes, based on this win_inet_pton project: - https://github.com/hickeroar/win_inet_pton - """ - import ctypes - - class sockaddr(ctypes.Structure): - """ - Shared struct for ipv4 and ipv6. - https://msdn.microsoft.com/en-us/library/windows/desktop/ms740496(v=vs.85).aspx - - ``__pad1`` always covers the port. - - When being used for ``sockaddr_in6``, ``ipv4_addr`` actually covers ``sin6_flowinfo``, resulting - in proper alignment for ``ipv6_addr``. 
- """ - _fields_ = [("sa_family", ctypes.c_short), - ("__pad1", ctypes.c_ushort), - ("ipv4_addr", ctypes.c_byte * 4), - ("ipv6_addr", ctypes.c_byte * 16), - ("__pad2", ctypes.c_ulong)] - - if hasattr(ctypes, 'windll'): - WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA - WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA - else: - def not_windows(*args): - raise OSError("IPv6 addresses cannot be handled on Windows. " - "Missing ctypes.windll") - WSAStringToAddressA = not_windows - WSAAddressToStringA = not_windows - - def inet_pton(address_family, ip_string): - if address_family == socket.AF_INET: - return socket.inet_aton(ip_string) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - - if WSAStringToAddressA( - ip_string, - address_family, - None, - ctypes.byref(addr), - ctypes.byref(addr_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - if address_family == socket.AF_INET6: - return ctypes.string_at(addr.ipv6_addr, 16) - - raise socket.error('unknown address family') - - def inet_ntop(address_family, packed_ip): - if address_family == socket.AF_INET: - return socket.inet_ntoa(packed_ip) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - ip_string = ctypes.create_string_buffer(128) - ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string)) - - if address_family == socket.AF_INET6: - if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr): - raise socket.error('packed IP wrong length for inet_ntoa') - ctypes.memmove(addr.ipv6_addr, packed_ip, 16) - else: - raise socket.error('unknown address family') - - if WSAAddressToStringA( - ctypes.byref(addr), - addr_size, - None, - ip_string, - ctypes.byref(ip_string_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - return ip_string[:ip_string_size.value - 1] - - -import keyword +inet_pton = socket.inet_pton +inet_ntop = socket.inet_ntop # similar to collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic @@ -1688,7 +1454,7 @@ def __init__(self, value, precision): if value is None: milliseconds = None - elif isinstance(value, six.integer_types): + elif isinstance(value, int): milliseconds = value elif isinstance(value, datetime.datetime): value = value.replace( @@ -1956,12 +1722,10 @@ def __init__(self, version): try: self.major = int(parts.pop()) - except ValueError: - six.reraise( - ValueError, - ValueError("Couldn't parse version {}. Version should start with a number".format(version)), - sys.exc_info()[2] - ) + except ValueError as e: + raise ValueError( + "Couldn't parse version {}. 
Version should start with a number".format(version))\ + .with_traceback(e.__traceback__) try: self.minor = int(parts.pop()) if parts else 0 self.patch = int(parts.pop()) if parts else 0 @@ -1994,8 +1758,8 @@ def __str__(self): @staticmethod def _compare_version_part(version, other_version, cmp): - if not (isinstance(version, six.integer_types) and - isinstance(other_version, six.integer_types)): + if not (isinstance(version, int) and + isinstance(other_version, int)): version = str(version) other_version = str(other_version) @@ -2037,3 +1801,12 @@ def __gt__(self, other): (is_major_ge and is_minor_ge and is_patch_ge and is_build_gt) or (is_major_ge and is_minor_ge and is_patch_ge and is_build_ge and is_prerelease_gt) ) + + +def maybe_add_timeout_to_query(stmt: str, metadata_request_timeout: Optional[datetime.timedelta]) -> str: + if metadata_request_timeout is None: + return stmt + ms = int(metadata_request_timeout / datetime.timedelta(milliseconds=1)) + if ms == 0: + return stmt + return f"{stmt} USING TIMEOUT {ms}ms" diff --git a/ci/install_openssl.sh b/ci/install_openssl.sh deleted file mode 100755 index 4545cb0d68..0000000000 --- a/ci/install_openssl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! /bin/bash -e - -echo "Download and build openssl==1.1.1f" -cd /usr/src -if [[ -f openssl-1.1.1f.tar.gz ]]; then - exit 0 -fi -wget -q https://www.openssl.org/source/openssl-1.1.1f.tar.gz -if [[ -d openssl-1.1.1f ]]; then - exit 0 -fi - -tar -zxf openssl-1.1.1f.tar.gz -cd openssl-1.1.1f -./config -make -s -j2 -make install > /dev/null - -set +e -mv -f /usr/bin/openssl /root/ -mv -f /usr/bin64/openssl /root/ -ln -s /usr/local/ssl/bin/openssl /usr/bin/openssl diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh deleted file mode 100755 index f5a36a76df..0000000000 --- a/ci/run_integration_test.sh +++ /dev/null @@ -1,56 +0,0 @@ -#! /bin/bash -e - -aio_max_nr_recommended_value=1048576 -aio_max_nr=$(cat /proc/sys/fs/aio-max-nr) -echo "The current aio-max-nr value is $aio_max_nr" -if (( aio_max_nr != aio_max_nr_recommended_value )); then - sudo sh -c "echo 'fs.aio-max-nr = $aio_max_nr_recommended_value' >> /etc/sysctl.conf" - sudo sysctl -p /etc/sysctl.conf - echo "The aio-max-nr was changed from $aio_max_nr to $(cat /proc/sys/fs/aio-max-nr)" - if (( $(cat /proc/sys/fs/aio-max-nr) != aio_max_nr_recommended_value )); then - echo "The aio-max-nr value was not changed to $aio_max_nr_recommended_value" - exit 1 - fi -fi - -BRANCH='branch-4.5' - -python3 -m venv .test-venv -source .test-venv/bin/activate -pip install -U pip wheel setuptools - -# install driver wheel -pip install --ignore-installed -r test-requirements.txt pytest -pip install . - -# download awscli -pip install awscli - -# install scylla-ccm -pip install https://github.com/scylladb/scylla-ccm/archive/master.zip - -# download version -LATEST_MASTER_JOB_ID=`aws --no-sign-request s3 ls downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/ | grep '2021-' | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\/' | sort -g | tail -n 1` -AWS_BASE=s3://downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/${LATEST_MASTER_JOB_ID} - -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-package.tar.gz . & -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-tools-package.tar.gz . & -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-jmx-package.tar.gz . 
& -wait - -ccm create scylla-driver-temp -n 1 --scylla --version unstable/${BRANCH}:$LATEST_MASTER_JOB_ID \ - --scylla-core-package-uri=./scylla-package.tar.gz \ - --scylla-tools-java-package-uri=./scylla-tools-package.tar.gz \ - --scylla-jmx-package-uri=./scylla-jmx-package.tar.gz - -ccm remove - -# run test - -echo "export SCYLLA_VERSION=unstable/${BRANCH}:${LATEST_MASTER_JOB_ID}" -echo "PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=asyncio pytest --import-mode append tests/integration/standard/" -export SCYLLA_VERSION=unstable/${BRANCH}:${LATEST_MASTER_JOB_ID} -export MAPPED_SCYLLA_VERSION=3.11.4 -PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=asyncio pytest -rf --import-mode append $* - - diff --git a/conanfile.py b/conanfile.py new file mode 100644 index 0000000000..bc2b27c1c6 --- /dev/null +++ b/conanfile.py @@ -0,0 +1,57 @@ +import json +from pathlib import Path + +from conan import ConanFile +from conan.tools.layout import basic_layout +from conan.internal import check_duplicated_generator +from conan.tools.files import save + + +CONAN_COMMANDLINE_FILENAME = "conandeps.env" + +class CommandlineDeps: + def __init__(self, conanfile): + """ + :param conanfile: ``< ConanFile object >`` The current recipe object. Always use ``self``. + """ + self._conanfile = conanfile + + def generate(self) -> None: + """ + Collects all dependencies and components, then, generating a Makefile + """ + check_duplicated_generator(self, self._conanfile) + + host_req = self._conanfile.dependencies.host + build_req = self._conanfile.dependencies.build # tool_requires + test_req = self._conanfile.dependencies.test + + content_buffer = "" + + # Filter the build_requires not activated for any requirement + dependencies = [tup for tup in list(host_req.items()) + list(build_req.items()) + list(test_req.items()) if not tup[0].build] + + for require, dep in dependencies: + # Require is not used at the moment, but its information could be used, and will be used in Conan 2.0 + if require.build: + continue + include_dir = Path(dep.package_folder) / 'include' + package_dir = Path(dep.package_folder) / 'lib' + content_buffer += json.dumps(dict(include_dirs=str(include_dir), library_dirs=str(package_dir))) + + save(self._conanfile, CONAN_COMMANDLINE_FILENAME, content_buffer) + self._conanfile.output.info(f"Generated {CONAN_COMMANDLINE_FILENAME}") + + +class python_driverConan(ConanFile): + win_bash = False + + settings = "os", "compiler", "build_type", "arch" + requires = "libev/4.33" + + def layout(self): + basic_layout(self) + + def generate(self): + pc = CommandlineDeps(self) + pc.generate() diff --git a/docs.yaml b/docs.yaml deleted file mode 100644 index 8e29b942e3..0000000000 --- a/docs.yaml +++ /dev/null @@ -1,75 +0,0 @@ -title: DataStax Python Driver -summary: DataStax Python Driver for Apache Cassandra® -output: docs/_build/ -swiftype_drivers: pythondrivers -checks: - external_links: - exclude: - - 'http://aka.ms/vcpython27' -sections: - - title: N/A - prefix: / - type: sphinx - directory: docs - virtualenv_init: | - set -x - CASS_DRIVER_NO_CYTHON=1 pip install -r test-datastax-requirements.txt - # for newer versions this is redundant, but in older versions we need to - # install, e.g., the cassandra driver, and those versions don't specify - # the cassandra driver version in requirements files - CASS_DRIVER_NO_CYTHON=1 python setup.py develop - pip install "jinja2==2.8.1;python_version<'3.6'" "sphinx>=1.3,<2" geomet - # build extensions like libev - CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force -versions: 
- - name: '3.25' - ref: a83c36a5 - - name: '3.24' - ref: 21cac12b - - name: '3.23' - ref: a40a2af7 - - name: '3.22' - ref: 1ccd5b99 - - name: '3.21' - ref: 5589d96b - - name: '3.20' - ref: d30d166f - - name: '3.19' - ref: ac2471f9 - - name: '3.18' - ref: ec36b957 - - name: '3.17' - ref: 38e359e1 - - name: '3.16' - ref: '3.16.0' - - name: '3.15' - ref: '2ce0bd97' - - name: '3.14' - ref: '9af8bd19' - - name: '3.13' - ref: '3.13.0' - - name: '3.12' - ref: '43b9c995' - - name: '3.11' - ref: '3.11.0' - - name: '3.10' - ref: 64572368 - - name: 3.9 - ref: 3.9-doc - - name: 3.8 - ref: 3.8-doc - - name: 3.7 - ref: 3.7-doc - - name: 3.6 - ref: 3.6-doc - - name: 3.5 - ref: 3.5-doc -redirects: - - \A\/(.*)/\Z: /\1.html -rewrites: - - search: cassandra.apache.org/doc/cql3/CQL.html - replace: cassandra.apache.org/doc/cql3/CQL-3.0.html - - search: http://www.datastax.com/documentation/cql/3.1/ - replace: https://docs.datastax.com/en/archived/cql/3.1/ - - search: http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH - replace: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_reference/cql_commands/cqlBatch.html diff --git a/docs/.nav b/docs/.nav index 807bfd3e6f..e57bdd5bcc 100644 --- a/docs/.nav +++ b/docs/.nav @@ -4,16 +4,11 @@ scylla_specific execution_profiles lwt object_mapper -geo_types -graph -graph_fluent -classic_graph performance query_paging security upgrading user_defined_types -dates_and_times -cloud +dates-and-times faq api diff --git a/docs/Makefile b/docs/Makefile index 0374c9de04..09512be470 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,86 +1,94 @@ +# Global variables # You can set these variables from the command line. -POETRY = $(HOME)/.poetry/bin/poetry -SPHINXOPTS = -SPHINXBUILD = $(POETRY) run sphinx-build +SHELL = bash +UV = uv +SPHINXOPTS = -j auto +SPHINXBUILD = $(UV) run --frozen sphinx-build PAPER = BUILDDIR = _build SOURCEDIR = . -# Internal variables. +# Internal variables PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) TESTSPHINXOPTS = $(ALLSPHINXOPTS) -W --keep-going -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) - .PHONY: all all: dirhtml +# Setup commands +#.PHONY: setupenv +#setupenv: +# uv pip install -r <(uv pip compile pyproject.toml) + +.PHONY: update +update: + $(UV) lock --upgrade + +# Clean commands .PHONY: pristine pristine: clean git clean -dfX -.PHONY: setup -setup: - ./_utils/setup.sh - .PHONY: clean clean: rm -rf $(BUILDDIR)/* -.PHONY: preview -preview: setup - $(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 - +# Generate output commands .PHONY: dirhtml -dirhtml: setup +dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml -singlehtml: setup +singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: epub -epub: setup +epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 -epub3: setup +epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 
-.PHONY: dummy -dummy: setup - $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy +.PHONY: multiversion +multiversion: + $(UV) run --frozen sphinx-multiversion $(SOURCEDIR) $(BUILDDIR)/dirhtml @echo - @echo "Build finished. Dummy builder generates no files." - -.PHONY: linkcheck -linkcheck: setup - $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." -.PHONY: multiversion -multiversion: setup - @mkdir -p $(HOME)/.cache/pypoetry/virtualenvs - $(POETRY) run sphinx-multiversion $(SOURCEDIR) $(BUILDDIR)/dirhtml +.PHONY: redirects +redirects: + $(UV) run --frozen redirects-cli fromfile --yaml-file _utils/redirects.yaml --output-dir $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +# Preview commands +.PHONY: preview +preview: + $(UV) run --frozen sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 + .PHONY: multiversionpreview multiversionpreview: multiversion - $(POETRY) run python3 -m http.server 5500 --directory $(BUILDDIR)/dirhtml + $(UV) run --frozen python -m http.server 5500 --directory $(BUILDDIR)/dirhtml +# Test commands .PHONY: test -test: setup +test: $(SPHINXBUILD) -b dirhtml $(TESTSPHINXOPTS) $(BUILDDIR)/dirhtml @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." \ No newline at end of file + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck + diff --git a/docs/_templates/notice.html b/docs/_templates/notice.html new file mode 100644 index 0000000000..a47acce544 --- /dev/null +++ b/docs/_templates/notice.html @@ -0,0 +1,6 @@ +
+

+ScyllaDB Python Driver is available under the Apache v2 License. +ScyllaDB Python Driver is a fork of DataStax Python Driver. +See Copyright here.

+
diff --git a/.travis.yml b/docs/_utils/redirects.yaml similarity index 100% rename from .travis.yml rename to docs/_utils/redirects.yaml diff --git a/docs/_utils/setup.sh b/docs/_utils/setup.sh deleted file mode 100755 index b8f50243e4..0000000000 --- a/docs/_utils/setup.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -if pwd | egrep -q '\s'; then - echo "Working directory name contains one or more spaces." - exit 1 -fi - -which python3 || { echo "Failed to find python3. Try installing Python for your operative system: https://www.python.org/downloads/" && exit 1; } -which poetry || curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/1.1.3/get-poetry.py | python3 - && source ${HOME}/.poetry/env -poetry install -poetry update diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index d46aae56cb..53789b9582 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -1,5 +1,7 @@ -:mod:`cassandra` - Exceptions and Enums -======================================= +cassandra +========= + +Exceptions and Enums .. module:: cassandra diff --git a/docs/api/cassandra/auth.rst b/docs/api/cassandra/auth.rst index 58c964cf89..91bb4e9139 100644 --- a/docs/api/cassandra/auth.rst +++ b/docs/api/cassandra/auth.rst @@ -1,5 +1,7 @@ -``cassandra.auth`` - Authentication -=================================== +cassandra.auth +============== + +Authentication .. module:: cassandra.auth diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 2b3d7828a8..51f03f3d97 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -1,5 +1,7 @@ -``cassandra.cluster`` - Clusters and Sessions -============================================= +cassandra.cluster +================= + +Clusters and Sessions .. module:: cassandra.cluster @@ -86,22 +88,6 @@ .. automethod:: add_execution_profile - .. automethod:: set_max_requests_per_connection - - .. automethod:: get_max_requests_per_connection - - .. automethod:: set_min_requests_per_connection - - .. automethod:: get_min_requests_per_connection - - .. automethod:: get_core_connections_per_host - - .. automethod:: set_core_connections_per_host - - .. automethod:: get_max_connections_per_host - - .. automethod:: set_max_connections_per_host - .. automethod:: get_control_connection_host .. automethod:: refresh_schema_metadata @@ -215,7 +201,7 @@ .. automethod:: add_errback(fn, *args, **kwargs) - .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_args=None) + .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_kwargs=None) .. autoclass:: ResultSet () :members: diff --git a/docs/api/cassandra/concurrent.rst b/docs/api/cassandra/concurrent.rst index f4bab6f048..8f403a3e3c 100644 --- a/docs/api/cassandra/concurrent.rst +++ b/docs/api/cassandra/concurrent.rst @@ -1,5 +1,7 @@ -``cassandra.concurrent`` - Utilities for Concurrent Statement Execution -======================================================================= +cassandra.concurrent +==================== + +Utilities for Concurrent Statement Execution .. 
module:: cassandra.concurrent diff --git a/docs/api/cassandra/connection.rst b/docs/api/cassandra/connection.rst index 32cca590c0..f9ec4eef61 100644 --- a/docs/api/cassandra/connection.rst +++ b/docs/api/cassandra/connection.rst @@ -1,5 +1,7 @@ -``cassandra.connection`` - Low Level Connection Info -==================================================== +cassandra.connection +==================== + +Low Level Connection Info .. module:: cassandra.connection diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index d44be8adb8..35a47f0ef4 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.columns`` - Column types for object mapping models -======================================================================== +cassandra.cqlengine.columns +=========================== + +Column types for object mapping models .. module:: cassandra.cqlengine.columns diff --git a/docs/api/cassandra/cqlengine/connection.rst b/docs/api/cassandra/cqlengine/connection.rst index 0f584fcca2..6270b75c4e 100644 --- a/docs/api/cassandra/cqlengine/connection.rst +++ b/docs/api/cassandra/cqlengine/connection.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.connection`` - Connection management for cqlengine -======================================================================== +cassandra.cqlengine.connection +============================== + +Connection management for cqlengine .. module:: cassandra.cqlengine.connection diff --git a/docs/api/cassandra/cqlengine/management.rst b/docs/api/cassandra/cqlengine/management.rst index fb483abc81..62709019da 100644 --- a/docs/api/cassandra/cqlengine/management.rst +++ b/docs/api/cassandra/cqlengine/management.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.management`` - Schema management for cqlengine -======================================================================== +cassandra.cqlengine.management +============================== + +Schema management for cqlengine .. module:: cassandra.cqlengine.management diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index 60b1471184..9905926c4e 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.models`` - Table models for object mapping -================================================================ +cassandra.cqlengine.models +========================== + +Table models for object mapping .. module:: cassandra.cqlengine.models @@ -103,7 +105,7 @@ Model TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') except LWTException as e: # handle failure case - print e.existing # dict containing LWT result fields + print(e.existing) # dict containing LWT result fields) This method is supported on Cassandra 2.0 or later. @@ -144,7 +146,7 @@ Model t.iff(count=5).update('other text') except LWTException as e: # handle failure case - print e.existing # existing object + print(e.existing) # existing object .. 
automethod:: get diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index ce8f764b6b..0d8c52164f 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.query`` - Query and filter model objects -================================================================= +cassandra.cqlengine.query +========================= + +Query and filter model objects .. module:: cassandra.cqlengine.query diff --git a/docs/api/cassandra/cqlengine/usertype.rst b/docs/api/cassandra/cqlengine/usertype.rst index ebed187da9..219de8c300 100644 --- a/docs/api/cassandra/cqlengine/usertype.rst +++ b/docs/api/cassandra/cqlengine/usertype.rst @@ -1,5 +1,7 @@ -``cassandra.cqlengine.usertype`` - Model classes for User Defined Types -======================================================================= +cassandra.cqlengine.usertype +============================ + +Model classes for User Defined Types .. module:: cassandra.cqlengine.usertype diff --git a/docs/api/cassandra/datastax/graph/fluent/index.rst b/docs/api/cassandra/datastax/graph/fluent/index.rst deleted file mode 100644 index 5547e0fdd7..0000000000 --- a/docs/api/cassandra/datastax/graph/fluent/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:mod:`cassandra.datastax.graph.fluent` -====================================== - -.. module:: cassandra.datastax.graph.fluent - -.. autoclass:: DseGraph - - .. autoattribute:: DSE_GRAPH_QUERY_LANGUAGE - - .. automethod:: create_execution_profile - - .. automethod:: query_from_traversal - - .. automethod:: traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, traversal_class=None) - - .. automethod:: batch(session=None, execution_profile=None) - -.. autoclass:: DSESessionRemoteGraphConnection(session[, graph_name, execution_profile]) - -.. autoclass:: BaseGraphRowFactory - -.. autoclass:: graph_traversal_row_factory - -.. autoclass:: graph_traversal_dse_object_row_factory diff --git a/docs/api/cassandra/datastax/graph/fluent/predicates.rst b/docs/api/cassandra/datastax/graph/fluent/predicates.rst deleted file mode 100644 index f6e86f6451..0000000000 --- a/docs/api/cassandra/datastax/graph/fluent/predicates.rst +++ /dev/null @@ -1,14 +0,0 @@ -:mod:`cassandra.datastax.graph.fluent.predicates` -================================================= - -.. module:: cassandra.datastax.graph.fluent.predicates - - -.. autoclass:: Search - :members: - -.. autoclass:: CqlCollection - :members: - -.. autoclass:: Geo - :members: diff --git a/docs/api/cassandra/datastax/graph/fluent/query.rst b/docs/api/cassandra/datastax/graph/fluent/query.rst deleted file mode 100644 index 3dd859f96e..0000000000 --- a/docs/api/cassandra/datastax/graph/fluent/query.rst +++ /dev/null @@ -1,8 +0,0 @@ -:mod:`cassandra.datastax.graph.fluent.query` -============================================ - -.. module:: cassandra.datastax.graph.fluent.query - - -.. autoclass:: TraversalBatch - :members: diff --git a/docs/api/cassandra/datastax/graph/index.rst b/docs/api/cassandra/datastax/graph/index.rst deleted file mode 100644 index dafd5f65fd..0000000000 --- a/docs/api/cassandra/datastax/graph/index.rst +++ /dev/null @@ -1,121 +0,0 @@ -``cassandra.datastax.graph`` - Graph Statements, Options, and Row Factories -=========================================================================== - -.. _api-datastax-graph: - -.. module:: cassandra.datastax.graph - -.. autofunction:: single_object_row_factory - -.. 
autofunction:: graph_result_row_factory - -.. autofunction:: graph_object_row_factory - -.. autofunction:: graph_graphson2_row_factory - -.. autofunction:: graph_graphson3_row_factory - -.. function:: to_int(value) - - Wraps a value to be explicitly serialized as a graphson Int. - -.. function:: to_bigint(value) - - Wraps a value to be explicitly serialized as a graphson Bigint. - -.. function:: to_smallint(value) - - Wraps a value to be explicitly serialized as a graphson Smallint. - -.. function:: to_float(value) - - Wraps a value to be explicitly serialized as a graphson Float. - -.. function:: to_double(value) - - Wraps a value to be explicitly serialized as a graphson Double. - -.. autoclass:: GraphProtocol - :members: - -.. autoclass:: GraphOptions - - .. autoattribute:: graph_name - - .. autoattribute:: graph_source - - .. autoattribute:: graph_language - - .. autoattribute:: graph_read_consistency_level - - .. autoattribute:: graph_write_consistency_level - - .. autoattribute:: is_default_source - - .. autoattribute:: is_analytics_source - - .. autoattribute:: is_graph_source - - .. automethod:: set_source_default - - .. automethod:: set_source_analytics - - .. automethod:: set_source_graph - - -.. autoclass:: SimpleGraphStatement - :members: - -.. autoclass:: Result - :members: - -.. autoclass:: Vertex - :members: - -.. autoclass:: VertexProperty - :members: - -.. autoclass:: Edge - :members: - -.. autoclass:: Path - :members: - -.. autoclass:: T - :members: - -.. autoclass:: GraphSON1Serializer - :members: - -.. autoclass:: GraphSON1Deserializer - - .. automethod:: deserialize_date - - .. automethod:: deserialize_timestamp - - .. automethod:: deserialize_time - - .. automethod:: deserialize_duration - - .. automethod:: deserialize_int - - .. automethod:: deserialize_bigint - - .. automethod:: deserialize_double - - .. automethod:: deserialize_float - - .. automethod:: deserialize_uuid - - .. automethod:: deserialize_blob - - .. automethod:: deserialize_decimal - - .. automethod:: deserialize_point - - .. automethod:: deserialize_linestring - - .. automethod:: deserialize_polygon - -.. autoclass:: GraphSON2Reader - :members: diff --git a/docs/api/cassandra/decoder.rst b/docs/api/cassandra/decoder.rst index e213cc6d74..6341664cb3 100644 --- a/docs/api/cassandra/decoder.rst +++ b/docs/api/cassandra/decoder.rst @@ -1,5 +1,7 @@ -``cassandra.decoder`` - Data Return Formats -=========================================== +cassandra.decoder +================= + +Data Return Formats .. module:: cassandra.decoder diff --git a/docs/api/cassandra/encoder.rst b/docs/api/cassandra/encoder.rst index de3b180510..8919c87ddd 100644 --- a/docs/api/cassandra/encoder.rst +++ b/docs/api/cassandra/encoder.rst @@ -1,5 +1,7 @@ -``cassandra.encoder`` - Encoders for non-prepared Statements -============================================================ +cassandra.encoder +================= + +Encoders for non-prepared Statements .. module:: cassandra.encoder diff --git a/docs/api/cassandra/graph.rst b/docs/api/cassandra/graph.rst deleted file mode 100644 index 43ddd3086c..0000000000 --- a/docs/api/cassandra/graph.rst +++ /dev/null @@ -1,121 +0,0 @@ -``cassandra.graph`` - Graph Statements, Options, and Row Factories -================================================================== - -.. note:: This module is only for backward compatibility for dse-driver users. Consider using :ref:`cassandra.datastax.graph `. - -.. module:: cassandra.graph - -.. autofunction:: single_object_row_factory - -.. 
autofunction:: graph_result_row_factory - -.. autofunction:: graph_object_row_factory - -.. autofunction:: graph_graphson2_row_factory - -.. autofunction:: graph_graphson3_row_factory - -.. function:: to_int(value) - - Wraps a value to be explicitly serialized as a graphson Int. - -.. function:: to_bigint(value) - - Wraps a value to be explicitly serialized as a graphson Bigint. - -.. function:: to_smallint(value) - - Wraps a value to be explicitly serialized as a graphson Smallint. - -.. function:: to_float(value) - - Wraps a value to be explicitly serialized as a graphson Float. - -.. function:: to_double(value) - - Wraps a value to be explicitly serialized as a graphson Double. - -.. autoclass:: GraphProtocol - :members: - -.. autoclass:: GraphOptions - - .. autoattribute:: graph_name - - .. autoattribute:: graph_source - - .. autoattribute:: graph_language - - .. autoattribute:: graph_read_consistency_level - - .. autoattribute:: graph_write_consistency_level - - .. autoattribute:: is_default_source - - .. autoattribute:: is_analytics_source - - .. autoattribute:: is_graph_source - - .. automethod:: set_source_default - - .. automethod:: set_source_analytics - - .. automethod:: set_source_graph - - -.. autoclass:: SimpleGraphStatement - :members: - -.. autoclass:: Result - :members: - -.. autoclass:: Vertex - :members: - -.. autoclass:: VertexProperty - :members: - -.. autoclass:: Edge - :members: - -.. autoclass:: Path - :members: - -.. autoclass:: GraphSON1Serializer - :members: - -.. autoclass:: GraphSON1Deserializer - - .. automethod:: deserialize_date - - .. automethod:: deserialize_timestamp - - .. automethod:: deserialize_time - - .. automethod:: deserialize_duration - - .. automethod:: deserialize_int - - .. automethod:: deserialize_bigint - - .. automethod:: deserialize_double - - .. automethod:: deserialize_float - - .. automethod:: deserialize_uuid - - .. automethod:: deserialize_blob - - .. automethod:: deserialize_decimal - - .. automethod:: deserialize_point - - .. automethod:: deserialize_linestring - - .. automethod:: deserialize_polygon - -.. autoclass:: GraphSON2Reader - :members: - -.. autoclass:: GraphSON3Reader - :members: diff --git a/docs/api/cassandra/io/asyncioreactor.rst b/docs/api/cassandra/io/asyncioreactor.rst index 38ae63ca7f..a7509ed6a8 100644 --- a/docs/api/cassandra/io/asyncioreactor.rst +++ b/docs/api/cassandra/io/asyncioreactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.asyncioreactor`` - ``asyncio`` Event Loop -===================================================================== +cassandra.io.asyncioreactor +=========================== + +``asyncio`` Event Loop .. module:: cassandra.io.asyncioreactor diff --git a/docs/api/cassandra/io/asyncorereactor.rst b/docs/api/cassandra/io/asyncorereactor.rst index ade7887e70..661fd9c1ec 100644 --- a/docs/api/cassandra/io/asyncorereactor.rst +++ b/docs/api/cassandra/io/asyncorereactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.asyncorereactor`` - ``asyncore`` Event Loop -========================================================== +cassandra.io.asyncorereactor +============================ + +``asyncore`` Event Loop .. 
module:: cassandra.io.asyncorereactor diff --git a/docs/api/cassandra/io/eventletreactor.rst b/docs/api/cassandra/io/eventletreactor.rst index 1ba742c7e9..2e71153b70 100644 --- a/docs/api/cassandra/io/eventletreactor.rst +++ b/docs/api/cassandra/io/eventletreactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.eventletreactor`` - ``eventlet``-compatible Connection -===================================================================== +cassandra.io.eventletreactor +============================ + +``eventlet``-compatible Connection .. module:: cassandra.io.eventletreactor diff --git a/docs/api/cassandra/io/geventreactor.rst b/docs/api/cassandra/io/geventreactor.rst index 603affe140..a4b0235c6a 100644 --- a/docs/api/cassandra/io/geventreactor.rst +++ b/docs/api/cassandra/io/geventreactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.geventreactor`` - ``gevent``-compatible Event Loop -================================================================= +cassandra.io.geventreactor +========================== + +``gevent``-compatible Event Loop .. module:: cassandra.io.geventreactor diff --git a/docs/api/cassandra/io/libevreactor.rst b/docs/api/cassandra/io/libevreactor.rst index 5b7288edf2..2269d0822a 100644 --- a/docs/api/cassandra/io/libevreactor.rst +++ b/docs/api/cassandra/io/libevreactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.libevreactor`` - ``libev`` Event Loop -==================================================== +cassandra.io.libevreactor +========================= + +``libev`` Event Loop .. module:: cassandra.io.libevreactor diff --git a/docs/api/cassandra/io/twistedreactor.rst b/docs/api/cassandra/io/twistedreactor.rst index 24e93bd432..cc6944c9fd 100644 --- a/docs/api/cassandra/io/twistedreactor.rst +++ b/docs/api/cassandra/io/twistedreactor.rst @@ -1,5 +1,7 @@ -``cassandra.io.twistedreactor`` - Twisted Event Loop -==================================================== +cassandra.io.twistedreactor +=========================== + +Twisted Event Loop .. module:: cassandra.io.twistedreactor diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index 7c1280bcf7..25526f61ec 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -1,5 +1,7 @@ -``cassandra.metadata`` - Schema and Ring Topology -================================================= +cassandra.metadata +================== + +Schema and Ring Topology .. module:: cassandra.metadata diff --git a/docs/api/cassandra/metrics.rst b/docs/api/cassandra/metrics.rst index 0df7f8b5b9..d2ee997bca 100644 --- a/docs/api/cassandra/metrics.rst +++ b/docs/api/cassandra/metrics.rst @@ -1,5 +1,7 @@ -``cassandra.metrics`` - Performance Metrics -=========================================== +cassandra.metrics +================= + +Performance Metrics .. module:: cassandra.metrics diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 387b19ed95..84d5575a40 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -1,5 +1,7 @@ -``cassandra.policies`` - Load balancing and Failure Handling Policies -===================================================================== +cassandra.policies +================== + +Load balancing and Failure Handling Policies .. module:: cassandra.policies @@ -18,6 +20,9 @@ Load Balancing .. autoclass:: DCAwareRoundRobinPolicy :members: +.. autoclass:: RackAwareRoundRobinPolicy + :members: + .. 
autoclass:: WhiteListRoundRobinPolicy :members: diff --git a/docs/api/cassandra/pool.rst b/docs/api/cassandra/pool.rst index b14d30e19c..f6a59ce58a 100644 --- a/docs/api/cassandra/pool.rst +++ b/docs/api/cassandra/pool.rst @@ -1,5 +1,7 @@ -``cassandra.pool`` - Hosts and Connection Pools -=============================================== +cassandra.pool +============== + +Hosts and Connection Pools .. automodule:: cassandra.pool diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index f615ab1a70..8b8f303574 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -1,5 +1,7 @@ -``cassandra.protocol`` - Protocol Features -===================================================================== +cassandra.protocol +================== + +Protocol Features .. module:: cassandra.protocol @@ -14,7 +16,7 @@ holding custom key/value pairs. By default these are ignored by the server. They can be useful for servers implementing a custom QueryHandler. -See :meth:`.Session.execute`, ::meth:`.Session.execute_async`, :attr:`.ResponseFuture.custom_payload`. +See :meth:`.Session.execute`, :meth:`.Session.execute_async`, :attr:`.ResponseFuture.custom_payload`. .. autoclass:: _ProtocolHandler @@ -51,5 +53,5 @@ These protocol handlers comprise different parsers, and return results as descri - LazyProtocolHandler: near drop-in replacement for the above, except that it returns an iterator over rows, lazily decoded into the default row format (this is more efficient since all decoded results are not materialized at once) -- NumpyProtocolHander: deserializes results directly into NumPy arrays. This facilitates efficient integration with +- NumpyProtocolHandler: deserializes results directly into NumPy arrays. This facilitates efficient integration with analysis toolkits such as Pandas. diff --git a/docs/api/cassandra/query.rst b/docs/api/cassandra/query.rst index fcd79739b9..aa3a8c1035 100644 --- a/docs/api/cassandra/query.rst +++ b/docs/api/cassandra/query.rst @@ -1,5 +1,7 @@ -``cassandra.query`` - Prepared Statements, Batch Statements, Tracing, and Row Factories -======================================================================================= +cassandra.query +=============== + +Prepared Statements, Batch Statements, Tracing, and Row Factories .. module:: cassandra.query diff --git a/docs/api/cassandra/timestamps.rst b/docs/api/cassandra/timestamps.rst index 00d25b06d9..4335784de3 100644 --- a/docs/api/cassandra/timestamps.rst +++ b/docs/api/cassandra/timestamps.rst @@ -1,5 +1,7 @@ -``cassandra.timestamps`` - Timestamp Generation -=============================================== +cassandra.timestamps +==================== + +Timestamp Generation .. module:: cassandra.timestamps diff --git a/docs/api/cassandra/util.rst b/docs/api/cassandra/util.rst index 848d4d5fc2..ace39f86dd 100644 --- a/docs/api/cassandra/util.rst +++ b/docs/api/cassandra/util.rst @@ -1,5 +1,7 @@ -``cassandra.util`` - Utilities -=================================== +cassandra.util +============== + +Utilities .. automodule:: cassandra.util :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index 9e778d508c..cecbea5e75 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -4,13 +4,12 @@ API Documentation Core Driver ----------- .. 
toctree:: - :maxdepth: 2 + :maxdepth: 1 cassandra cassandra/cluster cassandra/policies cassandra/auth - cassandra/graph cassandra/metadata cassandra/metrics cassandra/query @@ -42,13 +41,3 @@ Object Mapper cassandra/cqlengine/connection cassandra/cqlengine/management cassandra/cqlengine/usertype - -DataStax Graph --------------- -.. toctree:: - :maxdepth: 1 - - cassandra/datastax/graph/index - cassandra/datastax/graph/fluent/index - cassandra/datastax/graph/fluent/query - cassandra/datastax/graph/fluent/predicates diff --git a/docs/classic_graph.rst b/docs/classic_graph.rst deleted file mode 100644 index ef68c86359..0000000000 --- a/docs/classic_graph.rst +++ /dev/null @@ -1,299 +0,0 @@ -DataStax Classic Graph Queries -============================== - -Getting Started -~~~~~~~~~~~~~~~ - -First, we need to create a graph in the system. To access the system API, we -use the system execution profile :: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT - - cluster = Cluster() - session = cluster.connect() - - graph_name = 'movies' - session.execute_graph("system.graph(name).ifNotExists().engine(Classic).create()", {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - - -To execute requests on our newly created graph, we need to setup an execution -profile. Additionally, we also need to set the schema_mode to `development` -for the schema creation:: - - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - - session.execute_graph("schema.config().option('graph.schema_mode').set('development')") - - -We are ready to configure our graph schema. We will create a simple one for movies:: - - # properties are used to define a vertex - properties = """ - schema.propertyKey("genreId").Text().create(); - schema.propertyKey("personId").Text().create(); - schema.propertyKey("movieId").Text().create(); - schema.propertyKey("name").Text().create(); - schema.propertyKey("title").Text().create(); - schema.propertyKey("year").Int().create(); - schema.propertyKey("country").Text().create(); - """ - - session.execute_graph(properties) # we can execute multiple statements in a single request - - # A Vertex represents a "thing" in the world. 
- vertices = """ - schema.vertexLabel("genre").properties("genreId","name").create(); - schema.vertexLabel("person").properties("personId","name").create(); - schema.vertexLabel("movie").properties("movieId","title","year","country").create(); - """ - - session.execute_graph(vertices) - - # An edge represents a relationship between two vertices - edges = """ - schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); - schema.edgeLabel("actor").connection("movie","person").create(); - """ - - session.execute_graph(edges) - - # Indexes to execute graph requests efficiently - indexes = """ - schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); - schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); - schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); - schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); - schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); - schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); - schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); - """ - -Next, we'll add some data:: - - session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); - """) - - session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); - """) - - session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). 
- property('year', 1995).property('country', 'United States').next(); - """) - -Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: - - session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); - - the_happening.addEdge('belongsTo', genre_horror); - the_italian_job.addEdge('belongsTo', genre_action); - rev_road.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_action); - dead_man.addEdge('belongsTo', genre_drama); - - the_happening.addEdge('actor', mark); - the_italian_job.addEdge('actor', mark); - rev_road.addEdge('actor', leo); - man_mask.addEdge('actor', leo); - dead_man.addEdge('actor', iggy); - """) - -We are all set. You can now query your graph. Here are some examples:: - - # Find all movies of the genre Drama - for r in session.execute_graph(""" - g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of the same genre than the movie 'Dead Man' - for r in session.execute_graph(""" - g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of Mark Wahlberg - for r in session.execute_graph(""" - g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): - print(r) - -To see a more graph examples, see `DataStax Graph Examples `_. - -Graph Types -~~~~~~~~~~~ - -Here are the supported graph types with their python representations: - -========== ================ -DSE Graph Python -========== ================ -boolean bool -bigint long, int (PY3) -int int -smallint int -varint int -float float -double double -uuid uuid.UUID -Decimal Decimal -inet str -timestamp datetime.datetime -date datetime.date -time datetime.time -duration datetime.timedelta -point Point -linestring LineString -polygon Polygon -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -========== ================ - -Graph Row Factory -~~~~~~~~~~~~~~~~~ - -By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result -types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these -types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. 
-The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), -or lists by index:: - - # dicts with `__getattr__` or `__getitem__` - result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined - result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) - result.value # {u'key_int': 3, u'key_str': u'value'} (dict) - result.key_str # u'value' - result.key_int # 3 - result['key_str'] # u'value' - result['key_int'] # 3 - - # lists with `__getitem__` - result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] - result # dse.graph.Result([0, 1, 2]) - result.value # [0, 1, 2] (list) - result[1] # 1 (list[1]) - -You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to -:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, -unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, -unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results -also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). - -Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to -deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. -deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: - - # ... - from cassandra.graph import GraphSON1Deserializer - - row = session.execute_graph("g.V().toList()")[0] - value = row.properties['my_property_key'][0].value # accessing the VertexProperty value - value = GraphSON1Deserializer.deserialize_timestamp(value) - - print(value) # 2017-06-26 08:27:05 - print(type(value)) # - - -Named Parameters -~~~~~~~~~~~~~~~~ - -Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: - - result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - [r.value for r in result_set] # [1, 2] - -All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized -automatically to their graph representation: - -Example:: - - session.execute_graph(""" - g.addV('person'). - property('name', text_value). - property('age', integer_value). - property('birthday', timestamp_value). - property('house_yard', polygon_value).toList() - """, { - 'text_value': 'Mike Smith', - 'integer_value': 34, - 'timestamp_value': datetime.datetime(1967, 12, 30), - 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - }) - - -As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) -or specified per execution:: - - ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, - graph_options=GraphOptions(graph_name='something-else')) - session.execute_graph(statement, execution_profile=ep) - -Using GraphSON2 Protocol -~~~~~~~~~~~~~~~~~~~~~~~~ - -The default graph protocol used is GraphSON1. 
However GraphSON1 may -cause problems of type conversion happening during the serialization -of the query to the DSE Graph server, or the deserialization of the -responses back from a string Gremlin query. GraphSON2 offers better -support for the complex data types handled by DSE Graph. - -DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol -for graph queries. Enabling GraphSON2 can be done by `changing the -graph protocol of the execution profile` and `setting the graphson2 row factory`:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory - - # Create a GraphSON2 execution profile - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', - graph_protocol=GraphProtocol.GRAPHSON_2_0), - row_factory=graph_graphson2_row_factory) - - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph(...) - -Using GraphSON2, all properties will be automatically deserialized to -its Python representation. Note that it may bring significant -behavioral change at runtime. - -It is generally recommended to switch to GraphSON2 as it brings more -consistent support for complex data types in the Graph driver and will -be activated by default in the next major version (Python dse-driver -driver 3.0). diff --git a/docs/cloud.rst b/docs/cloud.rst deleted file mode 100644 index acabe62993..0000000000 --- a/docs/cloud.rst +++ /dev/null @@ -1,91 +0,0 @@ -:orphan: - -Cloud ------ -Connecting -========== -To connect to a DataStax Astra cluster: - -1. Download the secure connect bundle from your Astra account. -2. Connect to your cluster with - -.. code-block:: python - - from cassandra.cluster import Cluster - from cassandra.auth import PlainTextAuthProvider - - cloud_config = { - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' - } - auth_provider = PlainTextAuthProvider(username='user', password='pass') - cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) - session = cluster.connect() - -Cloud Config Options -==================== - -use_default_tempdir -+++++++++++++++++++ - -The secure connect bundle needs to be extracted to load the certificates into the SSLContext. -By default, the zip location is used as the base dir for the extraction. In some environments, -the zip location file system is read-only (e.g Azure Function). With *use_default_tempdir* set to *True*, -the default temporary directory of the system will be used as base dir. - -.. code:: python - - cloud_config = { - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', - 'use_default_tempdir': True - } - ... - -Astra Differences -================== -In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: - -* A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). -* An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. -* A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. -* If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. 
- - -Limitations -=========== - -Event loops -^^^^^^^^^^^ -Evenlet isn't yet supported for python 3.7+ due to an `issue in Eventlet `_. - - -CqlEngine -========= - -When using the object mapper, you can configure cqlengine with :func:`~.cqlengine.connection.set_session`: - -.. code:: python - - from cassandra.cqlengine import connection - ... - - c = Cluster(cloud={'secure_connect_bundle':'/path/to/secure-connect-test.zip'}, - auth_provider=PlainTextAuthProvider('user', 'pass')) - s = c.connect('myastrakeyspace') - connection.set_session(s) - ... - -If you are using some third-party libraries (flask, django, etc.), you might not be able to change the -configuration mechanism. For this reason, the `hosts` argument of the default -:func:`~.cqlengine.connection.setup` function will be ignored if a `cloud` config is provided: - -.. code:: python - - from cassandra.cqlengine import connection - ... - - connection.setup( - None, # or anything else - "myastrakeyspace", cloud={ - 'secure_connect_bundle':'/path/to/secure-connect-test.zip' - }, - auth_provider=PlainTextAuthProvider('user', 'pass')) diff --git a/docs/conf.py b/docs/conf.py index 4fb79b1e3c..1b0361db39 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,31 +1,63 @@ # -*- coding: utf-8 -*- - import os import sys -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. + +from sphinx_scylladb_theme.utils import multiversion_regex_builder + sys.path.insert(0, os.path.abspath('..')) import cassandra -import recommonmark -from recommonmark.transform import AutoStructify -from sphinx_scylladb_theme.utils import multiversion_regex_builder +# -- Global variables + +# Build documentation for the following tags and branches +TAGS = [ + '3.21.0-scylla', + '3.22.3-scylla', + '3.24.8-scylla', + '3.25.4-scylla', + '3.25.11-scylla', + '3.26.9-scylla', + '3.28.0-scylla', + '3.28.1-scylla', + '3.28.2-scylla', + '3.29.0-scylla', + '3.29.1-scylla', + '3.29.2-scylla', + '3.29.3-scylla', + '3.29.4-scylla', + '3.29.5-scylla', + '3.29.6-scylla', + '3.29.7-scylla', +] +BRANCHES = ['master'] +# Set the latest version. +LATEST_VERSION = '3.29.7-scylla' +# Set which versions are not released yet. +UNSTABLE_VERSIONS = ['master'] +# Set which versions are deprecated +DEPRECATED_VERSIONS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.9-scylla', '3.28.1-scylla', '3.29.1-scylla'] -# -- General configuration ----------------------------------------------------- +# -- General configuration # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages', 'sphinx.ext.viewcode', 'sphinx_scylladb_theme', 'sphinx_multiversion', 'recommonmark'] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinx.ext.extlinks', + 'sphinx_sitemap', + 'sphinx_scylladb_theme', + 'sphinx_multiversion', # optional + 'recommonmark', # optional +] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. 
-# You can specify multiple suffix as a list of string: -# -source_suffix = ['.rst', '.md'] -autosectionlabel_prefix_document = True +source_suffix = [".rst", ".md"] # The encoding of source files. #source_encoding = 'utf-8-sig' @@ -34,10 +66,9 @@ master_doc = 'index' # General information about the project. -project = u'Cassandra Driver' +project = u'Scylla Python Driver' copyright = u'ScyllaDB 2021 and © DataStax 2013-2017' - # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. @@ -52,20 +83,50 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'cloud.rst', 'core_graph.rst', 'classic_graph.rst', 'geo_types.rst', 'graph.rst', 'graph_fluent.rst'] +exclude_patterns = [ + "_build", + "Thumbs.db", + ".DS_Store", + ".venv", + ".venv/**", + "**/site-packages/**", + "**/*.dist-info/**", + "**/licenses/**", +] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' -# Setup Sphinx -def setup(sphinx): - sphinx.add_config_value('recommonmark_config', { - 'enable_eval_rst': True, - 'enable_auto_toc_tree': False, - }, True) - sphinx.add_transform(AutoStructify) +# -- Options for not found extension + +# Template used to render the 404.html generated by this extension. +notfound_template = '404.html' + +# Prefix added to all the URLs generated in the 404 page. +notfound_urls_prefix = '' + +# -- Options for multiversion + +# Whitelist pattern for tags +smv_tag_whitelist = multiversion_regex_builder(TAGS) +# Whitelist pattern for branches +smv_branch_whitelist = multiversion_regex_builder(BRANCHES) +# Defines which version is considered to be the latest stable version. +smv_latest_version = LATEST_VERSION +# Defines the new name for the latest version. +smv_rename_latest_version = 'stable' +# Whitelist pattern for remotes (set to None to use local branches only) +smv_remote_whitelist = r'^origin$' +# Pattern for released versions +smv_released_pattern = r'^tags/.*$' +# Format for versioned output directories inside the build directory +smv_outputdir_format = '{ref.name}' + +# -- Options for sitemap extension -# -- Options for HTML output --------------------------------------------------- +sitemap_url_scheme = "/stable/{link}" + +# -- Options for HTML output # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. @@ -79,8 +140,10 @@ def setup(sphinx): 'github_repository': 'scylladb/python-driver', 'github_issues_repository': 'scylladb/python-driver', 'hide_edit_this_page_button': 'false', - 'hide_sidebar_index': 'false', 'hide_version_dropdown': ['master'], + 'hide_feedback_buttons': 'false', + 'versions_unstable': UNSTABLE_VERSIONS, + 'versions_deprecated': DEPRECATED_VERSIONS, } # Custom sidebar templates, maps document names to template names. @@ -98,33 +161,9 @@ def setup(sphinx): # Dictionary of values to pass into the template engine’s context for all pages html_context = {'html_baseurl': html_baseurl} -# -- Options for not found extension ------------------------------------------- - -# Template used to render the 404.html generated by this extension. -notfound_template = '404.html' - -# Prefix added to all the URLs generated in the 404 page. 
-notfound_urls_prefix = '' - -# -- Options for redirect extension -------------------------------------------- - -# Read a YAML dictionary of redirections and generate an HTML file for each -redirects_file = "_utils/redirections.yaml" - -# -- Options for multiversion -------------------------------------------------- -# Whitelist pattern for tags (set to None to ignore all tags) -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.1-scylla'] -smv_tag_whitelist = multiversion_regex_builder(TAGS) -# Whitelist pattern for branches (set to None to ignore all branches) -BRANCHES = ['master'] -smv_branch_whitelist = multiversion_regex_builder(BRANCHES) -# Defines which version is considered to be the latest stable version. -# Must be listed in smv_tag_whitelist or smv_branch_whitelist. -smv_latest_version = '3.25.1-scylla' -smv_rename_latest_version = 'stable' -# Whitelist pattern for remotes (set to None to use local branches only) -smv_remote_whitelist = r"^origin$" -# Pattern for released versions -smv_released_pattern = r'^tags/.*$' -# Format for versioned output directories inside the build directory -smv_outputdir_format = '{ref.name}' +autodoc_mock_imports = [ + # Asyncore has been removed from python 3.12, we need to mock it until `cassandra/io/asyncorereactor.py` is dropped + "asyncore", + # Since driver is not built, binary modules also not built, so we need to mock them + "cassandra.io.libevwrapper" +] diff --git a/docs/core_graph.rst b/docs/core_graph.rst deleted file mode 100644 index 6a2109d752..0000000000 --- a/docs/core_graph.rst +++ /dev/null @@ -1,436 +0,0 @@ -:orphan: - -DataStax Graph Queries -====================== - -The driver executes graph queries over the Cassandra native protocol. Use -:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for -executing gremlin queries in DataStax Graph. - -The driver defines three Execution Profiles suitable for graph execution: - -* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` - -See :doc:`getting_started` and :doc:`execution_profiles` -for more detail on working with profiles. - -In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It -provides a better unified multi-model, performance and scale. This guide -is for graphs that use the core engine. If you work with previous versions of -DSE or existing graphs, see ``classic_graph``. - -Getting Started with Graph and the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, we need to create a graph in the system. To access the system API, we -use the system execution profile :: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT - - cluster = Cluster() - session = cluster.connect() - - graph_name = 'movies' - session.execute_graph("system.graph(name).create()", {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - - -Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under -the hood, we can automatically configure the execution profile with the proper options -(row_factory and graph_protocol) when executing queries. 
You only need to make sure that -the `graph_name` is set and GraphSON3 will be automatically used:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph("g.addV(...)") - - -Note that this graph engine detection is based on the metadata. You might experience -some query errors if the graph has been newly created and is not yet in the metadata. This -would result to a badly configured execution profile. If you really want to avoid that, -configure your execution profile explicitly:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory - - graph_name = 'movies' - ep_graphson3 = GraphExecutionProfile( - row_factory=graph_graphson3_row_factory, - graph_options=GraphOptions( - graph_protocol=GraphProtocol.GRAPHSON_3_0, - graph_name=graph_name)) - - cluster = Cluster(execution_profiles={'core': ep_graphson3}) - session = cluster.connect() - session.execute_graph("g.addV(...)", execution_profile='core') - - -We are ready to configure our graph schema. We will create a simple one for movies:: - - # A Vertex represents a "thing" in the world. - # Create the genre vertex - query = """ - schema.vertexLabel('genre') - .partitionBy('genreId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the person vertex - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the movie vertex - query = """ - schema.vertexLabel('movie') - .partitionBy('movieId', Int) - .property('title', Text) - .property('year', Int) - .property('country', Text) - .create() - """ - session.execute_graph(query) - - # An edge represents a relationship between two vertices - # Create our edges - queries = """ - schema.edgeLabel('belongsTo').from('movie').to('genre').create(); - schema.edgeLabel('actor').from('movie').to('person').create(); - """ - session.execute_graph(queries) - - # Indexes to execute graph requests efficiently - - # If you have a node with the search workload enabled (solr), use the following: - indexes = """ - schema.vertexLabel('genre').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('person').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('movie').searchIndex() - .by('title') - .by("year") - .create(); - """ - session.execute_graph(indexes) - - # Otherwise, use secondary indexes: - indexes = """ - schema.vertexLabel('genre') - .secondaryIndex('by_genre') - .by('name') - .create() - - schema.vertexLabel('person') - .secondaryIndex('by_name') - .by('name') - .create() - - schema.vertexLabel('movie') - .secondaryIndex('by_title') - .by('title') - .create() - """ - session.execute_graph(indexes) - -Add some edge indexes (materialized views):: - - indexes = """ - schema.edgeLabel('belongsTo') - .from('movie') - .to('genre') - .materializedView('movie__belongsTo__genre_by_in_genreId') - .ifNotExists() - .partitionBy(IN, 'genreId') - .clusterBy(OUT, 'movieId', Asc) - .create() - - schema.edgeLabel('actor') - .from('movie') - .to('person') - .materializedView('movie__actor__person_by_in_personId') - .ifNotExists() - .partitionBy(IN, 'personId') - 
.clusterBy(OUT, 'movieId', Asc) - .create() - """ - session.execute_graph(indexes) - -Next, we'll add some data:: - - session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); - """) - - session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); - """) - - session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); - """) - -Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: - - session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); - - g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); - g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); - - g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); - """) - -We are all set. You can now query your graph. 
Here are some examples:: - - # Find all movies of the genre Drama - for r in session.execute_graph(""" - g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of the same genre than the movie 'Dead Man' - for r in session.execute_graph(""" - g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of Mark Wahlberg - for r in session.execute_graph(""" - g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): - print(r) - -To see a more graph examples, see `DataStax Graph Examples `_. - -Graph Types for the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Here are the supported graph types with their python representations: - -============ ================= -DSE Graph Python Driver -============ ================= -text str -boolean bool -bigint long -int int -smallint int -varint long -double float -float float -uuid UUID -bigdecimal Decimal -duration Duration (cassandra.util) -inet str or IPV4Address/IPV6Address (if available) -timestamp datetime.datetime -date datetime.date -time datetime.time -polygon Polygon -point Point -linestring LineString -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -list list -map dict -set set or list - (Can return a list due to numerical values returned by Java) -tuple tuple -udt class or namedtuple -============ ================= - -Named Parameters -~~~~~~~~~~~~~~~~ - -Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: - - result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - [r.value for r in result_set] # [1, 2] - -All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized -automatically to their graph representation: - -Example:: - - session.execute_graph(""" - g.addV('person'). - property('name', text_value). - property('age', integer_value). - property('birthday', timestamp_value). - property('house_yard', polygon_value).next() - """, { - 'text_value': 'Mike Smith', - 'integer_value': 34, - 'timestamp_value': datetime.datetime(1967, 12, 30), - 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - }) - - -As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) -or specified per execution:: - - ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, - graph_options=GraphOptions(graph_name='something-else')) - session.execute_graph(statement, execution_profile=ep) - -CQL collections, Tuple and UDT -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a very interesting feature of the core engine: we can use all CQL data types, including -list, map, set, tuple and udt. 
Here is an example using all these types:: - - query = """ - schema.type('address') - .property('address', Text) - .property('city', Text) - .property('state', Text) - .create(); - """ - session.execute_graph(query) - - # It works the same way than normal CQL UDT, so we - # can create an udt class and register it - class Address(object): - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - session.cluster.register_user_type(graph_name, 'address', Address) - - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('address', typeOf('address')) - .property('friends', listOf(Text)) - .property('skills', setOf(Text)) - .property('scores', mapOf(Text, Int)) - .property('last_workout', tupleOf(Text, Date)) - .create() - """ - session.execute_graph(query) - - # insertion example - query = """ - g.addV('person') - .property('personId', pid) - .property('address', address) - .property('friends', friends) - .property('skills', skills) - .property('scores', scores) - .property('last_workout', last_workout) - .next() - """ - - session.execute_graph(query, { - 'pid': 3, - 'address': Address('42 Smith St', 'Quebec', 'QC'), - 'friends': ['Al', 'Mike', 'Cathy'], - 'skills': {'food', 'fight', 'chess'}, - 'scores': {'math': 98, 'french': 3}, - 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) - }) - -Limitations ------------ - -Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might -get schema errors when trying to write numerical data. Example:: - - session.execute_graph(""" - schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() - """) - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', 99))} - ) - - # error: [Invalid query] message="Value component 1 is of type int, not bigint" - -This is because the server requires the client to include a GraphSON schema definition -with every UDT or tuple query. In the general case, the driver can't determine what Graph type -is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. -The driver provides some numerical type-wrapper factories that you can use to specify types: - -* :func:`~cassandra.datastax.graph.to_int` -* :func:`~cassandra.datastax.graph.to_bigint` -* :func:`~cassandra.datastax.graph.to_smallint` -* :func:`~cassandra.datastax.graph.to_float` -* :func:`~cassandra.datastax.graph.to_double` - -Here's the working example of the case above:: - - from cassandra.graph import to_bigint - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', to_bigint(99))} - ) - -Continuous Paging -~~~~~~~~~~~~~~~~~ - -This is another nice feature that comes with the core engine: continuous paging with -graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically -enabled under the hood to get the best performance. 
If you want to explicitly -enable/disable it, you can do it through the execution profile:: - - # Disable it - ep = GraphExecutionProfile(..., continuous_paging_options=None)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - - # Enable with a custom max_pages option - ep = GraphExecutionProfile(..., - continuous_paging_options=ContinuousPagingOptions(max_pages=10))) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index 03ade27521..fd44303514 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -99,7 +99,7 @@ You can specify a default connection per model: year = columns.Integer(primary_key=True) model = columns.Text(primary_key=True) - print len(Automobile.objects.all()) # executed on the connection 'cluster2' + print(len(Automobile.objects.all())) # executed on the connection 'cluster2' QuerySet and model instance --------------------------- diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index c0ba390119..719513f4a9 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -201,7 +201,7 @@ are only created, presisted, and queried via table Models. A short example to in users.create(name="Joe", addr=address(street="Easy St.", zipcode=99999)) user = users.objects(name="Joe")[0] - print user.name, user.addr + print(user.name, user.addr) # Joe address(street=u'Easy St.', zipcode=99999) UDTs are modeled by inheriting :class:`~.usertype.UserType`, and setting column type attributes. Types are then used in defining diff --git a/docs/cqlengine/third_party.rst b/docs/cqlengine/third-party.rst similarity index 100% rename from docs/cqlengine/third_party.rst rename to docs/cqlengine/third-party.rst diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade-guide.rst similarity index 100% rename from docs/cqlengine/upgrade_guide.rst rename to docs/cqlengine/upgrade-guide.rst diff --git a/docs/dates_and_times.rst b/docs/dates-and-times.rst similarity index 100% rename from docs/dates_and_times.rst rename to docs/dates-and-times.rst diff --git a/docs/execution_profiles.rst b/docs/execution-profiles.rst similarity index 91% rename from docs/execution_profiles.rst rename to docs/execution-profiles.rst index 7be1a85e3f..0965d77f3d 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution-profiles.rst @@ -43,7 +43,7 @@ Default session = cluster.connect() local_query = 'SELECT rpc_address FROM system.local' for _ in cluster.metadata.all_hosts(): - print session.execute(local_query)[0] + print(session.execute(local_query)[0]) .. parsed-literal:: @@ -69,7 +69,7 @@ Initializing cluster with profiles profiles = {'node1': node1_profile, 'node2': node2_profile} session = Cluster(execution_profiles=profiles).connect() for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile='node1')[0] + print(session.execute(local_query, execution_profile='node1')[0]) .. parsed-literal:: @@ -81,7 +81,7 @@ Initializing cluster with profiles .. code:: python for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile='node2')[0] + print(session.execute(local_query, execution_profile='node2')[0]) .. parsed-literal:: @@ -93,7 +93,7 @@ Initializing cluster with profiles .. code:: python for _ in cluster.metadata.all_hosts(): - print session.execute(local_query)[0] + print(session.execute(local_query)[0]) .. 
parsed-literal:: @@ -123,7 +123,7 @@ New profiles can be added constructing from scratch, or deriving from default: cluster.add_execution_profile(node1_profile, locked_execution) for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile=node1_profile)[0] + print(session.execute(local_query, execution_profile=node1_profile)[0]) .. parsed-literal:: @@ -144,8 +144,8 @@ We also have the ability to pass profile instances to be used for execution, but tmp = session.execution_profile_clone_update('node1', request_timeout=100, row_factory=tuple_factory) - print session.execute(local_query, execution_profile=tmp)[0] - print session.execute(local_query, execution_profile='node1')[0] + print(session.execute(local_query, execution_profile=tmp)[0]) + print(session.execute(local_query, execution_profile='node1')[0]) .. parsed-literal:: diff --git a/docs/faq.rst b/docs/faq.rst index 56cb648a24..194d5520e8 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -44,7 +44,7 @@ Since tracing is done asynchronously to the request, this method polls until the >>> result = future.result() >>> trace = future.get_query_trace() >>> for e in trace.events: - >>> print e.source_elapsed, e.description + >>> print(e.source_elapsed, e.description) 0:00:00.000077 Parsing select * from system.local 0:00:00.000153 Preparing statement @@ -67,7 +67,7 @@ With prepared statements, the replicas are obtained by ``routing_key``, based on >>> bound = prepared.bind((1,)) >>> replicas = cluster.metadata.get_replicas(bound.keyspace, bound.routing_key) >>> for h in replicas: - >>> print h.address + >>> print(h.address) 127.0.0.1 127.0.0.2 diff --git a/docs/geo_types.rst b/docs/geo_types.rst deleted file mode 100644 index d85e1d3c95..0000000000 --- a/docs/geo_types.rst +++ /dev/null @@ -1,41 +0,0 @@ -:orphan: - -DSE Geometry Types -================== -This section shows how to query and work with the geometric types provided by DSE. - -These types are enabled implicitly by creating the Session from :class:`cassandra.cluster.Cluster`. -This module implicitly registers these types for use in the driver. This extension provides -some simple representative types in :mod:`cassandra.util` for inserting and retrieving data:: - - from cassandra.cluster import Cluster - from cassandra.util import Point, LineString, Polygon - session = Cluster().connect() - - session.execute("INSERT INTO ks.geo (k, point, line, poly) VALUES (%s, %s, %s, %s)", - 0, Point(1, 2), LineString(((1, 2), (3, 4))), Polygon(((1, 2), (3, 4), (5, 6)))) - -Queries returning geometric types return the :mod:`dse.util` types. Note that these can easily be used to construct -types from third-party libraries using the common attributes:: - - from shapely.geometry import LineString - shapely_linestrings = [LineString(res.line.coords) for res in session.execute("SELECT line FROM ks.geo")] - -For prepared statements, shapely geometry types can be used interchangeably with the built-in types because their -defining attributes are the same:: - - from shapely.geometry import Point - prepared = session.prepare("UPDATE ks.geo SET point = ? 
WHERE k = ?") - session.execute(prepared, (0, Point(1.2, 3.4))) - -In order to use shapely types in a CQL-interpolated (non-prepared) query, one must update the encoder with those types, specifying -the same string encoder as set for the internal types:: - - from cassandra import util - from shapely.geometry import Point, LineString, Polygon - - encoder_func = session.encoder.mapping[util.Point] - for t in (Point, LineString, Polygon): - session.encoder.mapping[t] = encoder_func - - session.execute("UPDATE ks.geo SET point = %s where k = %s", (0, Point(1.2, 3.4))) diff --git a/docs/getting_started.rst b/docs/getting-started.rst similarity index 98% rename from docs/getting_started.rst rename to docs/getting-started.rst index 59a2acbd04..76685c5fdf 100644 --- a/docs/getting_started.rst +++ b/docs/getting-started.rst @@ -119,7 +119,7 @@ way to execute a query is to use :meth:`~.Session.execute()`: rows = session.execute('SELECT name, age, email FROM users') for user_row in rows: - print user_row.name, user_row.age, user_row.email + print(user_row.name, user_row.age, user_row.email) This will transparently pick a Cassandra node to execute the query against and handle any retries that are necessary if the operation fails. @@ -135,19 +135,19 @@ examples are equivalent: rows = session.execute('SELECT name, age, email FROM users') for row in rows: - print row.name, row.age, row.email + print(row.name, row.age, row.email) .. code-block:: python rows = session.execute('SELECT name, age, email FROM users') for (name, age, email) in rows: - print name, age, email + print(name, age, email) .. code-block:: python rows = session.execute('SELECT name, age, email FROM users') for row in rows: - print row[0], row[1], row[2] + print(row[0], row[1], row[2]) If you prefer another result format, such as a ``dict`` per row, you can change the :attr:`~.Session.row_factory` attribute. @@ -188,7 +188,7 @@ of the driver may use the same placeholders for both). Passing Parameters to CQL Queries --------------------------------- -Althought it is not recommended, you can also pass parameters to non-prepared +Although it is not recommended, you can also pass parameters to non-prepared statements. The driver supports two forms of parameter place-holders: positional and named. @@ -335,7 +335,7 @@ For example: try: rows = future.result() user = rows[0] - print user.name, user.age + print(user.name, user.age) except ReadTimeout: log.exception("Query timed out:") @@ -352,7 +352,7 @@ This works well for executing many queries concurrently: # wait for them to complete and use the results for future in futures: rows = future.result() - print rows[0].name + print(rows[0].name) Alternatively, instead of calling :meth:`~.ResponseFuture.result()`, you can attach callback and errback functions through the diff --git a/docs/graph.rst b/docs/graph.rst deleted file mode 100644 index b0cad4ea36..0000000000 --- a/docs/graph.rst +++ /dev/null @@ -1,437 +0,0 @@ -:orphan: - -DataStax Graph Queries -====================== - -The driver executes graph queries over the Cassandra native protocol. Use -:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for -executing gremlin queries in DataStax Graph. 
- -The driver defines three Execution Profiles suitable for graph execution: - -* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` - -See :doc:`getting_started` and :doc:`execution_profiles` -for more detail on working with profiles. - -In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It -provides a better unified multi-model, performance and scale. This guide -is for graphs that use the core engine. If you work with previous versions of -DSE or existing graphs, see :doc:`classic_graph`. - -Getting Started with Graph and the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, we need to create a graph in the system. To access the system API, we -use the system execution profile :: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT - - cluster = Cluster() - session = cluster.connect() - - graph_name = 'movies' - session.execute_graph("system.graph(name).create()", {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - - -Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under -the hood, we can automatically configure the execution profile with the proper options -(row_factory and graph_protocol) when executing queries. You only need to make sure that -the `graph_name` is set and GraphSON3 will be automatically used:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph("g.addV(...)") - - -Note that this graph engine detection is based on the metadata. You might experience -some query errors if the graph has been newly created and is not yet in the metadata. This -would result to a badly configured execution profile. If you really want to avoid that, -configure your execution profile explicitly:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory - - graph_name = 'movies' - ep_graphson3 = GraphExecutionProfile( - row_factory=graph_graphson3_row_factory, - graph_options=GraphOptions( - graph_protocol=GraphProtocol.GRAPHSON_3_0, - graph_name=graph_name)) - - cluster = Cluster(execution_profiles={'core': ep_graphson3}) - session = cluster.connect() - session.execute_graph("g.addV(...)", execution_profile='core') - - -We are ready to configure our graph schema. We will create a simple one for movies:: - - # A Vertex represents a "thing" in the world. 
- # Create the genre vertex - query = """ - schema.vertexLabel('genre') - .partitionBy('genreId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the person vertex - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the movie vertex - query = """ - schema.vertexLabel('movie') - .partitionBy('movieId', Int) - .property('title', Text) - .property('year', Int) - .property('country', Text) - .create() - """ - session.execute_graph(query) - - # An edge represents a relationship between two vertices - # Create our edges - queries = """ - schema.edgeLabel('belongsTo').from('movie').to('genre').create(); - schema.edgeLabel('actor').from('movie').to('person').create(); - """ - session.execute_graph(queries) - - # Indexes to execute graph requests efficiently - - # If you have a node with the search workload enabled (solr), use the following: - indexes = """ - schema.vertexLabel('genre').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('person').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('movie').searchIndex() - .by('title') - .by("year") - .create(); - """ - session.execute_graph(indexes) - - # Otherwise, use secondary indexes: - indexes = """ - schema.vertexLabel('genre') - .secondaryIndex('by_genre') - .by('name') - .create() - - schema.vertexLabel('person') - .secondaryIndex('by_name') - .by('name') - .create() - - schema.vertexLabel('movie') - .secondaryIndex('by_title') - .by('title') - .create() - """ - session.execute_graph(indexes) - -Add some edge indexes (materialized views):: - - indexes = """ - schema.edgeLabel('belongsTo') - .from('movie') - .to('genre') - .materializedView('movie__belongsTo__genre_by_in_genreId') - .ifNotExists() - .partitionBy(IN, 'genreId') - .clusterBy(OUT, 'movieId', Asc) - .create() - - schema.edgeLabel('actor') - .from('movie') - .to('person') - .materializedView('movie__actor__person_by_in_personId') - .ifNotExists() - .partitionBy(IN, 'personId') - .clusterBy(OUT, 'movieId', Asc) - .create() - """ - session.execute_graph(indexes) - -Next, we'll add some data:: - - session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); - """) - - session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); - """) - - session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). 
- property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); - """) - -Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: - - session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); - - g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); - g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); - - g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); - """) - -We are all set. You can now query your graph. Here are some examples:: - - # Find all movies of the genre Drama - for r in session.execute_graph(""" - g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of the same genre than the movie 'Dead Man' - for r in session.execute_graph(""" - g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of Mark Wahlberg - for r in session.execute_graph(""" - g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): - print(r) - -To see a more graph examples, see `DataStax Graph Examples `_. 
- -Graph Types for the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Here are the supported graph types with their python representations: - -============ ================= -DSE Graph Python Driver -============ ================= -text str -boolean bool -bigint long -int int -smallint int -varint long -double float -float float -uuid UUID -bigdecimal Decimal -duration Duration (cassandra.util) -inet str or IPV4Address/IPV6Address (if available) -timestamp datetime.datetime -date datetime.date -time datetime.time -polygon Polygon -point Point -linestring LineString -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -list list -map dict -set set or list - (Can return a list due to numerical values returned by Java) -tuple tuple -udt class or namedtuple -============ ================= -======= - -Named Parameters -~~~~~~~~~~~~~~~~ - -Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: - - result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - [r.value for r in result_set] # [1, 2] - -All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized -automatically to their graph representation: - -Example:: - - session.execute_graph(""" - g.addV('person'). - property('name', text_value). - property('age', integer_value). - property('birthday', timestamp_value). - property('house_yard', polygon_value).next() - """, { - 'text_value': 'Mike Smith', - 'integer_value': 34, - 'timestamp_value': datetime.datetime(1967, 12, 30), - 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - }) - - -As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) -or specified per execution:: - - ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, - graph_options=GraphOptions(graph_name='something-else')) - session.execute_graph(statement, execution_profile=ep) - -CQL collections, Tuple and UDT -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a very interesting feature of the core engine: we can use all CQL data types, including -list, map, set, tuple and udt. 
Here is an example using all these types:: - - query = """ - schema.type('address') - .property('address', Text) - .property('city', Text) - .property('state', Text) - .create(); - """ - session.execute_graph(query) - - # It works the same way than normal CQL UDT, so we - # can create an udt class and register it - class Address(object): - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - session.cluster.register_user_type(graph_name, 'address', Address) - - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('address', typeOf('address')) - .property('friends', listOf(Text)) - .property('skills', setOf(Text)) - .property('scores', mapOf(Text, Int)) - .property('last_workout', tupleOf(Text, Date)) - .create() - """ - session.execute_graph(query) - - # insertion example - query = """ - g.addV('person') - .property('personId', pid) - .property('address', address) - .property('friends', friends) - .property('skills', skills) - .property('scores', scores) - .property('last_workout', last_workout) - .next() - """ - - session.execute_graph(query, { - 'pid': 3, - 'address': Address('42 Smith St', 'Quebec', 'QC'), - 'friends': ['Al', 'Mike', 'Cathy'], - 'skills': {'food', 'fight', 'chess'}, - 'scores': {'math': 98, 'french': 3}, - 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) - }) - -Limitations ------------ - -Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might -get schema errors when trying to write numerical data. Example:: - - session.execute_graph(""" - schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() - """) - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', 99))} - ) - - # error: [Invalid query] message="Value component 1 is of type int, not bigint" - -This is because the server requires the client to include a GraphSON schema definition -with every UDT or tuple query. In the general case, the driver can't determine what Graph type -is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. -The driver provides some numerical type-wrapper factories that you can use to specify types: - -* :func:`~.to_int` -* :func:`~.to_bigint` -* :func:`~.to_smallint` -* :func:`~.to_float` -* :func:`~.to_double` - -Here's the working example of the case above:: - - from cassandra.graph import to_bigint - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', to_bigint(99))} - ) - -Continuous Paging -~~~~~~~~~~~~~~~~~ - -This is another nice feature that comes with the core engine: continuous paging with -graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically -enabled under the hood to get the best performance. 
If you want to explicitly -enable/disable it, you can do it through the execution profile:: - - # Disable it - ep = GraphExecutionProfile(..., continuous_paging_options=None)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - - # Enable with a custom max_pages option - ep = GraphExecutionProfile(..., - continuous_paging_options=ContinuousPagingOptions(max_pages=10))) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst deleted file mode 100644 index a59117626f..0000000000 --- a/docs/graph_fluent.rst +++ /dev/null @@ -1,417 +0,0 @@ -:orphan: - -DataStax Graph Fluent API -========================= - -The fluent API adds graph features to the core driver: - -* A TinkerPop GraphTraversalSource builder to execute traversals on a DSE cluster -* The ability to execution traversal queries explicitly using execute_graph -* GraphSON serializers for all DSE Graph types. -* DSE Search predicates - -The Graph fluent API depends on Apache TinkerPop and is not installed by default. Make sure -you have the Graph requirements are properly :ref:`installed `. - -You might be interested in reading the :doc:`DataStax Graph Getting Started documentation ` to -understand the basics of creating a graph and its schema. - -Graph Traversal Queries -~~~~~~~~~~~~~~~~~~~~~~~ - -The driver provides :meth:`.Session.execute_graph`, which allows users to execute traversal -query strings. Here is a simple example:: - - session.execute_graph("g.addV('genre').property('genreId', 1).property('name', 'Action').next();") - -Since graph queries can be very complex, working with strings is not very convenient and is -hard to maintain. This fluent API allows you to build Gremlin traversals and write your graph -queries directly in Python. These native traversal queries can be executed explicitly, with -a `Session` object, or implicitly:: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphProtocol - from cassandra.datastax.graph.fluent import DseGraph - - # Create an execution profile, using GraphSON3 for Core graphs - ep_graphson3 = DseGraph.create_execution_profile( - 'my_core_graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) - session = cluster.connect() - - # Execute a fluent graph query - g = DseGraph.traversal_source(session=session) - g.addV('genre').property('genreId', 1).property('name', 'Action').next() - - # implicit execution caused by iterating over results - for v in g.V().has('genre', 'name', 'Drama').in_('belongsTo').valueMap(): - print(v) - -These Python types are also supported transparently:: - - g.addV('person').property('name', 'Mike').property('birthday', datetime(1984, 3, 11)). \ - property('house_yard', Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - -More readings about Gremlin: - -* `DataStax Drivers Fluent API `_ -* `gremlin-python documentation `_ - -Configuring a Traversal Execution Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The fluent api takes advantage of *configuration profiles* to allow -different execution configurations for the various query handlers. Graph traversal -execution requires a custom execution profile to enable Gremlin-bytecode as -query language. With Core graphs, it is important to use GraphSON3. Here is how -to accomplish this configuration: - -.. 
code-block:: python - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphProtocol - from cassandra.datastax.graph.fluent import DseGraph - - # Using GraphSON3 as graph protocol is a requirement with Core graphs. - ep = DseGraph.create_execution_profile( - 'graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0) - - # For Classic graphs, GraphSON1, GraphSON2 and GraphSON3 (DSE 6.8+) are supported. - ep_classic = DseGraph.create_execution_profile('classic_graph_name') # default is GraphSON2 - - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep, 'classic': ep_classic}) - session = cluster.connect() - - g = DseGraph.traversal_source(session) # Build the GraphTraversalSource - print g.V().toList() # Traverse the Graph - -Note that the execution profile created with :meth:`DseGraph.create_execution_profile <.datastax.graph.fluent.DseGraph.create_execution_profile>` cannot -be used for any groovy string queries. - -If you want to change execution property defaults, please see the :doc:`Execution Profile documentation ` -for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you -need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` - -Explicit Graph Traversal Execution with a DSE Session -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Traversal queries can be executed explicitly using `session.execute_graph` or `session.execute_graph_async`. These functions -return results as DSE graph types. If you are familiar with DSE queries or need async execution, you might prefer that way. -Below is an example of explicit execution. For this example, assume the schema has been generated as above: - -.. code-block:: python - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphProtocol - from cassandra.datastax.graph.fluent import DseGraph - from pprint import pprint - - ep = DseGraph.create_execution_profile( - 'graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - - g = DseGraph.traversal_source(session=session) - -Convert a traversal to a bytecode query for classic graphs:: - - addV_query = DseGraph.query_from_traversal( - g.addV('genre').property('genreId', 1).property('name', 'Action'), - graph_protocol=GraphProtocol.GRAPHSON_3_0 - ) - v_query = DseGraph.query_from_traversal( - g.V(), - graph_protocol=GraphProtocol.GRAPHSON_3_0) - - for result in session.execute_graph(addV_query): - pprint(result.value) - for result in session.execute_graph(v_query): - pprint(result.value) - -Converting a traversal to a bytecode query for core graphs require some more work, because we -need the cluster context for UDT and tuple types: - -.. 
code-block:: python - context = { - 'cluster': cluster, - 'graph_name': 'the_graph_for_the_query' - } - addV_query = DseGraph.query_from_traversal( - g.addV('genre').property('genreId', 1).property('name', 'Action'), - graph_protocol=GraphProtocol.GRAPHSON_3_0, - context=context - ) - - for result in session.execute_graph(addV_query): - pprint(result.value) - -Implicit Graph Traversal Execution with TinkerPop -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Using the :class:`DseGraph <.datastax.graph.fluent.DseGraph>` class, you can build a GraphTraversalSource -that will execute queries on a DSE session without explicitly passing anything to -that session. We call this *implicit execution* because the `Session` is not -explicitly involved. Everything is managed internally by TinkerPop while -traversing the graph and the results are TinkerPop types as well. - -Synchronous Example -------------------- - -.. code-block:: python - - # Build the GraphTraversalSource - g = DseGraph.traversal_source(session) - # implicitly execute the query by traversing the TraversalSource - g.addV('genre').property('genreId', 1).property('name', 'Action').next() - - # blocks until the query is completed and return the results - results = g.V().toList() - pprint(results) - -Asynchronous Exemple --------------------- - -You can execute a graph traversal query asynchronously by using `.promise()`. It returns a -python `Future `_. - -.. code-block:: python - - # Build the GraphTraversalSource - g = DseGraph.traversal_source(session) - # implicitly execute the query by traversing the TraversalSource - g.addV('genre').property('genreId', 1).property('name', 'Action').next() # not async - - # get a future and wait - future = g.V().promise() - results = list(future.result()) - pprint(results) - - # or set a callback - def cb(f): - results = list(f.result()) - pprint(results) - future = g.V().promise() - future.add_done_callback(cb) - # do other stuff... - -Specify the Execution Profile explicitly -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you don't want to change the default graph execution profile (`EXEC_PROFILE_GRAPH_DEFAULT`), you can register a new -one as usual and use it explicitly. Here is an example: - -.. code-block:: python - - from cassandra.cluster import Cluster - from cassandra.datastax.graph.fluent import DseGraph - - cluster = Cluster() - ep = DseGraph.create_execution_profile('graph_name', graph_protocol=GraphProtocol.GRAPHSON_3_0) - cluster.add_execution_profile('graph_traversal', ep) - session = cluster.connect() - - g = DseGraph.traversal_source() - query = DseGraph.query_from_traversal(g.V()) - session.execute_graph(query, execution_profile='graph_traversal') - -You can also create multiple GraphTraversalSources and use them with -the same execution profile (for different graphs): - -.. code-block:: python - - g_movies = DseGraph.traversal_source(session, graph_name='movies', ep) - g_series = DseGraph.traversal_source(session, graph_name='series', ep) - - print(g_movies.V().toList()) # Traverse the movies Graph - print(g_series.V().toList()) # Traverse the series Graph - -Batch Queries -~~~~~~~~~~~~~ - -DSE Graph supports batch queries using a :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` object -instantiated with :meth:`DseGraph.batch <.datastax.graph.fluent.DseGraph.batch>`. A :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` allows -you to execute multiple graph traversals in a single atomic transaction. 
A -traversal batch is executed with :meth:`.Session.execute_graph` or using -:meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>` if bounded to a DSE session. - -Either way you choose to execute the traversal batch, you need to configure -the execution profile accordingly. Here is a example:: - - from cassandra.cluster import Cluster - from cassandra.datastax.graph.fluent import DseGraph - - ep = DseGraph.create_execution_profile( - 'graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0) - cluster = Cluster(execution_profiles={'graphson3': ep}) - session = cluster.connect() - - g = DseGraph.traversal_source() - -To execute the batch using :meth:`.Session.execute_graph`, you need to convert -the batch to a GraphStatement:: - - batch = DseGraph.batch() - - batch.add( - g.addV('genre').property('genreId', 1).property('name', 'Action')) - batch.add( - g.addV('genre').property('genreId', 2).property('name', 'Drama')) # Don't use `.next()` with a batch - - graph_statement = batch.as_graph_statement(graph_protocol=GraphProtocol.GRAPHSON_3_0) - graph_statement.is_idempotent = True # configure any Statement parameters if needed... - session.execute_graph(graph_statement, execution_profile='graphson3') - -To execute the batch using :meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>`, you need to bound the batch to a DSE session:: - - batch = DseGraph.batch(session, 'graphson3') # bound the session and execution profile - - batch.add( - g.addV('genre').property('genreId', 1).property('name', 'Action')) - batch.add( - g.addV('genre').property('genreId', 2).property('name', 'Drama')) # Don't use `.next()` with a batch - - batch.execute() - -DSL (Domain Specific Languages) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -DSL are very useful to write better domain-specific APIs and avoiding -code duplication. Let's say we have a graph of `People` and we produce -a lot of statistics based on age. All graph traversal queries of our -application would look like:: - - g.V().hasLabel("people").has("age", P.gt(21))... - - -which is not really verbose and quite annoying to repeat in a code base. Let's create a DSL:: - - from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource - - class MyAppTraversal(GraphTraversal): - - def younger_than(self, age): - return self.has("age", P.lt(age)) - - def older_than(self, age): - return self.has("age", P.gt(age)) - - - class MyAppTraversalSource(GraphTraversalSource): - - def __init__(self, *args, **kwargs): - super(MyAppTraversalSource, self).__init__(*args, **kwargs) - self.graph_traversal = MyAppTraversal - - def people(self): - return self.get_graph_traversal().V().hasLabel("people") - -Now, we can use our DSL that is a lot cleaner:: - - from cassandra.datastax.graph.fluent import DseGraph - - # ... - g = DseGraph.traversal_source(session=session, traversal_class=MyAppTraversalsource) - - g.people().younger_than(21)... - g.people().older_than(30)... - -To see a more complete example of DSL, see the `Python killrvideo DSL app `_ - -Search -~~~~~~ - -DSE Graph can use search indexes that take advantage of DSE Search functionality for -efficient traversal queries. 
Here are the list of additional search predicates: - -Text tokenization: - -* :meth:`token <.datastax.graph.fluent.predicates.Search.token>` -* :meth:`token_prefix <.datastax.graph.fluent.predicates.Search.token_prefix>` -* :meth:`token_regex <.datastax.graph.fluent.predicates.Search.token_regex>` -* :meth:`token_fuzzy <.datastax.graph.fluent.predicates.Search.token_fuzzy>` - -Text match: - -* :meth:`prefix <.datastax.graph.fluent.predicates.Search.prefix>` -* :meth:`regex <.datastax.graph.fluent.predicates.Search.regex>` -* :meth:`fuzzy <.datastax.graph.fluent.predicates.Search.fuzzy>` -* :meth:`phrase <.datastax.graph.fluent.predicates.Search.phrase>` - -Geo: - -* :meth:`inside <.datastax.graph.fluent.predicates.Geo.inside>` - -Create search indexes ---------------------- - -For text tokenization: - -.. code-block:: python - - - s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('text_field').asText().add()") - -For text match: - -.. code-block:: python - - - s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('text_field').asString().add()") - - -For geospatial: - -You can create a geospatial index on Point and LineString fields. - -.. code-block:: python - - - s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('point_field').add()") - - -Using search indexes --------------------- - -Token: - -.. code-block:: python - - from cassandra.datastax.graph.fluent.predicates import Search - # ... - - g = DseGraph.traversal_source() - query = DseGraph.query_from_traversal( - g.V().has('my_vertex_label','text_field', Search.token_regex('Hello.+World')).values('text_field')) - session.execute_graph(query) - -Text: - -.. code-block:: python - - from cassandra.datastax.graph.fluent.predicates import Search - # ... - - g = DseGraph.traversal_source() - query = DseGraph.query_from_traversal( - g.V().has('my_vertex_label','text_field', Search.prefix('Hello')).values('text_field')) - session.execute_graph(query) - -Geospatial: - -.. code-block:: python - - from cassandra.datastax.graph.fluent.predicates import Geo - from cassandra.util import Distance - # ... - - g = DseGraph.traversal_source() - query = DseGraph.query_from_traversal( - g.V().has('my_vertex_label','point_field', Geo.inside(Distance(46, 71, 100)).values('point_field')) - session.execute_graph(query) - - -For more details, please refer to the official `DSE Search Indexes Documentation `_ diff --git a/docs/index.rst b/docs/index.rst index fed26e9fc9..cd137917d9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Scylla `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. -The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 3.10-3.14. This driver is open source under the `Apache v2 License `_. @@ -17,44 +17,38 @@ Contents :doc:`installation` How to install the driver. 
-:doc:`getting_started` +:doc:`getting-started` A guide through the first steps of connecting to Scylla and executing queries -:doc:`scylla_specific` +:doc:`scylla-specific` A list of feature available only on ``scylla-driver`` -:doc:`execution_profiles` +:doc:`execution-profiles` An introduction to a more flexible way of configuring request execution :doc:`lwt` Working with results of conditional requests -:doc:`object_mapper` +:doc:`object-mapper` Introduction to the integrated object mapper, cqlengine :doc:`performance` Tips for getting good performance. -:doc:`query_paging` +:doc:`query-paging` Notes on paging large query results :doc:`security` An overview of the security features of the driver -:doc:`upgrading` - A guide to upgrading versions of the driver - -:doc:`user_defined_types` +:doc:`user-defined-types` Working with Scylla's user-defined types (UDT) -:doc:`dates_and_times` +:doc:`dates-and-times` Some discussion on the driver's approach to working with timestamp, date, time types -:doc:`scylla_cloud` - Connect to Scylla Cloud - -:doc:`CHANGELOG` - Log of changes to the driver, organized by version. +:doc:`scylla-cloud` + Connect to ScyllaDB Cloud :doc:`faq` A collection of Frequently Asked Questions @@ -67,26 +61,25 @@ Contents api/index installation - getting_started - scylla_specific - upgrading - execution_profiles + getting-started + scylla-specific + execution-profiles performance - query_paging + query-paging lwt security - user_defined_types - object_mapper - dates_and_times - scylla_cloud + user-defined-types + object-mapper + dates-and-times + scylla-cloud faq Getting Help ------------ Visit the :doc:`FAQ section ` in this documentation. -Please send questions to the Scylla `user list `_. - +Please send questions to `ScyllaDB Community Forum `_ +and the Scylla Users `Slack channel `_. Reporting Issues ---------------- @@ -101,5 +94,3 @@ Copyright © 2016, The Apache Software Foundation. Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks. - - diff --git a/docs/installation.rst b/docs/installation.rst index 4996a02c1b..ee227618ef 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python +Python versions 3.10-3.14 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. @@ -24,19 +24,9 @@ Verifying your Installation --------------------------- To check if the installation was successful, you can run:: - python -c 'import cassandra; print cassandra.__version__' + python -c 'import cassandra; print(cassandra.__version__)' -It should print something like "3.22.0". - -.. _installation-datastax-graph: - -(*Optional*) Graph ---------------------------- -The driver provides an optional fluent graph API that depends on Apache TinkerPop (gremlinpython). It is -not installed by default. To be able to build Gremlin traversals, you need to install -the `graph` requirements:: - - pip install scylla-driver[graph] +It should print something like "3.29.7". 
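As an extra sanity check you can also confirm which event loop implementation the driver selected, since this matters on newer Python versions (see the Supported Event Loops note further down this page). A minimal sketch, assuming the optional ``libev`` extension described in the libev support section below is installed::

    from cassandra.cluster import Cluster
    from cassandra.io.libevreactor import LibevConnection

    cluster = Cluster()
    print(cluster.connection_class)  # the reactor the driver picked by default

    # explicitly opt into the libev event loop (do this before calling cluster.connect())
    cluster.connection_class = LibevConnection

Which class is reported by default depends on your Python version and on which of the optional event loop extensions are installed.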
(*Optional*) Compression Support -------------------------------- @@ -86,6 +76,9 @@ threads used to build the driver and any C extensions: $ # installing from pip $ CASS_DRIVER_BUILD_CONCURRENCY=8 pip install scylla-driver +Note that by default (when CASS_DRIVER_BUILD_CONCURRENCY is not specified), concurrency will be equal to the number of +logical cores on your machine. + OSX Installation Error ^^^^^^^^^^^^^^^^^^^^^^ If you're installing on OSX and have XCode 5.1 installed, you may see an error like this:: @@ -188,16 +181,19 @@ If your sudo configuration does not allow SETENV, you must push the option flag applies these options to all dependencies (which break on the custom flag). Therefore, you must first install dependencies, then use install-option:: - sudo pip install six futures + sudo pip install futures sudo pip install --install-option="--no-cython" +Supported Event Loops +^^^^^^^^^^^^^^^^^^^^^ +For Python versions before 3.12 the driver uses the ``asyncore`` module for its default +event loop. Other event loops such as ``libev``, ``gevent`` and ``eventlet`` are also +available via Python modules or C extensions. Python 3.12 has removed ``asyncore`` entirely +so for this platform one of these other event loops must be used. + libev support ^^^^^^^^^^^^^ -The driver currently uses Python's ``asyncore`` module for its default -event loop. For better performance, ``libev`` is also supported through -a C extension. - If you're on Linux, you should be able to install libev through a package manager. For example, on Debian/Ubuntu:: @@ -212,8 +208,10 @@ through `Homebrew `_. For example, on Mac OS X:: $ brew install libev -The libev extension is not built for Windows (the build process is complex, and the Windows implementation uses -select anyway). +The libev extension can now be built for Windows as of Python driver version 3.29.7. You can +install libev using any Windows package manager. For example, to install using `vcpkg `_: + + $ vcpkg install libev If successful, you should be able to build and install the extension (just using ``setup.py build`` or ``setup.py install``) and then use diff --git a/docs/object_mapper.rst b/docs/object-mapper.rst similarity index 94% rename from docs/object_mapper.rst rename to docs/object-mapper.rst index 50d3cbf320..5eb78f57b6 100644 --- a/docs/object_mapper.rst +++ b/docs/object-mapper.rst @@ -7,7 +7,7 @@ cqlengine is the Cassandra CQL 3 Object Mapper packaged with this driver Contents -------- -:doc:`cqlengine/upgrade_guide` +:doc:`cqlengine/upgrade-guide` For migrating projects from legacy cqlengine, to the integrated product :doc:`cqlengine/models` @@ -25,7 +25,7 @@ Contents :ref:`API Documentation ` Index of API documentation -:doc:`cqlengine/third_party` +:doc:`cqlengine/third-party` High-level examples in Celery and uWSGI :doc:`cqlengine/faq` @@ -33,12 +33,12 @@ Contents .. toctree:: :hidden: - cqlengine/upgrade_guide + cqlengine/upgrade-guide cqlengine/models cqlengine/queryset cqlengine/batches cqlengine/connections - cqlengine/third_party + cqlengine/third-party cqlengine/faq .. 
_getting-started: @@ -87,7 +87,7 @@ Getting Started >>> q.count() 4 >>> for instance in q: - >>> print instance.description + >>> print(instance.description) example5 example6 example7 @@ -101,5 +101,5 @@ Getting Started >>> q2.count() 1 >>> for instance in q2: - >>> print instance.description + >>> print(instance.description) example5 diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 0c40a9e464..f6ee417aee 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -1,26 +1,61 @@ -[tool.poetry] +[project] name = "python-driver-docs" version = "0.1.0" description = "ScyllaDB Python Driver Docs" -authors = ["Python Driver Contributors"] +authors = [{ name = "ScyllaDB" }] +package-mode = false +requires-python = ">=3.13,<3.14" -[tool.poetry.dependencies] -python = "^3.7" -geomet = "0.1.2" -six = "1.15.0" -futures = "2.2.0" -eventlet = "0.25.2" -gevent = "20.12.1" -scales = "1.0.9" -[tool.poetry.dev-dependencies] -sphinx-autobuild = "0.7.1" -Sphinx = "2.4.4" -jinja2 = "2.8.1" -gremlinpython = "3.4.7" -recommonmark = "0.5.0" -sphinx-scylladb-theme = "~1.0.0" -sphinx-multiversion-scylla = "~0.2.6" +dependencies = [ + "eventlet>=0.40.3,<1.0.0", + "gevent>=25.9.1,<26.0.0", + "gremlinpython==3.7.4", + "pygments>=2.19.2,<3.0.0", + "recommonmark==0.7.1", + "redirects_cli~=0.1.3", + "sphinx-autobuild>=2025.0.0,<2026.0.0", + "sphinx-sitemap>=2.8.0,<3.0.0", + "sphinx-scylladb-theme>=1.8.2,<2.0.0", + "sphinx-multiversion-scylla>=0.3.2,<1.0.0", + "sphinx>=8.2.3,<9.0.0", + "scales>=1.0.9,<2.0.0", + "six>=1.9", + "tornado>=6.5,<7.0", +] + +[dependency-groups] +# Add any dev-only tools here; example shown +dev = ["hatchling==1.28.0"] + +[tool.uv.sources] +# Keep the driver editable from the parent directory +scylla-driver = { path = "../", editable = true } [build-system] -requires = ["poetry>=0.12"] -build-backend = "poetry.masonry.api" +requires = ["hatchling==1.28.0"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] + +# We don't ship a Python package/module; just include the docs tree as data. +# Using 'include' avoids the "Unable to determine which files to ship" error. +include = [ + "**/*.rst", + "**/*.md", + "**/*.txt", + "**/*.py", # e.g., conf.py and any Sphinx helpers + "**/*.yml", + "**/*.yaml", + "**/*.json", + "**/*.css", + "**/*.js", + "**/*.html", + "_static/**", + "_templates/**", +] + +exclude = [ + "**/__pycache__/**", + "**/*.pyc", + ".venv/**", +] \ No newline at end of file diff --git a/docs/query_paging.rst b/docs/query-paging.rst similarity index 100% rename from docs/query_paging.rst rename to docs/query-paging.rst diff --git a/docs/scylla-cloud.rst b/docs/scylla-cloud.rst new file mode 100644 index 0000000000..b5eb6df798 --- /dev/null +++ b/docs/scylla-cloud.rst @@ -0,0 +1,5 @@ +ScyllaDB Cloud +-------------- + +To connect to a `ScyllaDB Cloud `_ cluster, go to the Cluster Connect page, Python example. +For best performance, make sure to use the Scylla Driver. diff --git a/docs/scylla-specific.rst b/docs/scylla-specific.rst new file mode 100644 index 0000000000..e9caaa8793 --- /dev/null +++ b/docs/scylla-specific.rst @@ -0,0 +1,124 @@ +Scylla Specific Features +======================== + +Shard Awareness +--------------- + +**scylla-driver** is shard aware and contains extensions that work with the TokenAwarePolicy supported by Scylla 2.3 and onwards. Using this policy, the driver can select a connection to a particular shard based on the shard's token. 
+As a result, latency is significantly reduced because there is no need to pass data between the shards. + +Details on the scylla cql protocol extensions +https://github.com/scylladb/scylla/blob/master/docs/dev/protocol-extensions.md#intranode-sharding + +To use it, you only need to enable ``TokenAwarePolicy`` on the ``Cluster``. + +See the configuration of ``native_shard_aware_transport_port`` and ``native_shard_aware_transport_port_ssl`` on scylla.yaml: +https://github.com/scylladb/scylla/blob/master/docs/dev/protocols.md#cql-client-protocol + +.. code:: python + + from cassandra.cluster import Cluster + from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy + + cluster = Cluster(load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) + + +New Cluster Helpers +------------------- + +* ``shard_aware_options`` + + Setting it to ``dict(disable=True)`` disables the shard aware functionality, for cases favoring one connection per host (for example, lots of processes connecting from one client host, generating a large number of connections). + + Another option is to configure Scylla by setting ``enable_shard_aware_drivers: false`` on scylla.yaml. + +.. code:: python + + from cassandra.cluster import Cluster + + cluster = Cluster(shard_aware_options=dict(disable=True)) + session = cluster.connect() + + assert not cluster.is_shard_aware(), "Shard aware should be disabled" + + # or just disable the shard aware port logic + cluster = Cluster(shard_aware_options=dict(disable_shardaware_port=True)) + session = cluster.connect() + +* ``cluster.is_shard_aware()`` + + New method available on ``Cluster`` allowing to check whether the remote cluster supports shard awareness (bool) + +.. code:: python + + from cassandra.cluster import Cluster + + cluster = Cluster() + session = cluster.connect() + + if cluster.is_shard_aware(): + print("connected to a scylla cluster") + +* ``cluster.shard_aware_stats()`` + + New method available on ``Cluster`` allowing to check the status of shard aware connections to all available hosts (dict) + +.. code:: python + + from cassandra.cluster import Cluster + + cluster = Cluster() + session = cluster.connect() + + stats = cluster.shard_aware_stats() + if all([v["shards_count"] == v["connected"] for v in stats.values()]): + print("successfully connected to all shards of all scylla nodes") + + +New Error Types +-------------------- + +* ``SCYLLA_RATE_LIMIT_ERROR`` Error + + ScyllaDB 5.1 introduced a feature called per-partition rate limiting. When the (user defined) per-partition rate limit is exceeded, the database will start returning a Scylla-specific type of error: RateLimitReached. + +.. code:: python + + from cassandra import RateLimitReached + from cassandra.cluster import Cluster + + cluster = Cluster() + session = cluster.connect() + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS keyspace1 + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} + """) + + session.execute("USE keyspace1") + session.execute(""" + CREATE TABLE tbl (pk int PRIMARY KEY, v int) + WITH per_partition_rate_limit = {'max_writes_per_second': 1} + """) + + prepared = session.prepare(""" + INSERT INTO tbl (pk, v) VALUES (?, ?) 
+     """) + + try: + for _ in range(1000): + session.execute(prepared.bind((123, 456))) + except RateLimitReached: + raise + + +Tablet Awareness +---------------- + +**scylla-driver** is tablet-aware, which means that it is able to parse the `TABLETS_ROUTING_V1` extension to ProtocolFeatures, receive tablet information sent by Scylla in the `custom_payload` part of the `RESULT` message, and utilize it. +Thanks to this, queries to tablet-based tables are still shard-aware. + +Details on the scylla cql protocol extensions +https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md#negotiate-sending-tablets-info-to-the-drivers + +Details on sending tablet information to the drivers +https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md#sending-tablet-info-to-the-drivers diff --git a/docs/scylla_cloud.rst b/docs/scylla_cloud.rst deleted file mode 100644 index 62aaf76433..0000000000 --- a/docs/scylla_cloud.rst +++ /dev/null @@ -1,5 +0,0 @@ -Scylla Cloud ------------- - -To connect to a `Scylla Cloud `_ cluster, go to the Cluster Connect page, Python example. -For best performance, make sure to use the Scylla Driver. diff --git a/docs/scylla_specific.rst b/docs/scylla_specific.rst deleted file mode 100644 index 366628e59b..0000000000 --- a/docs/scylla_specific.rst +++ /dev/null @@ -1,84 +0,0 @@ -Scylla Specific Features -======================== - -Shard Awareness ---------------- - -**scylla-driver** is shard aware and contains extensions that work with the TokenAwarePolicy supported by Scylla 2.3 and onwards. Using this policy, the driver can select a connection to a particular shard based on the shard's token. -As a result, latency is significantly reduced because there is no need to pass data between the shards. - -Details on the scylla cql protocol extensions -https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocol-extensions.md - -For using it you only need to enable ``TokenAwarePolicy`` on the ``Cluster`` - -.. code:: python - - from cassandra.cluster import Cluster - from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy - - cluster = Cluster(load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) - - -New Cluster Helpers ------------------- - -* ``cluster.is_shard_aware()`` - - New method available on ``Cluster`` allowing to check whether the remote cluster supports shard awareness (bool) - -.. code:: python - - from cassandra.cluster import Cluster - - cluster = Cluster() - session = cluster.connect() - - if cluster.is_shard_aware(): - print("connected to a scylla cluster") - -* ``cluster.shard_aware_stats()`` - - New method available on ``Cluster`` allowing to check the status of shard aware connections to all available hosts (dict) - -.. code:: python - - from cassandra.cluster import Cluster - - cluster = Cluster() - session = cluster.connect() - - stats = cluster.shard_aware_stats() - if all([v["shards_count"] == v["connected"] for v in stats.values()]): - print("successfully connected to all shards of all scylla nodes") - - -New Table Attributes -------------------- - -* ``in_memory`` flag - - New flag available on ``TableMetadata.options`` to indicate that it is an `In Memory `_ table - -.. note:: in memory tables is a feature existing only in Scylla Enterprise - -.. 
code:: python - - from cassandra.cluster import Cluster - - cluster = Cluster() - session = cluster.connect() - session.execute(""" - CREATE KEYSPACE IF NOT EXISTS keyspace1 - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}; - """) - - session.execute(""" - CREATE TABLE IF NOT EXISTS keyspace1.standard1 ( - key blob PRIMARY KEY, - "C0" blob - ) WITH in_memory=true AND compaction={'class': 'InMemoryCompactionStrategy'} - """) - - cluster.refresh_table_metadata("keyspace1", "standard1") - assert cluster.metadata.keyspaces["keyspace1"].tables["standard1"].options["in_memory"] == True diff --git a/docs/security.rst b/docs/security.rst index c30189562f..57e2be71da 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -37,23 +37,6 @@ If these do not suit your needs, you may need to create your own subclasses of :class:`~.AuthProvider` and :class:`~.Authenticator`. You can use the Sasl classes as example implementations. -Protocol v1 Authentication -^^^^^^^^^^^^^^^^^^^^^^^^^^ -When working with Cassandra 1.2 (or a higher version with -:attr:`~.Cluster.protocol_version` set to ``1``), you will not pass in -an :class:`~.AuthProvider` instance. Instead, you should pass in a -function that takes one argument, the IP address of a host, and returns -a dict of credentials with a ``username`` and ``password`` key: - -.. code-block:: python - - from cassandra.cluster import Cluster - - def get_credentials(host_address): - return {'username': 'joe', 'password': '1234'} - - cluster = Cluster(auth_provider=get_credentials, protocol_version=1) - SSL --- SSL should be used when client encryption is enabled in Cassandra. diff --git a/docs/upgrading.rst b/docs/upgrading.rst deleted file mode 100644 index 9559fa3579..0000000000 --- a/docs/upgrading.rst +++ /dev/null @@ -1,388 +0,0 @@ -Upgrading -========= - -.. toctree:: - :maxdepth: 1 - -Upgrading from dse-driver -------------------------- - -Since 3.21.0, scylla-driver fully supports DataStax products. dse-driver and -dse-graph users should now migrate to scylla-driver to benefit from latest bug fixes -and new features. The upgrade to this new unified driver version is straightforward -with no major API changes. - -Installation -^^^^^^^^^^^^ - -Only the `scylla-driver` package should be installed. `dse-driver` and `dse-graph` -are not required anymore:: - - pip install scylla-driver - -If you need the Graph *Fluent* API (features provided by dse-graph):: - - pip install scylla-driver[graph] - -See :doc:`installation` for more details. - -Import from the cassandra module -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There is no `dse` module, so you should import from the `cassandra` module. You -need to change only the first module of your import statements, not the submodules. - -.. code-block:: python - - from dse.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from dse.auth import PlainTextAuthProvider - from dse.policies import WhiteListRoundRobinPolicy - - # becomes - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.auth import PlainTextAuthProvider - from cassandra.policies import WhiteListRoundRobinPolicy - -Also note that the cassandra.hosts module doesn't exist in scylla-driver. This -module is named cassandra.pool. - -dse-graph -^^^^^^^^^ - -dse-graph features are now built into scylla-driver. The only change you need -to do is your import statements: - -.. code-block:: python - - from dse_graph import .. - from dse_graph.query import .. 
- - # becomes - - from cassandra.datastax.graph.fluent import .. - from cassandra.datastax.graph.fluent.query import .. - -See :mod:`~.datastax.graph.fluent`. - -Session.execute and Session.execute_async API -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Although it is not common to use this API with positional arguments, it is -important to be aware that the `host` and `execute_as` parameters have had -their positional order swapped. This is only because `execute_as` was added -in dse-driver before `host`. - -See :meth:`.Session.execute`. - -Deprecations -^^^^^^^^^^^^ - -These changes are optional, but recommended: - -* Importing from `cassandra.graph` is deprecated. Consider importing from `cassandra.datastax.graph`. -* Use :class:`~.policies.DefaultLoadBalancingPolicy` instead of DSELoadBalancingPolicy. - -Upgrading to 3.0 ----------------- -Version 3.0 of the DataStax Python driver for Apache Cassandra -adds support for Cassandra 3.0 while maintaining support for -previously supported versions. In addition to substantial internal rework, -there are several updates to the API that integrators will need -to consider: - -Default consistency is now ``LOCAL_ONE`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previous value was ``ONE``. The new value is introduced to mesh with the default -DC-aware load balancing policy and to match other drivers. - -Execution API Updates -^^^^^^^^^^^^^^^^^^^^^ -Result return normalization -~~~~~~~~~~~~~~~~~~~~~~~~~~~ -`PYTHON-368 `_ - -Previously results would be returned as a ``list`` of rows for result rows -up to ``fetch_size``, and ``PagedResult`` afterward. This could break -application code that assumed one type and got another. - -Now, all results are returned as an iterable :class:`~.ResultSet`. - -The preferred way to consume results of unknown size is to iterate through -them, letting automatic paging occur as they are consumed. - -.. code-block:: python - - results = session.execute("SELECT * FROM system.local") - for row in results: - process(row) - -If the expected size of the results is known, it is still possible to -materialize a list using the iterator: - -.. code-block:: python - - results = session.execute("SELECT * FROM system.local") - row_list = list(results) - -For backward compatibility, :class:`~.ResultSet` supports indexing. When -accessed at an index, a `~.ResultSet` object will materialize all its pages: - -.. code-block:: python - - results = session.execute("SELECT * FROM system.local") - first_result = results[0] # materializes results, fetching all pages - -This can send requests and load (possibly large) results into memory, so -`~.ResultSet` will log a warning on implicit materialization. - -Trace information is not attached to executed Statements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -`PYTHON-318 `_ - -Previously trace data was attached to Statements if tracing was enabled. This -could lead to confusion if the same statement was used for multiple executions. - -Now, trace data is associated with the ``ResponseFuture`` and ``ResultSet`` -returned for each query: - -:meth:`.ResponseFuture.get_query_trace()` - -:meth:`.ResponseFuture.get_all_query_traces()` - -:meth:`.ResultSet.get_query_trace()` - -:meth:`.ResultSet.get_all_query_traces()` - -Binding named parameters now ignores extra names -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -`PYTHON-178 `_ - -Previously, :meth:`.BoundStatement.bind()` would raise if a mapping -was passed with extra names not found in the prepared statement. 
- -Behavior in 3.0+ is to ignore extra names. - -blist removed as soft dependency -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`PYTHON-385 `_ - -Previously the driver had a soft dependency on ``blist sortedset``, using -that where available and using an internal fallback where possible. - -Now, the driver never chooses the ``blist`` variant, instead returning the -internal :class:`.util.SortedSet` for all ``set`` results. The class implements -all standard set operations, so no integration code should need to change unless -it explicitly checks for ``sortedset`` type. - -Metadata API Updates -^^^^^^^^^^^^^^^^^^^^ -`PYTHON-276 `_, `PYTHON-408 `_, `PYTHON-400 `_, `PYTHON-422 `_ - -Cassandra 3.0 brought a substantial overhaul to the internal schema metadata representation. -This version of the driver supports that metadata in addition to the legacy version. Doing so -also brought some changes to the metadata model. - -The present API is documented: :any:`cassandra.metadata`. Changes highlighted below: - -* All types are now exposed as CQL types instead of types derived from the internal server implementation -* Some metadata attributes have changed names to match current nomenclature (for example, :attr:`.Index.kind` in place of ``Index.type``). -* Some metadata attributes removed - - * ``TableMetadata.keyspace`` reference replaced with :attr:`.TableMetadata.keyspace_name` - * ``ColumnMetadata.index`` is removed table- and keyspace-level mappings are still maintained - -Several deprecated features are removed -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`PYTHON-292 `_ - -* ``ResponseFuture.result`` timeout parameter is removed, use ``Session.execute`` timeout instead (`031ebb0 `_) -* ``Cluster.refresh_schema`` removed, use ``Cluster.refresh_*_metadata`` instead (`419fcdf `_) -* ``Cluster.submit_schema_refresh`` removed (`574266d `_) -* ``cqltypes`` time/date functions removed, use ``util`` entry points instead (`bb984ee `_) -* ``decoder`` module removed (`e16a073 `_) -* ``TableMetadata.keyspace`` attribute replaced with ``keyspace_name`` (`cc94073 `_) -* ``cqlengine.columns.TimeUUID.from_datetime`` removed, use ``util`` variant instead (`96489cc `_) -* ``cqlengine.columns.Float(double_precision)`` parameter removed, use ``columns.Double`` instead (`a2d3a98 `_) -* ``cqlengine`` keyspace management functions are removed in favor of the strategy-specific entry points (`4bd5909 `_) -* ``cqlengine.Model.__polymorphic_*__`` attributes removed, use ``__discriminator*`` attributes instead (`9d98c8e `_) -* ``cqlengine.statements`` will no longer warn about list list prepend behavior (`79efe97 `_) - - -Upgrading to 2.1 from 2.0 -------------------------- -Version 2.1 of the DataStax Python driver for Apache Cassandra -adds support for Cassandra 2.1 and version 3 of the native protocol. - -Cassandra 1.2, 2.0, and 2.1 are all supported. However, 1.2 only -supports protocol version 1, and 2.0 only supports versions 1 and -2, so some features may not be available. - -Using the v3 Native Protocol -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -By default, the driver will attempt to use version 2 of the -native protocol. To use version 3, you must explicitly -set the :attr:`~.Cluster.protocol_version`: - -.. code-block:: python - - from cassandra.cluster import Cluster - - cluster = Cluster(protocol_version=3) - -Note that protocol version 3 is only supported by Cassandra 2.1+. - -In future releases, the driver may default to using protocol version -3. 
- -Working with User-Defined Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Cassandra 2.1 introduced the ability to define new types:: - - USE KEYSPACE mykeyspace; - - CREATE TYPE address (street text, city text, zip int); - -The driver generally expects you to use instances of a specific -class to represent column values of this type. You can let the -driver know what class to use with :meth:`.Cluster.register_user_type`: - -.. code-block:: python - - cluster = Cluster() - - class Address(object): - - def __init__(self, street, city, zipcode): - self.street = street - self.city = text - self.zipcode = zipcode - - cluster.register_user_type('mykeyspace', 'address', Address) - -When inserting data for ``address`` columns, you should pass in -instances of ``Address``. When querying data, ``address`` column -values will be instances of ``Address``. - -If no class is registered for a user-defined type, query results -will use a ``namedtuple`` class and data may only be inserted -though prepared statements. - -See :ref:`udts` for more details. - -Customizing Encoders for Non-prepared Statements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Starting with version 2.1 of the driver, it is possible to customize -how Python types are converted to CQL literals when working with -non-prepared statements. This is done on a per-:class:`~.Session` -basis through :attr:`.Session.encoder`: - -.. code-block:: python - - cluster = Cluster() - session = cluster.connect() - session.encoder.mapping[tuple] = session.encoder.cql_encode_tuple - -See :ref:`type-conversions` for the table of default CQL literal conversions. - -Using Client-Side Protocol-Level Timestamps -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -With version 3 of the native protocol, timestamps may be supplied by the -client at the protocol level. (Normally, if they are not specified within -the CQL query itself, a timestamp is generated server-side.) - -When :attr:`~.Cluster.protocol_version` is set to 3 or higher, the driver -will automatically use client-side timestamps with microsecond precision -unless :attr:`.Session.use_client_timestamp` is changed to :const:`False`. -If a timestamp is specified within the CQL query, it will override the -timestamp generated by the driver. - -Upgrading to 2.0 from 1.x -------------------------- -Version 2.0 of the DataStax Python driver for Apache Cassandra -includes some notable improvements over version 1.x. This version -of the driver supports Cassandra 1.2, 2.0, and 2.1. However, not -all features may be used with Cassandra 1.2, and some new features -in 2.1 are not yet supported. - -Using the v2 Native Protocol -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -By default, the driver will attempt to use version 2 of Cassandra's -native protocol. You can explicitly set the protocol version to -2, though: - -.. code-block:: python - - from cassandra.cluster import Cluster - - cluster = Cluster(protocol_version=2) - -When working with Cassandra 1.2, you will need to -explicitly set the :attr:`~.Cluster.protocol_version` to 1: - -.. code-block:: python - - from cassandra.cluster import Cluster - - cluster = Cluster(protocol_version=1) - -Automatic Query Paging -^^^^^^^^^^^^^^^^^^^^^^ -Version 2 of the native protocol adds support for automatic query -paging, which can make dealing with large result sets much simpler. - -See :ref:`query-paging` for full details. 
- -Protocol-Level Batch Statements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -With version 1 of the native protocol, batching of statements required -using a `BATCH cql query `_. -With version 2 of the native protocol, you can now batch statements at -the protocol level. This allows you to use many different prepared -statements within a single batch. - -See :class:`~.query.BatchStatement` for details and usage examples. - -SASL-based Authentication -^^^^^^^^^^^^^^^^^^^^^^^^^ -Also new in version 2 of the native protocol is SASL-based authentication. -See the section on :ref:`security` for details and examples. - -Lightweight Transactions -^^^^^^^^^^^^^^^^^^^^^^^^ -`Lightweight transactions `_ are another new feature. To use lightweight transactions, add ``IF`` clauses -to your CQL queries and set the :attr:`~.Statement.serial_consistency_level` -on your statements. - -Calling Cluster.shutdown() -^^^^^^^^^^^^^^^^^^^^^^^^^^ -In order to fix some issues around garbage collection and unclean interpreter -shutdowns, version 2.0 of the driver requires you to call :meth:`.Cluster.shutdown()` -on your :class:`~.Cluster` objects when you are through with them. -This helps to guarantee a clean shutdown. - -Deprecations -^^^^^^^^^^^^ -The following functions have moved from ``cassandra.decoder`` to ``cassandra.query``. -The original functions have been left in place with a :exc:`DeprecationWarning` for -now: - -* :attr:`cassandra.decoder.tuple_factory` has moved to - :attr:`cassandra.query.tuple_factory` -* :attr:`cassandra.decoder.named_tuple_factory` has moved to - :attr:`cassandra.query.named_tuple_factory` -* :attr:`cassandra.decoder.dict_factory` has moved to - :attr:`cassandra.query.dict_factory` -* :attr:`cassandra.decoder.ordered_dict_factory` has moved to - :attr:`cassandra.query.ordered_dict_factory` - -Dependency Changes -^^^^^^^^^^^^^^^^^^ -The following dependencies have officially been made optional: - -* ``scales`` -* ``blist`` - -And one new dependency has been added (to enable Python 3 support): - -* ``six`` diff --git a/docs/user_defined_types.rst b/docs/user-defined-types.rst similarity index 100% rename from docs/user_defined_types.rst rename to docs/user-defined-types.rst diff --git a/docs/uv.lock b/docs/uv.lock new file mode 100644 index 0000000000..23468c7170 --- /dev/null +++ b/docs/uv.lock @@ -0,0 +1,1246 @@ +version = 1 +revision = 3 +requires-python = "==3.13.*" + +[[package]] +name = "aenum" +version = "3.1.16" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/52/6ad8f63ec8da1bf40f96996d25d5b650fdd38f5975f8c813732c47388f18/aenum-3.1.16-py3-none-any.whl", hash = "sha256:9035092855a98e41b66e3d0998bd7b96280e85ceb3a04cc035636138a1943eaf", size = 165627, upload-time = "2025-04-25T03:17:58.89Z" }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + 
+[[package]] +name = "aiohttp" +version = "3.13.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, + { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, + { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, + { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, + { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, + { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, + { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, + { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, + { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, + { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, + { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" }, + { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210, upload-time = "2024-07-26T18:15:03.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "async-timeout" +version = "4.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/d6/21b30a550dafea84b1b8eee21b5e23fa16d010ae006011221f33dcd8d7f8/async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f", size = 8345, upload-time = "2023-08-10T16:35:56.907Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028", size = 5721, upload-time = "2023-08-10T16:35:55.203Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "beartype" +version = "0.22.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8c/1d/794ae2acaa67c8b216d91d5919da2606c2bb14086849ffde7f5555f3a3a5/beartype-0.22.8.tar.gz", hash = "sha256:b19b21c9359722ee3f7cc433f063b3e13997b27ae8226551ea5062e621f61165", size = 1602262, upload-time = "2025-12-03T05:11:10.766Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/2a/fbcbf5a025d3e71ddafad7efd43e34ec4362f4d523c3c471b457148fb211/beartype-0.22.8-py3-none-any.whl", hash = "sha256:b832882d04e41a4097bab9f63e6992bc6de58c414ee84cba9b45b67314f5ab2e", size = 1331895, upload-time = "2025-12-03T05:11:08.373Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = 
"sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "commonmark" +version = "0.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/60/48/a60f593447e8f0894ebb7f6e6c1f25dafc5e89c5879fdc9360ae93ff83f0/commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60", size = 95764, upload-time = "2019-10-04T15:37:39.817Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/92/dfd892312d822f36c55366118b95d914e5f16de11044a27cf10a7d71bbbf/commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9", size = 51068, upload-time = "2019-10-04T15:37:37.674Z" }, +] + +[[package]] +name = "dnspython" +version = "2.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/8b/57666417c0f90f08bcafa776861060426765fdb422eb10212086fb811d26/dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f", size = 368251, upload-time = "2025-09-07T18:58:00.022Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/5a/18ad964b0086c6e62e2e7500f7edc89e3faa45033c71c1893d34eed2b2de/dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af", size = 331094, upload-time = "2025-09-07T18:57:58.071Z" }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, +] + +[[package]] +name = "eventlet" +version = "0.40.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "greenlet" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/d8/f72d8583db7c559445e0e9500a9b9787332370c16980802204a403634585/eventlet-0.40.4.tar.gz", hash = "sha256:69bef712b1be18b4930df6f0c495d2a882bf7b63aa111e7b6eeff461cfcaf26f", size = 565920, upload-time = "2025-11-26T13:57:31.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/6d/8e1fa901f6a8307f90e7bd932064e27a0062a4a7a16af38966a9c3293c52/eventlet-0.40.4-py3-none-any.whl", hash = "sha256:6326c6d0bf55810bece151f7a5750207c610f389ba110ffd1541ed6e5215485b", size = 364588, upload-time = "2025-11-26T13:57:29.09Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/40/0832c31a37d60f60ed79e9dfb5a92e1e2af4f40a16a29abcc7992af9edff/frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a", size = 85717, upload-time = "2025-10-06T05:36:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/30/ba/b0b3de23f40bc55a7057bd38434e25c34fa48e17f20ee273bbde5e0650f3/frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7", size = 49651, upload-time = "2025-10-06T05:36:28.855Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ab/6e5080ee374f875296c4243c381bbdef97a9ac39c6e3ce1d5f7d42cb78d6/frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40", size = 49417, upload-time = "2025-10-06T05:36:29.877Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4e/e4691508f9477ce67da2015d8c00acd751e6287739123113a9fca6f1604e/frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027", size = 234391, upload-time = "2025-10-06T05:36:31.301Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/c202df58e3acdf12969a7895fd6f3bc016c642e6726aa63bd3025e0fc71c/frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822", size = 233048, upload-time = "2025-10-06T05:36:32.531Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c0/8746afb90f17b73ca5979c7a3958116e105ff796e718575175319b5bb4ce/frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121", size = 226549, upload-time = "2025-10-06T05:36:33.706Z" }, + { url = "https://files.pythonhosted.org/packages/7e/eb/4c7eefc718ff72f9b6c4893291abaae5fbc0c82226a32dcd8ef4f7a5dbef/frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5", size = 239833, upload-time = "2025-10-06T05:36:34.947Z" }, + { url = "https://files.pythonhosted.org/packages/c2/4e/e5c02187cf704224f8b21bee886f3d713ca379535f16893233b9d672ea71/frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e", size = 245363, upload-time = "2025-10-06T05:36:36.534Z" }, + { url = "https://files.pythonhosted.org/packages/1f/96/cb85ec608464472e82ad37a17f844889c36100eed57bea094518bf270692/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11", size = 229314, upload-time = "2025-10-06T05:36:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6f/4ae69c550e4cee66b57887daeebe006fe985917c01d0fff9caab9883f6d0/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1", size = 243365, upload-time = "2025-10-06T05:36:40.152Z" }, + { url = "https://files.pythonhosted.org/packages/7a/58/afd56de246cf11780a40a2c28dc7cbabbf06337cc8ddb1c780a2d97e88d8/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1", size = 237763, upload-time = "2025-10-06T05:36:41.355Z" }, + { url = "https://files.pythonhosted.org/packages/cb/36/cdfaf6ed42e2644740d4a10452d8e97fa1c062e2a8006e4b09f1b5fd7d63/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8", size = 240110, upload-time = "2025-10-06T05:36:42.716Z" }, + { url = "https://files.pythonhosted.org/packages/03/a8/9ea226fbefad669f11b52e864c55f0bd57d3c8d7eb07e9f2e9a0b39502e1/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed", size = 233717, upload-time = "2025-10-06T05:36:44.251Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0b/1b5531611e83ba7d13ccc9988967ea1b51186af64c42b7a7af465dcc9568/frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496", size = 39628, upload-time = "2025-10-06T05:36:45.423Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cf/174c91dbc9cc49bc7b7aab74d8b734e974d1faa8f191c74af9b7e80848e6/frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231", size = 43882, upload-time = "2025-10-06T05:36:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/c1/17/502cd212cbfa96eb1388614fe39a3fc9ab87dbbe042b66f97acb57474834/frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62", size = 39676, upload-time = "2025-10-06T05:36:47.8Z" }, + { url = "https://files.pythonhosted.org/packages/d2/5c/3bbfaa920dfab09e76946a5d2833a7cbdf7b9b4a91c714666ac4855b88b4/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94", size = 89235, upload-time = "2025-10-06T05:36:48.78Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d6/f03961ef72166cec1687e84e8925838442b615bd0b8854b54923ce5b7b8a/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c", size = 50742, upload-time = "2025-10-06T05:36:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/1e/bb/a6d12b7ba4c3337667d0e421f7181c82dda448ce4e7ad7ecd249a16fa806/frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52", size = 51725, upload-time = "2025-10-06T05:36:50.851Z" }, + { url = "https://files.pythonhosted.org/packages/bc/71/d1fed0ffe2c2ccd70b43714c6cab0f4188f09f8a67a7914a6b46ee30f274/frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51", size = 284533, upload-time = "2025-10-06T05:36:51.898Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/fb1685a7b009d89f9bf78a42d94461bc06581f6e718c39344754a5d9bada/frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65", size = 292506, upload-time = "2025-10-06T05:36:53.101Z" }, + { url = "https://files.pythonhosted.org/packages/e6/3b/b991fe1612703f7e0d05c0cf734c1b77aaf7c7d321df4572e8d36e7048c8/frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82", size = 274161, upload-time = "2025-10-06T05:36:54.309Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ec/c5c618767bcdf66e88945ec0157d7f6c4a1322f1473392319b7a2501ded7/frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714", size = 294676, upload-time = "2025-10-06T05:36:55.566Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ce/3934758637d8f8a88d11f0585d6495ef54b2044ed6ec84492a91fa3b27aa/frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d", size = 300638, upload-time = "2025-10-06T05:36:56.758Z" }, + { url = "https://files.pythonhosted.org/packages/fc/4f/a7e4d0d467298f42de4b41cbc7ddaf19d3cfeabaf9ff97c20c6c7ee409f9/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506", size = 283067, upload-time = "2025-10-06T05:36:57.965Z" }, + { url = "https://files.pythonhosted.org/packages/dc/48/c7b163063d55a83772b268e6d1affb960771b0e203b632cfe09522d67ea5/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51", size = 292101, upload-time = "2025-10-06T05:36:59.237Z" }, + { url = "https://files.pythonhosted.org/packages/9f/d0/2366d3c4ecdc2fd391e0afa6e11500bfba0ea772764d631bbf82f0136c9d/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e", size = 289901, upload-time = "2025-10-06T05:37:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/b8/94/daff920e82c1b70e3618a2ac39fbc01ae3e2ff6124e80739ce5d71c9b920/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0", size = 289395, upload-time = "2025-10-06T05:37:02.115Z" }, + { url = "https://files.pythonhosted.org/packages/e3/20/bba307ab4235a09fdcd3cc5508dbabd17c4634a1af4b96e0f69bfe551ebd/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41", size = 283659, upload-time = "2025-10-06T05:37:03.711Z" }, + { url = "https://files.pythonhosted.org/packages/fd/00/04ca1c3a7a124b6de4f8a9a17cc2fcad138b4608e7a3fc5877804b8715d7/frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b", size = 43492, upload-time = "2025-10-06T05:37:04.915Z" }, + { url = "https://files.pythonhosted.org/packages/59/5e/c69f733a86a94ab10f68e496dc6b7e8bc078ebb415281d5698313e3af3a1/frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888", size = 48034, upload-time = "2025-10-06T05:37:06.343Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/6c/be9d79775d8abe79b05fa6d23da99ad6e7763a1d080fbae7290b286093fd/frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042", size = 41749, upload-time = "2025-10-06T05:37:07.431Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "gevent" +version = "25.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/48/b3ef2673ffb940f980966694e40d6d32560f3ffa284ecaeb5ea3a90a6d3f/gevent-25.9.1.tar.gz", hash = "sha256:adf9cd552de44a4e6754c51ff2e78d9193b7fa6eab123db9578a210e657235dd", size = 5059025, upload-time = "2025-09-17T16:15:34.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/77/b97f086388f87f8ad3e01364f845004aef0123d4430241c7c9b1f9bde742/gevent-25.9.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:4f84591d13845ee31c13f44bdf6bd6c3dbf385b5af98b2f25ec328213775f2ed", size = 2973739, upload-time = "2025-09-17T14:53:30.279Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2e/9d5f204ead343e5b27bbb2fedaec7cd0009d50696b2266f590ae845d0331/gevent-25.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9cdbb24c276a2d0110ad5c978e49daf620b153719ac8a548ce1250a7eb1b9245", size = 1809165, upload-time = "2025-09-17T15:41:27.193Z" }, + { url = "https://files.pythonhosted.org/packages/10/3e/791d1bf1eb47748606d5f2c2aa66571f474d63e0176228b1f1fd7b77ab37/gevent-25.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:88b6c07169468af631dcf0fdd3658f9246d6822cc51461d43f7c44f28b0abb82", size = 1890638, upload-time = "2025-09-17T15:49:02.45Z" }, + { url = "https://files.pythonhosted.org/packages/f2/5c/9ad0229b2b4d81249ca41e4f91dd8057deaa0da6d4fbe40bf13cdc5f7a47/gevent-25.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b7bb0e29a7b3e6ca9bed2394aa820244069982c36dc30b70eb1004dd67851a48", size = 1857118, upload-time = "2025-09-17T15:49:22.125Z" }, + { url = "https://files.pythonhosted.org/packages/49/2a/3010ed6c44179a3a5c5c152e6de43a30ff8bc2c8de3115ad8733533a018f/gevent-25.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2951bb070c0ee37b632ac9134e4fdaad70d2e660c931bb792983a0837fe5b7d7", size = 2111598, upload-time = "2025-09-17T15:15:15.226Z" }, + { url = "https://files.pythonhosted.org/packages/08/75/6bbe57c19a7aa4527cc0f9afcdf5a5f2aed2603b08aadbccb5bf7f607ff4/gevent-25.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4e17c2d57e9a42e25f2a73d297b22b60b2470a74be5a515b36c984e1a246d47", size = 1829059, upload-time = "2025-09-17T15:52:42.596Z" }, + { url = "https://files.pythonhosted.org/packages/06/6e/19a9bee9092be45679cb69e4dd2e0bf5f897b7140b4b39c57cc123d24829/gevent-25.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d94936f8f8b23d9de2251798fcb603b84f083fdf0d7f427183c1828fb64f117", size = 2173529, upload-time = "2025-09-17T15:24:13.897Z" }, + { url = 
"https://files.pythonhosted.org/packages/ca/4f/50de9afd879440e25737e63f5ba6ee764b75a3abe17376496ab57f432546/gevent-25.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb51c5f9537b07da673258b4832f6635014fee31690c3f0944d34741b69f92fa", size = 1681518, upload-time = "2025-09-17T19:39:47.488Z" }, +] + +[[package]] +name = "greenlet" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" }, + { url = "https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" }, + { url = "https://files.pythonhosted.org/packages/79/07/c47a82d881319ec18a4510bb30463ed6891f2ad2c1901ed5ec23d3de351f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492", size = 624311, upload-time = "2025-12-04T15:07:14.697Z" }, + { url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" }, + { url = "https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" }, + { url = "https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38", size = 301833, upload-time = "2025-12-04T14:32:23.929Z" }, +] + +[[package]] +name = "gremlinpython" +version = "3.7.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aenum" }, + { name = "aiohttp" }, + { name = "async-timeout" }, + { name = "isodate" }, + { name = "nest-asyncio" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/bc/86/a0b20fed6bb699054f19e693b40570c27d06432469d50be677a156b65fcb/gremlinpython-3.7.4.tar.gz", hash = "sha256:d41579a8ef83c1dce9e51ccff2b5fb496170be0fdb0f491d4124c29e7df9b14d", size = 52639, upload-time = "2025-08-08T16:55:23.376Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/84/7a268ae9d5ae4a64a701fa099497b7531820d42f6c19dfec39dcdb238bf7/gremlinpython-3.7.4-py3-none-any.whl", hash = "sha256:b6b336320d0110382b6a3832bc19b4e2bf72e4b3f38dab25fdbedfa1a3167987", size = 78522, upload-time = "2025-08-08T16:55:22.246Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "hatchling" +version = "1.28.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "pathspec" }, + { name = "pluggy" }, + { name = "trove-classifiers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/8e/e480359492affde4119a131da729dd26da742c2c9b604dff74836e47eef9/hatchling-1.28.0.tar.gz", hash = "sha256:4d50b02aece6892b8cd0b3ce6c82cb218594d3ec5836dbde75bf41a21ab004c8", size = 55365, upload-time = "2025-11-27T00:31:13.766Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/a5/48cb7efb8b4718b1a4c0c331e3364a3a33f614ff0d6afd2b93ee883d3c47/hatchling-1.28.0-py3-none-any.whl", hash = "sha256:dc48722b68b3f4bbfa3ff618ca07cdea6750e7d03481289ffa8be1521d18a961", size = 76075, upload-time = "2025-11-27T00:31:12.544Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026, upload-time = "2022-07-01T12:21:05.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, +] + +[[package]] +name = "isodate" 
+version = "0.7.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/4d/e940025e2ce31a8ce1202635910747e5a87cc3a6a6bb2d00973375014749/isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6", size = 29705, upload-time = "2024-10-08T23:04:11.5Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320, upload-time = "2024-10-08T23:04:09.501Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" 
}, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "multidict" +version = "6.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, + { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, + { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, + { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, + { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", 
hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, + { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, + { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, + { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, + { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, + { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, + { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, + { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, + { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, 
upload-time = "2025-10-06T14:50:21.223Z" }, + { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, + { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, + { url = "https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, + { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, + { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, + { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, + { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, + { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, + { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, + { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, + { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, + { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, + { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "myst-parser" +version = "4.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "jinja2" }, + { name = "markdown-it-py" }, + { name = "mdit-py-plugins" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/a5/9626ba4f73555b3735ad86247a8077d4603aa8628537687c839ab08bfe44/myst_parser-4.0.1.tar.gz", hash = "sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4", size = 93985, upload-time = "2025-02-12T10:53:03.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/df/76d0321c3797b54b60fef9ec3bd6f4cfd124b9e422182156a1dd418722cf/myst_parser-4.0.1-py3-none-any.whl", hash = "sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d", size = 84579, upload-time = "2025-02-12T10:53:02.078Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "propcache" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/df/6d9c1b6ac12b003837dde8a10231a7344512186e87b36e855bef32241942/propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf", size = 77750, upload-time = "2025-10-08T19:47:07.648Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/e8/677a0025e8a2acf07d3418a2e7ba529c9c33caf09d3c1f25513023c1db56/propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311", size = 44780, upload-time = "2025-10-08T19:47:08.851Z" }, + { url = "https://files.pythonhosted.org/packages/89/a4/92380f7ca60f99ebae761936bc48a72a639e8a47b29050615eef757cb2a7/propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74", size = 46308, upload-time = "2025-10-08T19:47:09.982Z" }, + { url = "https://files.pythonhosted.org/packages/2d/48/c5ac64dee5262044348d1d78a5f85dd1a57464a60d30daee946699963eb3/propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe", size = 208182, upload-time = "2025-10-08T19:47:11.319Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0c/cd762dd011a9287389a6a3eb43aa30207bde253610cca06824aeabfe9653/propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af", size = 211215, upload-time = "2025-10-08T19:47:13.146Z" }, + { url = "https://files.pythonhosted.org/packages/30/3e/49861e90233ba36890ae0ca4c660e95df565b2cd15d4a68556ab5865974e/propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c", size = 218112, upload-time = "2025-10-08T19:47:14.913Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8b/544bc867e24e1bd48f3118cecd3b05c694e160a168478fa28770f22fd094/propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f", size = 204442, upload-time = "2025-10-08T19:47:16.277Z" }, + { url = "https://files.pythonhosted.org/packages/50/a6/4282772fd016a76d3e5c0df58380a5ea64900afd836cec2c2f662d1b9bb3/propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1", size = 199398, upload-time = "2025-10-08T19:47:17.962Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ec/d8a7cd406ee1ddb705db2139f8a10a8a427100347bd698e7014351c7af09/propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24", size = 196920, upload-time = "2025-10-08T19:47:19.355Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/f38ab64af3764f431e359f8baf9e0a21013e24329e8b85d2da32e8ed07ca/propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa", size = 203748, upload-time = "2025-10-08T19:47:21.338Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e3/fa846bd70f6534d647886621388f0a265254d30e3ce47e5c8e6e27dbf153/propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61", size = 205877, upload-time = "2025-10-08T19:47:23.059Z" }, + { url = "https://files.pythonhosted.org/packages/e2/39/8163fc6f3133fea7b5f2827e8eba2029a0277ab2c5beee6c1db7b10fc23d/propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66", size = 199437, upload-time = "2025-10-08T19:47:24.445Z" }, + { url = "https://files.pythonhosted.org/packages/93/89/caa9089970ca49c7c01662bd0eeedfe85494e863e8043565aeb6472ce8fe/propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81", size = 37586, upload-time = "2025-10-08T19:47:25.736Z" }, + { url = "https://files.pythonhosted.org/packages/f5/ab/f76ec3c3627c883215b5c8080debb4394ef5a7a29be811f786415fc1e6fd/propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e", size = 40790, upload-time = "2025-10-08T19:47:26.847Z" }, + { url = "https://files.pythonhosted.org/packages/59/1b/e71ae98235f8e2ba5004d8cb19765a74877abf189bc53fc0c80d799e56c3/propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1", size = 37158, upload-time = "2025-10-08T19:47:27.961Z" }, + { url = "https://files.pythonhosted.org/packages/83/ce/a31bbdfc24ee0dcbba458c8175ed26089cf109a55bbe7b7640ed2470cfe9/propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b", size = 81451, upload-time = "2025-10-08T19:47:29.445Z" }, + { url = "https://files.pythonhosted.org/packages/25/9c/442a45a470a68456e710d96cacd3573ef26a1d0a60067e6a7d5e655621ed/propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566", size = 46374, upload-time = "2025-10-08T19:47:30.579Z" }, + { url = "https://files.pythonhosted.org/packages/f4/bf/b1d5e21dbc3b2e889ea4327044fb16312a736d97640fb8b6aa3f9c7b3b65/propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835", size = 48396, upload-time = "2025-10-08T19:47:31.79Z" }, + { url = "https://files.pythonhosted.org/packages/f4/04/5b4c54a103d480e978d3c8a76073502b18db0c4bc17ab91b3cb5092ad949/propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e", size = 275950, upload-time = "2025-10-08T19:47:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/b4/c1/86f846827fb969c4b78b0af79bba1d1ea2156492e1b83dea8b8a6ae27395/propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859", size = 273856, upload-time = "2025-10-08T19:47:34.906Z" }, + { url = "https://files.pythonhosted.org/packages/36/1d/fc272a63c8d3bbad6878c336c7a7dea15e8f2d23a544bda43205dfa83ada/propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b", size = 280420, upload-time = "2025-10-08T19:47:36.338Z" }, + { url = "https://files.pythonhosted.org/packages/07/0c/01f2219d39f7e53d52e5173bcb09c976609ba30209912a0680adfb8c593a/propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0", size = 263254, upload-time = "2025-10-08T19:47:37.692Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/18/cd28081658ce597898f0c4d174d4d0f3c5b6d4dc27ffafeef835c95eb359/propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af", size = 261205, upload-time = "2025-10-08T19:47:39.659Z" }, + { url = "https://files.pythonhosted.org/packages/7a/71/1f9e22eb8b8316701c2a19fa1f388c8a3185082607da8e406a803c9b954e/propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393", size = 247873, upload-time = "2025-10-08T19:47:41.084Z" }, + { url = "https://files.pythonhosted.org/packages/4a/65/3d4b61f36af2b4eddba9def857959f1016a51066b4f1ce348e0cf7881f58/propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874", size = 262739, upload-time = "2025-10-08T19:47:42.51Z" }, + { url = "https://files.pythonhosted.org/packages/2a/42/26746ab087faa77c1c68079b228810436ccd9a5ce9ac85e2b7307195fd06/propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7", size = 263514, upload-time = "2025-10-08T19:47:43.927Z" }, + { url = "https://files.pythonhosted.org/packages/94/13/630690fe201f5502d2403dd3cfd451ed8858fe3c738ee88d095ad2ff407b/propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1", size = 257781, upload-time = "2025-10-08T19:47:45.448Z" }, + { url = "https://files.pythonhosted.org/packages/92/f7/1d4ec5841505f423469efbfc381d64b7b467438cd5a4bbcbb063f3b73d27/propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717", size = 41396, upload-time = "2025-10-08T19:47:47.202Z" }, + { url = "https://files.pythonhosted.org/packages/48/f0/615c30622316496d2cbbc29f5985f7777d3ada70f23370608c1d3e081c1f/propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37", size = 44897, upload-time = "2025-10-08T19:47:48.336Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/6002e46eccbe0e33dcd4069ef32f7f1c9e243736e07adca37ae8c4830ec3/propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a", size = 39789, upload-time = "2025-10-08T19:47:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "python-driver-docs" +version = "0.1.0" +source = { editable = "." } +dependencies = [ + { name = "eventlet" }, + { name = "gevent" }, + { name = "gremlinpython" }, + { name = "pygments" }, + { name = "recommonmark" }, + { name = "redirects-cli" }, + { name = "scales" }, + { name = "six" }, + { name = "sphinx" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-multiversion-scylla" }, + { name = "sphinx-scylladb-theme" }, + { name = "sphinx-sitemap" }, + { name = "tornado" }, +] + +[package.dev-dependencies] +dev = [ + { name = "hatchling" }, +] + +[package.metadata] +requires-dist = [ + { name = "eventlet", specifier = ">=0.40.3,<1.0.0" }, + { name = "gevent", specifier = ">=25.9.1,<26.0.0" }, + { name = "gremlinpython", specifier = "==3.7.4" }, + { name = "pygments", specifier = ">=2.19.2,<3.0.0" }, + { name = "recommonmark", specifier = "==0.7.1" }, + { name = "redirects-cli", specifier = "~=0.1.3" }, + { name = "scales", specifier = ">=1.0.9,<2.0.0" }, + { name = "six", specifier = ">=1.9" }, + { name = "sphinx", specifier = ">=8.2.3,<9.0.0" }, + { name = "sphinx-autobuild", specifier = ">=2025.0.0,<2026.0.0" }, + { name = "sphinx-multiversion-scylla", specifier = ">=0.3.2,<1.0.0" }, + { name = "sphinx-scylladb-theme", specifier = ">=1.8.2,<2.0.0" }, + { name = "sphinx-sitemap", specifier = ">=2.8.0,<3.0.0" }, + { name = "tornado", specifier = ">=6.5,<7.0" }, +] + +[package.metadata.requires-dev] +dev = [{ name = "hatchling", specifier = "==1.28.0" }] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, +] + +[[package]] +name = "recommonmark" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "commonmark" }, + { name = "docutils" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/00/3dd2bdc4184b0ce754b5b446325abf45c2e0a347e022292ddc44670f628c/recommonmark-0.7.1.tar.gz", hash = "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67", size = 34444, upload-time = "2020-12-17T19:24:56.523Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/77/ed589c75db5d02a77a1d5d2d9abc63f29676467d396c64277f98b50b79c2/recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f", size = 10214, upload-time = "2020-12-17T19:24:55.137Z" }, +] + +[[package]] +name = "redirects-cli" +version = "0.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, + { name = "typer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/3e/942a3d5322f05aa75c903de1bdc101800cc0627e4c6c371768ef9070fa28/redirects_cli-0.1.3.tar.gz", hash = "sha256:0cc6f35ae372d087d56bc03cfc639d6e2eac0771454c3c173ac6f3dc233969bc", size = 4404, upload-time = "2022-11-29T19:11:20.776Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3e/a4/829d6901e0c2c492d0d46190aadf3f4b9c6db6594b4e14a814f844014b28/redirects_cli-0.1.3-py3-none-any.whl", hash = "sha256:8a7a548d5f45b98db7d110fd8affbbb44b966cf250e35b5f4c9bd6541622272d", size = 4655, upload-time = "2022-11-29T19:11:18.898Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "roman-numerals-py" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" }, +] + +[[package]] +name = "scales" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/85/b4a3933f227889b536a76c7ed5a0708ae5f63fe20f81d09a725228349e81/scales-1.0.9.tar.gz", hash = "sha256:8b6930f7d4bf115192290b44c757af5e254e3fcfcb75ff9a51f5c96a404e2753", size = 21889, upload-time = "2015-02-28T18:49:39.538Z" } + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "snowballstemmer" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + +[[package]] +name = "sphinx" +version = "8.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster" }, + { name = "babel" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils" }, + { name = "imagesize" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pygments" }, + { name = "requests" }, + { name = "roman-numerals-py" }, + { name = "snowballstemmer" }, 
+ { name = "sphinxcontrib-applehelp" }, + { name = "sphinxcontrib-devhelp" }, + { name = "sphinxcontrib-htmlhelp" }, + { name = "sphinxcontrib-jsmath" }, + { name = "sphinxcontrib-qthelp" }, + { name = "sphinxcontrib-serializinghtml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ad/4360e50ed56cb483667b8e6dadf2d3fda62359593faabbe749a27c4eaca6/sphinx-8.2.3.tar.gz", hash = "sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348", size = 8321876, upload-time = "2025-03-02T22:31:59.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/53/136e9eca6e0b9dc0e1962e2c908fbea2e5ac000c2a2fbd9a35797958c48b/sphinx-8.2.3-py3-none-any.whl", hash = "sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3", size = 3589741, upload-time = "2025-03-02T22:31:56.836Z" }, +] + +[[package]] +name = "sphinx-autobuild" +version = "2025.8.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, + { name = "sphinx" }, + { name = "starlette" }, + { name = "uvicorn" }, + { name = "watchfiles" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e0/3c/a59a3a453d4133777f7ed2e83c80b7dc817d43c74b74298ca0af869662ad/sphinx_autobuild-2025.8.25.tar.gz", hash = "sha256:9cf5aab32853c8c31af572e4fecdc09c997e2b8be5a07daf2a389e270e85b213", size = 15200, upload-time = "2025-08-25T18:44:55.436Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/20/56411b52f917696995f5ad27d2ea7e9492c84a043c5b49a3a3173573cd93/sphinx_autobuild-2025.8.25-py3-none-any.whl", hash = "sha256:b750ac7d5a18603e4665294323fd20f6dcc0a984117026d1986704fa68f0379a", size = 12535, upload-time = "2025-08-25T18:44:54.164Z" }, +] + +[[package]] +name = "sphinx-collapse" +version = "0.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/02/183559e508906f7282d4dd6ccbf443efddaa3114b7f6fab425949b37a003/sphinx_collapse-0.1.3.tar.gz", hash = "sha256:cae141e6f03ecd52ed246a305a69e1b0d5d05e6cdf3fe803d40d583ad6ad895a", size = 18540, upload-time = "2024-02-22T15:24:38.735Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/2f/5889082a6a535aa8613a327308582914517082967583ad45586b7d61c145/sphinx_collapse-0.1.3-py3-none-any.whl", hash = "sha256:85fadb2ec8769b93fd04276538668fa96239ef60c20c4a9eaa3e480387a6e65b", size = 4688, upload-time = "2024-02-22T15:24:29.365Z" }, +] + +[[package]] +name = "sphinx-copybutton" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/2b/a964715e7f5295f77509e59309959f4125122d648f86b4fe7d70ca1d882c/sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd", size = 23039, upload-time = "2023-04-14T08:10:22.998Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/48/1ea60e74949eecb12cdd6ac43987f9fd331156388dcc2319b45e2ebb81bf/sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e", size = 13343, upload-time = "2023-04-14T08:10:20.844Z" }, +] + +[[package]] +name = "sphinx-last-updated-by-git" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/03/fd/de1685b6dab173dff31da24e0d3b29f02873fc24a1cdbb7678721ddc8581/sphinx_last_updated_by_git-0.3.8.tar.gz", hash = "sha256:c145011f4609d841805b69a9300099fc02fed8f5bb9e5bcef77d97aea97b7761", size = 10785, upload-time = "2024-08-11T07:15:54.601Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/fb/e496f16fa11fbe2dbdd0b5e306ede153dfed050aae4766fc89d500720dc7/sphinx_last_updated_by_git-0.3.8-py3-none-any.whl", hash = "sha256:6382c8285ac1f222483a58569b78c0371af5e55f7fbf9c01e5e8a72d6fdfa499", size = 8580, upload-time = "2024-08-11T07:15:53.244Z" }, +] + +[[package]] +name = "sphinx-multiversion-scylla" +version = "0.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/1d/e2b1a214b20d33cc631422e483ed1c8cf6883870940b58cc46341b65e2d7/sphinx_multiversion_scylla-0.3.4.tar.gz", hash = "sha256:8f7c94a89c794334d78ef21761a8bf455aaa7361e71037cf2ac2ca51cb47a0ba", size = 12427, upload-time = "2025-11-24T07:42:01.506Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/aa/82c27991640fe47921f74894a192d374dc1eb609d2276de4abeefe85f4aa/sphinx_multiversion_scylla-0.3.4-py3-none-any.whl", hash = "sha256:e64d49d39a8eccf06a9cb8bbe88eecb3eb2082e6b91a478b55dc7d0268d8e0b6", size = 12302, upload-time = "2025-11-24T07:42:00.403Z" }, +] + +[[package]] +name = "sphinx-notfound-page" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/b2/67603444a8ee97b4a8ea71b0a9d6bab1727ed65e362c87e02f818ee57b8a/sphinx_notfound_page-1.1.0.tar.gz", hash = "sha256:913e1754370bb3db201d9300d458a8b8b5fb22e9246a816643a819a9ea2b8067", size = 7392, upload-time = "2025-01-28T18:45:02.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/d4/019fe439c840a7966012bbb95ccbdd81c5c10271749706793b43beb05145/sphinx_notfound_page-1.1.0-py3-none-any.whl", hash = "sha256:835dc76ff7914577a1f58d80a2c8418fb6138c0932c8da8adce4d9096fbcd389", size = 8167, upload-time = "2025-01-28T18:45:00.465Z" }, +] + +[[package]] +name = "sphinx-scylladb-theme" +version = "1.8.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "pyyaml" }, + { name = "setuptools" }, + { name = "sphinx-collapse" }, + { name = "sphinx-copybutton" }, + { name = "sphinx-notfound-page" }, + { name = "sphinx-substitution-extensions" }, + { name = "sphinx-tabs" }, + { name = "sphinxcontrib-mermaid" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/cd/bbd41f0d058f0ef4997cb044326f15dd28a1a17a4336e9b52cb67b8dd242/sphinx_scylladb_theme-1.8.10.tar.gz", hash = "sha256:8a78a9b692d9a946be2c4a64aa472fd82204cc8ea0b1ee7f60de6db35b356326", size = 1620675, upload-time = "2025-12-05T16:49:38.942Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/0e/7577d9bb6e2e7378e6c9f49263c59061a2ae9e370b806d8d1fd8c3be2a23/sphinx_scylladb_theme-1.8.10-py3-none-any.whl", hash = "sha256:8b930f33bec7308ccaa92698ebb5ad85059bcbf93a463f92917aeaf473fce632", size = 1662434, upload-time = "2025-12-05T16:49:36.265Z" }, +] + +[[package]] +name = "sphinx-sitemap" +version = "2.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx-last-updated-by-git" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/61/17/56fe0f65e3567f829b2b4153a622be1d4b222b781e0d90d7db5a7738f30f/sphinx_sitemap-2.9.0.tar.gz", hash = "sha256:70f97bcdf444e3d68e118355cf82a1f54c4d3c03d651cd17fe87398b26e25e21", size = 6978, upload-time = "2025-10-06T00:24:00.036Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/94/3c57e8b1985e755c48972e2ecd59526d4bf0b52a1fe805bc52a8e98cb92d/sphinx_sitemap-2.9.0-py3-none-any.whl", hash = "sha256:f1f1d3a9ad012ba17a7ef0b560d303bff2d0db26647567d6e810bcc754466664", size = 6218, upload-time = "2025-10-06T00:23:58.778Z" }, +] + +[[package]] +name = "sphinx-substitution-extensions" +version = "2025.11.17" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beartype" }, + { name = "docutils" }, + { name = "myst-parser" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/53/feccf1b607de2aef65c6411b4b4a34a91aa8daf397e77258a7774f9d1990/sphinx_substitution_extensions-2025.11.17.tar.gz", hash = "sha256:aae17f8db9efc3d454a304373ae3df763f8739e05e0b98d5381db46f6d250b27", size = 30459, upload-time = "2025-11-17T14:34:45.072Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/df/7e9cd4775c2782c894741c9274cc4c596ad02ab31257e5a5417f0a6af893/sphinx_substitution_extensions-2025.11.17-py2.py3-none-any.whl", hash = "sha256:ac18455bdc8324b337b0fe7498c1c0d0b1cb65c74d131459be4dea9edb6abbef", size = 8741, upload-time = "2025-11-17T14:34:43.66Z" }, +] + +[[package]] +name = "sphinx-tabs" +version = "3.4.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "pygments" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/53/a9a91995cb365e589f413b77fc75f1c0e9b4ac61bfa8da52a779ad855cc0/sphinx-tabs-3.4.7.tar.gz", hash = "sha256:991ad4a424ff54119799ba1491701aa8130dd43509474aef45a81c42d889784d", size = 15891, upload-time = "2024-10-08T13:37:27.887Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/c6/f47505b564b918a3ba60c1e99232d4942c4a7e44ecaae603e829e3d05dae/sphinx_tabs-3.4.7-py3-none-any.whl", hash = "sha256:c12d7a36fd413b369e9e9967a0a4015781b71a9c393575419834f19204bd1915", size = 9727, upload-time = "2024-10-08T13:37:26.192Z" }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = "2024-07-29T01:09:36.407Z" }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, +] + +[[package]] +name = "sphinxcontrib-mermaid" +version = "1.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/49/c6ddfe709a4ab76ac6e5a00e696f73626b2c189dc1e1965a361ec102e6cc/sphinxcontrib_mermaid-1.2.3.tar.gz", hash = "sha256:358699d0ec924ef679b41873d9edd97d0773446daf9760c75e18dc0adfd91371", size = 18885, upload-time = "2025-11-26T04:18:32.43Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/39/8b54299ffa00e597d3b0b4d042241a0a0b22cb429ad007ccfb9c1745b4d1/sphinxcontrib_mermaid-1.2.3-py3-none-any.whl", hash = "sha256:5be782b27026bef97bfb15ccb2f7868b674a1afc0982b54cb149702cfc25aa02", size = 13413, upload-time = "2025-11-26T04:18:31.269Z" }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "tornado" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = "2025-08-08T18:26:42.945Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" }, + { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" }, + { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" }, + { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" }, + { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 443954, upload-time = "2025-08-08T18:26:55.072Z" }, + { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" }, + { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" }, + { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" }, +] + +[[package]] +name = "trove-classifiers" +version = "2025.12.1.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/e1/000add3b3e0725ce7ee0ea6ea4543f1e1d9519742f3b2320de41eeefa7c7/trove_classifiers-2025.12.1.14.tar.gz", hash = "sha256:a74f0400524fc83620a9be74a07074b5cbe7594fd4d97fd4c2bfde625fdc1633", size = 16985, upload-time = "2025-12-01T14:47:11.456Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/7e/bc19996fa86cad8801e8ffe6f1bba5836ca0160df76d0410d27432193712/trove_classifiers-2025.12.1.14-py3-none-any.whl", hash = "sha256:a8206978ede95937b9959c3aff3eb258bbf7b07dff391ddd4ea7e61f316635ab", size = 14184, upload-time = "2025-12-01T14:47:10.113Z" }, +] + +[[package]] +name = "typer" +version = "0.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8f/28/7c85c8032b91dbe79725b6f17d2fffc595dff06a35c7a30a37bef73a1ab4/typer-0.20.0.tar.gz", hash = "sha256:1aaf6494031793e4876fb0bacfa6a912b551cf43c1e63c800df8b1a866720c37", size = 106492, upload-time = "2025-10-20T17:03:49.445Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/64/7713ffe4b5983314e9d436a90d5bd4f63b6054e2aca783a3cfc44cb95bbf/typer-0.20.0-py3-none-any.whl", hash = "sha256:5b463df6793ec1dca6213a3cf4c0f03bc6e322ac5e16e13ddd622a889489784a", size = 47028, upload-time = "2025-10-20T17:03:47.617Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/1d/0f3a93cca1ac5e8287842ed4eebbd0f7a991315089b1a0b01c7788aa7b63/urllib3-2.6.1.tar.gz", hash = "sha256:5379eb6e1aba4088bae84f8242960017ec8d8e3decf30480b3a1abdaa9671a3f", size = 432678, upload-time = "2025-12-08T15:25:26.773Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/56/190ceb8cb10511b730b564fb1e0293fa468363dbad26145c34928a60cb0c/urllib3-2.6.1-py3-none-any.whl", hash = "sha256:e67d06fe947c36a7ca39f4994b08d73922d40e6cca949907be05efa6fd75110b", size = 131138, upload-time = "2025-12-08T15:25:25.51Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/f06b84e2697fef4688ca63bdb2fdf113ca0a3be33f94488f2cadb690b0cf/uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d", size = 80605, upload-time = "2025-10-18T13:46:44.63Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" }, +] + +[[package]] +name = "watchfiles" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/f4/f750b29225fe77139f7ae5de89d4949f5a99f934c65a1f1c0b248f26f747/watchfiles-1.1.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:130e4876309e8686a5e37dba7d5e9bc77e6ed908266996ca26572437a5271e18", size = 404321, upload-time = "2025-10-14T15:05:02.063Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f9/f07a295cde762644aa4c4bb0f88921d2d141af45e735b965fb2e87858328/watchfiles-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f3bde70f157f84ece3765b42b4a52c6ac1a50334903c6eaf765362f6ccca88a", size = 391783, upload-time = "2025-10-14T15:05:03.052Z" }, + { url = "https://files.pythonhosted.org/packages/bc/11/fc2502457e0bea39a5c958d86d2cb69e407a4d00b85735ca724bfa6e0d1a/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e0b1fe858430fc0251737ef3824c54027bedb8c37c38114488b8e131cf8219", size = 449279, upload-time = "2025-10-14T15:05:04.004Z" }, + { url = 
"https://files.pythonhosted.org/packages/e3/1f/d66bc15ea0b728df3ed96a539c777acfcad0eb78555ad9efcaa1274688f0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f27db948078f3823a6bb3b465180db8ebecf26dd5dae6f6180bd87383b6b4428", size = 459405, upload-time = "2025-10-14T15:05:04.942Z" }, + { url = "https://files.pythonhosted.org/packages/be/90/9f4a65c0aec3ccf032703e6db02d89a157462fbb2cf20dd415128251cac0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059098c3a429f62fc98e8ec62b982230ef2c8df68c79e826e37b895bc359a9c0", size = 488976, upload-time = "2025-10-14T15:05:05.905Z" }, + { url = "https://files.pythonhosted.org/packages/37/57/ee347af605d867f712be7029bb94c8c071732a4b44792e3176fa3c612d39/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfb5862016acc9b869bb57284e6cb35fdf8e22fe59f7548858e2f971d045f150", size = 595506, upload-time = "2025-10-14T15:05:06.906Z" }, + { url = "https://files.pythonhosted.org/packages/a8/78/cc5ab0b86c122047f75e8fc471c67a04dee395daf847d3e59381996c8707/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:319b27255aacd9923b8a276bb14d21a5f7ff82564c744235fc5eae58d95422ae", size = 474936, upload-time = "2025-10-14T15:05:07.906Z" }, + { url = "https://files.pythonhosted.org/packages/62/da/def65b170a3815af7bd40a3e7010bf6ab53089ef1b75d05dd5385b87cf08/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c755367e51db90e75b19454b680903631d41f9e3607fbd941d296a020c2d752d", size = 456147, upload-time = "2025-10-14T15:05:09.138Z" }, + { url = "https://files.pythonhosted.org/packages/57/99/da6573ba71166e82d288d4df0839128004c67d2778d3b566c138695f5c0b/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c22c776292a23bfc7237a98f791b9ad3144b02116ff10d820829ce62dff46d0b", size = 630007, upload-time = "2025-10-14T15:05:10.117Z" }, + { url = "https://files.pythonhosted.org/packages/a8/51/7439c4dd39511368849eb1e53279cd3454b4a4dbace80bab88feeb83c6b5/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3a476189be23c3686bc2f4321dd501cb329c0a0469e77b7b534ee10129ae6374", size = 622280, upload-time = "2025-10-14T15:05:11.146Z" }, + { url = "https://files.pythonhosted.org/packages/95/9c/8ed97d4bba5db6fdcdb2b298d3898f2dd5c20f6b73aee04eabe56c59677e/watchfiles-1.1.1-cp313-cp313-win32.whl", hash = "sha256:bf0a91bfb5574a2f7fc223cf95eeea79abfefa404bf1ea5e339c0c1560ae99a0", size = 272056, upload-time = "2025-10-14T15:05:12.156Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/c14e28429f744a260d8ceae18bf58c1d5fa56b50d006a7a9f80e1882cb0d/watchfiles-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:52e06553899e11e8074503c8e716d574adeeb7e68913115c4b3653c53f9bae42", size = 288162, upload-time = "2025-10-14T15:05:13.208Z" }, + { url = "https://files.pythonhosted.org/packages/dc/61/fe0e56c40d5cd29523e398d31153218718c5786b5e636d9ae8ae79453d27/watchfiles-1.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac3cc5759570cd02662b15fbcd9d917f7ecd47efe0d6b40474eafd246f91ea18", size = 277909, upload-time = "2025-10-14T15:05:14.49Z" }, + { url = "https://files.pythonhosted.org/packages/79/42/e0a7d749626f1e28c7108a99fb9bf524b501bbbeb9b261ceecde644d5a07/watchfiles-1.1.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:563b116874a9a7ce6f96f87cd0b94f7faf92d08d0021e837796f0a14318ef8da", size = 403389, upload-time = "2025-10-14T15:05:15.777Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/49/08732f90ce0fbbc13913f9f215c689cfc9ced345fb1bcd8829a50007cc8d/watchfiles-1.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3ad9fe1dae4ab4212d8c91e80b832425e24f421703b5a42ef2e4a1e215aff051", size = 389964, upload-time = "2025-10-14T15:05:16.85Z" }, + { url = "https://files.pythonhosted.org/packages/27/0d/7c315d4bd5f2538910491a0393c56bf70d333d51bc5b34bee8e68e8cea19/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce70f96a46b894b36eba678f153f052967a0d06d5b5a19b336ab0dbbd029f73e", size = 448114, upload-time = "2025-10-14T15:05:17.876Z" }, + { url = "https://files.pythonhosted.org/packages/c3/24/9e096de47a4d11bc4df41e9d1e61776393eac4cb6eb11b3e23315b78b2cc/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cb467c999c2eff23a6417e58d75e5828716f42ed8289fe6b77a7e5a91036ca70", size = 460264, upload-time = "2025-10-14T15:05:18.962Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0f/e8dea6375f1d3ba5fcb0b3583e2b493e77379834c74fd5a22d66d85d6540/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:836398932192dae4146c8f6f737d74baeac8b70ce14831a239bdb1ca882fc261", size = 487877, upload-time = "2025-10-14T15:05:20.094Z" }, + { url = "https://files.pythonhosted.org/packages/ac/5b/df24cfc6424a12deb41503b64d42fbea6b8cb357ec62ca84a5a3476f654a/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:743185e7372b7bc7c389e1badcc606931a827112fbbd37f14c537320fca08620", size = 595176, upload-time = "2025-10-14T15:05:21.134Z" }, + { url = "https://files.pythonhosted.org/packages/8f/b5/853b6757f7347de4e9b37e8cc3289283fb983cba1ab4d2d7144694871d9c/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afaeff7696e0ad9f02cbb8f56365ff4686ab205fcf9c4c5b6fdfaaa16549dd04", size = 473577, upload-time = "2025-10-14T15:05:22.306Z" }, + { url = "https://files.pythonhosted.org/packages/e1/f7/0a4467be0a56e80447c8529c9fce5b38eab4f513cb3d9bf82e7392a5696b/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7eb7da0eb23aa2ba036d4f616d46906013a68caf61b7fdbe42fc8b25132e77", size = 455425, upload-time = "2025-10-14T15:05:23.348Z" }, + { url = "https://files.pythonhosted.org/packages/8e/e0/82583485ea00137ddf69bc84a2db88bd92ab4a6e3c405e5fb878ead8d0e7/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:831a62658609f0e5c64178211c942ace999517f5770fe9436be4c2faeba0c0ef", size = 628826, upload-time = "2025-10-14T15:05:24.398Z" }, + { url = "https://files.pythonhosted.org/packages/28/9a/a785356fccf9fae84c0cc90570f11702ae9571036fb25932f1242c82191c/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf", size = 622208, upload-time = "2025-10-14T15:05:25.45Z" }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", 
hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + +[[package]] +name = "yarl" +version = "1.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, + { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, + { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, + { url = "https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, + { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, + { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, + { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, + { url = "https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, + { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 81555, upload-time = "2025-10-06T14:10:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, + { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, + { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, + { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, + { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, + { url = "https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, + { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, + { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, + { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, + { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = 
"sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, + { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zope-event" +version = "6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/33/d3eeac228fc14de76615612ee208be2d8a5b5b0fada36bf9b62d6b40600c/zope_event-6.1.tar.gz", hash = "sha256:6052a3e0cb8565d3d4ef1a3a7809336ac519bc4fe38398cb8d466db09adef4f0", size = 18739, upload-time = "2025-11-07T08:05:49.934Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/b0/956902e5e1302f8c5d124e219c6bf214e2649f92ad5fce85b05c039a04c9/zope_event-6.1-py3-none-any.whl", hash = "sha256:0ca78b6391b694272b23ec1335c0294cc471065ed10f7f606858fc54566c25a0", size = 6414, upload-time = "2025-11-07T08:05:48.874Z" }, +] + +[[package]] +name = "zope-interface" +version = "8.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/c9/5ec8679a04d37c797d343f650c51ad67d178f0001c363e44b6ac5f97a9da/zope_interface-8.1.1.tar.gz", hash = "sha256:51b10e6e8e238d719636a401f44f1e366146912407b58453936b781a19be19ec", size = 254748, upload-time = "2025-11-15T08:32:52.404Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/81/3c3b5386ce4fba4612fd82ffb8a90d76bcfea33ca2b6399f21e94d38484f/zope_interface-8.1.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:84f9be6d959640de9da5d14ac1f6a89148b16da766e88db37ed17e936160b0b1", size = 209046, upload-time = "2025-11-15T08:37:01.473Z" }, + { url = "https://files.pythonhosted.org/packages/4a/e3/32b7cb950c4c4326b3760a8e28e5d6f70ad15f852bfd8f9364b58634f74b/zope_interface-8.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:531fba91dcb97538f70cf4642a19d6574269460274e3f6004bba6fe684449c51", size = 209104, upload-time = "2025-11-15T08:37:02.887Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3d/c4c68e1752a5f5effa2c1f5eaa4fea4399433c9b058fb7000a34bfb1c447/zope_interface-8.1.1-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:fc65f5633d5a9583ee8d88d1f5de6b46cd42c62e47757cfe86be36fb7c8c4c9b", size = 259277, upload-time = "2025-11-15T08:37:04.389Z" }, + { url = "https://files.pythonhosted.org/packages/fd/5b/cf4437b174af7591ee29bbad728f620cab5f47bd6e9c02f87d59f31a0dda/zope_interface-8.1.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:efef80ddec4d7d99618ef71bc93b88859248075ca2e1ae1c78636654d3d55533", size = 264742, upload-time = "2025-11-15T08:37:05.613Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/0e/0cf77356862852d3d3e62db9aadae5419a1a7d89bf963b219745283ab5ca/zope_interface-8.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:49aad83525eca3b4747ef51117d302e891f0042b06f32aa1c7023c62642f962b", size = 264252, upload-time = "2025-11-15T08:37:07.035Z" }, + { url = "https://files.pythonhosted.org/packages/8a/10/2af54aa88b2fa172d12364116cc40d325fedbb1877c3bb031b0da6052855/zope_interface-8.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:71cf329a21f98cb2bd9077340a589e316ac8a415cac900575a32544b3dffcb98", size = 212330, upload-time = "2025-11-15T08:37:08.14Z" }, +] diff --git a/doxyfile b/doxyfile deleted file mode 100644 index d453557e22..0000000000 --- a/doxyfile +++ /dev/null @@ -1,2339 +0,0 @@ -# Doxyfile 1.8.8 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "Python Driver" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is included in -# the documentation. The maximum height of the logo should not exceed 55 pixels -# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo -# to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. 
- -OUTPUT_DIRECTORY = - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = NO - -# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. 
- -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = NO - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a -# new page for each member. If set to NO, the documentation of a member will be -# part of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. 
- -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = "test_assumptions=\par Test Assumptions\n" \ - "note=\par Note\n" \ - "test_category=\par Test Category\n" \ - "jira_ticket=\par JIRA Ticket\n" \ - "expected_result=\par Expected Result\n" \ - "since=\par Since\n" \ - "param=\par Parameters\n" \ - "return=\par Return\n" \ - "expected_errors=\par Expected Errors\n" - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = YES - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. 
-# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. 
file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO only methods in the interface are -# included. -# The default value is: NO. 
- -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO these classes will be included in the various overviews. This option has -# no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. 
- -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the -# todo list. This list is created by putting \todo commands in the -# documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the -# test list. This list is created by putting \test commands in the -# documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. 
- -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES the list -# will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. 
If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO doxygen will only warn about wrong or incomplete parameter -# documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. - -INPUT = ./tests - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. 
If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = *.py - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = @Test - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# <filter> <input-file> -# -# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated.
If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = "python /usr/local/bin/doxypy.py" - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER ) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = YES - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES, then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. 
- -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra stylesheet files is of importance (e.g. the last -# stylesheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. 
In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the stylesheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. 
Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler ( hhc.exe). If non-empty -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated ( -# YES) or that it should be included in the master .chm file ( NO). -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated ( -# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. 
- -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = YES - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using prerendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. 
For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use <access key> + S -# (what the <access key> is depends on the OS and browser, but it is typically -# <CTRL>, <ALT>/